hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72a6cb5cc483a8ad619bf7be7299edfe87310a0 | 592 | py | Python | testing/test_is_infected.py | GalBenZvi/EpidemicSimulation | 7aa551e18ad27e977a73452e708026ea85804a21 | [
"MIT"
] | 1 | 2020-07-15T07:11:55.000Z | 2020-07-15T07:11:55.000Z | testing/test_is_infected.py | Hershkovitz-hub/EpidemicSimulation | 7aa551e18ad27e977a73452e708026ea85804a21 | [
"MIT"
] | 2 | 2021-06-08T22:07:26.000Z | 2021-09-08T02:22:40.000Z | testing/test_is_infected.py | GalBenZvi/EpidemicSimulation | 7aa551e18ad27e977a73452e708026ea85804a21 | [
"MIT"
] | null | null | null | from epidemic_simulation.simulation import SimulationManager
import pytest
@pytest.fixture
def test_data():
    """SimulationManager wired with an aggressive parameter set (huge
    infection radius, near-certain infection probability)."""
    manager = SimulationManager(
        [], {'infection_r': 100, 'infection_p': 0.99, 'sickness_duration': 6}
    )
    return manager
def test_infection_prob_between_0_1(test_data):
    """
    infection_prob outside [0, 1] must make is_infected() raise ValueError.
    """
    # Bug fix: the previous version *returned* True/False.  pytest ignores
    # return values, so the test passed no matter what happened.  Use
    # assertions so an accepted out-of-range probability actually fails.
    for bad_prob in (-0.5, 1.5):
        test_data.infection_prob = bad_prob
        try:
            test_data.is_infected()
        except ValueError:
            continue
        raise AssertionError(
            "is_infected() accepted infection_prob=%r" % bad_prob)
return False | 26.909091 | 96 | 0.682432 | from epidemic_simulation.simulation import SimulationManager
import pytest
@pytest.fixture
def test_data():
test_calc=SimulationManager([],{'infection_r':100,'infection_p':0.99,'sickness_duration':6})
return test_calc
def test_infection_prob_between_0_1(test_data):
try:
test_data.infection_prob=-0.5
test_data.is_infected()
except ValueError:
try:
test_data.infection_prob=1.5
test_data.is_infected()
except ValueError:
return True
return False | true | true |
f72a6dcb2a0ebf8ec3765416a9db9ad3c3e0ae5e | 1,253 | py | Python | src/lm/metrics.py | source-data/soda-roberta | 28f23ae68a1bb17c9844815a7c36d4c590e8c3d0 | [
"MIT"
] | 3 | 2021-04-23T05:06:23.000Z | 2021-09-03T21:10:25.000Z | src/lm/metrics.py | source-data/soda-roberta | 28f23ae68a1bb17c9844815a7c36d4c590e8c3d0 | [
"MIT"
] | 1 | 2021-12-14T15:21:16.000Z | 2021-12-14T15:21:16.000Z | src/lm/metrics.py | source-data/soda-roberta | 28f23ae68a1bb17c9844815a7c36d4c590e8c3d0 | [
"MIT"
] | 1 | 2022-03-29T08:03:25.000Z | 2022-03-29T08:03:25.000Z | from transformers import EvalPrediction
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def compute_metrics(pred: EvalPrediction):
    """Micro-averaged recall restricted to the masked positions.

    Positions whose label is -100 (the ignore index) are dropped; recall is
    computed over the remaining flattened label/prediction pairs.
    """
    masked = pred.label_ids != -100
    y_true = pred.label_ids[masked].flatten()
    y_pred = pred.predictions[masked].flatten()
    _, recall, _, _ = precision_recall_fscore_support(
        y_true=y_true, y_pred=y_pred, average='micro')
    return {'recall': recall}
def self_test():
    """Smoke test for compute_metrics: 3 of the 4 masked positions are
    predicted correctly, so micro-averaged recall must be exactly 0.75."""
    pred = EvalPrediction(
        label_ids=np.array([
            [-100, 1, -100],
            [ 2, -100, -100],
            [-100, -100, 3],
            [-100, -100, 4]
        ]),
        predictions=np.array([
            [-100, 1, -100], # 1 true positive
            [ 2, -100, -100], # 1 true positive
            [ 2, 6, 8], # 1 false positive, irrelevant pos will be ignored
            [ 1, 7, 4] # 1 true positive, irrelevant pos will be ignored
        ])
    )
    m = compute_metrics(pred)
    print(f"recall={m['recall']}")
    assert m['recall'] == 0.75
    print("Looks like it is working!")
# Run the smoke test when executed as a script.
if __name__ == "__main__":
    self_test()
| 31.325 | 99 | 0.581006 | from transformers import EvalPrediction
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def compute_metrics(pred: EvalPrediction):
mask = pred.label_ids != -100
labels = pred.label_ids[mask].flatten()
preds = pred.predictions[mask].flatten()
_, recall, _, _ = precision_recall_fscore_support(y_true=labels, y_pred=preds, average='micro')
return {'recall': recall}
def self_test():
pred = EvalPrediction(
label_ids=np.array([
[-100, 1, -100],
[ 2, -100, -100],
[-100, -100, 3],
[-100, -100, 4]
]),
predictions=np.array([
[-100, 1, -100],
[ 2, -100, -100],
[ 2, 6, 8],
[ 1, 7, 4]
])
)
m = compute_metrics(pred)
print(f"recall={m['recall']}")
assert m['recall'] == 0.75
print("Looks like it is working!")
if __name__ == "__main__":
self_test()
| true | true |
f72a6ec3d3794ebec344131fa4c80510f1e78360 | 2,375 | py | Python | VORDInstance.py | Daviddddl/i3d_pytorch | 595172379bc669a30468119f629180141c2cbae2 | [
"Apache-2.0"
] | 3 | 2019-03-15T14:28:57.000Z | 2020-06-18T04:00:46.000Z | VORDInstance.py | Daviddddl/i3d_pytorch | 595172379bc669a30468119f629180141c2cbae2 | [
"Apache-2.0"
] | 1 | 2020-04-01T22:24:07.000Z | 2020-04-01T22:24:07.000Z | VORDInstance.py | Daviddddl/I3D_pytorch | 595172379bc669a30468119f629180141c2cbae2 | [
"Apache-2.0"
] | null | null | null |
class VORDInstance:
    """One annotated video: its objects, trajectories and relations.

    Expected structures (as used below):
      subject_objects    -- list of dicts with 'tid' and 'category'
      trajectories       -- per-frame lists of dicts carrying a 'tid'
      relation_instances -- dicts with 'subject_tid', 'predicate', 'object_tid'
    """

    def __init__(self, video_id, video_path, frame_count, fps, width, height,
                 subject_objects, trajectories, relation_instances):
        self.video_id = video_id
        self.video_path = video_path
        self.frame_count = frame_count
        self.fps = fps
        self.height = height
        self.width = width
        self.subject_objects = subject_objects
        self.trajectories = trajectories
        self.relation_instances = relation_instances

    def __repr__(self):
        return "VORD Instance: video_id=" + str(self.video_id)

    def include_object(self, object_label):
        """True if any annotated object matches `object_label` (case-insensitive)."""
        target = object_label.lower()
        return any(so['category'].lower() == target
                   for so in self.subject_objects)

    def get_object_trajs(self, object_label):
        """All trajectory entries of objects labelled `object_label`.

        Returns None when the label is not annotated at all.  Bug fix: the
        match is now case-insensitive, consistent with include_object();
        previously e.g. 'dog' passed the include_object() guard but then
        matched nothing when the annotation said 'Dog'.
        """
        if not self.include_object(object_label):
            return None
        target = object_label.lower()
        trajs_list = []
        # Keep the original output order: all frames for the first matching
        # tid, then all frames for the next, etc.
        for each_so in self.subject_objects:
            if each_so['category'].lower() == target:
                obj_tid = each_so['tid']
                for each_traj in self.trajectories:
                    for each_traj_obj in each_traj:
                        if obj_tid == each_traj_obj['tid']:
                            trajs_list.append(each_traj_obj)
        return trajs_list

    def get_object_relations_list(self):
        """Return (object categories, relation predicates) as two lists."""
        objects_list = [so['category'] for so in self.subject_objects]
        relations_list = [rel['predicate'] for rel in self.relation_instances]
        return objects_list, relations_list

    def get_triplet_list(self):
        """(subject_category, predicate, object_category) per relation."""
        category_by_tid = {so['tid']: so['category']
                           for so in self.subject_objects}
        return [
            (category_by_tid[rel['subject_tid']],
             rel['predicate'],
             category_by_tid[rel['object_tid']])
            for rel in self.relation_instances
        ]
| 36.538462 | 77 | 0.586105 |
class VORDInstance:
def __init__(self, video_id, video_path, frame_count, fps, width, height,
subject_objects, trajectories, relation_instances):
self.video_id = video_id
self.video_path = video_path
self.frame_count = frame_count
self.fps = fps
self.height = height
self.width = width
self.subject_objects = subject_objects
self.trajectories = trajectories
self.relation_instances = relation_instances
def __repr__(self):
return "VORD Instance: video_id=" + str(self.video_id)
def include_object(self, object_label):
for each_so in self.subject_objects:
if each_so['category'].lower() == object_label.lower():
return True
return False
def get_object_trajs(self, object_label):
if self.include_object(object_label):
trajs_list = []
for each_so in self.subject_objects:
if object_label == each_so['category']:
obj_tid = each_so['tid']
for each_traj in self.trajectories:
for each_traj_obj in each_traj:
if obj_tid == each_traj_obj['tid']:
trajs_list.append(each_traj_obj)
return trajs_list
else:
return None
def get_object_relations_list(self):
objects_list = []
relations_list = []
for each_so in self.subject_objects:
objects_list.append(each_so['category'])
for each_rel in self.relation_instances:
relations_list.append(each_rel['predicate'])
return objects_list, relations_list
def get_triplet_list(self):
categorys = {}
for each_os in self.subject_objects:
categorys[each_os['tid']] = each_os['category']
triplet_list = []
for each_pred in self.relation_instances:
each_trip = (categorys[each_pred['subject_tid']],
each_pred['predicate'],
categorys[each_pred['object_tid']])
triplet_list.append(each_trip)
return triplet_list
| true | true |
f72a6f1a0b1f6bc7143103cd94ad22fb05c13141 | 334 | py | Python | school/extensions.py | leyyin/university-SE | 7cc3625bda787d2e79ab22f30d6f6e732ca9abb3 | [
"MIT"
] | 3 | 2015-03-12T15:50:58.000Z | 2015-05-04T12:55:19.000Z | school/extensions.py | leyyin/university-SE | 7cc3625bda787d2e79ab22f30d6f6e732ca9abb3 | [
"MIT"
] | 2 | 2015-05-01T18:24:04.000Z | 2015-05-15T15:58:47.000Z | school/extensions.py | leyyin/university-SE | 7cc3625bda787d2e79ab22f30d6f6e732ca9abb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# init all the extensions instances
# NOTE(review): extensions are created unbound here -- presumably attached
# to the app elsewhere via init_app() (Flask application-factory pattern);
# confirm in the app setup code.
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()  # ORM / database handle
from flask.ext.mail import Mail
mail = Mail()  # outgoing e-mail
from flask.ext.login import LoginManager
login_manager = LoginManager()  # session-based user authentication
from flask_debugtoolbar import DebugToolbarExtension
toolbar = DebugToolbarExtension()  # in-browser debug toolbar
| 18.555556 | 52 | 0.778443 |
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from flask.ext.mail import Mail
mail = Mail()
from flask.ext.login import LoginManager
login_manager = LoginManager()
from flask_debugtoolbar import DebugToolbarExtension
toolbar = DebugToolbarExtension()
| true | true |
f72a7184ee9e741944510976bfd4ac5d2d98ed14 | 9,090 | py | Python | tectosaur/fmm/ts_terms.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 17 | 2017-06-29T16:48:56.000Z | 2021-10-03T18:31:41.000Z | tectosaur/fmm/ts_terms.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 4 | 2018-05-29T08:21:13.000Z | 2021-04-01T01:28:50.000Z | tectosaur/fmm/ts_terms.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 8 | 2019-06-10T22:19:40.000Z | 2022-01-12T20:55:37.000Z | from math import factorial
import scipy.special
import numpy as np
def sloppy_spherical(y):
r = np.linalg.norm(y)
costheta = y[2] / r
theta = np.arccos(costheta)
phi = np.arccos(y[0] / r / np.sin(theta))
return r, theta, phi
def Rdirect(n_max, y):
    """Direct evaluation of the regular solid-harmonic basis R_n^m(y) for
    0 <= n <= n_max, -n <= m <= n, via associated Legendre functions.

    Returns (real, imag) arrays of shape (n_max + 1, 2 * n_max + 1); the
    order-m entry of degree n sits at [n, n_max + m].  Presumably the
    reference implementation that the recurrence versions R() /
    R_storagefree() are checked against -- confirm in the tests.
    """
    r, theta, phi = sloppy_spherical(y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    # lpmn()[0][m, n] = P_n^m(cos theta), including the Condon-Shortley
    # phase (-1)**m, which is divided back out below.
    Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
    for i in range(n_max + 1):
        for j in range(-i, i + 1):
            if j < 0:
                # Negative order via the P_n^{-m} symmetry relation.
                lp = (
                    ((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))
                    * Pmn[-j, i] / ((-1) ** -j)
                )
            else:
                lp = Pmn[j, i] / ((-1) ** j)
            # Regular harmonics grow as r**n, scaled by 1/(n+m)!.
            factor = (r ** i) * lp / factorial(i + j)
            real[i, n_max + j] = factor * np.cos(j * phi)
            imag[i, n_max + j] = factor * np.sin(j * phi)
    return real, imag
def Sdirect(n_max, y):
    """Direct evaluation of the singular (decaying, ~1/r**(n+1)) solid
    harmonics S_n^m(y) for 0 <= n <= n_max, -n <= m <= n.

    Same layout as Rdirect: returns (real, imag) of shape
    (n_max + 1, 2 * n_max + 1), order m stored at column n_max + m.
    """
    r, theta, phi = sloppy_spherical(y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    # lpmn()[0][m, n] = P_n^m(cos theta) with the Condon-Shortley phase,
    # divided back out below.
    Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
    for i in range(n_max + 1):
        for j in range(-i, i + 1):
            if j < 0:
                # Negative order via the P_n^{-m} symmetry relation.
                lp = (
                    ((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))
                    * Pmn[-j, i] / ((-1) ** -j)
                )
            else:
                lp = Pmn[j, i] / ((-1) ** j)
            # Singular harmonics: (n-m)! scaling and 1/r**(n+1) decay.
            factor = factorial(i - j) * lp / (r ** (i + 1))
            real[i, n_max + j] = factor * np.cos(j * phi)
            imag[i, n_max + j] = factor * np.sin(j * phi)
    return real, imag
def R(n_max, y):
    """Regular solid harmonics R_n^m by upward recurrence (no special
    functions); same layout as Rdirect: (real, imag) arrays of shape
    (n_max + 1, 2 * n_max + 1), order m at column n_max + m.
    """
    y1, y2, y3 = y
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    real[0, n_max] = 1.0
    # Diagonal recurrence: R_{m+1}^{m+1} from R_m^m via (y1 + i*y2).
    for i in range(0, n_max):
        real[i + 1, n_max + i + 1] = (
            (y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])
            / (2 * (i + 1))
        )
        imag[i + 1, n_max + i + 1] = (
            (y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])
            / (2 * (i + 1))
        )
    t2f = np.linalg.norm(y) ** 2
    # Vertical recurrence in degree n, for each fixed order m = j >= 0.
    for j in range(n_max + 1):
        for i in range(j, n_max):
            factor = 1.0 / ((i + 1) ** 2 - j ** 2)
            t1f = (2 * i + 1) * y3
            real[i + 1, n_max + j] = factor * (t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j])
            imag[i + 1, n_max + j] = factor * (t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j])
    # Negative orders from conjugation symmetry:
    # R_n^{-m} = (-1)^m * conj(R_n^m).
    for i in range(n_max + 1):
        for j in range(1, n_max + 1):
            real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]
            imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]
    return real, imag
def R_storagefree(n_max, y):
    """Same values as R(), computed with O(1) recurrence state per order.

    Instead of reading earlier rows back out of the output arrays, only
    the previous two degrees (Rm1*, Rm2*) and the running diagonal term
    (Rs*) are kept while sweeping.  Returns (real, imag) shaped
    (n_max + 1, 2 * n_max + 1).
    """
    def neg(real, imag, mi):
        # Conjugation symmetry: R_n^{-m} = (-1)^m * conj(R_n^m).
        return (
            ((-1) ** mi) * real,
            ((-1) ** (mi + 1)) * imag
        )
    y1, y2, y3 = y
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    t2f = np.linalg.norm(y) ** 2
    # (Rsr, Rsi): current diagonal value R_m^m; updated at the bottom of
    # each outer-loop iteration.
    Rsr = 1.0
    Rsi = 0.0
    for mi in range(0, n_max + 1):
        real[mi, n_max + mi] = Rsr
        imag[mi, n_max + mi] = Rsi
        real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Rsr, Rsi, mi)
        # Two-term history for the vertical (degree) recurrence.
        Rm2r = 0.0
        Rm2i = 0.0
        Rm1r = Rsr
        Rm1i = Rsi
        for ni in range(mi, n_max):
            factor = 1.0 / ((ni + 1) ** 2 - mi ** 2)
            t1f = (2 * ni + 1) * y3
            Rvr = factor * (t1f * Rm1r - t2f * Rm2r)
            Rvi = factor * (t1f * Rm1i - t2f * Rm2i)
            real[ni + 1, n_max + mi] = Rvr
            imag[ni + 1, n_max + mi] = Rvi
            real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Rvr, Rvi, mi)
            Rm2r = Rm1r
            Rm2i = Rm1i
            Rm1r = Rvr
            Rm1i = Rvi
        # Diagonal step: R_{m+1}^{m+1} from R_m^m.
        Rsrold = Rsr
        Rsiold = Rsi
        Rsr = (y1 * Rsrold - y2 * Rsiold) / (2 * (mi + 1))
        Rsi = (y1 * Rsiold + y2 * Rsrold) / (2 * (mi + 1))
    return real, imag
def Rderivs(n_max, y, d):
    """Cartesian derivative dR_n^m/dy_d of the regular solid harmonics.

    d selects the axis (0 -> x, 1 -> y, 2 -> z).  Uses the shift
    identities that express the derivative of R_n^m through
    R_{n-1}^{m +/- 1} (x, y) or R_{n-1}^m (z); R is evaluated one degree
    higher so the shifted indices exist.
    """
    Rvr, Rvi = R(n_max + 1, y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    if d == 0:
        # NOTE(review): this branch loops over range(n_max) while the
        # d == 1 and d == 2 branches use range(n_max + 1), leaving the
        # top-degree row zero for x-derivatives -- looks like an
        # off-by-one; confirm against the other branches.
        for i in range(n_max):
            for j in range(-i, i + 1):
                real[i, n_max + j] = 0.5 * (
                    Rvr[i - 1, (n_max + 1) + j - 1]
                    - Rvr[i - 1, (n_max + 1) + j + 1]
                )
                imag[i, n_max + j] = 0.5 * (
                    Rvi[i - 1, (n_max + 1) + j - 1]
                    - Rvi[i - 1, (n_max + 1) + j + 1]
                )
    elif d == 1:
        for i in range(n_max + 1):
            for j in range(-i, i + 1):
                real[i, n_max + j] = -0.5 * (
                    Rvi[i - 1, (n_max + 1) + j - 1]
                    + Rvi[i - 1, (n_max + 1) + j + 1]
                )
                imag[i, n_max + j] = 0.5 * (
                    Rvr[i - 1, (n_max + 1) + j - 1]
                    + Rvr[i - 1, (n_max + 1) + j + 1]
                )
    else:
        # z-derivative: a pure degree shift, no order mixing.
        for i in range(n_max + 1):
            for j in range(-i, i + 1):
                real[i, n_max + j] = Rvr[i - 1, (n_max + 1) + j]
                imag[i, n_max + j] = Rvi[i - 1, (n_max + 1) + j]
    return real, imag
def S(n_max, y):
    """Singular solid harmonics S_n^m by upward recurrence, matching
    Sdirect.  Returns (real, imag) of shape (n_max + 1, 2 * n_max + 1)
    with order m stored at column n_max + m.
    """
    y1, y2, y3 = y
    ynorm = np.linalg.norm(y)
    ynorm2 = ynorm ** 2
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    # S_0^0 = 1/r (singular at the origin).
    real[0, n_max] = 1.0 / ynorm
    # Diagonal recurrence: S_{m+1}^{m+1} from S_m^m.
    for i in range(0, n_max):
        factor = (2 * i + 1) / ynorm2
        real[i + 1, n_max + i + 1] = factor * (
            (y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])
        )
        imag[i + 1, n_max + i + 1] = factor * (
            (y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])
        )
    # Vertical recurrence in degree n for each fixed order m = j >= 0.
    for j in range(n_max + 1):
        for i in range(j, n_max):
            factor = 1.0 / ynorm2
            t1f = (2 * i + 1) * y3
            t2f = i ** 2 - j ** 2
            real[i + 1, n_max + j] = factor * (
                t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j]
            )
            imag[i + 1, n_max + j] = factor * (
                t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j]
            )
    # Negative orders from conjugation symmetry:
    # S_n^{-m} = (-1)^m * conj(S_n^m).
    for i in range(n_max + 1):
        for j in range(1, n_max + 1):
            real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]
            imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]
    return real, imag
def S_storagefree(n_max, y):
    """Same values as S(), computed with O(1) recurrence state per order
    (analogue of R_storagefree for the singular harmonics).

    Keeps only the previous two degrees (Sm1*, Sm2*) and the running
    diagonal term (Ss*) while sweeping.  Returns (real, imag) shaped
    (n_max + 1, 2 * n_max + 1).
    """
    def neg(real, imag, mi):
        # Conjugation symmetry: S_n^{-m} = (-1)^m * conj(S_n^m).
        return (
            ((-1) ** mi) * real,
            ((-1) ** (mi + 1)) * imag
        )
    y1, y2, y3 = y
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    ynorm = np.linalg.norm(y)
    ynorm2 = ynorm ** 2
    # (Ssr, Ssi): current diagonal value S_m^m, starting from S_0^0 = 1/r.
    Ssr = 1.0 / ynorm
    Ssi = 0.0
    for mi in range(0, n_max + 1):
        real[mi, n_max + mi] = Ssr
        imag[mi, n_max + mi] = Ssi
        real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Ssr, Ssi, mi)
        # Two-term history for the vertical (degree) recurrence.
        Sm2r = 0.0
        Sm2i = 0.0
        Sm1r = Ssr
        Sm1i = Ssi
        for ni in range(mi, n_max):
            factor = 1.0 / ynorm2
            t1f = (2 * ni + 1) * y3
            t2f = ni ** 2 - mi ** 2
            Svr = factor * (t1f * Sm1r - t2f * Sm2r)
            Svi = factor * (t1f * Sm1i - t2f * Sm2i)
            real[ni + 1, n_max + mi] = Svr
            imag[ni + 1, n_max + mi] = Svi
            real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Svr, Svi, mi)
            Sm2r = Sm1r
            Sm2i = Sm1i
            Sm1r = Svr
            Sm1i = Svi
        # Diagonal step: S_{m+1}^{m+1} from S_m^m.
        Ssrold = Ssr
        Ssiold = Ssi
        factor = (2 * mi + 1) / ynorm2
        Ssr = factor * (y1 * Ssrold - y2 * Ssiold)
        Ssi = factor * (y1 * Ssiold + y2 * Ssrold)
    return real, imag
def Sderivs(n_max, y, d):
    """Cartesian derivative dS_n^m/dy_d of the singular solid harmonics.

    d selects the axis (0 -> x, 1 -> y, 2 -> z).  Unlike Rderivs, the
    shift identities go *up* one degree (indices i + 1) and the
    z-derivative carries a minus sign.
    """
    Svr, Svi = S(n_max + 1, y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    if d == 0:
        for i in range(n_max + 1):
            for j in range(-i, i + 1):
                real[i, n_max + j] = 0.5 * (
                    Svr[i + 1, (n_max + 1) + j - 1]
                    - Svr[i + 1, (n_max + 1) + j + 1]
                )
                imag[i, n_max + j] = 0.5 * (
                    Svi[i + 1, (n_max + 1) + j - 1]
                    - Svi[i + 1, (n_max + 1) + j + 1]
                )
    elif d == 1:
        for i in range(n_max + 1):
            for j in range(-i, i + 1):
                real[i, n_max + j] = -0.5 * (
                    Svi[i + 1, (n_max + 1) + j - 1]
                    + Svi[i + 1, (n_max + 1) + j + 1]
                )
                imag[i, n_max + j] = 0.5 * (
                    Svr[i + 1, (n_max + 1) + j - 1]
                    + Svr[i + 1, (n_max + 1) + j + 1]
                )
    else:
        # z-derivative: pure degree shift with a sign flip.
        for i in range(n_max + 1):
            for j in range(-i, i + 1):
                real[i, n_max + j] = -Svr[i + 1, (n_max + 1) + j]
                imag[i, n_max + j] = -Svi[i + 1, (n_max + 1) + j]
    return real, imag
| 34.431818 | 103 | 0.394719 | from math import factorial
import scipy.special
import numpy as np
def sloppy_spherical(y):
r = np.linalg.norm(y)
costheta = y[2] / r
theta = np.arccos(costheta)
phi = np.arccos(y[0] / r / np.sin(theta))
return r, theta, phi
def Rdirect(n_max, y):
r, theta, phi = sloppy_spherical(y)
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
for i in range(n_max + 1):
for j in range(-i, i + 1):
if j < 0:
lp = (
((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))
* Pmn[-j, i] / ((-1) ** -j)
)
else:
lp = Pmn[j, i] / ((-1) ** j)
factor = (r ** i) * lp / factorial(i + j)
real[i, n_max + j] = factor * np.cos(j * phi)
imag[i, n_max + j] = factor * np.sin(j * phi)
return real, imag
def Sdirect(n_max, y):
r, theta, phi = sloppy_spherical(y)
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
for i in range(n_max + 1):
for j in range(-i, i + 1):
if j < 0:
lp = (
((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))
* Pmn[-j, i] / ((-1) ** -j)
)
else:
lp = Pmn[j, i] / ((-1) ** j)
factor = factorial(i - j) * lp / (r ** (i + 1))
real[i, n_max + j] = factor * np.cos(j * phi)
imag[i, n_max + j] = factor * np.sin(j * phi)
return real, imag
def R(n_max, y):
y1, y2, y3 = y
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
real[0, n_max] = 1.0
for i in range(0, n_max):
real[i + 1, n_max + i + 1] = (
(y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])
/ (2 * (i + 1))
)
imag[i + 1, n_max + i + 1] = (
(y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])
/ (2 * (i + 1))
)
t2f = np.linalg.norm(y) ** 2
for j in range(n_max + 1):
for i in range(j, n_max):
factor = 1.0 / ((i + 1) ** 2 - j ** 2)
t1f = (2 * i + 1) * y3
real[i + 1, n_max + j] = factor * (t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j])
imag[i + 1, n_max + j] = factor * (t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j])
for i in range(n_max + 1):
for j in range(1, n_max + 1):
real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]
imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]
return real, imag
def R_storagefree(n_max, y):
def neg(real, imag, mi):
return (
((-1) ** mi) * real,
((-1) ** (mi + 1)) * imag
)
y1, y2, y3 = y
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
t2f = np.linalg.norm(y) ** 2
Rsr = 1.0
Rsi = 0.0
for mi in range(0, n_max + 1):
real[mi, n_max + mi] = Rsr
imag[mi, n_max + mi] = Rsi
real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Rsr, Rsi, mi)
Rm2r = 0.0
Rm2i = 0.0
Rm1r = Rsr
Rm1i = Rsi
for ni in range(mi, n_max):
factor = 1.0 / ((ni + 1) ** 2 - mi ** 2)
t1f = (2 * ni + 1) * y3
Rvr = factor * (t1f * Rm1r - t2f * Rm2r)
Rvi = factor * (t1f * Rm1i - t2f * Rm2i)
real[ni + 1, n_max + mi] = Rvr
imag[ni + 1, n_max + mi] = Rvi
real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Rvr, Rvi, mi)
Rm2r = Rm1r
Rm2i = Rm1i
Rm1r = Rvr
Rm1i = Rvi
Rsrold = Rsr
Rsiold = Rsi
Rsr = (y1 * Rsrold - y2 * Rsiold) / (2 * (mi + 1))
Rsi = (y1 * Rsiold + y2 * Rsrold) / (2 * (mi + 1))
return real, imag
def Rderivs(n_max, y, d):
Rvr, Rvi = R(n_max + 1, y)
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
if d == 0:
for i in range(n_max):
for j in range(-i, i + 1):
real[i, n_max + j] = 0.5 * (
Rvr[i - 1, (n_max + 1) + j - 1]
- Rvr[i - 1, (n_max + 1) + j + 1]
)
imag[i, n_max + j] = 0.5 * (
Rvi[i - 1, (n_max + 1) + j - 1]
- Rvi[i - 1, (n_max + 1) + j + 1]
)
elif d == 1:
for i in range(n_max + 1):
for j in range(-i, i + 1):
real[i, n_max + j] = -0.5 * (
Rvi[i - 1, (n_max + 1) + j - 1]
+ Rvi[i - 1, (n_max + 1) + j + 1]
)
imag[i, n_max + j] = 0.5 * (
Rvr[i - 1, (n_max + 1) + j - 1]
+ Rvr[i - 1, (n_max + 1) + j + 1]
)
else:
for i in range(n_max + 1):
for j in range(-i, i + 1):
real[i, n_max + j] = Rvr[i - 1, (n_max + 1) + j]
imag[i, n_max + j] = Rvi[i - 1, (n_max + 1) + j]
return real, imag
def S(n_max, y):
y1, y2, y3 = y
ynorm = np.linalg.norm(y)
ynorm2 = ynorm ** 2
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
real[0, n_max] = 1.0 / ynorm
for i in range(0, n_max):
factor = (2 * i + 1) / ynorm2
real[i + 1, n_max + i + 1] = factor * (
(y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])
)
imag[i + 1, n_max + i + 1] = factor * (
(y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])
)
for j in range(n_max + 1):
for i in range(j, n_max):
factor = 1.0 / ynorm2
t1f = (2 * i + 1) * y3
t2f = i ** 2 - j ** 2
real[i + 1, n_max + j] = factor * (
t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j]
)
imag[i + 1, n_max + j] = factor * (
t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j]
)
for i in range(n_max + 1):
for j in range(1, n_max + 1):
real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]
imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]
return real, imag
def S_storagefree(n_max, y):
def neg(real, imag, mi):
return (
((-1) ** mi) * real,
((-1) ** (mi + 1)) * imag
)
y1, y2, y3 = y
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
ynorm = np.linalg.norm(y)
ynorm2 = ynorm ** 2
Ssr = 1.0 / ynorm
Ssi = 0.0
for mi in range(0, n_max + 1):
real[mi, n_max + mi] = Ssr
imag[mi, n_max + mi] = Ssi
real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Ssr, Ssi, mi)
Sm2r = 0.0
Sm2i = 0.0
Sm1r = Ssr
Sm1i = Ssi
for ni in range(mi, n_max):
factor = 1.0 / ynorm2
t1f = (2 * ni + 1) * y3
t2f = ni ** 2 - mi ** 2
Svr = factor * (t1f * Sm1r - t2f * Sm2r)
Svi = factor * (t1f * Sm1i - t2f * Sm2i)
real[ni + 1, n_max + mi] = Svr
imag[ni + 1, n_max + mi] = Svi
real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Svr, Svi, mi)
Sm2r = Sm1r
Sm2i = Sm1i
Sm1r = Svr
Sm1i = Svi
Ssrold = Ssr
Ssiold = Ssi
factor = (2 * mi + 1) / ynorm2
Ssr = factor * (y1 * Ssrold - y2 * Ssiold)
Ssi = factor * (y1 * Ssiold + y2 * Ssrold)
return real, imag
def Sderivs(n_max, y, d):
Svr, Svi = S(n_max + 1, y)
real = np.zeros((n_max + 1, 2 * n_max + 1))
imag = np.zeros((n_max + 1, 2 * n_max + 1))
if d == 0:
for i in range(n_max + 1):
for j in range(-i, i + 1):
real[i, n_max + j] = 0.5 * (
Svr[i + 1, (n_max + 1) + j - 1]
- Svr[i + 1, (n_max + 1) + j + 1]
)
imag[i, n_max + j] = 0.5 * (
Svi[i + 1, (n_max + 1) + j - 1]
- Svi[i + 1, (n_max + 1) + j + 1]
)
elif d == 1:
for i in range(n_max + 1):
for j in range(-i, i + 1):
real[i, n_max + j] = -0.5 * (
Svi[i + 1, (n_max + 1) + j - 1]
+ Svi[i + 1, (n_max + 1) + j + 1]
)
imag[i, n_max + j] = 0.5 * (
Svr[i + 1, (n_max + 1) + j - 1]
+ Svr[i + 1, (n_max + 1) + j + 1]
)
else:
for i in range(n_max + 1):
for j in range(-i, i + 1):
real[i, n_max + j] = -Svr[i + 1, (n_max + 1) + j]
imag[i, n_max + j] = -Svi[i + 1, (n_max + 1) + j]
return real, imag
| true | true |
f72a71fdb4445df86e6a963308c53723cb7372ed | 43 | py | Python | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | [
"MIT"
] | null | null | null | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | [
"MIT"
] | null | null | null | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | [
"MIT"
] | null | null | null | # import pandas as pd
# import numpy as np
| 14.333333 | 21 | 0.72093 | true | true | |
f72a72d605a3a44af713856c6084a9b2d5e7bef2 | 6,046 | py | Python | src/attacks/base.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | src/attacks/base.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | src/attacks/base.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | import torch
class Attack(object):
    r"""
    Base class for all attacks.

    .. note::
        It automatically sets the device to the device where the given
        model lives.  It temporarily switches the model to eval mode
        (via `.eval()`) only during an attack, restoring the original
        training mode afterwards.
    """

    def __init__(self, name, model):
        r"""
        Initializes internal attack state.

        Arguments:
            name (str) : name of an attack.
            model (torch.nn.Module): model to attack.
        """
        self.attack = name
        self.model = model
        # "Linear(in_features=...)" -> "Linear"
        self.model_name = str(model).split("(")[0]
        # Remember the original train/eval mode so __call__ can restore it.
        self.training = model.training
        self.device = next(model.parameters()).device

        # +1 for untargeted attacks, -1 when a targeted label is used.
        self._targeted = 1
        self._attack_mode = "original"
        self._return_type = "float"

    def forward(self, *input):
        r"""
        It defines the computation performed at every call.
        Should be overridden by all subclasses.
        """
        raise NotImplementedError

    def set_attack_mode(self, mode):
        r"""
        Set the attack mode.

        Arguments:
            mode (str) : 'original' (DEFAULT)
                         'targeted' - Use input labels as targeted labels.
                         'least_likely' - Use least likely labels as targeted labels.
        """
        # Bug fix: this previously used `is "only_original"`, which tests
        # object identity rather than string equality and only works by
        # accident of CPython string interning.  Use `==`.
        if self._attack_mode == "only_original":
            raise ValueError(
                "Changing attack mode is not supported in this attack method."
            )

        if mode == "original":
            self._attack_mode = "original"
            self._targeted = 1
            self._transform_label = self._get_label
        elif mode == "targeted":
            self._attack_mode = "targeted"
            self._targeted = -1
            self._transform_label = self._get_label
        elif mode == "least_likely":
            self._attack_mode = "least_likely"
            self._targeted = -1
            self._transform_label = self._get_least_likely_label
        else:
            raise ValueError(
                mode
                + " is not a valid mode. [Options : original, targeted, least_likely]"
            )

    def set_return_type(self, type):
        r"""
        Set the return type of adversarial images: `int` or `float`.

        Arguments:
            type (str) : 'float' or 'int'. (DEFAULT : 'float')
        """
        # NOTE: the parameter name shadows the builtin `type`; kept as-is
        # for backward compatibility with keyword callers.
        if type == "float":
            self._return_type = "float"
        elif type == "int":
            self._return_type = "int"
        else:
            raise ValueError(type + " is not a valid type. [Options : float, int]")

    def save(self, save_path, data_loader, verbose=True):
        r"""
        Save adversarial images as torch.tensor from given torch.utils.data.DataLoader.

        Arguments:
            save_path (str) : save_path.
            data_loader (torch.utils.data.DataLoader) : data loader.
            verbose (bool) : True for displaying detailed information. (DEFAULT : True)
        """
        self.model.eval()

        image_list = []
        label_list = []

        correct = 0
        total = 0

        total_batch = len(data_loader)

        for step, (images, labels) in enumerate(data_loader):
            adv_images = self.__call__(images, labels)

            image_list.append(adv_images.cpu())
            label_list.append(labels.cpu())

            if self._return_type == "int":
                # __call__ returned uint8 in [0, 255]; rescale for the model.
                adv_images = adv_images.float() / 255

            if verbose:
                outputs = self.model(adv_images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels.to(self.device)).sum()

                acc = 100 * float(correct) / total
                print(
                    "- Save Progress : %2.2f %% / Accuracy : %2.2f %%"
                    % ((step + 1) / total_batch * 100, acc),
                    end="\r",
                )

        x = torch.cat(image_list, 0)
        y = torch.cat(label_list, 0)
        torch.save((x, y), save_path)
        print("\n- Save Complete!")

        self._switch_model()

    def _transform_label(self, images, labels):
        r"""
        Function for changing the attack mode.
        (Rebound per-instance by set_attack_mode.)
        """
        return labels

    def _get_label(self, images, labels):
        r"""
        Function for changing the attack mode.
        Return input labels.
        """
        return labels

    def _get_least_likely_label(self, images, labels):
        r"""
        Function for changing the attack mode.
        Return least likely labels (argmin of the model outputs).
        """
        outputs = self.model(images)
        _, labels = torch.min(outputs.data, 1)
        labels = labels.detach_()
        return labels

    def _to_uint(self, images):
        r"""
        Function for changing the return type.
        Return images as uint8 in [0, 255].
        """
        return (images * 255).type(torch.uint8)

    def _switch_model(self):
        r"""
        Restore the model to its original training mode.
        """
        if self.training:
            self.model.train()
        else:
            self.model.eval()

    def __str__(self):
        info = self.__dict__.copy()

        # Hide the raw model, the attack name (used as the prefix) and all
        # private attributes from the summary string.
        del_keys = ["model", "attack"]

        for key in info.keys():
            if key[0] == "_":
                del_keys.append(key)

        for key in del_keys:
            del info[key]

        info["attack_mode"] = self._attack_mode
        if info["attack_mode"] == "only_original":
            info["attack_mode"] = "original"

        info["return_type"] = self._return_type

        return (
            self.attack
            + "("
            + ", ".join("{}={}".format(key, val) for key, val in info.items())
            + ")"
        )

    def __call__(self, *input, **kwargs):
        # Run the attack with the model in eval mode, then restore it.
        self.model.eval()
        images = self.forward(*input, **kwargs)
        self._switch_model()

        if self._return_type == "int":
            images = self._to_uint(images)

        return images
| 28.654028 | 87 | 0.531757 | import torch
class Attack(object):
    """
    Base class for adversarial attacks.

    Wraps a target model and keeps per-attack bookkeeping: the attack mode
    ("original", "targeted" or "least_likely"), the return type of generated
    images ("float" in [0, 1] or "int" as uint8 in [0, 255]), and the model's
    original train/eval flag so it can be restored after an attack runs.

    Subclasses implement forward(); callers invoke the attack via __call__,
    which puts the model in eval mode, runs forward(), restores the previous
    mode, and converts the result to the requested return type.
    """

    def __init__(self, name, model):
        self.attack = name
        self.model = model
        # str(model) renders as "ClassName(...)"; keep only the class name.
        self.model_name = str(model).split("(")[0]
        # Remember the model's train/eval flag so _switch_model() can
        # restore it after the attack finishes.
        self.training = model.training
        self.device = next(model.parameters()).device

        # +1 for untargeted attacks, -1 for targeted ones; subclasses use
        # this as a sign on the loss.
        self._targeted = 1
        self._attack_mode = "original"
        self._return_type = "float"

    def forward(self, *input):
        """Generate adversarial examples. Must be overridden by subclasses."""
        raise NotImplementedError

    def set_attack_mode(self, mode):
        """
        Change the attack mode.

        Arguments:
            mode (str): one of "original", "targeted" or "least_likely".

        Raises:
            ValueError: if this attack only supports its original mode, or
                if *mode* is not one of the recognized values.
        """
        # Bug fix: this comparison previously used "is", which tests object
        # identity rather than string equality and is not guaranteed to be
        # true even for equal string values.
        if self._attack_mode == "only_original":
            raise ValueError(
                "Changing attack mode is not supported in this attack method."
            )

        if mode == "original":
            self._attack_mode = "original"
            self._targeted = 1
            self._transform_label = self._get_label
        elif mode == "targeted":
            self._attack_mode = "targeted"
            self._targeted = -1
            self._transform_label = self._get_label
        elif mode == "least_likely":
            self._attack_mode = "least_likely"
            self._targeted = -1
            self._transform_label = self._get_least_likely_label
        else:
            raise ValueError(
                mode
                + " is not a valid mode. [Options : original, targeted, least_likely]"
            )

    def set_return_type(self, type):
        """
        Set the return type of adversarial images.

        Arguments:
            type (str): "float" for float images in [0, 1], or "int" for
                uint8 images in [0, 255].

        Raises:
            ValueError: if *type* is neither "float" nor "int".
        """
        if type == "float":
            self._return_type = "float"
        elif type == "int":
            self._return_type = "int"
        else:
            raise ValueError(type + " is not a valid type. [Options : float, int]")

    def save(self, save_path, data_loader, verbose=True):
        """
        Run the attack over every batch of *data_loader* and save the
        adversarial images together with their labels via torch.save.

        Arguments:
            save_path (str): destination file for the (images, labels) pair.
            data_loader (torch.utils.data.DataLoader): source batches.
            verbose (bool): if True, print progress and the model's
                accuracy on the adversarial images while saving.
        """
        self.model.eval()

        image_list = []
        label_list = []

        correct = 0
        total = 0

        total_batch = len(data_loader)

        for step, (images, labels) in enumerate(data_loader):
            adv_images = self.__call__(images, labels)
            image_list.append(adv_images.cpu())
            label_list.append(labels.cpu())

            # __call__ may have returned uint8 images; rescale back to
            # [0, 1] before feeding them to the model again.
            if self._return_type == "int":
                adv_images = adv_images.float() / 255

            if verbose:
                outputs = self.model(adv_images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels.to(self.device)).sum()

                acc = 100 * float(correct) / total
                print(
                    "- Save Progress : %2.2f %% / Accuracy : %2.2f %%"
                    % ((step + 1) / total_batch * 100, acc),
                    end="\r",
                )

        x = torch.cat(image_list, 0)
        y = torch.cat(label_list, 0)
        torch.save((x, y), save_path)
        print("\n- Save Complete!")

        self._switch_model()

    def _transform_label(self, images, labels):
        """Default label transform (identity); rebound by set_attack_mode()."""
        return labels

    def _get_label(self, images, labels):
        """Return the given labels unchanged."""
        return labels

    def _get_least_likely_label(self, images, labels):
        """Return the model's least-likely class for each image."""
        outputs = self.model(images)
        _, labels = torch.min(outputs.data, 1)
        labels = labels.detach_()
        return labels

    def _to_uint(self, images):
        """Convert float images in [0, 1] to uint8 images in [0, 255]."""
        return (images * 255).type(torch.uint8)

    def _switch_model(self):
        """Restore the model's original train/eval mode."""
        if self.training:
            self.model.train()
        else:
            self.model.eval()

    def __str__(self):
        info = self.__dict__.copy()

        # Hide the raw model/name plus private bookkeeping attributes
        # (leading underscore) from the printed summary.
        del_keys = ["model", "attack"]
        del_keys += [key for key in info.keys() if key[0] == "_"]
        for key in del_keys:
            del info[key]

        info["attack_mode"] = self._attack_mode
        if info["attack_mode"] == "only_original":
            info["attack_mode"] = "original"

        info["return_type"] = self._return_type

        return (
            self.attack
            + "("
            + ", ".join("{}={}".format(key, val) for key, val in info.items())
            + ")"
        )

    def __call__(self, *input, **kwargs):
        """Run the attack: eval mode -> forward() -> restore mode -> convert."""
        self.model.eval()
        images = self.forward(*input, **kwargs)
        self._switch_model()

        if self._return_type == "int":
            images = self._to_uint(images)

        return images
| true | true |
f72a72f956c5cffe74f5d5fac3101a4868adb5bf | 28,213 | py | Python | src/test/isolation2/sql_isolation_testcase.py | fanfuxiaoran/gpdb | 84e73a9eb2d4a7aff8ab66c0ee76e47b51676be6 | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-11-28T08:12:58.000Z | 2020-10-28T04:15:52.000Z | src/test/isolation2/sql_isolation_testcase.py | fanfuxiaoran/gpdb | 84e73a9eb2d4a7aff8ab66c0ee76e47b51676be6 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/test/isolation2/sql_isolation_testcase.py | fanfuxiaoran/gpdb | 84e73a9eb2d4a7aff8ab66c0ee76e47b51676be6 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2004-Present VMware, Inc. or its affiliates.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Return True when *n* can be parsed as a base-10 integer."""
    try:
        int(n)
    except ValueError:
        return False
    return True
def null_notice_receiver(notice):
    """
    Discard a notice message.

    Tests ignore notice messages when analyzing results, so notices
    arriving on the pg.connection are silently dropped.
    """
    pass
class SQLIsolationExecutor(object):
    """
    Drives an isolation test: parses an isolation SQL file line by line and
    routes each command to the child process that owns the corresponding
    database session (keyed by session name and connection mode).
    """
    def __init__(self, dbname=''):
        # Maps (session name, connection mode) -> SQLConnection.
        self.processes = {}
        # The re.S flag makes the "." in the regex match newlines.
        # When matched against a command in process_command(), all
        # lines in the command are matched and sent as SQL query.
        self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USq]*?)\:(.*)", re.S)
        if dbname:
            self.dbname = dbname
        else:
            # Fall back to the standard libpq environment variable.
            self.dbname = os.environ.get('PGDATABASE')

    class SQLConnection(object):
        """
        Parent-side handle for one test session.  The actual database
        connection lives in a forked child process; commands and results
        travel over a multiprocessing Pipe.
        """
        def __init__(self, out_file, name, mode, dbname):
            self.name = name
            self.mode = mode
            self.out_file = out_file
            self.dbname = dbname

            parent_conn, child_conn = multiprocessing.Pipe(True)
            self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
            self.pipe = parent_conn
            # True while a forked (backgrounded) command is still pending.
            self.has_open = False
            self.p.start()

            # Close "our" copy of the child's handle, so that if the child dies,
            # recv() on the pipe will fail.
            child_conn.close();

            self.out_file = out_file

        def session_process(self, pipe):
            """Child-process entry point: run the session's command loop."""
            sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
                self.mode, pipe, self.dbname)
            sp.do()

        def query(self, command):
            """Send *command* to the session and block until the result has
            been received and written to the output file."""
            print(file=self.out_file)
            self.out_file.flush()
            if len(command.strip()) == 0:
                return
            if self.has_open:
                raise Exception("Cannot query command while waiting for results")
            self.pipe.send((command, False))
            r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print(r.rstrip(), file=self.out_file)

        def fork(self, command, blocking):
            """Run *command* in the background.  When *blocking* is True the
            command is expected to block server-side; any output arriving
            within 0.5 seconds is treated as an error."""
            print(" <waiting ...>", file=self.out_file)
            self.pipe.send((command, True))

            if blocking:
                time.sleep(0.5)
                if self.pipe.poll(0):
                    p = self.pipe.recv()
                    raise Exception("Forked command is not blocking; got output: %s" % p.strip())
            self.has_open = True

        def join(self):
            """Wait for a previously forked command to finish and print its
            result to the output file."""
            r = None
            print(" <... completed>", file=self.out_file)
            if self.has_open:
                r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print(r.rstrip(), file=self.out_file)
            self.has_open = False

        def stop(self):
            """Shut the session down cleanly and join the child process."""
            self.pipe.send(("", False))
            self.p.join()
            if self.has_open:
                raise Exception("Should not finish test case while waiting for results")

        def quit(self):
            """Explicitly quit the session (the "q" flag in test files)."""
            print(" ... <quitting>", file=self.out_file)
            self.stop()

        def terminate(self):
            """Hard-kill the child process (used on error/cleanup paths)."""
            self.pipe.close()
            self.p.terminate()

    class SQLSessionProcess(object):
        """Runs inside the forked child: owns one pg connection and executes
        commands received over the pipe until an empty command arrives."""
        def __init__(self, name, mode, pipe, dbname):
            """
            Constructor
            """
            self.name = name
            self.mode = mode
            self.pipe = pipe
            self.dbname = dbname
            if self.mode == "utility":
                (hostname, port) = self.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_role=utility")
            elif self.mode == "standby":
                # Connect to standby even when it's role is recorded
                # as mirror. This is useful for scenarios where a
                # test needs to promote a standby without using
                # gpactivatestandby.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port)
            else:
                self.con = self.connectdb(self.dbname)

        def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
            """Open a pg connection, retrying (up to 1000 times, 0.1s apart)
            while the server reports it is starting up or in recovery."""
            con = None
            retry = 1000
            while retry:
                try:
                    if (given_port is None):
                        con = pg.connect(host= given_host,
                                         opt= given_opt,
                                         dbname= given_dbname)
                    else:
                        con = pg.connect(host= given_host,
                                         port= given_port,
                                         opt= given_opt,
                                         dbname= given_dbname)
                    break
                except Exception as e:
                    if (("the database system is starting up" in str(e) or
                         "the database system is in recovery mode" in str(e)) and
                        retry > 1):
                        retry -= 1
                        time.sleep(0.1)
                    else:
                        raise
            # Notices are irrelevant to result matching; drop them.
            con.set_notice_receiver(null_notice_receiver)
            return con

        def get_hostname_port(self, contentid, role):
            """
            Gets the port number/hostname combination of the
            contentid and role
            """
            query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
                     " content = %s AND role = '%s'") % (contentid, role)
            con = self.connectdb(self.dbname, given_opt="-c gp_role=utility")
            r = con.query(query).getresult()
            con.close()
            if len(r) == 0:
                raise Exception("Invalid content %s" % contentid)
            # A local hostname means we can connect without specifying a host.
            if r[0][0] == socket.gethostname():
                return (None, int(r[0][1]))
            return (r[0][0], int(r[0][1]))

        def printout_result(self, r):
            """
            Print out a pygresql result set (a Query object, after the query
            has been executed), in a format that imitates the default
            formatting of psql. This isn't a perfect imitation: we left-justify
            all the fields and headers, whereas psql centers the header, and
            right-justifies numeric fields. But this is close enough, to make
            gpdiff.pl recognize the result sets as such. (We used to just call
            str(r), and let PyGreSQL do the formatting. But even though
            PyGreSQL's default formatting is close to psql's, it's not close
            enough.)
            """
            widths = []

            # Figure out the widths of each column.
            fields = r.listfields()
            for f in fields:
                widths.append(len(str(f)))

            rset = r.getresult()
            for row in rset:
                colno = 0
                for col in row:
                    if col is None:
                        col = ""
                    widths[colno] = max(widths[colno], len(str(col)))
                    colno = colno + 1

            # Start printing. Header first.
            result = ""

            colno = 0
            for f in fields:
                if colno > 0:
                    result += "|"
                result += " " + f.ljust(widths[colno]) + " "
                colno = colno + 1
            result += "\n"

            # Then the bar ("----+----")
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "+"
                result += "".ljust(widths[colno] + 2, "-")
                colno = colno + 1
            result += "\n"

            # Then the result set itself
            for row in rset:
                colno = 0
                for col in row:
                    if colno > 0:
                        result += "|"
                    # Normalize values the way psql renders them: floats in
                    # "%g" form, booleans as t/f, NULLs as empty strings.
                    if isinstance(col, float):
                        col = format(col, "g")
                    elif isinstance(col, bool):
                        if col:
                            col = 't'
                        else:
                            col = 'f'
                    elif col is None:
                        col = ""
                    result += " " + str(col).ljust(widths[colno]) + " "
                    colno = colno + 1
                result += "\n"

            # Finally, the row count
            if len(rset) == 1:
                result += "(1 row)\n"
            else:
                result += "(" + str(len(rset)) + " rows)\n"
            return result

        def execute_command(self, command):
            """
            Executes a given command
            """
            try:
                r = self.con.query(command)
                if r is not None:
                    if type(r) == str:
                        # INSERT, UPDATE, etc that returns row count but not result set
                        echo_content = command[:-1].partition(" ")[0].upper()
                        return "%s %s" % (echo_content, r)
                    else:
                        # SELECT or similar, print the result set without the command (type pg.Query)
                        return self.printout_result(r)
                else:
                    # CREATE or other DDL without a result set or count
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return echo_content
            except Exception as e:
                # Report errors as the result text rather than crashing the
                # session process, so the test output can diff against them.
                return str(e)

        def do(self):
            """
            Process loop.
            Ends when the command None is received
            """
            (c, wait) = self.pipe.recv()
            while c:
                if wait:
                    # Forked command: small delay so a blocking test has a
                    # chance to detect premature output via pipe.poll().
                    time.sleep(0.1)
                r = self.execute_command(c)
                self.pipe.send(r)
                r = None

                (c, wait) = self.pipe.recv()

    def get_process(self, out_file, name, mode="", dbname=""):
        """
        Gets or creates the process by the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            if not dbname:
                dbname = self.dbname
            self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
        return self.processes[(name, mode)]

    def quit_process(self, out_file, name, mode="", dbname=""):
        """
        Quits a process with the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            raise Exception("Sessions not started cannot be quit")

        self.processes[(name, mode)].quit()
        del self.processes[(name, mode)]

    def get_all_primary_contentids(self, dbname):
        """
        Retrieves all primary content IDs (including the master). Intended for
        use by *U queries.
        """
        if not dbname:
            dbname = self.dbname

        con = pg.connect(dbname=dbname)
        result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
        if len(result) == 0:
            raise Exception("Invalid gp_segment_configuration contents")
        return [int(content[0]) for content in result]

    def process_command(self, command, output_file):
        """
        Processes the given command.
        The command at this point still includes the isolation behavior
        flags, e.g. which session to use.
        """
        process_name = ""
        sql = command
        flag = ""
        con_mode = ""
        dbname = ""
        m = self.command_pattern.match(command)
        if m:
            process_name = m.groups()[0]
            flag = m.groups()[1]
            # A leading "U" or "S" in the flag selects utility/standby mode.
            if flag and flag[0] == "U":
                con_mode = "utility"
            elif flag and flag[0] == "S":
                if len(flag) > 1:
                    flag = flag[1:]
                con_mode = "standby"
            sql = m.groups()[2]
            sql = sql.lstrip()
            # If db_name is specifed , it should be of the following syntax:
            # 1:@db_name <db_name>: <sql>
            if sql.startswith('@db_name'):
                sql_parts = sql.split(':', 2)
                if not len(sql_parts) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not sql_parts[0].startswith('@db_name'):
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not len(sql_parts[0].split()) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                dbname = sql_parts[0].split()[1].strip()
                if not dbname:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                sql = sql_parts[1]
        if not flag:
            if sql.startswith('!'):
                sql = sql[1:]

                # Check for execution mode. E.g.
                # !\retcode path/to/executable --option1 --option2 ...
                #
                # At the moment, we only recognize the \retcode mode, which
                # ignores all program output in the diff (it's still printed)
                # and adds the return code.
                mode = None
                if sql.startswith('\\'):
                    mode, sql = sql.split(None, 1)
                    if mode != '\\retcode':
                        raise Exception('Invalid execution mode: {}'.format(mode))

                cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
                stdout, _ = cmd_output.communicate()
                print(file=output_file)
                if mode == '\\retcode':
                    print('-- start_ignore', file=output_file)
                print(stdout.decode(), file=output_file)
                if mode == '\\retcode':
                    print('-- end_ignore', file=output_file)
                    print('(exited with code {})'.format(cmd_output.returncode), file=output_file)
            else:
                self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "&":
            # Background execution that is expected to block.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == ">":
            # Background execution without the blocking expectation.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
        elif flag == "<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "q":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "U":
            # "*U" fans the query out to the master and every primary.
            if process_name == '*':
                process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
            else:
                process_names = [process_name]

            for name in process_names:
                self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "U&":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == "U<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "Uq":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "S":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        else:
            raise Exception("Invalid isolation flag")

    def process_isolation_file(self, sql_file, output_file):
        """
        Processes the given sql file and writes the output
        to output file
        """
        try:
            command = ""
            newline = False
            for line in sql_file:
                # this logic replicates the python2 behavior of a trailing comma at the end of print
                # i.e. ''' print >>output_file, line.strip(), '''
                print((" " if command and not newline else "") + line.strip(), end="", file=output_file)
                newline = False
                if line[0] == "!":
                    command_part = line # shell commands can use -- for multichar options like --include
                elif re.match(r";.*--", line) or re.match(r"^--", line):
                    command_part = line.partition("--")[0] # remove comment from line
                else:
                    command_part = line
                if command_part == "" or command_part == "\n":
                    print(file=output_file)
                    newline = True
                elif re.match(r".*;\s*$", command_part) or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                    # A terminated statement (or a bare join/quit marker):
                    # dispatch the accumulated command.
                    command += command_part
                    try:
                        self.process_command(command, output_file)
                    except Exception as e:
                        print("FAILED: ", e, file=output_file)
                    command = ""
                else:
                    # Statement continues on the next line; keep accumulating.
                    command += command_part

            for process in list(self.processes.values()):
                process.stop()
        except:
            for process in list(self.processes.values()):
                process.terminate()
            raise
        finally:
            for process in list(self.processes.values()):
                process.terminate()
class SQLIsolationTestCase:
    """
    The isolation test case allows a fine grained control of interleaved
    executing transactions. This is mainly used to test isolation behavior.

    [<#>[flag]:] <sql> | ! <shell scripts or command>
    #: either an integer indicating a unique session, or a content-id if
       followed by U (for utility-mode connections). In 'U' mode, the
       content-id can alternatively be an asterisk '*' to perform a
       utility-mode query on the master and all primaries.
    flag:
        &: expect blocking behavior
        >: running in background without blocking
        <: join an existing session
        q: quit the given session

        U: connect in utility mode to primary contentid from gp_segment_configuration
        U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
        U<: join an existing utility mode session (does not currently support an asterisk target)

    An example is:

    Execute BEGIN in transaction 1
    Execute BEGIN in transaction 2
    Execute INSERT in transaction 2
    Execute SELECT in transaction 1
    Execute COMMIT in transaction 2
    Execute SELECT in transaction 1

    The isolation tests are specified identical to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with
    an tranaction identifier followed by a colon (":").
    The above example would be defined by
    1: BEGIN;
    2: BEGIN;
    2: INSERT INTO a VALUES (1);
    1: SELECT * FROM a;
    2: COMMIT;
    1: SELECT * FROM a;

    Blocking behavior can be tested by forking and joining.
    1: BEGIN;
    2: BEGIN;
    1: DELETE FROM foo WHERE a = 4;
    2&: DELETE FROM foo WHERE a = 4;
    1: COMMIT;
    2<:
    2: COMMIT;

    2& forks the command. It is executed in the background. If the
    command is NOT blocking at this point, it is considered an error.
    2< joins the background command and outputs the result of the
    command execution.

    Session ids should be smaller than 1024.

    2U: Executes a utility command connected to port 40000.

    One difference to SQLTestCase is the output of INSERT.
    SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". As the
    SQLIsolationTestCase needs to have a more fine-grained control
    over the execution order than possible with PSQL, it uses
    the pygresql python library instead.

    Connecting to a specific database:
    1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
    2. If you want a specific session to be connected to a specific database , specify the sql as follows:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: <sql>
       2: <sql>
       etc

       Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For eg:, following would error out:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: @db_name testdb: <sql>
       2: <sql>
       etc

    Quitting sessions:
       By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
       in the middle of the test execution, you can specify a flag 'q' with the session identifier. For eg:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: <sql>
       2: <sql>
       1q:
       2: <sql>
       3: <sql>
       2q:
       3: <sql>
       2: @db_name test: <sql>

       1q: ---> Will quit the session established with testdb.
       2q: ---> Will quit the session established with test2db.

       The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.

    Catalog Modification:

    Some tests are easier to write if it's possible to modify a system
    catalog across the *entire* cluster. To perform a utility-mode query on
    all segments and the master, you can use *U commands:

    *U: SET allow_system_table_mods = true;
    *U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;

    Since the number of query results returned by a *U command depends on
    the developer's cluster configuration, it can be useful to wrap them in
    a start_/end_ignore block. (Unfortunately, this also hides legitimate
    failures; a better long-term solution is needed.)

    Block/join flags are not currently supported with *U.

    Line continuation:

    If a line is not ended by a semicolon ';' which is followed by 0 or more spaces, the line will be combined with next line and
    sent together as a single statement.
    e.g.: Send to the server separately:
          1: SELECT * FROM t1;      -> send "SELECT * FROM t1;"
             SELECT * FROM t2;      -> send "SELECT * FROM t2;"
    e.g.: Send to the server once:
          1: SELECT * FROM
             t1; SELECT * FROM t2;  -> "send SELECT * FROM t1; SELECT * FROM t2;"

    ATTENTION:

    Send multi SQL statements once:
    Multi SQL statements can be sent at once, but there are some known issues. Generally only the last query result will be printed.
    But due to the difficulties of dealing with semicolons insides quotes, we always echo the first SQL command instead of the last
    one if query() returns None. This created some strange issues like:
    CREATE TABLE t1 (a INT); INSERT INTO t1 SELECT generate_series(1,1000);
    CREATE 1000 (Should be INSERT 1000, but here the CREATE is taken due to the limitation)
    """

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file , runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
        If an 'init_file' exists in the same location as the sql_file, this will be used
        while doing gpdiff.
        """
        # NOTE(review): self.gucs, self.get_out_dir, self.db_name,
        # self.test_artifacts, self._optimizer_suffix, self._add_gucs_to_sql_file
        # and TINCSystem come from the TINC test-framework base class, which
        # is not visible in this file.

        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()

        # Strip a trailing ".t" suffix from the output filename, if present.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
    # Command-line entry point: read an isolation spec from stdin and
    # stream the formatted results to stdout.
    arg_parser = OptionParser()
    arg_parser.add_option("--dbname", dest="dbname",
                          help="connect to database DBNAME", metavar="DBNAME")
    opts, _ = arg_parser.parse_args()

    SQLIsolationExecutor(dbname=opts.dbname).process_isolation_file(sys.stdin, sys.stdout)
| 41.007267 | 193 | 0.543296 |
import pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
try:
int(n)
return True
except ValueError:
return False
def null_notice_receiver(notice):
return
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, mode, dbname):
self.name = name
self.mode = mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# recv() on the pipe will fail.
child_conn.close();
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.mode, pipe, self.dbname)
sp.do()
def query(self, command):
print(file=self.out_file)
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print(r.rstrip(), file=self.out_file)
def fork(self, command, blocking):
print(" <waiting ...>", file=self.out_file)
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print(" <... completed>", file=self.out_file)
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print(r.rstrip(), file=self.out_file)
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print(" ... <quitting>", file=self.out_file)
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, mode, pipe, dbname):
self.name = name
self.mode = mode
self.pipe = pipe
self.dbname = dbname
if self.mode == "utility":
(hostname, port) = self.get_hostname_port(name, 'p')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_role=utility")
elif self.mode == "standby":
# Connect to standby even when it's role is recorded
(hostname, port) = self.get_hostname_port(name, 'm')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port)
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname)
else:
con = pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname)
break
except Exception as e:
if (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
con.set_notice_receiver(null_notice_receiver)
return con
def get_hostname_port(self, contentid, role):
query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
" content = %s AND role = '%s'") % (contentid, role)
con = self.connectdb(self.dbname, given_opt="-c gp_role=utility")
r = con.query(query).getresult()
con.close()
if len(r) == 0:
raise Exception("Invalid content %s" % contentid)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
widths = []
fields = r.listfields()
for f in fields:
widths.append(len(str(f)))
rset = r.getresult()
for row in rset:
colno = 0
for col in row:
if col is None:
col = ""
widths[colno] = max(widths[colno], len(str(col)))
colno = colno + 1
result = ""
colno = 0
for f in fields:
if colno > 0:
result += "|"
result += " " + f.ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
colno = 0
for f in fields:
if colno > 0:
result += "+"
result += "".ljust(widths[colno] + 2, "-")
colno = colno + 1
result += "\n"
for row in rset:
colno = 0
for col in row:
if colno > 0:
result += "|"
if isinstance(col, float):
col = format(col, "g")
elif isinstance(col, bool):
if col:
col = 't'
else:
col = 'f'
elif col is None:
col = ""
result += " " + str(col).ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
if len(rset) == 1:
result += "(1 row)\n"
else:
result += "(" + str(len(rset)) + " rows)\n"
return result
def execute_command(self, command):
try:
r = self.con.query(command)
if r is not None:
if type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, r)
else:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, mode="", dbname=""):
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and mode != "utility" and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
return self.processes[(name, mode)]
def quit_process(self, out_file, name, mode="", dbname=""):
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and mode != "utility" and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, mode)].quit()
del self.processes[(name, mode)]
def get_all_primary_contentids(self, dbname):
if not dbname:
dbname = self.dbname
con = pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def process_command(self, command, output_file):
process_name = ""
sql = command
flag = ""
con_mode = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
if flag and flag[0] == "U":
con_mode = "utility"
elif flag and flag[0] == "S":
if len(flag) > 1:
flag = flag[1:]
con_mode = "standby"
sql = m.groups()[2]
sql = sql.lstrip()
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print(file=output_file)
if mode == '\\retcode':
print('-- start_ignore', file=output_file)
print(stdout.decode(), file=output_file)
if mode == '\\retcode':
print('-- end_ignore', file=output_file)
print('(exited with code {})'.format(cmd_output.returncode), file=output_file)
else:
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "U":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "S":
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
    """
    Drive one isolation test: read sql_file line by line, accumulate lines
    into complete commands, and hand each finished command to
    self.process_command(), echoing both the input and any failures to
    output_file.

    A command is considered complete when the (comment-stripped) line ends
    with ';', or when the raw line is a bare session control directive such
    as "1q:", "2<:" or "-1U<:" (join/quit flags take no SQL).

    On any error all managed processes are terminated; on clean completion
    they are stopped first and then terminated again in the ``finally``
    block (terminate after stop is presumably idempotent — TODO confirm
    against the process class, which is defined earlier in this file).
    """
    try:
        command = ""
        newline = False
        for line in sql_file:
            # Echo the input line. This logic replicates the python2
            # behavior of a trailing comma at the end of print,
            # i.e. ''' print >>output_file, line.strip(), ''' — lines that
            # belong to the same pending command are joined by a space.
            print((" " if command and not newline else "") + line.strip(), end="", file=output_file)
            newline = False
            if line[0] == "!":
                # Shell command: keep the whole line; shell commands can use
                # -- for multichar options like --include, so no comment strip.
                command_part = line
            elif re.match(r";.*--", line) or re.match(r"^--", line):
                # Trailing or whole-line SQL comment: remove comment from line.
                command_part = line.partition("--")[0]
            else:
                command_part = line
            if command_part == "" or command_part == "\n":
                # Blank line (or line that was entirely a comment): emit the
                # newline the echo above suppressed.
                print(file=output_file)
                newline = True
            elif re.match(r".*;\s*$", command_part) or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                # Command is complete: either ends with ';' or is a bare
                # quit/join/fork directive. Execute it; a failure is reported
                # inline so the remaining commands still run.
                command += command_part
                try:
                    self.process_command(command, output_file)
                except Exception as e:
                    print("FAILED: ", e, file=output_file)
                command = ""
            else:
                # Command continues on the next line.
                command += command_part
        # Normal completion: let every session shut down cleanly.
        for process in list(self.processes.values()):
            process.stop()
    except:
        # Any error (including KeyboardInterrupt — hence the bare except):
        # hard-kill all sessions, then re-raise.
        for process in list(self.processes.values()):
            process.terminate()
        raise
    finally:
        for process in list(self.processes.values()):
            process.terminate()
class SQLIsolationTestCase:
    """
    Base class for SQL isolation tests: prepares a GUC-augmented copy of the
    test's .sql file, runs it through SQLIsolationExecutor, and records both
    the generated .sql and the .out file as test artifacts.
    """

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Run one isolation .sql file and return the path of its output file.

        sql_file  -- path to the source .sql test file.
        out_file  -- output path; derived from sql_file when omitted.
        out_dir   -- directory for the GUC-augmented sql file; defaults to
                     self.get_out_dir() and is created if missing.
        optimizer -- None, or a flag whose suffix (via self._optimizer_suffix,
                     e.g. '_opt'/'_planner') is appended to the derived file
                     names so planner and ORCA runs don't collide.
        """
        # Add gucs to the test sql and form the actual sql file to be run.
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            # FIX: the original passed an inline open(sql_file) whose handle
            # was never closed; use a context manager so the input file is
            # closed deterministically even when the executor raises.
            # NOTE(review): the executor reads the ORIGINAL sql_file, not
            # gucs_sql_file — preserved as-is; confirm this is intentional.
            with open(sql_file) as sql_input:
                executor.process_isolation_file(sql_input, f)
            f.flush()

        # Strip a trailing '.t' (template suffix) from the reported path.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
    # CLI entry point: run an isolation test interactively, reading commands
    # from stdin and writing results to stdout. The target database is
    # selected with --dbname (falls back to the executor's default when
    # omitted, since options.dbname is then None).
    parser = OptionParser()
    parser.add_option("--dbname", dest="dbname",
        help="connect to database DBNAME", metavar="DBNAME")
    (options, args) = parser.parse_args()
    executor = SQLIsolationExecutor(dbname=options.dbname)
    executor.process_isolation_file(sys.stdin, sys.stdout)
| true | true |
f72a7465c1ba7ec4989c9a8c22e6229787aa0733 | 18,818 | py | Python | log_complete/model_89.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_89.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_89.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Auto-generated PySB model of an extrinsic apoptosis signaling network
# (ligand -> receptor -> caspase-8 -> Bid/Bax -> MOMP -> apoptosome ->
# caspase-3, with XIAP/Smac regulation and a C6 -> C8 feedback loop).
# Model() installs a SelfExporter, so every Monomer/Parameter/Rule/... below
# registers itself with the model as a side effect of construction.
Model()

# --- Monomers: molecular species and their named binding sites ---
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-binding sites (BaxA_1/BaxA_2) used below to chain
# monomers into 2-, 3- and 4-mer pore complexes.
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])

# --- Kinetic parameters: every rate constant is 1.0 here, presumably
# placeholder values to be replaced by fitting/inference — TODO confirm ---
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)

# --- Initial copy numbers (used by the Initial declarations at the bottom) ---
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 22250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)

# --- Observables: one per monomer, counting all states of that species ---
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())

# --- Rules ---
# Naming convention: *_0_* is the reversible binding/complexation step ('|'),
# *_1_* is the irreversible catalytic/release step ('>>'). Integer site
# values are bond indices within a rule; '%' joins bound monomers.

# Death-receptor activation: Ligand + Receptor -> Fadd recruitment -> C8 activation.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
# Active C8 truncates Bid (BidU -> BidT).
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
# Apoptosome assembly and XIAP regulation.
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
# Effector caspase output: C3A cleaves PARP.
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# Mitochondrial pathway: BidT translocation, Bax activation and oligomerization.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
# Stepwise pore assembly: BaxA dimer -> trimer -> tetramer (the pore).
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Transport through the tetrameric pore: mitochondrial Smac and cytochrome c
# bind the pore (reversible) and are released to the cytosol (irreversible).
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# C8 also activates C3 directly, and the C3 -> C6 -> C8 feedback loop.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)

# --- Initial conditions: every species starts fully unbound, with the copy
# numbers declared in the *_0 parameters above ---
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.349515 | 710 | 0.806515 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 22250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
f72a758a140f6a7e6dd8dfc6108d52fdad330f2c | 76,634 | py | Python | hp4controller/compilers/p4_hp4.py | MNCHr/Hyper4-Controller | 3da4956d29af805d933937e545cbb33fecc4c082 | [
"MIT"
] | null | null | null | hp4controller/compilers/p4_hp4.py | MNCHr/Hyper4-Controller | 3da4956d29af805d933937e545cbb33fecc4c082 | [
"MIT"
] | null | null | null | hp4controller/compilers/p4_hp4.py | MNCHr/Hyper4-Controller | 3da4956d29af805d933937e545cbb33fecc4c082 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from p4_hlir.main import HLIR
from p4_hlir.hlir.p4_parser import p4_parse_state
import p4_hlir
from p4_hlir.hlir.p4_tables import p4_table
from compiler import HP4Compiler, CodeRepresentation
import argparse
import itertools
import code
from inspect import currentframe, getframeinfo
import sys
import math
from math import ceil
import json
import pkg_resources
# ---- hp4 sizing constants -------------------------------------------------
SEB = 320              # default hp4 bits already extracted (PC_State default)
METADATA_WIDTH = 256   # capacity in bits of meta.data; checked in collect_meta
# ---- tuple-index constants ------------------------------------------------
# The compiler passes parse-state / call / match / primitive information
# around as tuples; the names below are symbolic indexes into those tuples.
PS_RET_TYPE = 0
PS_RET_CRITERIA = 1
PS_RET_BRANCHES = 2
PS_RET_IMM_STATE = 1
PS_CALL_TYPE = 0
PS_CALL_H_INST = 1
OFFSET = 0
WIDTH = 1
BRANCH_VALUES = 0
BRANCH_STATE = 1
VAL_TYPE = 0
VAL_VALUE = 1
MAX_BYTE = 100         # mask-template width in bytes for extracted data masks
T_NAME = 0
L_BOUND = 1
U_BOUND = 2
# bmv2 table-entry priorities are passed as strings
HIGHEST_PRIORITY = '0'
LOWEST_PRIORITY = '2147483646'
VBITS_WIDTH = 80       # bit width of the header-validity (vbits) value
MATCH_TYPE = 1
MATCH_FIELD = 0
PRIM_TYPE = 0
PRIM_SUBTYPE = 1
P4_CALL_PRIMITIVE = 0  # index of the primitive in a (primitive, params) call
P4_CALL_PARAMS = 1     # index of the parameter list in a call tuple
PARAM = 0
PARAM_TYPE = 1
MATCH_OBJECT = 0
# NOTE(review): MATCH_TYPE is re-assigned below with the same value as above,
# and MATCH_OBJECT duplicates MATCH_FIELD -- candidates for cleanup.
MATCH_TYPE = 1
EXT_FIRST_WIDTH = 40 # in bytes
EXT_START_INDEX = 2
parse_select_table_boundaries = [0, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# Maps a P4 primitive name to the hp4 macro token emitted into command
# templates (resolved to a numeric code when the template is expanded).
primitive_ID = {'modify_field': '[MODIFY_FIELD]',
                'add_header': '[ADD_HEADER]',
                'copy_header': '[COPY_HEADER]',
                'remove_header': '[REMOVE_HEADER]',
                'modify_field_with_hash_based_offset': '[MODIFY_FIELD_WITH_HBO]',
                'modify_field_rng_uniform': '[MODIFY_FIELD_RNG_U]',
                'truncate': '[TRUNCATE]',
                'drop': '[DROP]',
                'no_op': '[NO_OP]',
                'push': '[PUSH]',
                'pop': '[POP]',
                'count': '[COUNT]',
                'execute_meter': '[METER]',
                'generate_digest': '[GENERATE_DIGEST]',
                'recirculate': '[RECIRCULATE]',
                'resubmit': '[RESUBMIT]',
                'clone_ingress_pkt_to_egress': '[CLONE_INGRESS_EGRESS]',
                'clone_egress_pkt_to_egress': '[CLONE_EGRESS_EGRESS]',
                'multicast': '[MULTICAST]',
                'add_to_field': '[MATH_ON_FIELD]',
                'bit_xor': '[BIT_XOR]'}
# Short hp4 table-name fragment per primitive; an empty string marks
# primitives that have no dedicated hp4 table (unsupported / no-op here).
primitive_tnames = {'modify_field': 'mod',
                    'add_header': 'addh',
                    'copy_header': '',
                    'remove_header': 'removeh',
                    'modify_field_with_hash_based_offset': '',
                    'modify_field_rng_uniform': 'mod_rng',
                    'truncate' : 'truncate',
                    'drop' : 'drop',
                    'no_op' : '',
                    'push' : '',
                    'pop' : '',
                    'count' : '',
                    'execute_meter': '',
                    'generate_digest': '',
                    'recirculate': '',
                    'resubmit': '',
                    'clone_ingress_pkt_to_egress': '',
                    'clone_egress_pkt_to_egress': '',
                    'multicast': 'multicast',
                    'add_to_field': 'math_on_field',
                    'bit_xor': 'bit_xor'}
# modify_field variants: (destination kind, source kind) -> subtype code.
# Kinds: 'meta' = user metadata, 'ext' = extracted packet bytes,
# 'const' = literal / action parameter, or a standard_metadata field name.
mf_prim_subtype_ID = {('meta', 'ingress_port'): '1',
                      ('meta', 'packet_length'): '2',
                      ('meta', 'egress_spec'): '3',
                      ('meta', 'egress_port'): '4',
                      ('meta', 'egress_instance'): '5',
                      ('meta', 'instance_type'): '6',
                      ('egress_spec', 'meta'): '7',
                      ('meta', 'const'): '8',
                      ('egress_spec', 'const'): '9',
                      ('ext', 'const'): '10',
                      ('egress_spec', 'ingress_port'): '11',
                      ('ext', 'ext'): '12',
                      ('meta', 'ext'): '13',
                      ('ext', 'meta'): '14'}
# modify_field subtype code -> hp4 action implementing that variant
mf_prim_subtype_action = {'1': 'mod_meta_stdmeta_ingressport',
                          '2': 'mod_meta_stdmeta_packetlength',
                          '3': 'mod_meta_stdmeta_egressspec',
                          '4': 'mod_meta_stdmeta_egressport',
                          '5': 'mod_meta_stdmeta_egressinst',
                          '6': 'mod_meta_stdmeta_insttype',
                          '7': 'mod_stdmeta_egressspec_meta',
                          '8': 'mod_meta_const',
                          '9': 'mod_stdmeta_egressspec_const',
                          '10': 'mod_extracted_const',
                          '11': 'mod_stdmeta_egressspec_stdmeta_ingressport',
                          '12': 'mod_extracted_extracted',
                          '13': 'mod_meta_extracted',
                          '14': 'mod_extracted_meta'}
# add_to_field subtypes: positive constant -> add, negative -> subtract
a2f_prim_subtype_ID = {'add': '1', 'sub': '2'}
a2f_prim_subtype_action = {'1': 'a_add2f_extracted_const_u',
                           '2': 'a_subff_extracted_const_u'}
# bit_xor variants: (dst kind, src kind, operand kind) -> subtype code
bx_prim_subtype_ID = {('meta', 'meta', 'const'): '1',
                      ('ext', 'ext', 'const'): '2',
                      ('meta', 'ext', 'const'): '3'}
bx_prim_subtype_action = {'1': 'bit_xor_meta_meta_const',
                          '2': 'bit_xor_extracted_extracted_const',
                          '3': 'bit_xor_meta_extracted_const'}
# Single-variant primitives: primitive name -> its (only) hp4 action
gen_prim_subtype_action = {'add_header': 'a_addh',
                           'copy_header': '',
                           'remove_header': 'a_removeh',
                           'modify_field_with_hash_based_offset': '',
                           'modify_field_rng_uniform': 'mod_extracted_rng',
                           'truncate': 'a_truncate',
                           'drop': 'a_drop',
                           'no_op': '',
                           'push': '',
                           'pop': '',
                           'count': '',
                           'execute_meter': '',
                           'recirculate': '',
                           'resubmit': '',
                           'clone_ingress_pkt_to_egress': '',
                           'clone_egress_pkt_to_egress': '',
                           'multicast': 'a_multicast'}
# NOTE(review): this binds the built-in *type* tuple, apparently as a
# placeholder/type marker; confirm nothing relies on it being an instance.
current_call = tuple
def debug():
    """Print the caller's location, then drop into an interactive console.

    The console's namespace combines the module globals with the
    caller's locals so the caller's state can be inspected in place.
    """
    # predates awareness of the pdb module
    frame = currentframe().f_back
    location = "%s: line %s" % (frame.f_code.co_name,
                                getframeinfo(frame).lineno)
    print(location)
    code.interact(local=dict(globals(), **frame.f_locals))
def unsupported(msg):
    """Report an untranslatable P4 construct and abort compilation.

    Prints *msg* and terminates the process with a non-zero status.
    """
    print(msg)
    # The bare exit() builtin is meant for interactive sessions and exits
    # with status 0, which makes a fatal compile error look like success
    # to the invoking shell/script; sys.exit(1) signals failure properly.
    sys.exit(1)
def convert_to_builtin_type(obj):
    """Flatten *obj* into a plain dict (e.g. for JSON serialization).

    The result records the object's class and module under the reserved
    '__class__' / '__module__' keys, alongside every instance attribute.
    """
    result = {'__class__': obj.__class__.__name__,
              '__module__': obj.__module__}
    for attr_name, attr_value in obj.__dict__.items():
        result[attr_name] = attr_value
    return result
class HP4_Command(object):
    """One HyPer4 runtime command (a bmv2-style 'table_add' line).

    Attributes:
      command: command verb; only 'table_add' can be serialized by __str__
      table: hp4 table name
      action: hp4 action name
      match_params: list of match-parameter strings
      action_params: list of action-parameter strings
    """
    def __init__(self, command='table_add',
                       table='',
                       action='',
                       match_params=None,
                       action_params=None):
        self.command = command
        self.table = table
        self.action = action
        # None sentinels instead of mutable default arguments: the old
        # '=[]' defaults were shared objects, so mutating one command's
        # params silently mutated every other default-constructed command.
        self.match_params = [] if match_params is None else match_params
        self.action_params = [] if action_params is None else action_params
    def __str__(self):
        """Serialize as 'table action :m1 m2...:a1 a2...'.

        Assumes command is 'table_add'; raises otherwise.
        """
        if self.command != 'table_add':
            # Previously this path invoked debug(), which opens an
            # interactive console and hangs any non-interactive run;
            # raising immediately is the correct failure mode.
            raise Exception("Incorrect table command %s, table %s" % (self.command, self.table))
        ret = self.table + ' ' + self.action + ' :'
        ret += ' '.join(self.match_params)
        ret += ':'
        ret += ' '.join(self.action_params)
        return ret
class HP4_Match_Command(HP4_Command):
    """An HP4_Command annotated with its origin in the source program.

    Records which table and action of the original (virtual) P4 program
    produced this hp4 command template.
    """
    def __init__(self, source_table='', source_action='', **kwargs):
        self.source_table = source_table
        self.source_action = source_action
        super(HP4_Match_Command, self).__init__(**kwargs)
class HP4_Primitive_Command(HP4_Command):
    """An HP4_Command produced by translating a single P4 primitive call.

    Besides the base command fields, carries the originating source
    table/action and the id of the source action parameter it depends on.
    """
    def __init__(self, source_table, source_action, command, table, action, mparams, aparams, src_aparam_id):
        super(HP4_Primitive_Command, self).__init__(command, table, action, mparams, aparams)
        self.source_table = source_table
        self.source_action = source_action
        self.src_aparam_id = src_aparam_id
class DAG_Topo_Sorter():
    """Depth-first topological sorter for the P4 table graph.

    Classic three-color DFS: 'unmarked' (white, unvisited), 'tempmarked'
    (grey, on the current DFS path), 'permmarked' (black, finished).
    sort() returns tables with every parent before its children.
    Aborts via unsupported() on egress tables or on a cycle.
    """
    def __init__(self, p4_tables):
        self.unmarked = [p4_tables[key] for key in p4_tables]
        self.tempmarked = []
        self.permmarked = []
        self.L = []
    def visit(self, n):
        if n.control_flow_parent == 'egress':
            unsupported("ERROR: Not yet supported: tables in egress (" + n.name + ")")
        if n in self.tempmarked:
            # a grey node reached again means the graph has a cycle
            unsupported("ERROR: not a DAG")
        if n not in self.unmarked:
            return  # already finished via another path through the DAG
        self.unmarked.remove(n)
        self.tempmarked.append(n)
        for successor in n.next_.values():
            if successor != None:
                self.visit(successor)
        self.permmarked.append(n)
        self.tempmarked.remove(n)
        self.L.insert(0, n)
    def sort(self):
        while self.unmarked:
            self.visit(self.unmarked[0])
        return self.L
class Table_Rep():
    """HyPer4-side representation of one source P4 table.

    Builds the hp4 table name from the pipeline stage, the match type,
    and the match source (standard metadata, user metadata, or the
    extracted packet representation), and maps the combination to the
    hp4 table-type macro token.
    """
    def __init__(self, stage, match_type, source_type, field_name):
        self.stage = stage  # int
        self.match_type = match_type
        self.source_type = source_type
        self.field_name = field_name
        name = 't' + str(self.stage) + '_'
        if source_type == 'standard_metadata':
            name += 'stdmeta_' + field_name + '_'
        elif source_type == 'metadata':
            name += 'metadata_'
        elif source_type == 'extracted':
            name += 'extracted_'
        match_suffixes = {'P4_MATCH_EXACT': 'exact',
                          'P4_MATCH_VALID': 'valid',
                          'P4_MATCH_TERNARY': 'ternary',
                          'MATCHLESS': 'matchless'}
        self.name = name + match_suffixes.get(match_type, '')
    def table_type(self):
        """Return the hp4 table-type token for this (source, match) pair."""
        if self.source_type == 'standard_metadata':
            if self.match_type != 'P4_MATCH_EXACT':
                unsupported("Not supported: standard_metadata with %s match type" \
                            % self.match_type)
            stdmeta_tokens = {'ingress_port': '[STDMETA_INGRESS_PORT_EXACT]',
                              'packet_length': '[STDMETA_PACKET_LENGTH_EXACT]',
                              'instance_type': '[STDMETA_INSTANCE_TYPE_EXACT]',
                              'egress_spec': '[STDMETA_EGRESS_SPEC_EXACT]'}
            if self.field_name not in stdmeta_tokens:
                unsupported("Not supported: standard_metadata field %s" \
                            % self.field_name)
            return stdmeta_tokens[self.field_name]
        elif self.source_type == 'metadata':
            if self.match_type == 'P4_MATCH_EXACT':
                return '[METADATA_EXACT]'
            if self.match_type == 'P4_MATCH_TERNARY':
                return '[METADATA_TERNARY]'
            unsupported("Not supported: metadata with %s match type" \
                        % self.match_type)
        elif self.source_type == 'extracted':
            if self.match_type == 'P4_MATCH_EXACT':
                return '[EXTRACTED_EXACT]'
            if self.match_type == 'P4_MATCH_VALID':
                return '[EXTRACTED_VALID]'
            if self.match_type == 'P4_MATCH_TERNARY':
                return '[EXTRACTED_TERNARY]'
            unsupported("Not supported: extracted with %s match type" \
                        % self.match_type)
        elif self.source_type == '':
            if self.match_type == 'MATCHLESS':
                return '[MATCHLESS]'
            unsupported("Not supported: [no source] with %s match type" \
                        % self.match_type)
        else:
            unsupported("Not supported: source type %s, match type %s" \
                        % (self.source_type, self.match_type))
    def __str__(self):
        return self.name
class Action_Rep():
    """Per-action bookkeeping accumulated while walking the pipeline.

    Attributes:
      stages: set of stage numbers (int) in which the action appears
      tables: {stage (int): hp4 table name (str)}
      next: {table_name (str): (next_stage (int), next_table_code (int))}
      call_sequence: [(primitive type, primitive subtype), ...]
    """
    def __init__(self):
        self.stages = set()
        self.tables = {}
        self.next = {}
        self.call_sequence = []
class PC_State(object):
    """One node of the unrolled parse-call graph.

    Tracks how many bits hp4 and the original P4 program have extracted
    along the path to this parse state, the path itself, and the
    per-path header offsets (which differ between parse paths).
    """
    # Shared id source for all PC_State instances.  Wrapping next() in a
    # staticmethod keeps the old PC_State.newid() interface while working
    # on both Python 2 and 3 ('itertools.count().next' is py2-only).
    _id_counter = itertools.count()
    @staticmethod
    def newid():
        return next(PC_State._id_counter)
    def __init__(self, hp4_bits_extracted=SEB,
                       p4_bits_extracted=0,
                       ps_path=None,
                       pcs_path=None,
                       parse_state=None,
                       entry_table='tset_parse_control',
                       **kwargs):
        self.hp4_bits_extracted = hp4_bits_extracted
        self.p4_bits_extracted = p4_bits_extracted
        # None sentinels: the previous '=[]'-style defaults were single
        # shared lists, so appending to one instance's path mutated the
        # default seen by every later instance.
        self.ps_path = [] if ps_path is None else ps_path
        self.pcs_path = [] if pcs_path is None else pcs_path
        self.pcs_id = PC_State.newid()
        self.parse_state = parse_state
        self.entry_table = entry_table # TODO: Delete if we don't need this
        self.children = []
        self.header_offsets = {}  # header name (str) : hp4 bit offset (int)
        for pcs in self.pcs_path:
            self.header_offsets.update(pcs.header_offsets)
        self.select_criteria = []  # list of (offset, width) tuples, each
                                   # element corresponding to a criteria in the
                                   # select statement, representing the hp4 view
        self.select_values = []  # list of lists: each member a list of values,
                                 # each value corresponding to a criteria in
                                 # select_criteria
    def __str__(self):
        ret = 'ID: ' + str(self.pcs_id) + '; ' + self.parse_state.name + '\n'
        ret += 'hp4_bits_extracted: ' + str(self.hp4_bits_extracted) + '\n'
        ret += 'p4_bits_extracted: ' + str(self.p4_bits_extracted) + '\n'
        ret += 'ps_path: ' + str(self.ps_path) + '\n'
        ret += 'pcs_path: '
        for pcs in self.pcs_path:
            ret += str(pcs.pcs_id) + '(' + pcs.parse_state.name + ') '
        ret += '\n'
        ret += 'children: '
        for child in self.children:
            ret += child.parse_state.name + ' '
        return ret
def collect_meta(headers):
    """Assign a meta.data bit offset to every user-metadata field.

    Standard/intrinsic metadata are skipped, as are non-metadata
    (packet) headers -- packet-header offsets depend on the parse path
    and are computed during parse-tree traversal instead.  Aborts via
    unsupported() if the accumulated fields exceed METADATA_WIDTH bits.

    Returns {'<header>.<field>': bit offset (int)}.
    """
    meta_offsets = {}
    next_offset = 0
    for header in headers.values():
        if header.name == 'standard_metadata':
            continue
        if header.name == 'intrinsic_metadata':
            continue
        if header.metadata != True:
            continue
        for field in header.fields:
            fullname = header.name + '.' + field.name
            meta_offsets[fullname] = next_offset
            next_offset += field.width
            if next_offset > METADATA_WIDTH:
                unsupported("Error: out of metadata memory with %s" % fullname)
    return meta_offsets
def collect_actions(actions):
    """Assign a unique, 1-based ID to every user-defined action.

    Actions with lineno <= 0 are built-ins (not present in the source
    P4 program) and receive no ID.

    Returns {p4_action: id (int)}.
    """
    action_ID = {}
    next_id = 1
    for action in actions:
        if action.lineno <= 0:  # built-in, not from source
            continue
        action_ID[action] = next_id
        next_id += 1
    return action_ID
def get_prim_subtype(p4_call):
    """Return the hp4 subtype code (a string) for one primitive call.

    p4_call: (p4_action, [list of parameters])

    The subtype distinguishes variants of a primitive by where its
    operands live ('meta' = user metadata, 'ext' = extracted packet
    bytes, 'const' = literal or action parameter, or a specific
    standard_metadata field).  Unsupported operand combinations abort
    compilation via unsupported().
    """
    primitive = p4_call[P4_CALL_PRIMITIVE]
    params = p4_call[P4_CALL_PARAMS]
    if (primitive.name == 'drop' or
            primitive.name == 'add_header' or
            primitive.name == 'remove_header' or
            primitive.name == 'modify_field_rng_uniform'):
        # single-variant primitives: subtype is always '0'
        return '0'
    elif primitive.name == 'add_to_field':
        if type(params[0]) is p4_hlir.hlir.p4_headers.p4_field:
            if params[0].instance.metadata == True:
                unsupported("Not supported: metadata (%s) as dst field in \
add_to_field" % params[0].instance.name)
            else:
                if type(params[1]) is int:
                    # a negative constant selects the subtract variant
                    if params[1] < 0:
                        return(a2f_prim_subtype_ID['sub'])
                    else:
                        return(a2f_prim_subtype_ID['add'])
                else:
                    unsupported("ERROR: Not supported: %s type for src field in \
add_to_field" % type(params[1]))
        else:
            unsupported("ERROR: dst field type %s in add_to_field" % type(params[0]))
    elif primitive.name == 'bit_xor':
        first = 0
        second = 0
        third = 0
        if params[0].instance.metadata == True:
            first = 'meta' # user-defined metadata
        else: # parsed representation
            first = 'ext'
        if params[1].instance.metadata == True:
            second = 'meta' # user-defined metadata
        else: # parsed representation
            second = 'ext'
        # NOTE: 'long' below is Python 2-only
        if type(params[2]) in [int, long]:
            third = 'const'
        elif type(params[2]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
            # runtime-supplied action parameter; handled like a constant
            third = 'const'
        else:
            unsupported("ERROR: Unexpected type %s as third param in \
bit_xor call" % type(params[2]))
        return bx_prim_subtype_ID[(first, second, third)]
    elif primitive.name == 'modify_field':
        first = 0
        second = 0
        if params[0].instance.metadata == True:
            if params[0].instance.name == 'standard_metadata':
                if params[0].name == 'egress_spec':
                    first = params[0].name
                else:
                    unsupported("ERROR: Unexpected stdmeta field %s as dst in \
modify_field primitive" % params[0].name)
            elif params[0].instance.name == 'intrinsic_metadata':
                if params[0].name == 'mcast_grp':
                    #first = params[0].name
                    # mcast_grp is routed through the egress_spec variant
                    first = 'egress_spec'
                else:
                    unsupported("ERROR: Unexpected intmeta field %s as dst in \
modify_field primitive" % params[0].name)
            else: # user-defined metadata
                first = 'meta'
        else: # parsed representation
            first = 'ext'
        if type(params[1]) in [int, long]:
            second = 'const'
        elif type(params[1]) is p4_hlir.hlir.p4_headers.p4_field:
            if params[1].instance.metadata == True:
                if params[1].instance.name == 'standard_metadata':
                    second = params[1].name
                else:
                    second = 'meta'
            else:
                second = 'ext'
        elif type(params[1]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
            second = 'const'
        else:
            unsupported("ERROR: Unexpected type %s as src in \
modify_field call" % type(params[1]))
        return mf_prim_subtype_ID[first, second]
def gen_bitmask(fieldwidth, offset, maskwidth):
    """Build an hp4 mask-template string covering one field.

    fieldwidth: field width in bits
    offset: bit offset of the field within the region
    maskwidth: total region width in bytes

    Returns a string of the form '0x<hex bytes>[<N>x00s]' where the hex
    bytes cover the field's bits and N counts the trailing zero bytes
    left for the template expander.
    """
    mask = '0x'
    # '//' keeps this an integer on Python 3 as well; with '/' the byte
    # count becomes a float there and the suffix renders as '[3.0x00s]'.
    bytes_written = offset // 8
    bits_left = fieldwidth
    while bits_left > 0:
        byte = 0
        bit = 0b10000000 >> (offset % 8)
        if bits_left >= 8 - (offset % 8):
            # field fills the remainder of this byte
            for i in range(8 - (offset % 8)):
                byte = byte | bit
                bit = bit >> 1
            bits_left = bits_left - (8 - (offset % 8))
            offset = offset + 8 - (offset % 8)
        else:
            # field ends inside this byte
            for i in range(bits_left):
                byte = byte | bit
                bit = bit >> 1
            bits_left = 0
        # NOTE(review): hex() emits a single digit for byte values < 0x10
        # (e.g. 'f', not '0f'); confirm the template consumer tolerates
        # odd-length hex runs before relying on byte alignment here.
        mask += hex(byte)[2:]
        bytes_written += 1
    mask += '[' + str(maskwidth - bytes_written) + 'x00s]'
    return mask
def gen_addremove_header_bitmask(offset, maskwidth):
    """Build the hp4 mask template used by add_header / remove_header.

    offset: bit offset of the header within the region (byte aligned)
    maskwidth: region width in bytes

    Returns '0x[<N>xFFs]' where N is the number of 0xFF bytes remaining
    after the header's byte offset.
    """
    # '//' keeps the byte count an int under Python 3 ('/' would yield a
    # float there and render e.g. '[2.0xFFs]').
    bytes_written = offset // 8
    return '0x[' + str(maskwidth - bytes_written) + 'xFFs]'
class P4_to_HP4(HP4Compiler):
def walk_ingress_pipeline(self, tables):
    """Populate and return the table_to_trep / action_to_arep maps.

    Topologically sorts the ingress tables, assigns each to a unique
    hp4 stage, and records per-action bookkeeping (stages the action
    appears in, the table per stage, and its primitive call sequence).

    Returns:
      (table_to_trep, action_to_arep):
        table_to_trep: {p4_table: Table_Rep}
        action_to_arep: {p4_action: Action_Rep}
    """
    table_to_trep = {}
    action_to_arep = {}
    stage = 1
    # 1) Do topological sort of tables
    tsorter = DAG_Topo_Sorter(tables)
    tsort = tsorter.sort()
    # 2) assign each one to a unique stage (direct iteration replaces the
    #    old index-based 'for i in range(len(tsort))' loop)
    for curr_table in tsort:
        source_type = ''
        match_type = 'MATCHLESS'
        field_name = ''
        if len(curr_table.match_fields) > 0:
            # only the first match field drives hp4 table selection
            match = curr_table.match_fields[0]
            match_type = match[MATCH_TYPE].value
            field_name = match[MATCH_FIELD].name
            if (match_type == 'P4_MATCH_EXACT' or
                match_type == 'P4_MATCH_TERNARY'):
                # headers_hp4_type[<str>]: 'standard_metadata' | 'metadata' | 'extracted'
                source_type = get_hp4_type(match[MATCH_FIELD].instance)
            elif match_type == 'P4_MATCH_VALID':
                source_type = get_hp4_type(match[MATCH_FIELD])
        table_to_trep[curr_table] = Table_Rep(stage,
                                              match_type,
                                              source_type,
                                              field_name)
        for action in curr_table.actions:
            # dict.has_key() is Python 2-only (removed in Python 3);
            # the 'in' membership test is the equivalent on both.
            if action not in action_to_arep:
                action_to_arep[action] = Action_Rep()
            # NOTE(review): if one action is attached to several tables,
            # its call_sequence is appended once per table -- confirm
            # actions are never shared before relying on call_sequence.
            for call in action.call_sequence:
                prim_type = call[PRIM_TYPE].name
                prim_subtype = get_prim_subtype(call)
                action_to_arep[action].call_sequence.append((prim_type, prim_subtype))
            action_to_arep[action].stages.add(stage)
            action_to_arep[action].tables[stage] = curr_table.name
        stage += 1
    return table_to_trep, action_to_arep
def gen_tX_templates(self, tables):
    """Generate hp4 'table_add' command templates for every tX_* table.

    For each source table (driven by self.table_to_trep) and each of its
    possible actions, emits one HP4_Match_Command template whose
    bracketed tokens ([vdev ID], [val], [match ID], [PRIORITY], ...)
    are filled in later when rules are installed.

    NOTE(review): the 'tables' parameter appears unused here; iteration
    is driven entirely by self.table_to_trep -- confirm with callers.
    """
    command_templates = []
    for table in self.table_to_trep:
        tname = str(self.table_to_trep[table])
        aname = 'init_program_state'
        mparams = ['[vdev ID]']
        if len(table.match_fields) > 1:
            unsupported("Not yet supported: more than 1 match field (table: %s)" % table.name)
        # mparams_list = []
        if len(table.match_fields) == 1:
            if table.match_fields[0][1].value == 'P4_MATCH_VALID':
                # match on the header instance's validity bits
                mp = '[valid]&&&'
                # self.vbits[(level, header_instance)]
                hinst = table.match_fields[0][0]
                for key in self.vbits.keys():
                    if hinst.name == key[1]:
                        mp += format(self.vbits[key], '#x')
                # temp_mparams = list(mparams)
                # temp_mparams.append(mp)
                # mparams_list.append(temp_mparams)
                mparams.append(mp)
            elif ((table.match_fields[0][1].value == 'P4_MATCH_EXACT') or
                  (table.match_fields[0][1].value == 'P4_MATCH_TERNARY')):
                field = table.match_fields[0][0]
                mp = '[val]'
                if field.instance.name != 'standard_metadata':
                    # ternary mask isolating this field's bits; user
                    # metadata lives in a narrower (32-byte) region
                    maskwidth = 100
                    if field.instance.metadata:
                        maskwidth = 32
                    offset = self.field_offsets[str(field)]
                    mp += '&&&' + gen_bitmask(field.width,
                                              offset,
                                              maskwidth)
                elif field.name != 'egress_spec' and field.name != 'ingress_port':
                    mp += '&&&' + hex((1 << field.width) - 1)
                else: # egress_spec... rep'd by virt_egress_spec, which is 8 bits
                    mp += '&&&0xFF'
                mparams.append(mp)
        # need a distinct template entry for every possible action
        for action in table.actions:
            # action_ID
            aparams = [str(self.action_ID[action])]
            # match_ID
            aparams.append('[match ID]')
            # next stage, next_table
            if 'hit' in table.next_:
                next_table_trep = self.table_to_trep[table.next_['hit']]
                aparams.append(str(next_table_trep.stage))
                aparams.append(next_table_trep.table_type())
            elif table.next_[action] == None:
                aparams.append('0')
                aparams.append('[DONE]')
            else:
                next_table_trep = self.table_to_trep[table.next_[action]]
                aparams.append(str(next_table_trep.stage))
                aparams.append(next_table_trep.table_type())
            # primitives: one (type, subtype) pair per call in the action
            idx = 0
            for call in action.call_sequence:
                prim_type = primitive_ID[call[P4_CALL_PRIMITIVE].name]
                prim_subtype = get_prim_subtype(call)
                if not prim_subtype:
                    unsupported("Error: couldn't get the prim_subtype for " + prim_type)
                aparams.append(prim_type)
                aparams.append(prim_subtype)
                idx += 1
            if len(action.call_sequence) == 0:
                aparams.append(primitive_ID['no_op'])
                # subtype
                aparams.append('0')
                idx = 1
            # zeros for remaining type / subtype parameters of init_program_state
            for i in range(idx, self.numprimitives):
                aparams.append('0')
                aparams.append('0')
            # all matches are ternary, requiring priority
            # TODO: except matchless?
            aparams.append('[PRIORITY]')
            command_templates.append(HP4_Match_Command(source_table=table.name,
                                                       source_action=action.name,
                                                       command="table_add",
                                                       table=tname,
                                                       action=aname,
                                                       match_params=mparams,
                                                       action_params=aparams))
    return command_templates
def gen_action_aparams(self, p4_call, call, field_offsets):
aparams = []
primtype = call[PRIM_TYPE]
subtype = call[PRIM_SUBTYPE]
p4_call_params = p4_call[P4_CALL_PARAMS]
if primtype == 'drop':
return aparams
if primtype == 'add_to_field':
if (a2f_prim_subtype_action[subtype] == 'a_add2f_extracted_const_u' or
a2f_prim_subtype_action[subtype] == 'a_subff_extracted_const_u'):
# aparams: leftshift, val
dst_offset = field_offsets[str(p4_call_params[0])]
leftshift = 800 - (dst_offset + p4_call_params[0].width)
if type(p4_call_params[1]) is int:
val = str(p4_call_params[1])
if a2f_prim_subtype_action[subtype] == 'a_subff_extracted_const_u':
val = str(p4_call_params[1]*-1)
else:
val = '[val]'
aparams.append(str(leftshift))
aparams.append(val)
if primtype == 'add_header' or primtype == 'remove_header':
hdr = p4_call_params[0]
offset = self.header_offsets[hdr.name]
sz = hdr.header_type.length
vb = 0
for key in self.vbits:
if hdr.name == key[1]:
vb = self.vbits[key]
break
if vb == 0:
print('Fail: didn\'t find vbits entry for ' + hdr.name)
exit()
mask = gen_addremove_header_bitmask(offset, MAX_BYTE)
if primtype == 'add_header':
# aparams: sz, offset, msk, vbits
aparams.append(str(sz))
aparams.append(mask)
aparams.append('0x%x' % vb)
else: # 'remove_header'
# aparams: sz, msk, vbits
aparams.append(str(sz))
aparams.append(mask)
vbinv = ~vb & (2**VBITS_WIDTH - 1)
aparams.append('0x%x' % vbinv)
if primtype == 'modify_field_rng_uniform':
# aparams: leftshift, emask, lowerbound, upperbound
if type(p4_call_params[1]) in [int, long]:
val1 = str(p4_call_params[1])
else:
val1 = '[val]'
if type(p4_call_params[2]) in [int, long]:
val2 = str(p4_call_params[2])
else:
val2 = '[val]'
fo = field_offsets[str(p4_call_params[0])]
fw = p4_call_params[0].width
maskwidthbits = 800
leftshift = str(maskwidthbits - (fo + fw))
mask = gen_bitmask(p4_call_params[0].width,
field_offsets[str(p4_call_params[0])],
maskwidthbits / 8)
aparams.append(leftshift)
aparams.append(mask)
aparams.append(val1)
aparams.append(val2)
if primtype == 'bit_xor':
# aparams: elshift, ershift, vlshift, dest_mask, src_mask, val
fo_intermediate = field_offsets[str(p4_call_params[1])]
fw_intermediate = p4_call_params[1].width
fo_final = field_offsets[str(p4_call_params[0])]
fw_final = p4_call_params[0].width
if bx_prim_subtype_action[subtype] == 'bit_xor_meta_meta_const':
unsupported("Not yet supported: bit_xor_meta_meta_const")
dest_maskwidthbits = 256
src_maskwidthbits = 256
elif bx_prim_subtype_action[subtype] == 'bit_xor_extracted_extracted_const':
dest_maskwidthbits = 800
src_maskwidthbits = 800
elif bx_prim_subtype_action[subtype] == 'bit_xor_meta_extracted_const':
dest_maskwidthbits = 256
src_maskwidthbits = 800
elshift = 0
ershift = 0
dst_revo = dest_maskwidthbits - (fo_final + fw_final)
src_revo = src_maskwidthbits - (fo_intermediate + fw_intermediate)
if src_revo > dst_revo:
ershift = src_revo - dst_revo
else:
elshift = dst_revo - src_revo
vlshift = str(src_maskwidthbits - (fo_intermediate + fw_intermediate))
dest_mask = gen_bitmask(fw_final, fo_final, dest_maskwidthbits / 8)
src_mask = gen_bitmask(fw_intermediate, fo_intermediate, src_maskwidthbits / 8)
src_mask = src_mask.split('[')[0]
if type(p4_call_params[2]) in [int, long]:
val = str(p4_call_params[2])
else:
val = '[val]'
aparams.append(str(elshift))
aparams.append(str(ershift))
aparams.append(vlshift)
aparams.append(dest_mask)
aparams.append(src_mask)
aparams.append(val)
if primtype == 'modify_field':
instance_name = p4_call_params[0].instance.name
dst_field_name = p4_call_params[0].name
if instance_name == 'intrinsic_metadata':
if dst_field_name == 'mcast_grp':
instance_name = 'standard_metadata'
dst_field_name = 'egress_spec'
else:
unsupported("Not supported: modify_field(" + instance_name + '.' \
+ dst_field_name + ", *)")
if type(p4_call_params[1]) is p4_hlir.hlir.p4_headers.p4_field:
if p4_call_params[1].width > p4_call_params[0].width:
dst = instance_name + '.' + dst_field_name
src = p4_call_params[1].instance.name + '.' + p4_call_params[1].name
print("WARNING: modify_field(%s, %s): %s width (%i) > %s width (%i)" \
% (dst, src, src, p4_call_params[1].width, dst, p4_call_params[0].width))
if mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_ingressport':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_packetlength':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressspec':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressport':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressinst':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_insttype':
unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
elif mf_prim_subtype_action[subtype] == 'mod_stdmeta_egressspec_meta':
# aparams: rightshift, tmask
rshift = 256 - (field_offsets[str(p4_call_params[1])] + p4_call_params[1].width)
mask = 0
if p4_call_params[1].width < p4_call_params[0].width:
mask = hex(int(math.pow(2, p4_call_params[1].width)) - 1)
else:
mask = hex(int(math.pow(2, p4_call_params[0].width)) - 1)
aparams.append(str(rshift))
aparams.append(mask)
elif (mf_prim_subtype_action[subtype] == 'mod_meta_const' or
mf_prim_subtype_action[subtype] == 'mod_extracted_const'):
# aparams: leftshift, mask, val
if type(p4_call_params[1]) in [int, long]:
val = str(p4_call_params[1])
else:
val = '[val]'
fo = field_offsets[str(p4_call_params[0])]
fw = p4_call_params[0].width
maskwidthbits = 800
if mf_prim_subtype_action[subtype] == 'mod_meta_const':
maskwidthbits = 256
leftshift = str(maskwidthbits - (fo + fw))
mask = gen_bitmask(p4_call_params[0].width,
field_offsets[str(p4_call_params[0])],
maskwidthbits / 8)
aparams.append(leftshift)
aparams.append(mask)
aparams.append(val)
elif (mf_prim_subtype_action[subtype] == 'mod_stdmeta_egressspec_const'):
if type(p4_call_params[1]) is int:
aparams.append(str(p4_call_params[1]))
else:
aparams.append('[val]')
elif (mf_prim_subtype_action[subtype] == 'mod_intmeta_mcast_grp_const'):
if type(p4_call_params[1]) is int:
unsupported("Not yet supported: mod_intmeta_mcast_grp_const w/ explicit const")
else:
aparams.append('[MCAST_GRP]')
#elif mf_prim_subtype_action[subtype] == 'mod_stdmeta_egressspec_stdmeta_ingressport':
# return aparams
elif mf_prim_subtype_action[subtype] == 'mod_extracted_extracted':
# aparams:
# - leftshift (how far should src field be shifted to align w/ dst)
# - rightshift (how far should src field be shifted to align w/ dst)
# - msk (bitmask for dest field)
dst_offset = field_offsets[str(p4_call_params[0])]
src_offset = field_offsets[str(p4_call_params[1])]
lshift = 0
rshift = 0
# *_revo = revised offset; right-aligned instead of left-aligned
dst_revo = 800 - (dst_offset + p4_call_params[0].width)
src_revo = 800 - (src_offset + p4_call_params[1].width)
if src_revo > dst_revo:
rshift = src_revo - dst_revo
else:
lshift = dst_revo - src_revo
aparams.append(str(lshift))
aparams.append(str(rshift))
aparams.append(gen_bitmask(p4_call_params[0].width, dst_offset, 100))
elif mf_prim_subtype_action[subtype] == 'mod_meta_extracted':
dst_offset = field_offsets[str(p4_call_params[0])]
src_offset = field_offsets[str(p4_call_params[1])]
lshift = 0
rshift = 0
dstmaskwidthbits = 256
srcmaskwidthbits = 800
# *_revo = revised offset; right-aligned instead of left-aligned
dst_revo = dstmaskwidthbits - (dst_offset + p4_call_params[0].width)
src_revo = srcmaskwidthbits - (src_offset + p4_call_params[1].width)
if src_revo > dst_revo:
rshift = src_revo - dst_revo
else:
lshift = dst_revo - src_revo
dstmask = gen_bitmask(p4_call_params[0].width, dst_offset,
dstmaskwidthbits / 8)
srcmask = dstmask
if p4_call_params[1].width < p4_call_params[0].width:
srcmask = gen_bitmask(p4_call_params[1].width, dst_offset,
dstmaskwidthbits / 8)
aparams.append(str(lshift))
aparams.append(str(rshift))
aparams.append(dstmask)
aparams.append(srcmask)
elif mf_prim_subtype_action[subtype] == 'mod_extracted_meta':
dst_offset = field_offsets[str(p4_call_params[0])]
src_offset = field_offsets[str(p4_call_params[1])]
lshift = 0
rshift = 0
dstmaskwidthbits = 800
srcmaskwidthbits = 256
# *_revo = revised offset; right-aligned instead of left-aligned
dst_revo = dstmaskwidthbits - (dst_offset + p4_call_params[0].width)
src_revo = srcmaskwidthbits - (src_offset + p4_call_params[1].width)
if src_revo > dst_revo:
rshift = src_revo - dst_revo
else:
lshift = dst_revo - src_revo
dstmask = gen_bitmask(p4_call_params[0].width, dst_offset,
dstmaskwidthbits / 8)
srcmask = dstmask
if p4_call_params[1].width < p4_call_params[0].width:
srcmask = gen_bitmask(p4_call_params[1].width, dst_offset,
dstmaskwidthbits / 8)
aparams.append(str(lshift))
aparams.append(str(rshift))
aparams.append(dstmask)
aparams.append(srcmask)
return aparams
  def gen_action_entries(self, action_to_arep, action_ID, field_offsets):
    """Emit table entries loading each native action's primitives into hp4.

    For every (action, stage) pair, walk the action's primitive call
    sequence and emit one table_add per primitive into the stage's
    per-primitive table (t_<prim>_<stage><rank>).  Entries whose action
    parameters depend on the native action's runtime arguments cannot be
    completed at compile time; those become HP4_Primitive_Command
    templates instead of concrete commands.

    action_to_arep -- dict: p4_action -> hp4 action representation
    action_ID -- dict: p4_action -> numeric action identifier
    field_offsets -- dict: 'header.field' -> bit offset

    Returns (commands, command_templates).
    """
    commands = []
    command_templates = []
    for action in action_to_arep:
      for stage in action_to_arep[action].stages:
        table_name = action_to_arep[action].tables[stage]
        for p4_call in action.call_sequence:
          p4_call_params = p4_call[P4_CALL_PARAMS]
          istemplate = False
          # hp4 call_sequence entries parallel the p4 ones by position
          idx = action.call_sequence.index(p4_call)
          call = action_to_arep[action].call_sequence[idx]
          primtype = call[PRIM_TYPE]
          subtype = call[PRIM_SUBTYPE]
          # rank: 1-based position of this primitive within the action
          rank = idx + 1
          tname = 't_' + primitive_tnames[primtype] + '_' + str(stage) + str(rank)
          if primtype == 'modify_field':
            aname = mf_prim_subtype_action[subtype]
          elif primtype == 'add_to_field':
            aname = a2f_prim_subtype_action[subtype]
          elif primtype == 'bit_xor':
            aname = bx_prim_subtype_action[subtype]
          else:
            aname = gen_prim_subtype_action[primtype]
          mparams = ['[vdev ID]']
          if primtype != 'drop':
            if primtype in ['modify_field', 'add_to_field', 'bit_xor']:
              mparams.append( subtype )
            mparams.append(str(action_ID[action]))
            # If the parameter passed to the primitive in the source code is an
            # action parameter reference, the match_ID parameter should be
            # [val]&&&0x7FFFFF because each distinct match could have a different
            # value for the action parameter. Otherwise, we don't care what the
            # match_ID is so use 0&&&0.
            match_ID_param = '0&&&0'
            for param in p4_call_params:
              if type(param) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
                match_ID_param = '[match ID]&&&0x7FFFFF'
                istemplate = True
                break
            mparams.append(match_ID_param)
          aparams = self.gen_action_aparams(p4_call, call, field_offsets)
          if istemplate == True:
            aparams.append('0') # meta_primitive_state.match_ID mparam matters
            # idx records which runtime argument fills the template (-1: none)
            idx = -1
            if type(p4_call_params[-1]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
              idx = p4_call_params[-1].idx
            command_templates.append(HP4_Primitive_Command(table_name,
                                                           action.name,
                                                           "table_add",
                                                           tname,
                                                           aname,
                                                           mparams,
                                                           aparams,
                                                           str(idx)))
          else:
            # meta_primitive_state.match_ID mparam does not matter
            # only append priority if the table involves ternary matching
            # e.g., drop tables do not
            if len(mparams) > 0:
              for param in mparams:
                if '&&&' in param:
                  aparams.append(str(LOWEST_PRIORITY))
                  break
            commands.append(HP4_Command("table_add",
                                        tname,
                                        aname,
                                        mparams,
                                        aparams))
    return commands, command_templates
  def build(self, h):
    """Run the full HLIR -> hp4 translation over *h*.

    Populates self.commands / self.command_templates plus the
    intermediate maps (field_offsets, header_offsets, vbits,
    table_to_trep, action_to_arep).  Requires self.numprimitives to be
    set beforehand (done by compile_to_hp4).
    """
    self.field_offsets = collect_meta(h.p4_header_instances)
    self.action_ID = collect_actions(h.p4_actions.values())
    # reset id counter
    PC_State.newid = itertools.count().next
    pre_pcs = PC_State(parse_state=h.p4_parse_states['start'])
    launch_process_parse_tree_clr(pre_pcs, h)
    self.header_offsets = collect_header_offsets(pre_pcs)
    self.field_offsets.update(collect_field_offsets(self.header_offsets,
                                                    h.p4_header_instances))
    consolidate_parse_tree_clr(pre_pcs, h)
    ingress_pcs_list = collect_ingress_pcs(pre_pcs)
    self.vbits = get_vbits(ingress_pcs_list)
    # do_support_checks guarantees a single ingress entry point
    first_table = h.p4_ingress_ptr.keys()[0]
    ps_entries = gen_parse_select_entries(pre_pcs)
    # post-process output of gen_parse_select_entries:
    # parse_select_00_19, _20_29, and _30_39 use 320b field ext.first
    ps_entries = process_parse_select_entries(ps_entries)
    self.commands = gen_parse_control_entries(pre_pcs) \
                    + ps_entries \
                    + gen_pipeline_config_entries(pre_pcs, first_table,
                                                  ingress_pcs_list, self.vbits)
    self.table_to_trep, self.action_to_arep = self.walk_ingress_pipeline(h.p4_tables)
    self.command_templates = self.gen_tX_templates(h.p4_tables)
    action_commands, action_templates = self.gen_action_entries(self.action_to_arep,
                                                                self.action_ID,
                                                                self.field_offsets)
    self.commands += action_commands
    self.command_templates += action_templates
    self.commands += gen_tmiss_entries(h.p4_tables,
                                       self.table_to_trep,
                                       h.p4_control_flows['ingress'],
                                       self.numprimitives)
    self.commands += gen_t_checksum_entries(h.calculated_fields,
                                            h.p4_field_list_calculations,
                                            self.field_offsets,
                                            self.vbits)
    self.commands += gen_t_resize_pr_entries()
  def compile_to_hp4(self, program_path, out_path, mt_out_path, numprimitives):
    """Compile the P4 program at *program_path* into hp4 table entries.

    Writes concrete entries to *out_path* and sorted match templates
    (JSON) to *mt_out_path*.  *numprimitives* is the number of primitive
    slots per hp4 match stage.  Returns a CodeRepresentation wrapping
    the two output paths.
    """
    self.out_path = out_path
    self.mt_out_path = mt_out_path
    self.numprimitives = numprimitives
    # reset state
    self.action_ID = {}
    self.action_to_arep = {}
    self.command_templates = []
    self.commands = []
    self.field_offsets = {}
    self.header_offsets = {}
    self.table_to_trep = {}
    self.vbits = {}
    h = HLIR(program_path)
    h.add_primitives(json.loads(pkg_resources.resource_string('p4c_bm', 'primitives.json')))
    h.build()
    do_support_checks(h)
    self.build(h)
    self.write_output()
    return CodeRepresentation(out_path, mt_out_path)
def write_output(self):
out = open(self.out_path, 'w')
for command in self.commands:
out.write(str(command) + '\n')
out.close()
out = open(self.mt_out_path, 'w')
def getkey(command):
return (command.table, command.source_action, command.action_params)
sorted_ct = sorted(self.command_templates, key=getkey)
json.dump(sorted_ct, out, default=convert_to_builtin_type, indent=2)
out.close()
def do_support_checks(h):
  """Abort (via unsupported) on HLIR constructs hp4 cannot translate."""
  # HLIR allows multiple ingress entry points even if P4 source cannot
  # obviously express them; reject that case outright.
  entry_points = h.p4_ingress_ptr.keys()
  if len(entry_points) > 1:
    unsupported("Not supported: multiple entry points into the ingress pipeline")
  # hp4 dispatches on at most one match field per table.
  for tbl in h.p4_tables.values():
    if len(tbl.match_fields) > 1:
      unsupported("Not supported: multiple field matches (table: %s)" % tbl.name)
def get_parse_select_table_code(first_byte):
  """Return the '[PARSE_SELECT_lo_hi]' macro naming the parse_select
  table whose byte range contains *first_byte*.

  Falls into the debugger and raises if no configured boundary pair
  covers the byte (should not happen for well-formed input).
  """
  bounds = parse_select_table_boundaries
  for lowerbound, upperbound in zip(bounds, bounds[1:]):
    if lowerbound <= first_byte < upperbound:
      return '[PARSE_SELECT_%02d_%02d]' % (lowerbound, upperbound - 1)
  debug()
  raise Exception("Did not find parse_select table; first_byte: %d" % first_byte)
def get_pc_action(pcs):
  """Return the parse_select dispatch macro for the earliest byte
  touched by this parse-control state's select criteria."""
  # MAX_BYTE acts as the identity for min() when there are no criteria
  byte_offsets = [crit[OFFSET] // 8 for crit in pcs.select_criteria]
  first_byte = min(byte_offsets + [MAX_BYTE])
  return get_parse_select_table_code(first_byte)
def gen_pc_entry_start(pcs):
  """Build the tset_parse_control entry for pc 0 (and implicitly pc 1).

  pc 0 always has exactly one child: pc 1, the 'start' state.  When the
  SEB cannot hold everything 'start' extracts, the entry's action must
  be extract_more; otherwise set_next_action either proceeds straight to
  ingress or dispatches to the appropriate parse_select table.
  """
  start_pcs = pcs.children[0]
  aparams = []
  if start_pcs.p4_bits_extracted > pcs.hp4_bits_extracted:
    # SEB too small: pull in enough whole bytes to cover 'start'
    act = 'extract_more'
    aparams.append(str(int(ceil(start_pcs.p4_bits_extracted / 8.0))))
  elif not start_pcs.children:
    act = 'set_next_action'
    aparams.append('[PROCEED]')
  else:
    act = 'set_next_action'
    aparams.append(get_pc_action(start_pcs))
  aparams.append(str(start_pcs.pcs_id))
  return HP4_Command(command='table_add',
                     table='tset_parse_control',
                     action=act,
                     match_params=['[vdev ID]', str(pcs.pcs_id)],
                     action_params=aparams)
def get_p_ps_tables(pcs):
  """Return the parse_select tables needed by *pcs* after its select
  criteria have been sorted and split at table boundaries."""
  # sort criteria/branches, then split anything crossing a boundary
  sorted_criteria, sorted_branches, _default = sort_return_select(pcs)
  revised_criteria, _branches = revise_return_select(pcs,
                                                     sorted_criteria,
                                                     sorted_branches)
  return get_parse_select_tables(revised_criteria)
def did_rewind(pcs):
  """Return True when reaching *pcs* required re-reading bytes already
  covered by the previous state's last parse_select table (i.e. the
  parser had to 'rewind' via extract_more)."""
  if not pcs.children:
    return False
  first_criteria = sort_return_select(pcs)[0][0]
  # locate the boundary index of the first criterion's byte offset
  j = 0
  while parse_select_table_boundaries[j + 1] * 8 <= first_criteria[OFFSET]:
    j += 1
  prev_tables = get_p_ps_tables(pcs.pcs_path[-1])
  return parse_select_table_boundaries[j] <= prev_tables[-1][L_BOUND]
def gen_parse_control_entries(pcs, commands=None):
  """Recursively emit tset_parse_control entries for the parse tree
  rooted at *pcs*.

  The dummy root (pcs_id 0) is handled by gen_pc_entry_start; a
  non-root state gets its own entry only when it forced extra
  extraction (more hp4 bits than its parent) or a rewind.
  """
  if commands is None:
    commands = []
  if pcs.pcs_id == 0:
    cmd = gen_pc_entry_start(pcs)
    commands.append(cmd)
    if cmd.action == 'extract_more':
      # 'start' needed its own entry; recurse through it normally
      commands = gen_parse_control_entries(pcs.children[0], commands)
    else:
      # 'start' is folded into pc 0's entry; recurse past it
      for child in pcs.children[0].children:
        commands = gen_parse_control_entries(child, commands)
  else:
    # NOTE(review): this probe only exists to drop into the debugger if
    # pcs_path is unexpectedly empty; 'test' itself is unused and the
    # same comparison is redone below.
    try:
      test = pcs.pcs_path[-1].hp4_bits_extracted < pcs.hp4_bits_extracted
    except IndexError as e:
      print(e)
      debug()
    if (pcs.pcs_path[-1].hp4_bits_extracted < pcs.hp4_bits_extracted or
        did_rewind(pcs)):
      mparams = ['[vdev ID]', str(pcs.pcs_id)]
      aparams = []
      act = 'set_next_action'
      if not pcs.children:
        # leaf: hand off to the ingress pipeline
        aparams.append('[PROCEED]')
      else:
        aparams.append(get_pc_action(pcs))
      aparams.append(str(pcs.pcs_id))
      cmd = HP4_Command(command='table_add',
                        table='tset_parse_control',
                        action=act,
                        match_params=mparams,
                        action_params=aparams)
      commands.append(cmd)
  for child in pcs.children:
    commands = gen_parse_control_entries(child, commands)
  return commands
def get_new_val(val, width, offset, new_width):
  """Extract *new_width* bits from *val*, a *width*-bit value, starting
  *offset* bits from the most significant bit; return them
  right-aligned.

  Replaces the original bit-by-bit mask-building loop with equivalent
  O(1) shift/mask arithmetic.
  """
  # distance from the LSB to the right edge of the requested field
  shift = width - (offset + new_width)
  mask = ((1 << new_width) - 1) << shift
  return (val & mask) >> shift
def sort_return_select(pcs):
  """Return (criteria, branches, default_branch) for *pcs* with the
  select criteria sorted ascending and each branch's values permuted to
  match the new criterion order.

  An 'immediate' return statement has no select, so empty results are
  returned for it.
  """
  ret_stmt = pcs.parse_state.return_statement
  if ret_stmt[PS_RET_TYPE] == 'immediate':
    return [], [], None
  criteria = pcs.select_criteria
  order = sorted(range(len(criteria)), key=criteria.__getitem__)
  sorted_criteria = [criteria[i] for i in order]
  sorted_branches = []
  default_branch = None
  for branch in ret_stmt[PS_RET_BRANCHES]:
    kind = branch[BRANCH_VALUES][0][VAL_TYPE]
    if kind == 'value':
      permuted = [branch[BRANCH_VALUES][i][VAL_VALUE] for i in order]
      sorted_branches.append((permuted, branch[BRANCH_STATE]))
    elif kind == 'default':
      default_branch = branch
  return sorted_criteria, sorted_branches, default_branch
def revise_value(val, crit, j):
  """Split branch value *val* (which matches criterion *crit*) into the
  pieces delimited by the parse_select table boundaries, starting at
  boundary index *j*.  Returns the piece values in order."""
  end = crit[OFFSET] + crit[WIDTH]
  pieces = []
  pos = crit[OFFSET]
  while pos < end:
    # span: bits from pos up to the next boundary (or to the end of crit)
    span = parse_select_table_boundaries[j + 1] * 8 - pos
    if span > crit[WIDTH]:
      span = end - pos
    pieces.append(get_new_val(val, crit[WIDTH], pos - crit[OFFSET], span))
    pos += span
    j += 1
  return pieces
def revise_criteria(crit, j):
  """Split criterion *crit* at the parse_select table boundaries,
  starting at boundary index *j*; return the resulting list of
  (offset, width) sub-criteria."""
  end = crit[OFFSET] + crit[WIDTH]
  pieces = []
  pos = crit[OFFSET]
  while pos < end:
    # span: bits from pos up to the next boundary (or to the end of crit)
    span = parse_select_table_boundaries[j + 1] * 8 - pos
    if span > crit[WIDTH]:
      span = end - pos
    pieces.append((pos, span))
    pos += span
    j += 1
  return pieces
def revise_return_select(pcs, sorted_criteria, sorted_branches):
  """Rewrite criteria and branch values so that no criterion straddles a
  parse_select table boundary; straddling entries are split in place.

  Returns (revised_criteria, revised_branches) where revised_branches is
  one value list per branch, aligned with revised_criteria.
  """
  revised_criteria = []
  revised_branches = [[] for _ in sorted_branches]
  for i, crit in enumerate(sorted_criteria):
    # locate the boundary region containing the criterion's start
    j = 0
    while parse_select_table_boundaries[j + 1] * 8 <= crit[OFFSET]:
      j += 1
    # does the criterion run past the end of that region?
    crosses = parse_select_table_boundaries[j + 1] * 8 <= (crit[OFFSET] + crit[WIDTH])
    if crosses:
      revised_criteria.extend(revise_criteria(crit, j))
    else:
      revised_criteria.append(crit)
    for k, branch in enumerate(sorted_branches):
      val = branch[BRANCH_VALUES][i]
      if crosses:
        revised_branches[k].extend(revise_value(val, crit, j))
      else:
        revised_branches[k].append(val)
  return revised_criteria, revised_branches
def do_split_criteria(crit):
  """Split byte-aligned criterion *crit* into one (offset, 8) entry per
  byte.  Aborts via unsupported() when the width is not a multiple of 8."""
  try:
    assert(crit[WIDTH] % 8 == 0)
  except AssertionError as e:
    print(e)
    unsupported("select criteria (" + str(crit[OFFSET]) + ", " + str(crit[WIDTH]) \
                + ") not divisible by 8")
  start = crit[OFFSET]
  return [(off, 8) for off in range(start, start + crit[WIDTH], 8)]
def do_split_val(val, width):
  """Split *val*, a *width*-bit value, into its byte values, most
  significant byte first.

  *width* must be a multiple of 8 (do_split_criteria enforces this for
  every caller in this pipeline).
  """
  shifts = range(width - 8, -1, -8)
  return [(val >> s) & 0xFF for s in shifts]
def split_return_select(revised_criteria, revised_branches):
  """Byte-split every boundary-revised criterion and every branch value,
  yielding (split_criteria, split_branches) where each criterion is an
  (offset, 8) pair and each branch value is a single byte."""
  split_criteria = []
  split_branches = [[] for _ in revised_branches]
  for i, crit in enumerate(revised_criteria):
    split_criteria.extend(do_split_criteria(crit))
    for j, branch in enumerate(revised_branches):
      split_branches[j].extend(do_split_val(branch[i], crit[WIDTH]))
  return split_criteria, split_branches
def get_parse_select_table(crit):
  """Map criterion *crit* to its parse_select table.

  Returns (table_name, lower_bound_bits, upper_bound_bits).
  """
  j = 0
  while parse_select_table_boundaries[j + 1] * 8 <= crit[OFFSET]:
    j += 1
  lo = parse_select_table_boundaries[j]
  hi = parse_select_table_boundaries[j + 1]
  name = 'tset_parse_select_%02d_%02d' % (lo, hi - 1)
  return name, lo * 8, hi * 8
def get_parse_select_tables(revised_criteria):
  """Return the ordered, de-duplicated list of parse_select tables
  covering *revised_criteria* (first-seen order preserved)."""
  tables = []
  for crit in revised_criteria:
    entry = get_parse_select_table(crit)
    if entry not in tables:
      tables.append(entry)
  return tables
def get_mparam_indices(table, crits):
  """Return the match-parameter byte indices, relative to *table*'s
  lower bound, covered by the criteria in *crits*."""
  indices = []
  for crit in crits:
    # one index per byte of the criterion
    for off in range(crit[OFFSET], crit[OFFSET] + crit[WIDTH], 8):
      indices.append((off - table[L_BOUND]) // 8)
  return indices
def get_branch_mparams(branch_mparams, branch, mparam_indices):
  """Fill *branch_mparams* at each index in *mparam_indices* with an
  exact-byte ternary match, consuming one value from the front of
  *branch* per index.

  Note: *branch* is mutated (its values are popped) so successive calls
  across parse_select tables keep consuming the same queue;
  *branch_mparams* is modified in place and also returned.
  """
  for idx in mparam_indices:
    byte_val = branch.pop(0)
    branch_mparams[idx] = '%s&&&0xFF' % hex(byte_val)
  return branch_mparams
def get_ps_action(tablename):
  """Translate a 'tset_*' table name into its action-code macro,
  e.g. 'tset_parse_select_00_19' -> '[PARSE_SELECT_00_19]'."""
  suffix = tablename.split('tset_')[1]
  return '[%s]' % suffix.upper()
def get_branch_action(pcs, pst_count, parse_select_tables, branch):
  """Choose the hp4 action (and its parameters) for one select branch of
  *pcs* in the parse_select table at index *pst_count*.

  Returns (action, aparams) where action is 'set_next_action' or
  'extract_more'.
  """
  action = ''
  aparams = []
  if branch[BRANCH_STATE] == 'ingress':
    # branch leaves the parser: proceed to the ingress pipeline
    action = 'set_next_action'
    aparams.append('[PROCEED]')
    aparams.append(str(pcs.pcs_id))
    return action, aparams
  # set_next_action or extract_more
  if pst_count != len(parse_select_tables) - 1:
    # more parse_select tables remain for this state: chain to the next
    action = 'set_next_action'
    aparams.append(get_ps_action(parse_select_tables[pst_count + 1][T_NAME]))
    aparams.append(str(pcs.pcs_id))
  else:
    # last table for this state: transition to the destination state
    next_pcs = [child for child in pcs.children \
                if child.parse_state.name == branch[BRANCH_STATE]][0]
    if next_pcs.hp4_bits_extracted > pcs.hp4_bits_extracted:
      # the next state needs bytes we have not pulled in yet
      action = 'extract_more'
      numbytes = int(ceil(next_pcs.hp4_bits_extracted / 8.0))
      aparams.append(str(numbytes))
    else:
      if not next_pcs.children:
        action = 'set_next_action'
        aparams.append('[PROCEED]')
        aparams.append(str(next_pcs.pcs_id))
        return action, aparams
      # another select statement in next pcs - need to rewind?
      n_first_criteria = sort_return_select(next_pcs)[0][0]
      j = 0
      while parse_select_table_boundaries[j+1] * 8 <= n_first_criteria[OFFSET]:
        j += 1
      if parse_select_table_boundaries[j] <= parse_select_tables[pst_count][L_BOUND]:
        # rewind
        action = 'extract_more'
        numbytes = int(ceil(next_pcs.hp4_bits_extracted / 8.0))
        aparams.append(str(numbytes))
      else:
        action = 'set_next_action'
        next_ps_table = get_parse_select_table(n_first_criteria)
        aparams.append(get_ps_action(next_ps_table[T_NAME]))
        aparams.append(str(next_pcs.pcs_id))
  return action, aparams
def get_parse_select_entries(pcs,
                             parse_select_tables,
                             split_criteria,
                             split_branches_with_dests,
                             default_branch):
  """Emit parse_select entries for one parse-control state *pcs*.

  *split_criteria* is consumed as a queue: each table takes the
  byte-criteria falling in its [L_BOUND, U_BOUND) range, and each
  branch's byte values are consumed in lockstep by get_branch_mparams.
  A per-table default (lowest priority) entry is also emitted.
  """
  commands = []
  # for each parse_select table:
  # - pop all queue items that belong to the table
  # - generate table entry
  for pst_count, table in enumerate(parse_select_tables):
    crits = []
    # NOTE(review): assumes the queue is non-empty whenever a table is
    # visited — holds because parse_select_tables is derived from the
    # same criteria that fill split_criteria.
    while (split_criteria[0][OFFSET] >= table[L_BOUND] and
           split_criteria[0][OFFSET] < table[U_BOUND]):
      crits.append(split_criteria.pop(0))
      if not split_criteria:
        break
    mparam_indices = get_mparam_indices(table, crits)
    # one wildcard byte per match-parameter slot of this table
    mparams = ['0&&&0' for count in xrange((table[U_BOUND] - table[L_BOUND]) / 8)]
    for branch in split_branches_with_dests:
      branch_mparams = ['[vdev ID]', str(pcs.pcs_id)]
      branch_mparams += get_branch_mparams(list(mparams), branch[BRANCH_VALUES], mparam_indices)
      # determine action and action_params
      branch_action, branch_aparams = get_branch_action(pcs,
                                                        pst_count,
                                                        parse_select_tables,
                                                        branch)
      # priority
      branch_aparams.append(HIGHEST_PRIORITY)
      commands.append(HP4_Command(command='table_add',
                                  table=table[T_NAME],
                                  action=branch_action,
                                  match_params=branch_mparams,
                                  action_params=branch_aparams))
    # default branch
    default_mparams = ['[vdev ID]', str(pcs.pcs_id)]
    default_mparams += list(mparams)
    default_action, default_aparams = get_branch_action(pcs,
                                                       pst_count,
                                                       parse_select_tables,
                                                       default_branch)
    default_aparams.append(LOWEST_PRIORITY)
    commands.append(HP4_Command(command='table_add',
                                table=table[T_NAME],
                                action=default_action,
                                match_params=default_mparams,
                                action_params=default_aparams))
  return commands
def gen_parse_select_entries(pcs, commands=None):
  """Recursively emit parse_select entries for the parse tree rooted at
  *pcs*.

  The dummy root (pcs_id 0) just delegates to its single child, and an
  'immediate'-return state contributes no entries of its own.
  """
  if commands is None:
    commands = []
  if pcs.pcs_id == 0:
    return gen_parse_select_entries(pcs.children[0])
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    return commands
  # sort criteria/branches, align to table boundaries, then byte-split
  criteria, branches, default_branch = sort_return_select(pcs)
  rev_criteria, rev_branches = revise_return_select(pcs, criteria, branches)
  byte_criteria, byte_branches = split_return_select(rev_criteria,
                                                     rev_branches)
  tables = get_parse_select_tables(rev_criteria)
  dests = [b[BRANCH_STATE] for b in branches]
  commands += get_parse_select_entries(pcs,
                                       tables,
                                       byte_criteria,
                                       zip(byte_branches, dests),
                                       default_branch)
  for child in pcs.children:
    commands = gen_parse_select_entries(child, commands)
  return commands
def process_parse_select_entries(ps_entries):
  """Post-process parse_select entries: tables whose byte range lies
  within EXT_FIRST_WIDTH match on the single wide field ext.first, so
  their per-byte match params are merged into one value&&&mask pair.
  """
  ret = []
  for command in ps_entries:
    # table name encodes its byte bounds: tset_parse_select_<lo>_<hi>
    strbounds = command.table.split('tset_parse_select_')[1].split('_')
    lower, upper = [int(x) for x in strbounds]
    if lower > EXT_FIRST_WIDTH:
      # beyond ext.first: keep the per-byte params as-is
      # NOTE(review): there is no 'continue' here, so such a command is
      # appended AND then re-processed/appended again below — confirm
      # whether lower > EXT_FIRST_WIDTH can actually occur with the
      # configured table boundaries.
      ret.append(command)
    new_mp_val = ''
    new_mp_mask = ''
    started = False
    # concatenate the byte params into one hex value/mask, skipping
    # leading wildcard bytes
    for mp in command.match_params[EXT_START_INDEX:]:
      val, mask = [int(x, 0) for x in mp.split('&&&')]
      if started or mask != 0:
        started = True
        valstr, maskstr = ["0x{:02x}".format(x).split('0x')[1] for x in [val, mask]]
        new_mp_val += valstr
        new_mp_mask += maskstr
    # fill out remaining bytes until we have all 40
    for j in range(upper + 1, EXT_FIRST_WIDTH):
      new_mp_val += '00'
      new_mp_mask += '00'
    new_mp = command.match_params[0:EXT_START_INDEX]
    if new_mp_val == '':
      # every byte was wildcard: keep a full wildcard match
      assert(new_mp_mask == '')
      new_mp.append('0&&&0')
    else:
      new_mp.append('0x' + new_mp_val + '&&&0x' + new_mp_mask)
    ret.append(HP4_Command(command='table_add',
                           table=command.table,
                           action=command.action,
                           match_params=new_mp,
                           action_params=command.action_params))
  return ret
def collect_ingress_pcs(pcs, ingress_pcs_list=None):
  """Collect every parse-control state that can transition directly to
  the ingress pipeline; the dummy root delegates to its child."""
  if ingress_pcs_list is None:
    ingress_pcs_list = []
  if pcs.pcs_id == 0:
    return collect_ingress_pcs(pcs.children[0])
  ret_stmt = pcs.parse_state.return_statement
  ret_type = ret_stmt[PS_RET_TYPE]
  if ret_type == 'select':
    if any(b[BRANCH_STATE] == 'ingress' for b in ret_stmt[PS_RET_BRANCHES]):
      ingress_pcs_list.append(pcs)
  elif ret_type == 'immediate':
    if ret_stmt[PS_RET_IMM_STATE] == 'ingress':
      ingress_pcs_list.append(pcs)
  else:
    unsupported("Unhandled ps return_statement: " + ret_type)
  for child in pcs.children:
    ingress_pcs_list = collect_ingress_pcs(child, ingress_pcs_list)
  return ingress_pcs_list
def get_headerset_and_maxdepth(ingress_pcs_list):
  """For each header-stack depth j, gather the set of headers that any
  ingress-bound parse state has at that depth; also return the maximum
  depth observed."""
  depths = [len(pcs.header_offsets) for pcs in ingress_pcs_list]
  maxdepth = max(depths) if depths else 0
  headerset = [set() for _ in range(maxdepth)]
  for pcs in ingress_pcs_list:
    # order this state's headers by their bit offset, then bucket by depth
    ordered = sorted(pcs.header_offsets, key=pcs.header_offsets.get)
    for j, header in enumerate(ordered):
      headerset[j].add(header)
  return headerset, maxdepth
def get_vbits(ingress_pcs_list):
  """Assign a distinct bit within a VBITS_WIDTH-wide field to every
  (depth, header) pair observed among ingress-bound parse states.

  Bits are allocated from the top of the field downward, one group of
  contiguous bits per depth level.
  """
  headerset, maxdepth = get_headerset_and_maxdepth(ingress_pcs_list)
  vbits = {}
  shift = VBITS_WIDTH
  for depth in range(maxdepth):
    # reserve one bit per header seen at this depth
    shift -= len(headerset[depth])
    bit = 1
    for header in headerset[depth]:
      vbits[(depth, header)] = bit << shift
      bit <<= 1
  return vbits
def get_hp4_type(header):
  """Classify a header instance for hp4: 'standard_metadata',
  'metadata', or 'extracted' (packet data)."""
  if header.name == 'standard_metadata':
    return 'standard_metadata'
  return 'metadata' if header.metadata == True else 'extracted'
def get_aparam_table_ID(table):
  """Map a native table's match type to the hp4 table-type macro used as
  an action parameter.

  At most one match field is supported (enforced by do_support_checks);
  a table with no match fields is '[MATCHLESS]'.
  """
  if len(table.match_fields) == 0:
    return '[MATCHLESS]'
  match = table.match_fields[0] # supporting only one match field
  match_type = match[MATCH_TYPE]
  if match_type.value == 'P4_MATCH_EXACT':
    field = match[MATCH_OBJECT]
    hp4_type = get_hp4_type(field.instance)
    if hp4_type == 'standard_metadata':
      stdmeta_codes = {
          'ingress_port': '[STDMETA_INGRESS_PORT_EXACT]',
          'packet_length': '[STDMETA_PACKET_LENGTH_EXACT]',
          'instance_type': '[STDMETA_INSTANCE_TYPE_EXACT]',
          'egress_spec': '[STDMETA_EGRESS_SPEC_EXACT]',
      }
      if field.name in stdmeta_codes:
        return stdmeta_codes[field.name]
      unsupported("ERROR: Unsupported: match on stdmetadata field %s" % field.name)
    elif hp4_type == 'metadata':
      return '[METADATA_EXACT]'
    elif hp4_type == 'extracted':
      return '[EXTRACTED_EXACT]'
  elif match_type.value == 'P4_MATCH_VALID':
    return '[EXTRACTED_VALID]'
  else:
    unsupported("Not yet supported: " + match_type.value)
def gen_pipeline_config_entries(pcs, first_table, ingress_pcs_list, vbits):
  """Emit tset_pipeline_config entries: one per ingress-bound parse
  state, recording the first match table's type macro and the
  valid-header bit vector for that state's header stack."""
  if pcs.pcs_id == 0:
    # dummy root: operate on its single child
    return gen_pipeline_config_entries(pcs.children[0],
                                       first_table,
                                       ingress_pcs_list,
                                       vbits)
  commands = []
  aparam_table_ID = get_aparam_table_ID(first_table)
  for pcs in ingress_pcs_list:
    # OR together the bit assigned to each (depth, header) on this path
    vbits_val = 0
    ordered_headers = sorted(pcs.header_offsets, key=pcs.header_offsets.get)
    for depth, header in enumerate(ordered_headers):
      vbits_val |= vbits[(depth, header)]
    commands.append(HP4_Command('table_add',
                                'tset_pipeline_config',
                                'a_set_pipeline',
                                ['[vdev ID]', str(pcs.pcs_id)],
                                [aparam_table_ID,
                                 '0x%x' % vbits_val,
                                 HIGHEST_PRIORITY]))
  return commands
def process_extract_statements(pcs):
  """Record header offsets and advance the bit counters of *pcs* for
  every extract() in its parse state's call sequence; any other parse
  call aborts."""
  for call in pcs.parse_state.call_sequence:
    if call[PS_CALL_TYPE] != p4_hlir.hlir.p4_parser.parse_call.extract:
      debug()
      raise Exception('Unsupported parse call: %s' % call[PS_CALL_TYPE])
    hinst = call[PS_CALL_H_INST]
    pcs.header_offsets[hinst.name] = pcs.p4_bits_extracted
    pcs.p4_bits_extracted += hinst.header_type.length * 8
    # hp4 must have extracted at least as much as the native parser
    if pcs.hp4_bits_extracted < pcs.p4_bits_extracted:
      pcs.hp4_bits_extracted = pcs.p4_bits_extracted
def process_parse_tree_clr(pcs, h):
  """Recursively expand the parse tree rooted at *pcs*.

  Processes the state's extract statements, records its select criteria
  (translated to hp4 bit offsets), and creates a child PC_State for
  every distinct next parse state, recursing into each.
  """
  #print(str(pcs.pcs_id) + ' [' + pcs.parse_state.name + ']')
  process_extract_statements(pcs)
  def add_next(next_parse_state):
    # clone the path bookkeeping and attach a child PC_State for
    # next_parse_state
    next_pcs_pcs_path = list(pcs.pcs_path)
    next_pcs_pcs_path.append(pcs)
    next_pcs_ps_path = list(pcs.ps_path)
    next_pcs_ps_path.append(pcs.parse_state)
    next_pcs = PC_State(hp4_bits_extracted = pcs.hp4_bits_extracted,
                        p4_bits_extracted = pcs.p4_bits_extracted,
                        ps_path = next_pcs_ps_path,
                        pcs_path = next_pcs_pcs_path,
                        parse_state = next_parse_state)
    pcs.children.append(next_pcs)
    return next_pcs
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'select':
    for criteria in pcs.parse_state.return_statement[PS_RET_CRITERIA]:
      if isinstance(criteria, current_call):
        # current(offset, width): bits beyond what has been extracted
        curr_reqmt = criteria[OFFSET] + criteria[WIDTH]
        if pcs.p4_bits_extracted + curr_reqmt > pcs.hp4_bits_extracted:
          pcs.hp4_bits_extracted += curr_reqmt
        hp4_criteria_offset = criteria[OFFSET] + pcs.p4_bits_extracted
        pcs.select_criteria.append((hp4_criteria_offset, criteria[WIDTH]))
      else:
        # 'header.field' reference: resolve via HLIR field offsets
        hdr_name, fld_name = criteria.split('.')
        hp4_criteria_offset = h.p4_fields[criteria].offset + pcs.header_offsets[hdr_name]
        pcs.select_criteria.append((hp4_criteria_offset, h.p4_fields[criteria].width))
    next_parse_states = []
    for branch in pcs.parse_state.return_statement[PS_RET_BRANCHES]:
      # e.g., ([('value', 1108152157446)], 'parse_A')
      values = []
      for value in branch[BRANCH_VALUES]:
        if value[VAL_TYPE] != 'value' and value[VAL_TYPE] != 'default':
          debug()
          raise Exception('Unsupported branch value type: %s' % value[VAL_TYPE])
        if value[VAL_TYPE] == 'default':
          values.append('default')
        else:
          values.append(value[VAL_VALUE])
      pcs.select_values.append( values )
      if branch[BRANCH_STATE] != 'ingress':
        next_parse_state = h.p4_parse_states[branch[BRANCH_STATE]]
        # only create one child per distinct destination state
        if next_parse_state not in next_parse_states:
          next_parse_states.append(next_parse_state)
          next_pcs = add_next(next_parse_state)
          process_parse_tree_clr(next_pcs, h)
  elif pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    next_parse_state_name = pcs.parse_state.return_statement[PS_RET_IMM_STATE]
    if next_parse_state_name != 'ingress':
      next_parse_state = h.p4_parse_states[next_parse_state_name]
      next_pcs = add_next(next_parse_state)
      process_parse_tree_clr(next_pcs, h)
  else:
    debug()
    raise Exception('Unsupported return type: %s' % \
                    pcs.parse_state.return_statement[PS_RET_TYPE])
def consolidate_parse_tree_clr(pcs, h):
  """Merge each 'immediate'-return parse state with its successor.

  A state that unconditionally falls through to another state is fused
  into a single combined parse state (call sequences concatenated,
  successor's return statement adopted); the predecessor's branch
  targets are patched to the new merged name.  Recurses over the whole
  tree.
  """
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    next_parse_state_name = pcs.parse_state.return_statement[PS_RET_IMM_STATE]
    if next_parse_state_name != 'ingress':
      # immediate-return states have exactly one child
      next_pc_state = pcs.children[0]
      next_parse_state = next_pc_state.parse_state
      old_ps_name = pcs.parse_state.name
      new_ps_name = pcs.parse_state.name + '-' + next_parse_state.name
      new_ps_call_sequence = list(pcs.parse_state.call_sequence)
      new_ps_call_sequence += next_parse_state.call_sequence
      new_ps = p4_parse_state(h,
                              new_ps_name,
                              call_sequence=new_ps_call_sequence,
                              return_statement=next_parse_state.return_statement)
      # absorb the successor's extraction progress into this state
      hp4_bits_diff = next_pc_state.hp4_bits_extracted - pcs.hp4_bits_extracted
      pcs.hp4_bits_extracted += hp4_bits_diff
      p4_bits_diff = next_pc_state.p4_bits_extracted - pcs.p4_bits_extracted
      pcs.p4_bits_extracted += p4_bits_diff
      pcs.parse_state = new_ps
      pcs.children = list(next_pc_state.children)
      # repoint the previous state's branches at the merged name
      prev_ps = pcs.ps_path[-1]
      for i, branch in enumerate(prev_ps.return_statement[PS_RET_BRANCHES]):
        if branch[BRANCH_STATE] == old_ps_name:
          prev_ps.return_statement[PS_RET_BRANCHES][i] = (branch[BRANCH_VALUES], new_ps_name)
  for child in pcs.children:
    consolidate_parse_tree_clr(child, h)
def collect_header_offsets(pcs, header_offsets=None):
  """Merge per-state header offsets across the whole parse tree,
  aborting via unsupported() when a header appears at two different
  offsets on different paths."""
  if header_offsets is None:
    header_offsets = {}
  for header, offset in pcs.header_offsets.items():
    if header in header_offsets and offset != header_offsets[header]:
      unsupported("Unsupported: %s has multiple potential offsets; %db and %db" \
                  % (header, offset, header_offsets[header]))
  header_offsets.update(pcs.header_offsets)
  for child in pcs.children:
    header_offsets = collect_header_offsets(child, header_offsets)
  return header_offsets
def collect_field_offsets(header_offsets, header_instances):
  """Compute absolute bit offsets for every field of every collected
  header.

  header_offsets -- dict: header name -> bit offset of the header
  header_instances -- HLIR header instance lookup by name

  Returns dict: 'header.field' -> absolute bit offset.
  Raises KeyError when a collected header has no HLIR instance.
  """
  field_offsets = {}
  for header in header_offsets:
    try:
      hinst = header_instances[header]
    except KeyError as e:
      print(e)
      debug()
      # Bug fix: execution previously fell through with 'hinst' unbound,
      # masking the KeyError with a NameError; re-raise the real error.
      raise
    for field in hinst.fields:
      field_offsets[header + '.' + field.name] = field.offset + header_offsets[header]
  return field_offsets
def get_table_from_cs(control_statement):
  """Return the p4_table a control statement applies: either the table
  itself or the first element of an apply-and-select tuple; anything
  else aborts via unsupported()."""
  if type(control_statement) is p4_table:
    return control_statement
  if type(control_statement) is tuple:
    return control_statement[0]
  unsupported("Error (get_table_from_cs): unsupported control statement type: " \
              + str(type(control_statement)))
def walk_control_block(control_block, table):
  """Locate *table* in *control_block*, recursing into apply-and-select
  cases, and return (found, next_table): the table applied immediately
  after it, or None when it is the last statement.

  Returns (False, None) when the table does not occur in this block.
  """
  for control_statement in control_block:
    cs_idx = control_block.index(control_statement)
    if type(control_statement) is p4_table:
      # apply_table_call
      if control_statement == table:
        if cs_idx == len(control_block) - 1:
          return True, None
        return True, get_table_from_cs(control_block[cs_idx + 1])
    elif type(control_statement) is tuple:
      # apply_and_select_block
      if control_statement[0] == table:
        if cs_idx == len(control_block) - 1:
          return True, None
        return True, get_table_from_cs(control_block[cs_idx + 1])
      else:
        # search each case's sub-block for the table
        for case in control_statement[1]:
          found, next_table = walk_control_block(case[1], table)
          if found:
            if next_table != None:
              return True, next_table
            elif cs_idx < len(control_block) - 1:
              # table was last in its case: fall through to the statement
              # after this apply_and_select block
              return True, get_table_from_cs(control_block[cs_idx + 1])
            else:
              return True, None
    else:
      # Bug fix: the message previously referenced the undefined name
      # 'entry', raising NameError instead of reporting the real problem.
      unsupported("Error: unsupported call_sequence entry type: " \
                  + str(type(control_statement)))
  return False, None
def gen_tmiss_entries(tables, table_to_trep, ingress, numprimitives):
  """Emit default (miss) entries for every native table.

  Each entry invokes init_program_state with action/match IDs of 0 and
  points execution at the stage of the table applied next in the
  ingress control flow (0 when there is none).
  """
  commands = []
  for table_name in tables:
    table = tables[table_name]
    trep = table_to_trep[table]
    tname = trep.name
    stage = trep.stage # int
    aname = 'init_program_state'
    mparams = ['[vdev ID]']
    # matchless tables have no ternary key and thus no wildcard param
    if 'matchless' not in tname:
      mparams.append('0&&&0')
    # identify next_table so we can look up stage for aparams[0]
    # aparams[0]: 'next_stage' parameter in finish_action (stages.p4/p4t)
    if 'miss' in table.next_:
      next_table = table.next_['miss']
    else:
      found, next_table = walk_control_block(ingress.call_sequence, table)
    if next_table == None:
      next_stage = '0'
      next_table_type = '0'
    else:
      next_stage = str(table_to_trep[next_table].stage)
      next_table_type = table_to_trep[next_table].table_type()
    aparams = ['0', # action_ID
               '0', # match_ID
               next_stage,
               next_table_type]
    # zeros for remaining type / subtype parameters of init_program_state
    for i in range(numprimitives):
      aparams.append('0')
      aparams.append('0')
    if 'matchless' not in tname:
      aparams.append(str(LOWEST_PRIORITY))
    commands.append(HP4_Command(command="table_add",
                                table=tname,
                                action=aname,
                                match_params=mparams,
                                action_params=aparams))
  return commands
# gen_t_checksum_entries(h.calculated_fields)
def gen_t_checksum_entries(calculated_fields, p4_field_list_calculations,
                           field_offsets, vbits):
  """ detect and handle ipv4 checksum

  Scans the program's calculated fields for a csum16/16-bit-output update
  over a 144-bit field list and emits t_checksum entries for it; emits a
  single _no_op default entry when no supported checksum is found.
  Only one checksum total is supported; multiples abort.
  """
  commands = []
  cf_none_types = 0
  cf_valid_types = 0
  checksum_detected = False
  for cf in calculated_fields:
    for statement in cf[1]:
      if statement[0] == 'update':
        flc = p4_field_list_calculations[statement[1]]
        for fl in flc.input:
          count = 0
          max_field_offset = 0
          max_field = None
          for field in fl.fields:
            count += field.width
            if field.offset > max_field_offset:
              max_field_offset = field.offset
              max_field = field
          # 144 bits: presumably the IPv4 header fields excluding the 16-bit
          # checksum itself — the only supported input (TODO confirm)
          if count == 144:
            if flc.algorithm == 'csum16' and flc.output_width == 16:
              # Calculate rshift_base parameter
              # This is the amount to R-shift extracted.data such
              # that the ipv4 header is right aligned
              key = max_field.instance.name + '.' + max_field.name
              # TODO: remove assumption that extracted.data is 800 bits
              aparam = str(800 - field_offsets[key] - max_field.width)
              if statement[2] == None:
                # unconditional update
                cf_none_types += 1
                if (cf_none_types + cf_valid_types) > 1:
                  print("ERROR: Unsupported: multiple checksums")
                  exit()
                else:
                  checksum_detected = True
                  commands.append(HP4_Command("table_add",
                                              "t_checksum",
                                              "a_ipv4_csum16",
                                              ['[vdev ID]', '0&&&0'],
                                              [aparam, str(LOWEST_PRIORITY)]))
              else:
                # update guarded by an if_cond; only valid(hdr) is supported
                if statement[2].op == 'valid':
                  cf_valid_types += 1
                  if (cf_none_types + cf_valid_types) > 1:
                    print("ERROR: Unsupported: multiple checksums")
                    exit()
                  else:
                    # TODO: reduce entries by isolating relevant bit
                    for key in vbits.keys():
                      if statement[2].right == key[1]:
                        mparams = ['[vdev ID]']
                        val = format(vbits[key], '#x')
                        mparams.append(val + '&&&' + val)
                        checksum_detected = True
                        commands.append(HP4_Command("table_add",
                                                    "t_checksum",
                                                    "a_ipv4_csum16",
                                                    mparams,
                                                    [aparam, '0']))
                else:
                  unsupported("ERROR: Unsupported if_cond op " \
                              + "in calculated field: %s" % statement[2].op)
            else:
              unsupported("ERROR: Unsupported checksum (%s, %i)" \
                          % (flc.algorithm, flc.output_width))
          else:
            unsupported("ERROR: Unsupported checksum - field list of %i bits" \
                        % count)
      else:
        unsupported("WARNING: Unsupported update_verify_spec " \
                    + "for calculated field: %s" % statement[0])
  if checksum_detected == False:
    # no checksum in the program: install a do-nothing default
    commands.append(HP4_Command("table_add",
                                "t_checksum",
                                "_no_op",
                                ['[vdev ID]', '0&&&0'],
                                [str(LOWEST_PRIORITY)]))
  return commands
def gen_t_resize_pr_entries():
  """Return t_resize_pr entries — currently none (default entry is handled
  by the controller).

  TODO: full implementation as the following primitives get support:
  - add_header | remove_header | truncate | push | pop | copy_header*
    * maybe (due to possibility of making previously invalid header valid)
  """
  return []
def print_processed_parse_tree(pcs, level=0):
  """Recursively pretty-print a PC_State tree, one tab per depth level.

  pcs: node exposing __str__ and a .children list
  level: current indent depth (tabs)
  """
  # print(...) with a single argument behaves identically under Python 2
  # (parenthesized expression) and Python 3; the original used the
  # Python-2-only print statement
  for line in str(pcs).split('\n'):
    print('\t' * level + line)
  for child in pcs.children:
    print_processed_parse_tree(child, level + 1)
def print_commands(commands):
  """Print every command in the list, one per line, via its str() form."""
  for entry in commands:
    print(entry)
def launch_process_parse_tree_clr(pcs, h):
  """Seed the parse-tree traversal: wrap pcs's parse state in a fresh child
  PC_State, attach it, and hand it to process_parse_tree_clr."""
  child = PC_State(pcs_path=[pcs], parse_state=pcs.parse_state)
  pcs.children.append(child)
  process_parse_tree_clr(child, h)
def parse_args(args):
  """Parse the compiler's command-line arguments.

  args: argument list (typically sys.argv[1:]).
  Returns an argparse.Namespace with input, output, mt_output, numprimitives.
  """
  parser = argparse.ArgumentParser(description='Recursive Parse Tree Processing')
  parser.add_argument('input', help='path for input .p4', type=str)
  parser.add_argument('-o', '--output', help='path for output .hp4t file',
                      type=str, action="store", default='output.hp4t')
  parser.add_argument('-m', '--mt_output', help='path for match template output',
                      type=str, action="store", default='output.hp4mt')
  # adjacent string literals (instead of a backslash continuation inside the
  # literal) keep stray indentation whitespace out of the help text
  parser.add_argument('--numprimitives', help='maximum number of primitives '
                      'for which HyPer4 is configured',
                      type=int, action="store", default=9)
  return parser.parse_args(args)
def main():
  """CLI entry point: parse arguments and run the P4-to-HP4 compiler."""
  opts = parse_args(sys.argv[1:])
  compiler = P4_to_HP4()
  compiler.compile_to_hp4(opts.input, opts.output, opts.mt_output,
                          opts.numprimitives)
# Script entry point: run the compiler CLI when invoked directly.
if __name__ == '__main__':
  main()
| 37.437225 | 107 | 0.599969 |
from p4_hlir.main import HLIR
from p4_hlir.hlir.p4_parser import p4_parse_state
import p4_hlir
from p4_hlir.hlir.p4_tables import p4_table
from compiler import HP4Compiler, CodeRepresentation
import argparse
import itertools
import code
from inspect import currentframe, getframeinfo
import sys
import math
from math import ceil
import json
import pkg_resources
# Bit widths of HP4's fixed-size data fields.
SEB = 320            # initial hp4_bits_extracted for a PC_State (see PC_State)
METADATA_WIDTH = 256  # capacity of meta.data in bits (checked in collect_meta)
# Index constants for positional access into parse-state return-statement
# tuples (return type / criteria / branches, or immediate next state).
PS_RET_TYPE = 0
PS_RET_CRITERIA = 1
PS_RET_BRANCHES = 2
PS_RET_IMM_STATE = 1
# Index constants for parse-state call tuples (type, header instance).
PS_CALL_TYPE = 0
PS_CALL_H_INST = 1
# Index constants for (offset, width) criteria tuples.
OFFSET = 0
WIDTH = 1
# Index constants for select-branch tuples (values, destination state).
BRANCH_VALUES = 0
BRANCH_STATE = 1
# Index constants for (type, value) pairs.
VAL_TYPE = 0
VAL_VALUE = 1
MAX_BYTE = 100  # mask width in bytes used when generating bitmasks
# Index constants for (table name, lower bound, upper bound) tuples.
T_NAME = 0
L_BOUND = 1
U_BOUND = 2
# bmv2 ternary-entry priorities are strings; 2147483646 = 2^31 - 2.
HIGHEST_PRIORITY = '0'
LOWEST_PRIORITY = '2147483646'
VBITS_WIDTH = 80  # width in bits of the header-validity bit vector
# Index constants for a table's match_fields entries (field, match type).
MATCH_TYPE = 1
MATCH_FIELD = 0
# Index constants for an Action_Rep call_sequence entry (type, subtype).
PRIM_TYPE = 0
PRIM_SUBTYPE = 1
# Index constants for a p4_call tuple (primitive, parameter list).
P4_CALL_PRIMITIVE = 0
P4_CALL_PARAMS = 1
# Index constants for (param, param type) pairs.
PARAM = 0
PARAM_TYPE = 1
MATCH_OBJECT = 0
# NOTE(review): MATCH_TYPE is assigned twice (also above); both are 1, so
# this is harmless, but one assignment is redundant.
MATCH_TYPE = 1
EXT_FIRST_WIDTH = 40
EXT_START_INDEX = 2
# Stage boundaries for the parse_select_NN_NN tables.
parse_select_table_boundaries = [0, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# P4 primitive name -> HP4 primitive-type token (bracketed placeholders are
# presumably resolved to numeric IDs downstream — TODO confirm).
primitive_ID = {'modify_field': '[MODIFY_FIELD]',
                'add_header': '[ADD_HEADER]',
                'copy_header': '[COPY_HEADER]',
                'remove_header': '[REMOVE_HEADER]',
                'modify_field_with_hash_based_offset': '[MODIFY_FIELD_WITH_HBO]',
                'modify_field_rng_uniform': '[MODIFY_FIELD_RNG_U]',
                'truncate': '[TRUNCATE]',
                'drop': '[DROP]',
                'no_op': '[NO_OP]',
                'push': '[PUSH]',
                'pop': '[POP]',
                'count': '[COUNT]',
                'execute_meter': '[METER]',
                'generate_digest': '[GENERATE_DIGEST]',
                'recirculate': '[RECIRCULATE]',
                'resubmit': '[RESUBMIT]',
                'clone_ingress_pkt_to_egress': '[CLONE_INGRESS_EGRESS]',
                'clone_egress_pkt_to_egress': '[CLONE_EGRESS_EGRESS]',
                'multicast': '[MULTICAST]',
                'add_to_field': '[MATH_ON_FIELD]',
                'bit_xor': '[BIT_XOR]'}
# P4 primitive name -> short name used in HP4 primitive table names
# (t_<tname>_<stage><rank> in gen_action_entries); '' = unsupported.
primitive_tnames = {'modify_field': 'mod',
                    'add_header': 'addh',
                    'copy_header': '',
                    'remove_header': 'removeh',
                    'modify_field_with_hash_based_offset': '',
                    'modify_field_rng_uniform': 'mod_rng',
                    'truncate' : 'truncate',
                    'drop' : 'drop',
                    'no_op' : '',
                    'push' : '',
                    'pop' : '',
                    'count' : '',
                    'execute_meter': '',
                    'generate_digest': '',
                    'recirculate': '',
                    'resubmit': '',
                    'clone_ingress_pkt_to_egress': '',
                    'clone_egress_pkt_to_egress': '',
                    'multicast': 'multicast',
                    'add_to_field': 'math_on_field',
                    'bit_xor': 'bit_xor'}
# modify_field (dst category, src category) -> subtype ID
# (see get_prim_subtype for how the categories are derived).
mf_prim_subtype_ID = {('meta', 'ingress_port'): '1',
                      ('meta', 'packet_length'): '2',
                      ('meta', 'egress_spec'): '3',
                      ('meta', 'egress_port'): '4',
                      ('meta', 'egress_instance'): '5',
                      ('meta', 'instance_type'): '6',
                      ('egress_spec', 'meta'): '7',
                      ('meta', 'const'): '8',
                      ('egress_spec', 'const'): '9',
                      ('ext', 'const'): '10',
                      ('egress_spec', 'ingress_port'): '11',
                      ('ext', 'ext'): '12',
                      ('meta', 'ext'): '13',
                      ('ext', 'meta'): '14'}
# modify_field subtype ID -> HP4 action name.
mf_prim_subtype_action = {'1': 'mod_meta_stdmeta_ingressport',
                          '2': 'mod_meta_stdmeta_packetlength',
                          '3': 'mod_meta_stdmeta_egressspec',
                          '4': 'mod_meta_stdmeta_egressport',
                          '5': 'mod_meta_stdmeta_egressinst',
                          '6': 'mod_meta_stdmeta_insttype',
                          '7': 'mod_stdmeta_egressspec_meta',
                          '8': 'mod_meta_const',
                          '9': 'mod_stdmeta_egressspec_const',
                          '10': 'mod_extracted_const',
                          '11': 'mod_stdmeta_egressspec_stdmeta_ingressport',
                          '12': 'mod_extracted_extracted',
                          '13': 'mod_meta_extracted',
                          '14': 'mod_extracted_meta'}
# add_to_field: sign of the constant selects add vs. subtract subtype.
a2f_prim_subtype_ID = {'add': '1', 'sub': '2'}
a2f_prim_subtype_action = {'1': 'a_add2f_extracted_const_u',
                           '2': 'a_subff_extracted_const_u'}
# bit_xor (dst, src, operand) category triple -> subtype ID / action name.
bx_prim_subtype_ID = {('meta', 'meta', 'const'): '1',
                      ('ext', 'ext', 'const'): '2',
                      ('meta', 'ext', 'const'): '3'}
bx_prim_subtype_action = {'1': 'bit_xor_meta_meta_const',
                          '2': 'bit_xor_extracted_extracted_const',
                          '3': 'bit_xor_meta_extracted_const'}
# Primitives with a single subtype: name -> HP4 action name ('' = unsupported).
gen_prim_subtype_action = {'add_header': 'a_addh',
                           'copy_header': '',
                           'remove_header': 'a_removeh',
                           'modify_field_with_hash_based_offset': '',
                           'modify_field_rng_uniform': 'mod_extracted_rng',
                           'truncate': 'a_truncate',
                           'drop': 'a_drop',
                           'no_op': '',
                           'push': '',
                           'pop': '',
                           'count': '',
                           'execute_meter': '',
                           'recirculate': '',
                           'resubmit': '',
                           'clone_ingress_pkt_to_egress': '',
                           'clone_egress_pkt_to_egress': '',
                           'multicast': 'a_multicast'}
# NOTE(review): appears unused in the visible code; presumably a default /
# sentinel for the current call tuple — TODO confirm before removing.
current_call = tuple
def debug():
  """ Break and enter interactive method after printing location info """
  caller_frame = currentframe().f_back
  location = caller_frame.f_code.co_name + ": line " \
             + str(getframeinfo(caller_frame).lineno)
  print(location)
  code.interact(local=dict(globals(), **caller_frame.f_locals))
def unsupported(msg):
  """Report an unsupported construct and abort compilation.

  Prints msg and raises SystemExit; callers rely on this never returning.
  """
  print(msg)
  # sys.exit() is preferred over the site-provided exit() helper, which is
  # intended for interactive sessions and absent when Python runs with -S
  sys.exit()
def convert_to_builtin_type(obj):
d = { '__class__':obj.__class__.__name__, '__module__':obj.__module__, }
d.update(obj.__dict__)
return d
class HP4_Command(object):
  """One bmv2-style CLI command (e.g. table_add) targeting an HP4 table.

  str() renders the table_add form: '<table> <action> :<mparams>:<aparams>'
  with space-separated parameter lists.
  """
  def __init__(self, command='table_add',
                     table='',
                     action='',
                     match_params=None,
                     action_params=None):
    self.command = command
    self.table = table
    self.action = action
    # None sentinels replace the original mutable-default lists, which were
    # shared across every instance constructed without explicit params;
    # callers that pass lists explicitly are unaffected
    self.match_params = [] if match_params is None else match_params
    self.action_params = [] if action_params is None else action_params
  def __str__(self):
    """ assumes command is \'table_add\' """
    if self.command != 'table_add':
      # (removed a leftover interactive debug() hook here — it blocked on a
      # REPL prompt before the exception could propagate)
      raise Exception("Incorrect table command %s, table %s" % (self.command, self.table))
    ret = self.table + ' ' + self.action + ' :'
    ret += ' '.join(self.match_params)
    ret += ':'
    ret += ' '.join(self.action_params)
    return ret
class HP4_Match_Command(HP4_Command):
  """HP4_Command template that also records which source P4 table and
  action it was generated from."""
  def __init__(self, source_table='', source_action='', **kwargs):
    super(HP4_Match_Command, self).__init__(**kwargs)
    self.source_table = source_table
    self.source_action = source_action
class HP4_Primitive_Command(HP4_Command):
  """HP4_Command for a primitive table, tagged with its P4 source table and
  action plus the index of the source action parameter it consumes."""
  def __init__(self, source_table, source_action, command, table, action,
               mparams, aparams, src_aparam_id):
    super(HP4_Primitive_Command, self).__init__(command, table, action,
                                                mparams, aparams)
    self.source_table = source_table
    self.source_action = source_action
    self.src_aparam_id = src_aparam_id
class DAG_Topo_Sorter():
  """DFS-based topological sorter for the P4 table DAG.

  sort() returns the tables in topological order (every table precedes its
  next_ successors); cycles and egress tables abort via unsupported().
  """
  def __init__(self, p4_tables):
    self.unmarked = [p4_tables[key] for key in p4_tables]
    self.tempmarked = []
    self.permmarked = []
    self.L = []
  def visit(self, n):
    if n.control_flow_parent == 'egress':
      unsupported("ERROR: Not yet supported: tables in egress (" + n.name + ")")
    if n in self.tempmarked:
      # back edge: the table graph is cyclic
      unsupported("ERROR: not a DAG")
    if n in self.unmarked:
      self.unmarked.remove(n)
      self.tempmarked.append(n)
      for successor in n.next_.values():
        if successor is not None:
          self.visit(successor)
      self.permmarked.append(n)
      self.tempmarked.remove(n)
      # prepend so ancestors end up before their descendants
      self.L.insert(0, n)
  def sort(self):
    while self.unmarked:
      self.visit(self.unmarked[0])
    return self.L
class Table_Rep():
  """HP4 representation of one P4 table: its pipeline stage, match type,
  match-source category ('standard_metadata' | 'metadata' | 'extracted' |
  ''), and matched field name.  str() yields the HP4 table name."""
  def __init__(self, stage, match_type, source_type, field_name):
    self.stage = stage
    self.match_type = match_type
    self.source_type = source_type
    self.field_name = field_name
    # name: t<stage>_[<source>_]<matchkind>, e.g. t2_metadata_exact
    name = 't' + str(stage) + '_'
    if source_type == 'standard_metadata':
      name += 'stdmeta_' + field_name + '_'
    elif source_type == 'metadata':
      name += 'metadata_'
    elif source_type == 'extracted':
      name += 'extracted_'
    kind_suffix = {'P4_MATCH_EXACT': 'exact',
                   'P4_MATCH_VALID': 'valid',
                   'P4_MATCH_TERNARY': 'ternary',
                   'MATCHLESS': 'matchless'}
    self.name = name + kind_suffix.get(match_type, '')
  def table_type(self):
    """Map (source_type, match_type[, field_name]) to the HP4 table-type
    tag; unsupported combinations abort via unsupported()."""
    if self.source_type == 'standard_metadata':
      if self.match_type != 'P4_MATCH_EXACT':
        unsupported("Not supported: standard_metadata with %s match type" \
                    % self.match_type)
      stdmeta_tags = {'ingress_port': '[STDMETA_INGRESS_PORT_EXACT]',
                      'packet_length': '[STDMETA_PACKET_LENGTH_EXACT]',
                      'instance_type': '[STDMETA_INSTANCE_TYPE_EXACT]',
                      'egress_spec': '[STDMETA_EGRESS_SPEC_EXACT]'}
      if self.field_name not in stdmeta_tags:
        unsupported("Not supported: standard_metadata field %s" \
                    % self.field_name)
      return stdmeta_tags[self.field_name]
    elif self.source_type == 'metadata':
      meta_tags = {'P4_MATCH_EXACT': '[METADATA_EXACT]',
                   'P4_MATCH_TERNARY': '[METADATA_TERNARY]'}
      if self.match_type not in meta_tags:
        unsupported("Not supported: metadata with %s match type" \
                    % self.match_type)
      return meta_tags[self.match_type]
    elif self.source_type == 'extracted':
      ext_tags = {'P4_MATCH_EXACT': '[EXTRACTED_EXACT]',
                  'P4_MATCH_VALID': '[EXTRACTED_VALID]',
                  'P4_MATCH_TERNARY': '[EXTRACTED_TERNARY]'}
      if self.match_type not in ext_tags:
        unsupported("Not supported: extracted with %s match type" \
                    % self.match_type)
      return ext_tags[self.match_type]
    elif self.source_type == '':
      if self.match_type != 'MATCHLESS':
        unsupported("Not supported: [no source] with %s match type" \
                    % self.match_type)
      return '[MATCHLESS]'
    unsupported("Not supported: source type %s, match type %s" \
                % (self.source_type, self.match_type))
  def __str__(self):
    return self.name
class Action_Rep():
  """Per-action bookkeeping gathered while walking the ingress pipeline."""
  def __init__(self):
    self.call_sequence = []  # [(primitive type, primitive subtype), ...]
    self.stages = set()      # pipeline stages in which the action appears
    self.tables = {}         # stage -> name of the table using the action
    self.next = {}
class PC_State(object):
  """One node in the parse-state traversal tree built from the P4 parse graph.

  Tracks the bits extracted so far (HP4's view and the original program's),
  the parse-state / PC_State paths taken to reach this node, per-path header
  offsets, and the select criteria/values used for branching.
  """
  # Class-wide unique-id source.  next(...) on a count object works under
  # both Python 2 and 3; the original `itertools.count().next` was
  # Python-2-only.  External code may still rebind PC_State.newid to reset
  # the sequence, exactly as before.
  _ids = itertools.count()
  @staticmethod
  def newid():
    return next(PC_State._ids)
  def __init__(self, hp4_bits_extracted=SEB,
                     p4_bits_extracted=0,
                     ps_path=None,
                     pcs_path=None,
                     parse_state=None,
                     entry_table='tset_parse_control',
                     **kwargs):
    # None sentinels replace the original mutable-default lists, which were
    # shared by every instance created without explicit paths.
    # NOTE(review): extra **kwargs are silently ignored, as in the original.
    self.hp4_bits_extracted = hp4_bits_extracted
    self.p4_bits_extracted = p4_bits_extracted
    self.ps_path = [] if ps_path is None else ps_path
    self.pcs_path = [] if pcs_path is None else pcs_path
    self.pcs_id = PC_State.newid()
    self.parse_state = parse_state
    self.entry_table = entry_table
    self.children = []
    self.header_offsets = {} # header name (str) : hp4 bit offset (int)
    for pcs in self.pcs_path:
      self.header_offsets.update(pcs.header_offsets)
    self.select_criteria = [] # list of (offset, width) tuples, each
                              # element corresponding to a criteria in the
                              # select statement, representing the hp4 view
    self.select_values = [] # list of lists: each member a list of values,
                            # each value corresponding to a criteria in
                            # select_criteria
  def __str__(self):
    ret = 'ID: ' + str(self.pcs_id) + '; ' + self.parse_state.name + '\n'
    ret += 'hp4_bits_extracted: ' + str(self.hp4_bits_extracted) + '\n'
    ret += 'p4_bits_extracted: ' + str(self.p4_bits_extracted) + '\n'
    ret += 'ps_path: ' + str(self.ps_path) + '\n'
    ret += 'pcs_path: '
    for pcs in self.pcs_path:
      ret += str(pcs.pcs_id) + '(' + pcs.parse_state.name + ') '
    ret += '\n'
    ret += 'children: '
    for child in self.children:
      ret += child.parse_state.name + ' '
    return ret
def collect_meta(headers):
  """Assign each user-defined metadata field an offset into meta.data.

  headers: dict of header instances.  standard_metadata and
  intrinsic_metadata are skipped, as are non-metadata (parsed
  representation) headers — their offsets depend on the parse-tree path and
  are computed during tree traversal instead.
  Returns {'<header>.<field>': bit offset}; aborts via unsupported() if the
  fields overflow METADATA_WIDTH bits.
  """
  meta_offsets = {}
  next_offset = 0
  for header in headers.values():
    if header.name == 'standard_metadata' or header.name == 'intrinsic_metadata':
      continue
    if header.metadata == True:
      for field in header.fields:
        fullname = header.name + '.' + field.name
        meta_offsets[fullname] = next_offset
        next_offset += field.width
        if next_offset > METADATA_WIDTH:
          unsupported("Error: out of metadata memory with %s" % fullname)
  return meta_offsets
def collect_actions(actions):
  """Assign a unique ID (1, 2, ...) to every source-defined action.

  Actions with lineno <= 0 (built-ins rather than source definitions) are
  skipped.  Returns {action: ID}.
  """
  action_ID = {}
  next_id = 1
  for action in actions:
    if action.lineno > 0:
      action_ID[action] = next_id
      next_id += 1
  return action_ID
def get_prim_subtype(p4_call):
  """ p4_call: (p4_action, [list of parameters])

  Return the HP4 primitive-subtype ID (a string) for the call, derived from
  the primitive's name and the categories of its parameters (metadata vs.
  parsed representation vs. constant).  Unsupported combinations abort via
  unsupported().
  """
  primitive = p4_call[P4_CALL_PRIMITIVE]
  params = p4_call[P4_CALL_PARAMS]
  if (primitive.name == 'drop' or
      primitive.name == 'add_header' or
      primitive.name == 'remove_header' or
      primitive.name == 'modify_field_rng_uniform'):
    # single-subtype primitives
    return '0'
  elif primitive.name == 'add_to_field':
    if type(params[0]) is p4_hlir.hlir.p4_headers.p4_field:
      if params[0].instance.metadata == True:
        unsupported("Not supported: metadata (%s) as dst field in \
                add_to_field" % params[0].instance.name)
      else:
        if type(params[1]) is int:
          # sign of the constant selects the add vs. subtract subtype
          if params[1] < 0:
            return(a2f_prim_subtype_ID['sub'])
          else:
            return(a2f_prim_subtype_ID['add'])
        else:
          unsupported("ERROR: Not supported: %s type for src field in \
                add_to_field" % type(params[1]))
    else:
      unsupported("ERROR: dst field type %s in add_to_field" % type(params[0]))
  elif primitive.name == 'bit_xor':
    # classify (dst, src, operand) and look up the triple
    first = 0
    second = 0
    third = 0
    if params[0].instance.metadata == True:
      first = 'meta' # user-defined metadata
    else: # parsed representation
      first = 'ext'
    if params[1].instance.metadata == True:
      second = 'meta' # user-defined metadata
    else: # parsed representation
      second = 'ext'
    # NOTE(review): `long` is Python-2-only
    if type(params[2]) in [int, long]:
      third = 'const'
    elif type(params[2]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
      # action parameter reference: value filled in at table_add time
      third = 'const'
    else:
      unsupported("ERROR: Unexpected type %s as third param in \
                bit_xor call" % type(params[2]))
    return bx_prim_subtype_ID[(first, second, third)]
  elif primitive.name == 'modify_field':
    # classify dst (first) and src (second), then look up the pair
    first = 0
    second = 0
    if params[0].instance.metadata == True:
      if params[0].instance.name == 'standard_metadata':
        if params[0].name == 'egress_spec':
          first = params[0].name
        else:
          unsupported("ERROR: Unexpected stdmeta field %s as dst in \
                modify_field primitive" % params[0].name)
      elif params[0].instance.name == 'intrinsic_metadata':
        if params[0].name == 'mcast_grp':
          # mcast_grp is treated as egress_spec for subtype selection
          first = 'egress_spec'
        else:
          unsupported("ERROR: Unexpected intmeta field %s as dst in \
                modify_field primitive" % params[0].name)
      else: # user-defined metadata
        first = 'meta'
    else: # parsed representation
      first = 'ext'
    if type(params[1]) in [int, long]:
      second = 'const'
    elif type(params[1]) is p4_hlir.hlir.p4_headers.p4_field:
      if params[1].instance.metadata == True:
        if params[1].instance.name == 'standard_metadata':
          second = params[1].name
        else:
          second = 'meta'
      else:
        second = 'ext'
    elif type(params[1]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
      second = 'const'
    else:
      unsupported("ERROR: Unexpected type %s as src in \
                modify_field call" % type(params[1]))
    return mf_prim_subtype_ID[first, second]
def gen_bitmask(fieldwidth, offset, maskwidth):
  """Build a '0x...' ternary mask covering `fieldwidth` bits starting at bit
  `offset`, right-padded to `maskwidth` bytes.

  fieldwidth: bits; offset: bits; maskwidth: bytes.
  Trailing zero bytes are compressed as '[Nx00s]' (expanded downstream).
  NOTE: hex(byte)[2:] is not zero-padded; single-digit bytes are emitted as
  one character, matching the original encoding.
  """
  mask = '0x'
  # floor division: `offset / 8` yields a float under Python 3, which would
  # corrupt the '[Nx00s]' suffix arithmetic below
  bytes_written = offset // 8
  bits_left = fieldwidth
  while bits_left > 0:
    byte = 0
    bit = 0b10000000 >> (offset % 8)
    if bits_left >= 8 - (offset % 8):
      # field fills the remainder of this byte
      for i in range(8 - (offset % 8)):
        byte = byte | bit
        bit = bit >> 1
      bits_left = bits_left - (8 - (offset % 8))
      offset = offset + 8 - (offset % 8)
    else:
      # field ends within this byte
      for i in range(bits_left):
        byte = byte | bit
        bit = bit >> 1
      bits_left = 0
    mask += hex(byte)[2:]
    bytes_written += 1
  mask += '[' + str(maskwidth - bytes_written) + 'x00s]'
  return mask
def gen_addremove_header_bitmask(offset, maskwidth):
  """Mask used by add_header/remove_header: all-FF bytes after `offset` bits.

  offset: bits; maskwidth: bytes.  '[NxFFs]' is expanded downstream.
  """
  # floor division: `offset / 8` yields a float under Python 3, which would
  # corrupt the byte count in the '[NxFFs]' suffix
  bytes_written = offset // 8
  return '0x[' + str(maskwidth - bytes_written) + 'xFFs]'
class P4_to_HP4(HP4Compiler):
def walk_ingress_pipeline(self, tables):
""" populate table_to_trep and action_to_arep data structures """
table_to_trep = {}
action_to_arep = {}
stage = 1
# 1) Do topological sort of tables
tsorter = DAG_Topo_Sorter(tables)
tsort = tsorter.sort()
# 2) assign each one to a unique stage
for i in range(len(tsort)):
curr_table = tsort[i]
source_type = ''
match_type = 'MATCHLESS'
field_name = ''
if len(curr_table.match_fields) > 0:
match = curr_table.match_fields[0]
match_type = match[MATCH_TYPE].value
field_name = match[MATCH_FIELD].name
if (match_type == 'P4_MATCH_EXACT' or
match_type == 'P4_MATCH_TERNARY'):
# headers_hp4_type[<str>]: 'standard_metadata' | 'metadata' | 'extracted'
source_type = get_hp4_type(match[MATCH_FIELD].instance)
elif match_type == 'P4_MATCH_VALID':
source_type = get_hp4_type(match[MATCH_FIELD])
table_to_trep[curr_table] = Table_Rep(stage,
match_type,
source_type,
field_name)
for action in curr_table.actions:
if action_to_arep.has_key(action) is False:
action_to_arep[action] = Action_Rep()
for call in action.call_sequence:
prim_type = call[PRIM_TYPE].name
prim_subtype = get_prim_subtype(call)
action_to_arep[action].call_sequence.append((prim_type, prim_subtype))
action_to_arep[action].stages.add(stage)
action_to_arep[action].tables[stage] = curr_table.name
stage += 1
return table_to_trep, action_to_arep
def gen_tX_templates(self, tables):
command_templates = []
for table in self.table_to_trep:
tname = str(self.table_to_trep[table])
aname = 'init_program_state'
mparams = ['[vdev ID]']
if len(table.match_fields) > 1:
unsupported("Not yet supported: more than 1 match field (table: %s)" % table.name)
# mparams_list = []
if len(table.match_fields) == 1:
if table.match_fields[0][1].value == 'P4_MATCH_VALID':
mp = '[valid]&&&'
# self.vbits[(level, header_instance)]
hinst = table.match_fields[0][0]
for key in self.vbits.keys():
if hinst.name == key[1]:
mp += format(self.vbits[key], '
# temp_mparams = list(mparams)
# temp_mparams.append(mp)
# mparams_list.append(temp_mparams)
mparams.append(mp)
elif ((table.match_fields[0][1].value == 'P4_MATCH_EXACT') or
(table.match_fields[0][1].value == 'P4_MATCH_TERNARY')):
field = table.match_fields[0][0]
mp = '[val]'
if field.instance.name != 'standard_metadata':
maskwidth = 100
if field.instance.metadata:
maskwidth = 32
offset = self.field_offsets[str(field)]
mp += '&&&' + gen_bitmask(field.width,
offset,
maskwidth)
elif field.name != 'egress_spec' and field.name != 'ingress_port':
mp += '&&&' + hex((1 << field.width) - 1)
else: # egress_spec... rep'd by virt_egress_spec, which is 8 bits
mp += '&&&0xFF'
mparams.append(mp)
for action in table.actions:
aparams = [str(self.action_ID[action])]
aparams.append('[match ID]')
if 'hit' in table.next_:
next_table_trep = self.table_to_trep[table.next_['hit']]
aparams.append(str(next_table_trep.stage))
aparams.append(next_table_trep.table_type())
elif table.next_[action] == None:
aparams.append('0')
aparams.append('[DONE]')
else:
next_table_trep = self.table_to_trep[table.next_[action]]
aparams.append(str(next_table_trep.stage))
aparams.append(next_table_trep.table_type())
idx = 0
for call in action.call_sequence:
prim_type = primitive_ID[call[P4_CALL_PRIMITIVE].name]
prim_subtype = get_prim_subtype(call)
if not prim_subtype:
unsupported("Error: couldn't get the prim_subtype for " + prim_type)
aparams.append(prim_type)
aparams.append(prim_subtype)
idx += 1
if len(action.call_sequence) == 0:
aparams.append(primitive_ID['no_op'])
# subtype
aparams.append('0')
idx = 1
# zeros for remaining type / subtype parameters of init_program_state
for i in range(idx, self.numprimitives):
aparams.append('0')
aparams.append('0')
# all matches are ternary, requiring priority
# TODO: except matchless?
aparams.append('[PRIORITY]')
command_templates.append(HP4_Match_Command(source_table=table.name,
source_action=action.name,
command="table_add",
table=tname,
action=aname,
match_params=mparams,
action_params=aparams))
return command_templates
  def gen_action_aparams(self, p4_call, call, field_offsets):
    """Compute the action-parameter list for one primitive call.

    p4_call: (p4_primitive, [params]) from the P4 action's call sequence
    call: the matching (prim type name, subtype ID) pair from Action_Rep
    field_offsets: '<header>.<field>' -> bit offset
    Returns a list of strings; '[val]' / '[MCAST_GRP]' placeholders mark
    values to be filled in at template-instantiation time.
    NOTE(review): bit-position math assumes extracted.data is 800 bits and
    meta.data is 256 bits — TODO confirm against stages.p4.
    """
    aparams = []
    primtype = call[PRIM_TYPE]
    subtype = call[PRIM_SUBTYPE]
    p4_call_params = p4_call[P4_CALL_PARAMS]
    if primtype == 'drop':
      # drop takes no parameters
      return aparams
    if primtype == 'add_to_field':
      if (a2f_prim_subtype_action[subtype] == 'a_add2f_extracted_const_u' or
          a2f_prim_subtype_action[subtype] == 'a_subff_extracted_const_u'):
        # aparams: leftshift, val
        dst_offset = field_offsets[str(p4_call_params[0])]
        leftshift = 800 - (dst_offset + p4_call_params[0].width)
        if type(p4_call_params[1]) is int:
          val = str(p4_call_params[1])
          if a2f_prim_subtype_action[subtype] == 'a_subff_extracted_const_u':
            # subtraction is encoded as adding the negated constant
            val = str(p4_call_params[1]*-1)
        else:
          val = '[val]'
        aparams.append(str(leftshift))
        aparams.append(val)
    if primtype == 'add_header' or primtype == 'remove_header':
      # aparams: header size, byte mask, validity-bit (or its inverse)
      hdr = p4_call_params[0]
      offset = self.header_offsets[hdr.name]
      sz = hdr.header_type.length
      vb = 0
      for key in self.vbits:
        if hdr.name == key[1]:
          vb = self.vbits[key]
          break
      if vb == 0:
        print('Fail: didn\'t find vbits entry for ' + hdr.name)
        exit()
      mask = gen_addremove_header_bitmask(offset, MAX_BYTE)
      if primtype == 'add_header':
        aparams.append(str(sz))
        aparams.append(mask)
        aparams.append('0x%x' % vb)
      else:
        aparams.append(str(sz))
        aparams.append(mask)
        # remove_header clears the validity bit: pass the inverted vector
        vbinv = ~vb & (2**VBITS_WIDTH - 1)
        aparams.append('0x%x' % vbinv)
    if primtype == 'modify_field_rng_uniform':
      # aparams: leftshift, field mask, range lower bound, range upper bound
      if type(p4_call_params[1]) in [int, long]:
        val1 = str(p4_call_params[1])
      else:
        val1 = '[val]'
      if type(p4_call_params[2]) in [int, long]:
        val2 = str(p4_call_params[2])
      else:
        val2 = '[val]'
      fo = field_offsets[str(p4_call_params[0])]
      fw = p4_call_params[0].width
      maskwidthbits = 800
      leftshift = str(maskwidthbits - (fo + fw))
      mask = gen_bitmask(p4_call_params[0].width,
                         field_offsets[str(p4_call_params[0])],
                         maskwidthbits / 8)
      aparams.append(leftshift)
      aparams.append(mask)
      aparams.append(val1)
      aparams.append(val2)
    if primtype == 'bit_xor':
      # aparams: dst-align left shift, src-align right shift, value left
      # shift, dst mask, src mask (suffix stripped), constant
      fo_intermediate = field_offsets[str(p4_call_params[1])]
      fw_intermediate = p4_call_params[1].width
      fo_final = field_offsets[str(p4_call_params[0])]
      fw_final = p4_call_params[0].width
      if bx_prim_subtype_action[subtype] == 'bit_xor_meta_meta_const':
        unsupported("Not yet supported: bit_xor_meta_meta_const")
        dest_maskwidthbits = 256
        src_maskwidthbits = 256
      elif bx_prim_subtype_action[subtype] == 'bit_xor_extracted_extracted_const':
        dest_maskwidthbits = 800
        src_maskwidthbits = 800
      elif bx_prim_subtype_action[subtype] == 'bit_xor_meta_extracted_const':
        dest_maskwidthbits = 256
        src_maskwidthbits = 800
      elshift = 0
      ershift = 0
      # revo: distance of the field from the right edge of its buffer;
      # shifting by the difference aligns src with dst
      dst_revo = dest_maskwidthbits - (fo_final + fw_final)
      src_revo = src_maskwidthbits - (fo_intermediate + fw_intermediate)
      if src_revo > dst_revo:
        ershift = src_revo - dst_revo
      else:
        elshift = dst_revo - src_revo
      vlshift = str(src_maskwidthbits - (fo_intermediate + fw_intermediate))
      dest_mask = gen_bitmask(fw_final, fo_final, dest_maskwidthbits / 8)
      src_mask = gen_bitmask(fw_intermediate, fo_intermediate, src_maskwidthbits / 8)
      src_mask = src_mask.split('[')[0]
      if type(p4_call_params[2]) in [int, long]:
        val = str(p4_call_params[2])
      else:
        val = '[val]'
      aparams.append(str(elshift))
      aparams.append(str(ershift))
      aparams.append(vlshift)
      aparams.append(dest_mask)
      aparams.append(src_mask)
      aparams.append(val)
    if primtype == 'modify_field':
      instance_name = p4_call_params[0].instance.name
      dst_field_name = p4_call_params[0].name
      if instance_name == 'intrinsic_metadata':
        # mcast_grp is represented via standard_metadata.egress_spec
        if dst_field_name == 'mcast_grp':
          instance_name = 'standard_metadata'
          dst_field_name = 'egress_spec'
        else:
          unsupported("Not supported: modify_field(" + instance_name + '.' \
                      + dst_field_name + ", *)")
      if type(p4_call_params[1]) is p4_hlir.hlir.p4_headers.p4_field:
        if p4_call_params[1].width > p4_call_params[0].width:
          # wider src than dst: warn (the copy truncates)
          dst = instance_name + '.' + dst_field_name
          src = p4_call_params[1].instance.name + '.' + p4_call_params[1].name
          print("WARNING: modify_field(%s, %s): %s width (%i) > %s width (%i)" \
                % (dst, src, src, p4_call_params[1].width, dst, p4_call_params[0].width))
      if mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_ingressport':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_packetlength':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressspec':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressport':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_egressinst':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_meta_stdmeta_insttype':
        unsupported("Not yet supported: %s" % mf_prim_subtype_action[subtype])
      elif mf_prim_subtype_action[subtype] == 'mod_stdmeta_egressspec_meta':
        # aparams: right shift to align the src field, width-limiting mask
        rshift = 256 - (field_offsets[str(p4_call_params[1])] + p4_call_params[1].width)
        mask = 0
        if p4_call_params[1].width < p4_call_params[0].width:
          mask = hex(int(math.pow(2, p4_call_params[1].width)) - 1)
        else:
          mask = hex(int(math.pow(2, p4_call_params[0].width)) - 1)
        aparams.append(str(rshift))
        aparams.append(mask)
      elif (mf_prim_subtype_action[subtype] == 'mod_meta_const' or
            mf_prim_subtype_action[subtype] == 'mod_extracted_const'):
        # aparams: left shift, dst field mask, constant
        if type(p4_call_params[1]) in [int, long]:
          val = str(p4_call_params[1])
        else:
          val = '[val]'
        fo = field_offsets[str(p4_call_params[0])]
        fw = p4_call_params[0].width
        maskwidthbits = 800
        if mf_prim_subtype_action[subtype] == 'mod_meta_const':
          maskwidthbits = 256
        leftshift = str(maskwidthbits - (fo + fw))
        mask = gen_bitmask(p4_call_params[0].width,
                           field_offsets[str(p4_call_params[0])],
                           maskwidthbits / 8)
        aparams.append(leftshift)
        aparams.append(mask)
        aparams.append(val)
      elif (mf_prim_subtype_action[subtype] == 'mod_stdmeta_egressspec_const'):
        if type(p4_call_params[1]) is int:
          aparams.append(str(p4_call_params[1]))
        else:
          aparams.append('[val]')
      elif (mf_prim_subtype_action[subtype] == 'mod_intmeta_mcast_grp_const'):
        if type(p4_call_params[1]) is int:
          unsupported("Not yet supported: mod_intmeta_mcast_grp_const w/ explicit const")
        else:
          aparams.append('[MCAST_GRP]')
      elif mf_prim_subtype_action[subtype] == 'mod_extracted_extracted':
        # aparams: left shift, right shift, dst mask
        dst_offset = field_offsets[str(p4_call_params[0])]
        src_offset = field_offsets[str(p4_call_params[1])]
        lshift = 0
        rshift = 0
        dst_revo = 800 - (dst_offset + p4_call_params[0].width)
        src_revo = 800 - (src_offset + p4_call_params[1].width)
        if src_revo > dst_revo:
          rshift = src_revo - dst_revo
        else:
          lshift = dst_revo - src_revo
        aparams.append(str(lshift))
        aparams.append(str(rshift))
        aparams.append(gen_bitmask(p4_call_params[0].width, dst_offset, 100))
      elif mf_prim_subtype_action[subtype] == 'mod_meta_extracted':
        # aparams: left shift, right shift, dst mask, src mask
        dst_offset = field_offsets[str(p4_call_params[0])]
        src_offset = field_offsets[str(p4_call_params[1])]
        lshift = 0
        rshift = 0
        dstmaskwidthbits = 256
        srcmaskwidthbits = 800
        dst_revo = dstmaskwidthbits - (dst_offset + p4_call_params[0].width)
        src_revo = srcmaskwidthbits - (src_offset + p4_call_params[1].width)
        if src_revo > dst_revo:
          rshift = src_revo - dst_revo
        else:
          lshift = dst_revo - src_revo
        dstmask = gen_bitmask(p4_call_params[0].width, dst_offset,
                              dstmaskwidthbits / 8)
        srcmask = dstmask
        if p4_call_params[1].width < p4_call_params[0].width:
          # narrower src: mask to the src width instead
          srcmask = gen_bitmask(p4_call_params[1].width, dst_offset,
                                dstmaskwidthbits / 8)
        aparams.append(str(lshift))
        aparams.append(str(rshift))
        aparams.append(dstmask)
        aparams.append(srcmask)
      elif mf_prim_subtype_action[subtype] == 'mod_extracted_meta':
        # aparams: left shift, right shift, dst mask, src mask
        dst_offset = field_offsets[str(p4_call_params[0])]
        src_offset = field_offsets[str(p4_call_params[1])]
        lshift = 0
        rshift = 0
        dstmaskwidthbits = 800
        srcmaskwidthbits = 256
        dst_revo = dstmaskwidthbits - (dst_offset + p4_call_params[0].width)
        src_revo = srcmaskwidthbits - (src_offset + p4_call_params[1].width)
        if src_revo > dst_revo:
          rshift = src_revo - dst_revo
        else:
          lshift = dst_revo - src_revo
        dstmask = gen_bitmask(p4_call_params[0].width, dst_offset,
                              dstmaskwidthbits / 8)
        srcmask = dstmask
        if p4_call_params[1].width < p4_call_params[0].width:
          # narrower src: mask to the src width instead
          srcmask = gen_bitmask(p4_call_params[1].width, dst_offset,
                                dstmaskwidthbits / 8)
        aparams.append(str(lshift))
        aparams.append(str(rshift))
        aparams.append(dstmask)
        aparams.append(srcmask)
    return aparams
  def gen_action_entries(self, action_to_arep, action_ID, field_offsets):
    """Generate primitive-table entries for every (action, stage) pair.

    Calls whose parameters reference action arguments (p4_signature_ref)
    become HP4_Primitive_Command templates (filled in at table_add time);
    fully-constant calls become ready-to-install HP4_Command entries.
    Returns (commands, command_templates).
    """
    commands = []
    command_templates = []
    for action in action_to_arep:
      for stage in action_to_arep[action].stages:
        table_name = action_to_arep[action].tables[stage]
        for p4_call in action.call_sequence:
          p4_call_params = p4_call[P4_CALL_PARAMS]
          istemplate = False
          idx = action.call_sequence.index(p4_call)
          call = action_to_arep[action].call_sequence[idx]
          primtype = call[PRIM_TYPE]
          subtype = call[PRIM_SUBTYPE]
          # rank: 1-based position of the call within the action
          rank = idx + 1
          tname = 't_' + primitive_tnames[primtype] + '_' + str(stage) + str(rank)
          if primtype == 'modify_field':
            aname = mf_prim_subtype_action[subtype]
          elif primtype == 'add_to_field':
            aname = a2f_prim_subtype_action[subtype]
          elif primtype == 'bit_xor':
            aname = bx_prim_subtype_action[subtype]
          else:
            aname = gen_prim_subtype_action[primtype]
          mparams = ['[vdev ID]']
          if primtype != 'drop':
            if primtype in ['modify_field', 'add_to_field', 'bit_xor']:
              mparams.append( subtype )
            mparams.append(str(action_ID[action]))
            # match_ID is irrelevant for constant-only calls, so wildcard it
            # with 0&&&0; parameter-referencing calls match on it instead
            match_ID_param = '0&&&0'
            for param in p4_call_params:
              if type(param) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
                match_ID_param = '[match ID]&&&0x7FFFFF'
                istemplate = True
                break
            mparams.append(match_ID_param)
          aparams = self.gen_action_aparams(p4_call, call, field_offsets)
          if istemplate == True:
            aparams.append('0') # meta_primitive_state.match_ID mparam matters
            # src_aparam_id: index of the referenced action parameter
            # (-1 when the final call parameter is not a signature ref)
            idx = -1
            if type(p4_call_params[-1]) is p4_hlir.hlir.p4_imperatives.p4_signature_ref:
              idx = p4_call_params[-1].idx
            command_templates.append(HP4_Primitive_Command(table_name,
                                                           action.name,
                                                           "table_add",
                                                           tname,
                                                           aname,
                                                           mparams,
                                                           aparams,
                                                           str(idx)))
          else:
            # meta_primitive_state.match_ID mparam does not matter
            # only append priority if the table involves ternary matching
            # e.g., drop tables do not
            if len(mparams) > 0:
              for param in mparams:
                if '&&&' in param:
                  aparams.append(str(LOWEST_PRIORITY))
                  break
            commands.append(HP4_Command("table_add",
                                        tname,
                                        aname,
                                        mparams,
                                        aparams))
    return commands, command_templates
def build(self, h):
  """Translate HLIR *h* into HP4 table entries / templates.

  Fills self.commands and self.command_templates in phases: parser
  (parse_control, parse_select, pipeline_config entries), ingress
  pipeline (tX templates, action entries, tmiss entries), then
  checksum and resize entries.
  """
  self.field_offsets = collect_meta(h.p4_header_instances)
  self.action_ID = collect_actions(h.p4_actions.values())
  # reset id counter (Python 2: bind the bound .next method)
  PC_State.newid = itertools.count().next
  # expand the parse tree of parse-call states, rooted at 'start'
  pre_pcs = PC_State(parse_state=h.p4_parse_states['start'])
  launch_process_parse_tree_clr(pre_pcs, h)
  self.header_offsets = collect_header_offsets(pre_pcs)
  self.field_offsets.update(collect_field_offsets(self.header_offsets,
                                                  h.p4_header_instances))
  # merge immediate-return parse states into their successors
  consolidate_parse_tree_clr(pre_pcs, h)
  ingress_pcs_list = collect_ingress_pcs(pre_pcs)
  self.vbits = get_vbits(ingress_pcs_list)
  # Python 2: keys() is a list; first ingress entry point
  first_table = h.p4_ingress_ptr.keys()[0]
  ps_entries = gen_parse_select_entries(pre_pcs)
  # post-process output of gen_parse_select_entries:
  # parse_select_00_19, _20_29, and _30_39 use 320b field ext.first
  ps_entries = process_parse_select_entries(ps_entries)
  self.commands = gen_parse_control_entries(pre_pcs) \
                  + ps_entries \
                  + gen_pipeline_config_entries(pre_pcs, first_table,
                                                ingress_pcs_list, self.vbits)
  self.table_to_trep, self.action_to_arep = self.walk_ingress_pipeline(h.p4_tables)
  self.command_templates = self.gen_tX_templates(h.p4_tables)
  action_commands, action_templates = self.gen_action_entries(self.action_to_arep,
                                                              self.action_ID,
                                                              self.field_offsets)
  self.commands += action_commands
  self.command_templates += action_templates
  self.commands += gen_tmiss_entries(h.p4_tables,
                                     self.table_to_trep,
                                     h.p4_control_flows['ingress'],
                                     self.numprimitives)
  self.commands += gen_t_checksum_entries(h.calculated_fields,
                                          h.p4_field_list_calculations,
                                          self.field_offsets,
                                          self.vbits)
  self.commands += gen_t_resize_pr_entries()
def compile_to_hp4(self, program_path, out_path, mt_out_path, numprimitives):
  """Compile the P4 program at *program_path* to HP4 entries.

  Writes plain entries to *out_path* and match templates (JSON) to
  *mt_out_path*; *numprimitives* is the number of primitive slots the
  target HyPer4 build supports.  Returns a CodeRepresentation pointing
  at the two output files.
  """
  self.out_path = out_path
  self.mt_out_path = mt_out_path
  self.numprimitives = numprimitives
  # reset state so the instance can compile multiple programs
  self.action_ID = {}
  self.action_to_arep = {}
  self.command_templates = []
  self.commands = []
  self.field_offsets = {}
  self.header_offsets = {}
  self.table_to_trep = {}
  self.vbits = {}
  h = HLIR(program_path)
  # register the bmv2 primitive signatures before building the HLIR
  h.add_primitives(json.loads(pkg_resources.resource_string('p4c_bm', 'primitives.json')))
  h.build()
  do_support_checks(h)
  self.build(h)
  self.write_output()
  return CodeRepresentation(out_path, mt_out_path)
def write_output(self):
  """Write the generated entries to disk.

  Plain commands go to self.out_path (one per line); command templates go
  to self.mt_out_path as JSON, sorted so the output is deterministic.
  """
  # 'with' guarantees the files are closed even if writing/serialization
  # raises (the original open()/close() pairs leaked on exception).
  with open(self.out_path, 'w') as out:
    for command in self.commands:
      out.write(str(command) + '\n')
  with open(self.mt_out_path, 'w') as out:
    def getkey(command):
      # stable sort key: (table, action, params) -> deterministic JSON
      return (command.table, command.source_action, command.action_params)
    sorted_ct = sorted(self.command_templates, key=getkey)
    json.dump(sorted_ct, out, default=convert_to_builtin_type, indent=2)
def do_support_checks(h):
  """Abort (via unsupported()) when the HLIR uses features HyPer4 lacks."""
  # HLIR allows several ingress entry points even if that is unusual P4.
  if len(h.p4_ingress_ptr.keys()) > 1:
    unsupported("Not supported: multiple entry points into the ingress pipeline")
  # HyPer4 handles at most one match field per table.
  for tbl in h.p4_tables.values():
    if len(tbl.match_fields) > 1:
      unsupported("Not supported: multiple field matches (table: %s)" % tbl.name)
def get_parse_select_table_code(first_byte):
  """Return the '[PARSE_SELECT_LL_UU]' placeholder for the parse_select
  table whose byte range [LL, UU] contains *first_byte*."""
  bounds = parse_select_table_boundaries
  for lowerbound, upperbound in zip(bounds, bounds[1:]):
    if lowerbound <= first_byte < upperbound:
      return '[PARSE_SELECT_%02d_%02d]' % (lowerbound, upperbound - 1)
  # No table covers this byte: drop into the debugger, then fail loudly.
  debug()
  raise Exception("Did not find parse_select table; first_byte: %d" % first_byte)
def get_pc_action(pcs):
  """Return the parse_select table code for the earliest byte examined by
  this parse-call state's select criteria (MAX_BYTE when none)."""
  # NOTE: Python 2 integer division -- '/ 8' floors byte offsets.
  first_bytes = [criteria[OFFSET] / 8 for criteria in pcs.select_criteria]
  return get_parse_select_table_code(min([MAX_BYTE] + first_bytes))
def gen_pc_entry_start(pcs):
  """ Generate the tset_parse_control entry for pc 0 ('start').
      We need an entry for pc 1 only if SEB is insufficient
      to handle 'start', in which case the action for pc 0
      must be extract_more
  """
  start_pcs = pcs.children[0] # pc 0 always has one child: pc 1
  mparams = ['[vdev ID]', str(pcs.pcs_id)]
  aparams = []
  act = 'set_next_action'
  if start_pcs.p4_bits_extracted > pcs.hp4_bits_extracted:
    # 'start' needs more bits than have been extracted: replay extraction
    act = 'extract_more'
    aparams.append(str(int(ceil(start_pcs.p4_bits_extracted / 8.0))))
  else:
    if not start_pcs.children:
      # parsing ends at 'start': go straight to the ingress pipeline
      aparams.append('[PROCEED]')
    else:
      aparams.append(get_pc_action(start_pcs))
    aparams.append(str(start_pcs.pcs_id))
  cmd = HP4_Command(command='table_add',
                    table='tset_parse_control',
                    action=act,
                    match_params=mparams,
                    action_params=aparams)
  return cmd
def get_p_ps_tables(pcs):
  """Return the parse_select tables touched by *pcs*'s select criteria
  (after sorting and boundary revision)."""
  crits, branches, _default = sort_return_select(pcs)
  revised_crits, _revised_branches = revise_return_select(pcs, crits, branches)
  return get_parse_select_tables(revised_crits)
def did_rewind(pcs):
  """Return True when reaching *pcs* required re-extracting bytes already
  covered by an earlier parse_select table (a 'rewind')."""
  if not pcs.children:
    return False
  # earliest (offset, width) criteria of this state's return select
  first_criteria = sort_return_select(pcs)[0][0]
  j = 0
  while parse_select_table_boundaries[j+1] * 8 <= first_criteria[OFFSET]:
    j += 1
  # parse_select tables used by the previous parse-call state on the path
  p_ps_tables = get_p_ps_tables(pcs.pcs_path[-1])
  # NOTE(review): boundaries[j] is a byte count while L_BOUND entries are
  # bit offsets (lowerbound * 8 in get_parse_select_table) -- confirm the
  # mixed units here are intended.
  if parse_select_table_boundaries[j] <= p_ps_tables[-1][L_BOUND]:
    return True
  return False
def gen_parse_control_entries(pcs, commands=None):
  """Walk the parse tree and emit tset_parse_control entries.

  The root (pc 0) always gets an entry; a non-root state gets one only
  when it extracted additional bytes relative to its predecessor or had
  to rewind.  Recurses through all children.
  """
  if commands is None:
    commands = []
  if pcs.pcs_id == 0:
    cmd = gen_pc_entry_start(pcs)
    commands.append(cmd)
    if cmd.action == 'extract_more':
      # pc 1 needs its own entry: recurse into it directly
      commands = gen_parse_control_entries(pcs.children[0], commands)
    else:
      # pc 1 is covered by pc 0's entry: skip to its children
      for child in pcs.children[0].children:
        commands = gen_parse_control_entries(child, commands)
  else:
    try:
      # probe: a missing predecessor (empty pcs_path) indicates a bug
      test = pcs.pcs_path[-1].hp4_bits_extracted < pcs.hp4_bits_extracted
    except IndexError as e:
      print(e)
      debug()
    if (pcs.pcs_path[-1].hp4_bits_extracted < pcs.hp4_bits_extracted or
        did_rewind(pcs)):
      mparams = ['[vdev ID]', str(pcs.pcs_id)]
      aparams = []
      act = 'set_next_action'
      if not pcs.children:
        # leaf: proceed to the ingress pipeline
        aparams.append('[PROCEED]')
      else:
        aparams.append(get_pc_action(pcs))
      aparams.append(str(pcs.pcs_id))
      cmd = HP4_Command(command='table_add',
                        table='tset_parse_control',
                        action=act,
                        match_params=mparams,
                        action_params=aparams)
      commands.append(cmd)
    for child in pcs.children:
      commands = gen_parse_control_entries(child, commands)
  return commands
def get_new_val(val, width, offset, new_width):
  """Extract a new_width-bit slice of *val* (a *width*-bit value),
  starting *offset* bits from the most-significant end."""
  # bits to the right of the slice, i.e. the final right-shift amount
  rshift = width - (offset + new_width)
  mask = ((1 << new_width) - 1) << rshift
  return (val & mask) >> rshift
def sort_return_select(pcs):
  """Sort a state's select criteria by (offset, width), reordering every
  branch's values identically, and pull out the default branch.

  Returns (sorted_criteria, sorted_branches, default_branch) where each
  entry of sorted_branches is a (values, destination_state_name) pair.
  Immediate-return states yield ([], [], None).
  """
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    return [], [], None
  # argsort of the criteria; applied to criteria and branch values alike
  sorted_indices = sorted(range(len(pcs.select_criteria)),
                          key=pcs.select_criteria.__getitem__)
  sorted_criteria = []
  for i in sorted_indices:
    sorted_criteria.append(pcs.select_criteria[i])
  sorted_branches = []
  default_branch = None
  for branch in pcs.parse_state.return_statement[PS_RET_BRANCHES]:
    if branch[BRANCH_VALUES][0][VAL_TYPE] == 'value':
      # permute the branch's values with the same index order
      sorted_values = []
      for i in sorted_indices:
        sorted_values.append(branch[BRANCH_VALUES][i][VAL_VALUE])
      sorted_branches.append((sorted_values, branch[BRANCH_STATE]))
    elif branch[BRANCH_VALUES][0][VAL_TYPE] == 'default':
      default_branch = branch
  return sorted_criteria, sorted_branches, default_branch
def revise_value(val, crit, j):
  """Split *val* (matched against criteria *crit*) into one piece per
  parse_select table wherever *crit* crosses a table boundary; *j*
  indexes the boundary table containing crit's first byte."""
  curr_offset = crit[OFFSET]
  ret = []
  while curr_offset < (crit[OFFSET] + crit[WIDTH]):
    # setup: bits remaining before the next table boundary
    diff = parse_select_table_boundaries[j+1] * 8 - curr_offset
    if diff > crit[WIDTH]:
      # criteria ends before the boundary: take only what is left
      diff = crit[OFFSET] + crit[WIDTH] - curr_offset
    # rev_branch_values: extract this table's slice of val
    ret.append(get_new_val(val,
                           crit[WIDTH],
                           curr_offset - crit[OFFSET],
                           diff))
    # cleanup: advance past the boundary
    curr_offset += diff
    j += 1
  return ret
def revise_criteria(crit, j):
  """Split (offset, width) criteria *crit* into pieces that do not cross
  parse_select table boundaries; *j* indexes the boundary table holding
  crit's first byte.  Mirrors revise_value for the criteria side."""
  ret = []
  curr_offset = crit[OFFSET]
  while curr_offset < (crit[OFFSET] + crit[WIDTH]):
    # setup: bits remaining before the next table boundary
    diff = parse_select_table_boundaries[j+1] * 8 - curr_offset
    if diff > crit[WIDTH]:
      # criteria ends before the boundary: take only what is left
      diff = crit[OFFSET] + crit[WIDTH] - curr_offset
    # update
    ret.append((curr_offset, diff))
    # cleanup
    curr_offset += diff
    j += 1
  return ret
def revise_return_select(pcs, sorted_criteria, sorted_branches):
  """Rewrite criteria (and all branch values) so no criteria spans a
  parse_select table boundary; spanning entries are split.

  Returns (revised_criteria, revised_branches); the per-branch value
  lists stay index-aligned with revised_criteria.
  """
  revised_criteria = []
  revised_branches = [[] for count in xrange(len(sorted_branches))]
  i = 0
  for crit in sorted_criteria:
    # find the boundary table containing crit's first byte
    j = 0
    while parse_select_table_boundaries[j+1] * 8 <= crit[OFFSET]:
      j += 1
    # detect and handle broken boundary
    if parse_select_table_boundaries[j+1] * 8 <= (crit[OFFSET] + crit[WIDTH]):
      revised_criteria += revise_criteria(crit, j)
      k = 0
      for branch in sorted_branches:
        val = branch[BRANCH_VALUES][i]
        revised_branches[k] += revise_value(val, crit, j)
        k += 1
    else:
      # criteria fits entirely inside one table: keep it as-is
      revised_criteria.append(crit)
      k = 0
      for branch in sorted_branches:
        val = branch[BRANCH_VALUES][i]
        revised_branches[k].append(val)
        k += 1
    i += 1
  return revised_criteria, revised_branches
def do_split_criteria(crit):
  """Split a byte-aligned (offset, width) criteria into one-byte criteria.
  Aborts via unsupported() when the width is not a multiple of 8."""
  try:
    assert(crit[WIDTH] % 8 == 0)
  except AssertionError as e:
    print(e)
    unsupported("select criteria (" + str(crit[OFFSET]) + ", " + str(crit[WIDTH]) \
                + ") not divisible by 8")
  # one (offset, 8) entry per byte of the criteria
  return [(off, 8) for off in range(crit[OFFSET], crit[OFFSET] + crit[WIDTH], 8)]
def do_split_val(val, width):
  """Split *val*, a *width*-bit value, into its big-endian list of bytes.

  *width* is assumed to be a multiple of 8 (guaranteed by
  do_split_criteria for all callers).
  """
  octets = []
  # walk the byte shifts from most- to least-significant
  for shift in range(width - 8, -1, -8):
    octets.append((val >> shift) & 0xFF)
  return octets
def split_return_select(revised_criteria, revised_branches):
  """Break every boundary-safe criteria and its branch values into
  one-byte pieces, keeping criteria and value lists index-aligned."""
  split_criteria = []
  split_branches = [[] for count in xrange(len(revised_branches))]
  i = 0
  for crit in revised_criteria:
    split_crits = do_split_criteria(crit)
    for split_crit in split_crits:
      split_criteria.append(split_crit)
    j = 0
    for branch in revised_branches:
      # split this branch's value for crit into the same number of bytes
      val = branch[i]
      split_vals = do_split_val(val, crit[WIDTH])
      for split_val in split_vals:
        split_branches[j].append(split_val)
      j += 1
    i += 1
  return split_criteria, split_branches
def get_parse_select_table(crit):
  """Return (table_name, lower_bit, upper_bit) for the parse_select table
  whose byte range covers crit's offset."""
  bounds = parse_select_table_boundaries
  j = 0
  while bounds[j+1] * 8 <= crit[OFFSET]:
    j += 1
  name = 'tset_parse_select_%02d_%02d' % (bounds[j], bounds[j+1] - 1)
  # bounds are byte counts; the returned limits are bit offsets
  return name, bounds[j] * 8, bounds[j+1] * 8
def get_parse_select_tables(revised_criteria):
  """Return the parse_select tables used by the criteria, in first-use
  order and without duplicates."""
  tables = []
  for crit in revised_criteria:
    entry = get_parse_select_table(crit)
    if entry not in tables:
      tables.append(entry)
  return tables
def get_mparam_indices(table, crits):
  """Map each byte of each criteria to its match-parameter slot within
  *table* (slot 0 corresponds to the table's lower bit boundary)."""
  indices = []
  for crit in crits:
    # one slot per byte of the criteria
    # NOTE: Python 2 integer division -- '/ 8' floors the slot index.
    for off in range(crit[OFFSET], crit[OFFSET] + crit[WIDTH], 8):
      indices.append((off - table[L_BOUND]) / 8)
  return indices
def get_branch_mparams(branch_mparams, branch, mparam_indices):
  """Fill the slots named by *mparam_indices* with exact-match byte values
  consumed (popped) from the front of *branch*; returns the list."""
  for slot in mparam_indices:
    byte_val = branch.pop(0)
    branch_mparams[slot] = hex(byte_val) + '&&&0xFF'
  return branch_mparams
def get_ps_action(tablename):
  """Turn a 'tset_*' table name into its bracketed action-code placeholder."""
  suffix = tablename.split('tset_')[1]
  return '[%s]' % suffix.upper()
def get_branch_action(pcs, pst_count, parse_select_tables, branch):
  """Choose the action and action params for one parse_select branch.

  Branches to 'ingress' proceed to the pipeline.  Otherwise, if more
  parse_select tables remain for this state, chain to the next one; at
  the last table, transition to the destination state -- extracting more
  bytes when it needs them, or when reaching its own select criteria
  requires rewinding to an earlier table.
  """
  action = ''
  aparams = []
  if branch[BRANCH_STATE] == 'ingress':
    action = 'set_next_action'
    aparams.append('[PROCEED]')
    aparams.append(str(pcs.pcs_id))
    return action, aparams
  # set_next_action or extract_more
  if pst_count != len(parse_select_tables) - 1:
    # more tables of this state's select remain: chain to the next one
    action = 'set_next_action'
    aparams.append(get_ps_action(parse_select_tables[pst_count + 1][T_NAME]))
    aparams.append(str(pcs.pcs_id))
  else:
    # last table: find the child state this branch transitions to
    next_pcs = [child for child in pcs.children \
                if child.parse_state.name == branch[BRANCH_STATE]][0]
    if next_pcs.hp4_bits_extracted > pcs.hp4_bits_extracted:
      # destination examines bytes we have not extracted yet
      action = 'extract_more'
      numbytes = int(ceil(next_pcs.hp4_bits_extracted / 8.0))
      aparams.append(str(numbytes))
    else:
      if not next_pcs.children:
        # destination is a leaf: proceed to the pipeline
        action = 'set_next_action'
        aparams.append('[PROCEED]')
        aparams.append(str(next_pcs.pcs_id))
        return action, aparams
      # another select statement in next pcs - need to rewind?
      n_first_criteria = sort_return_select(next_pcs)[0][0]
      j = 0
      while parse_select_table_boundaries[j+1] * 8 <= n_first_criteria[OFFSET]:
        j += 1
      if parse_select_table_boundaries[j] <= parse_select_tables[pst_count][L_BOUND]:
        # rewind
        action = 'extract_more'
        numbytes = int(ceil(next_pcs.hp4_bits_extracted / 8.0))
        aparams.append(str(numbytes))
      else:
        action = 'set_next_action'
        next_ps_table = get_parse_select_table(n_first_criteria)
        aparams.append(get_ps_action(next_ps_table[T_NAME]))
        aparams.append(str(next_pcs.pcs_id))
  return action, aparams
def get_parse_select_entries(pcs,
                             parse_select_tables,
                             split_criteria,
                             split_branches_with_dests,
                             default_branch):
  """Emit tset_parse_select_* entries for one parse-call state.

  *split_criteria* is a queue of one-byte criteria consumed in order;
  *split_branches_with_dests* pairs each branch's byte values with its
  destination state name.  Each table gets one highest-priority entry
  per branch plus one lowest-priority default entry.
  """
  commands = []
  # for each parse_select table:
  # - pop all queue items that belong to the table
  # - generate table entry
  for pst_count, table in enumerate(parse_select_tables):
    crits = []
    while (split_criteria[0][OFFSET] >= table[L_BOUND] and
           split_criteria[0][OFFSET] < table[U_BOUND]):
      crits.append(split_criteria.pop(0))
      if not split_criteria:
        break
    mparam_indices = get_mparam_indices(table, crits)
    # default match params: one wildcard byte per byte of the table range
    mparams = ['0&&&0' for count in xrange((table[U_BOUND] - table[L_BOUND]) / 8)]
    for branch in split_branches_with_dests:
      branch_mparams = ['[vdev ID]', str(pcs.pcs_id)]
      # copy mparams so each branch fills its own byte slots
      branch_mparams += get_branch_mparams(list(mparams), branch[BRANCH_VALUES], mparam_indices)
      # determine action and action_params
      branch_action, branch_aparams = get_branch_action(pcs,
                                                        pst_count,
                                                        parse_select_tables,
                                                        branch)
      # priority
      branch_aparams.append(HIGHEST_PRIORITY)
      commands.append(HP4_Command(command='table_add',
                                  table=table[T_NAME],
                                  action=branch_action,
                                  match_params=branch_mparams,
                                  action_params=branch_aparams))
    # default branch
    default_mparams = ['[vdev ID]', str(pcs.pcs_id)]
    default_mparams += list(mparams)
    default_action, default_aparams = get_branch_action(pcs,
                                                        pst_count,
                                                        parse_select_tables,
                                                        default_branch)
    default_aparams.append(LOWEST_PRIORITY)
    commands.append(HP4_Command(command='table_add',
                                table=table[T_NAME],
                                action=default_action,
                                match_params=default_mparams,
                                action_params=default_aparams))
  return commands
def gen_parse_select_entries(pcs, commands=None):
  """Walk the parse tree and emit all tset_parse_select entries.

  For each select-returning state: sort its criteria, split them at
  table boundaries and into single bytes, then delegate the per-table
  entry generation to get_parse_select_entries.
  """
  if commands is None:
    commands = []
  # base cases
  if pcs.pcs_id == 0:
    # skip the synthetic root; restart from pc 1
    return gen_parse_select_entries(pcs.children[0])
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    return commands
  # sort
  sorted_criteria, sorted_branches, default_branch = sort_return_select(pcs)
  # revise branch_values, select_criteria per parse_select table boundaries
  revised_criteria, revised_branches = revise_return_select(pcs,
                                                            sorted_criteria,
                                                            sorted_branches)
  split_criteria, split_branches = split_return_select(revised_criteria,
                                                       revised_branches)
  parse_select_tables = get_parse_select_tables(revised_criteria)
  # destination state name for each (sorted) branch
  dests = [branch[BRANCH_STATE] for branch in sorted_branches]
  commands += get_parse_select_entries(pcs,
                                       parse_select_tables,
                                       split_criteria,
                                       zip(split_branches, dests),
                                       default_branch)
  for child in pcs.children:
    commands = gen_parse_select_entries(child, commands)
  return commands
def process_parse_select_entries(ps_entries):
  """Post-process parse_select entries for the wide ext.first field.

  Tables whose lower byte bound is within EXT_FIRST_WIDTH match on the
  single wide ext.first field, so their per-byte match params must be
  folded into one value&&&mask pair.  Entries for tables beyond
  EXT_FIRST_WIDTH are passed through unchanged.
  """
  ret = []
  for command in ps_entries:
    # recover the table's byte bounds from its name
    strbounds = command.table.split('tset_parse_select_')[1].split('_')
    lower, upper = [int(x) for x in strbounds]
    if lower > EXT_FIRST_WIDTH:
      ret.append(command)
      # BUGFIX: without this 'continue' the pass-through entry was ALSO
      # rewritten below and appended a second time.
      continue
    new_mp_val = ''
    new_mp_mask = ''
    started = False
    for mp in command.match_params[EXT_START_INDEX:]:
      val, mask = [int(x, 0) for x in mp.split('&&&')]
      # skip leading fully-wildcarded bytes; once a real byte is seen,
      # every subsequent byte is included
      if started or mask != 0:
        started = True
        valstr, maskstr = ["0x{:02x}".format(x).split('0x')[1] for x in [val, mask]]
        new_mp_val += valstr
        new_mp_mask += maskstr
    # fill out remaining bytes until we have all 40
    for j in range(upper + 1, EXT_FIRST_WIDTH):
      new_mp_val += '00'
      new_mp_mask += '00'
    new_mp = command.match_params[0:EXT_START_INDEX]
    if new_mp_val == '':
      assert(new_mp_mask == '')
      new_mp.append('0&&&0')
    else:
      new_mp.append('0x' + new_mp_val + '&&&0x' + new_mp_mask)
    ret.append(HP4_Command(command='table_add',
                           table=command.table,
                           action=command.action,
                           match_params=new_mp,
                           action_params=command.action_params))
  return ret
def collect_ingress_pcs(pcs, ingress_pcs_list=None):
  """Collect every parse-call state that can transition to 'ingress',
  i.e. every state at which parsing may end."""
  if ingress_pcs_list is None:
    ingress_pcs_list = []
  if pcs.pcs_id == 0:
    # skip the synthetic root; restart from pc 1
    return collect_ingress_pcs(pcs.children[0])
  ps = pcs.parse_state
  if ps.return_statement[PS_RET_TYPE] == 'select':
    # any one branch to 'ingress' qualifies the state
    for branch in ps.return_statement[PS_RET_BRANCHES]:
      if branch[BRANCH_STATE] == 'ingress':
        ingress_pcs_list.append(pcs)
        break
  elif ps.return_statement[PS_RET_TYPE] == 'immediate':
    if ps.return_statement[PS_RET_IMM_STATE] == 'ingress':
      ingress_pcs_list.append(pcs)
  else:
    unsupported("Unhandled ps return_statement: " + ps.return_statement[PS_RET_TYPE])
  for child in pcs.children:
    ingress_pcs_list = collect_ingress_pcs(child, ingress_pcs_list)
  return ingress_pcs_list
def get_headerset_and_maxdepth(ingress_pcs_list):
  """For each parse depth j, collect the set of headers any ingress-capable
  state may have extracted at that depth; also return the maximum depth."""
  maxdepth = 0
  for pcs in ingress_pcs_list:
    maxdepth = max(maxdepth, len(pcs.header_offsets))
  headerset = [set() for _ in range(maxdepth)]
  for depth in range(maxdepth):
    for pcs in ingress_pcs_list:
      if len(pcs.header_offsets) > depth:
        # headers of this state in extraction (offset) order
        ordered = sorted(pcs.header_offsets, key=pcs.header_offsets.get)
        headerset[depth].add(ordered[depth])
  return headerset, maxdepth
def get_vbits(ingress_pcs_list):
  """Assign every (parse-depth, header) pair a unique one-hot bit within a
  VBITS_WIDTH-wide validity bitmask, allocating from the top downward."""
  headerset, maxdepth = get_headerset_and_maxdepth(ingress_pcs_list)
  vbits = {}
  lshift = VBITS_WIDTH
  for j in range(maxdepth):
    # reserve one bit per distinct header possible at depth j
    numbits = len(headerset[j])
    lshift = lshift - numbits
    i = 1
    for header in headerset[j]:
      vbits[(j, header)] = i << lshift
      i = i << 1
  return vbits
def get_hp4_type(header):
  """Classify a header instance for HyPer4: 'standard_metadata',
  'metadata', or 'extracted' (a regular packet header)."""
  if header.name == 'standard_metadata':
    return 'standard_metadata'
  return 'metadata' if header.metadata == True else 'extracted'
def get_aparam_table_ID(table):
  """Translate *table*'s match criteria into the bracketed table-type
  placeholder used by pipeline_config / init_program_state entries."""
  if len(table.match_fields) == 0:
    return '[MATCHLESS]'
  match = table.match_fields[0] # supporting only one match field
  match_type = match[MATCH_TYPE]
  if match_type.value == 'P4_MATCH_VALID':
    return '[EXTRACTED_VALID]'
  if match_type.value != 'P4_MATCH_EXACT':
    unsupported("Not yet supported: " + match_type.value)
    return
  field = match[MATCH_OBJECT]
  hp4_type = get_hp4_type(field.instance)
  if hp4_type == 'metadata':
    return '[METADATA_EXACT]'
  if hp4_type == 'extracted':
    return '[EXTRACTED_EXACT]'
  # standard_metadata: only a handful of fields are supported
  stdmeta_codes = {
      'ingress_port': '[STDMETA_INGRESS_PORT_EXACT]',
      'packet_length': '[STDMETA_PACKET_LENGTH_EXACT]',
      'instance_type': '[STDMETA_INSTANCE_TYPE_EXACT]',
      'egress_spec': '[STDMETA_EGRESS_SPEC_EXACT]',
  }
  if field.name in stdmeta_codes:
    return stdmeta_codes[field.name]
  unsupported("ERROR: Unsupported: match on stdmetadata field %s" % field.name)
def gen_pipeline_config_entries(pcs, first_table, ingress_pcs_list, vbits):
  """Emit tset_pipeline_config entries: for every parse state that can
  enter ingress, install the first table's type code plus the validity
  bitmask of the headers extracted along that parse path."""
  if pcs.pcs_id == 0:
    # skip the synthetic root; operate on pc 1
    return gen_pipeline_config_entries(pcs.children[0],
                                       first_table,
                                       ingress_pcs_list,
                                       vbits)
  commands = []
  aparam_table_ID = get_aparam_table_ID(first_table)
  for pcs in ingress_pcs_list:
    # OR together the one-hot bit of each extracted header, by depth
    val = 0
    for i, header in enumerate( sorted(pcs.header_offsets,
                                       key=pcs.header_offsets.get) ):
      val = val | vbits[(i, header)]
    valstr = '0x' + '%x' % val
    commands.append(HP4_Command('table_add',
                                'tset_pipeline_config',
                                'a_set_pipeline',
                                ['[vdev ID]', str(pcs.pcs_id)],
                                [aparam_table_ID, valstr, HIGHEST_PRIORITY]))
  return commands
def process_extract_statements(pcs):
  """Apply a parse state's extract() calls to *pcs*: record each header's
  absolute bit offset and advance the extracted-bit counters."""
  for call in pcs.parse_state.call_sequence:
    if call[PS_CALL_TYPE] == p4_hlir.hlir.p4_parser.parse_call.extract:
      pcs.header_offsets[call[PS_CALL_H_INST].name] = pcs.p4_bits_extracted
      pcs.p4_bits_extracted += call[PS_CALL_H_INST].header_type.length * 8
      # HP4 must have extracted at least as much as the P4 program
      if pcs.hp4_bits_extracted < pcs.p4_bits_extracted:
        pcs.hp4_bits_extracted = pcs.p4_bits_extracted
    else:
      debug()
      raise Exception('Unsupported parse call: %s' % call[PS_CALL_TYPE])
def process_parse_tree_clr(pcs, h):
  """Recursively expand the parse tree from *pcs*.

  Processes the state's extract statements, then -- for 'select'
  returns -- records (offset, width) select criteria (widening
  hp4_bits_extracted for current() references past the extracted data)
  and the matching values of every branch, recursing once per distinct
  next parse state.  'immediate' returns recurse directly; 'ingress'
  ends the path.
  """
  process_extract_statements(pcs)
  def add_next(next_parse_state):
    # Create the child PC_State for next_parse_state, inheriting the bit
    # counters and extending the state/parse-state path bookkeeping.
    next_pcs_pcs_path = list(pcs.pcs_path)
    next_pcs_pcs_path.append(pcs)
    next_pcs_ps_path = list(pcs.ps_path)
    next_pcs_ps_path.append(pcs.parse_state)
    next_pcs = PC_State(hp4_bits_extracted = pcs.hp4_bits_extracted,
                        p4_bits_extracted = pcs.p4_bits_extracted,
                        ps_path = next_pcs_ps_path,
                        pcs_path = next_pcs_pcs_path,
                        parse_state = next_parse_state)
    pcs.children.append(next_pcs)
    return next_pcs
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'select':
    for criteria in pcs.parse_state.return_statement[PS_RET_CRITERIA]:
      if isinstance(criteria, current_call):
        # current(offset, width): offset is relative to the bits already
        # extracted by the P4 program at this state
        curr_reqmt = criteria[OFFSET] + criteria[WIDTH]
        if pcs.p4_bits_extracted + curr_reqmt > pcs.hp4_bits_extracted:
          pcs.hp4_bits_extracted += curr_reqmt
        hp4_criteria_offset = criteria[OFFSET] + pcs.p4_bits_extracted
        pcs.select_criteria.append((hp4_criteria_offset, criteria[WIDTH]))
      else:
        # 'header.field' reference: absolute offset of the field
        hdr_name, fld_name = criteria.split('.')
        hp4_criteria_offset = h.p4_fields[criteria].offset + pcs.header_offsets[hdr_name]
        pcs.select_criteria.append((hp4_criteria_offset, h.p4_fields[criteria].width))
    next_parse_states = []
    for branch in pcs.parse_state.return_statement[PS_RET_BRANCHES]:
      # e.g., ([('value', 1108152157446)], 'parse_A')
      values = []
      for value in branch[BRANCH_VALUES]:
        if value[VAL_TYPE] != 'value' and value[VAL_TYPE] != 'default':
          debug()
          raise Exception('Unsupported branch value type: %s' % value[VAL_TYPE])
        if value[VAL_TYPE] == 'default':
          values.append('default')
        else:
          values.append(value[VAL_VALUE])
      pcs.select_values.append( values )
      if branch[BRANCH_STATE] != 'ingress':
        next_parse_state = h.p4_parse_states[branch[BRANCH_STATE]]
        if next_parse_state not in next_parse_states:
          # recurse only once per distinct destination state
          next_parse_states.append(next_parse_state)
          next_pcs = add_next(next_parse_state)
          process_parse_tree_clr(next_pcs, h)
  elif pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    next_parse_state_name = pcs.parse_state.return_statement[PS_RET_IMM_STATE]
    if next_parse_state_name != 'ingress':
      next_parse_state = h.p4_parse_states[next_parse_state_name]
      next_pcs = add_next(next_parse_state)
      process_parse_tree_clr(next_pcs, h)
  else:
    debug()
    raise Exception('Unsupported return type: %s' % \
                    pcs.parse_state.return_statement[PS_RET_TYPE])
def consolidate_parse_tree_clr(pcs, h):
  """Merge each parse state that returns immediately to another state
  with that successor (in place).

  The merged state concatenates both call sequences, adopts the
  successor's return statement and children, absorbs its extraction
  counters, and the parent's branch destinations are renamed to the
  merged state's name.
  """
  if pcs.parse_state.return_statement[PS_RET_TYPE] == 'immediate':
    next_parse_state_name = pcs.parse_state.return_statement[PS_RET_IMM_STATE]
    if next_parse_state_name != 'ingress':
      next_pc_state = pcs.children[0]
      next_parse_state = next_pc_state.parse_state
      old_ps_name = pcs.parse_state.name
      new_ps_name = pcs.parse_state.name + '-' + next_parse_state.name
      # merged call sequence: this state's extracts followed by successor's
      new_ps_call_sequence = list(pcs.parse_state.call_sequence)
      new_ps_call_sequence += next_parse_state.call_sequence
      new_ps = p4_parse_state(h,
                              new_ps_name,
                              call_sequence=new_ps_call_sequence,
                              return_statement=next_parse_state.return_statement)
      # absorb the successor's extraction counters
      hp4_bits_diff = next_pc_state.hp4_bits_extracted - pcs.hp4_bits_extracted
      pcs.hp4_bits_extracted += hp4_bits_diff
      p4_bits_diff = next_pc_state.p4_bits_extracted - pcs.p4_bits_extracted
      pcs.p4_bits_extracted += p4_bits_diff
      pcs.parse_state = new_ps
      pcs.children = list(next_pc_state.children)
      # repoint the parent's branches at the renamed (merged) state
      prev_ps = pcs.ps_path[-1]
      for i, branch in enumerate(prev_ps.return_statement[PS_RET_BRANCHES]):
        if branch[BRANCH_STATE] == old_ps_name:
          prev_ps.return_statement[PS_RET_BRANCHES][i] = (branch[BRANCH_VALUES], new_ps_name)
  for child in pcs.children:
    consolidate_parse_tree_clr(child, h)
def collect_header_offsets(pcs, header_offsets=None):
  """Gather header -> bit-offset mappings over the whole parse tree,
  aborting (via unsupported()) if any header could appear at two
  different offsets."""
  if header_offsets is None:
    header_offsets = {}
  for header, offset in pcs.header_offsets.items():
    if header in header_offsets and offset != header_offsets[header]:
      unsupported("Unsupported: %s has multiple potential offsets; %db and %db" \
                  % (header, offset, header_offsets[header]))
  header_offsets.update(pcs.header_offsets)
  for child in pcs.children:
    header_offsets = collect_header_offsets(child, header_offsets)
  return header_offsets
def collect_field_offsets(header_offsets, header_instances):
  """Derive the absolute bit offset of every field of every collected
  header: field offset within the header + the header's own offset."""
  field_offsets = {}
  for header, base in header_offsets.items():
    try:
      hinst = header_instances[header]
    except KeyError as e:
      # every collected header should have an instance; inspect manually
      print(e)
      debug()
    for field in hinst.fields:
      field_offsets[header + '.' + field.name] = field.offset + base
  return field_offsets
def get_table_from_cs(control_statement):
  """Return the p4_table a control statement refers to: the statement
  itself, or -- for an apply-and-select tuple -- its first element."""
  cs_type = type(control_statement)
  if cs_type is p4_table:
    return control_statement
  if cs_type is tuple:
    return control_statement[0]
  unsupported("Error (get_table_from_cs): unsupported control statement type: " \
              + str(cs_type))
def walk_control_block(control_block, table):
  """Search *control_block* (an ingress call sequence) for *table*.

  Returns (found, next_table): found is True when *table* appears in the
  block -- possibly nested inside an apply-and-select case -- and
  next_table is the table applied immediately after it, or None when it
  is the last statement.
  """
  for control_statement in control_block:
    cs_idx = control_block.index(control_statement)
    if type(control_statement) is p4_table:
      # apply_table_call
      if control_statement == table:
        if cs_idx == len(control_block) - 1:
          return True, None
        return True, get_table_from_cs(control_block[cs_idx + 1])
    elif type(control_statement) is tuple:
      # apply_and_select_block: (table, [(case, call_sequence), ...])
      if control_statement[0] == table:
        if cs_idx == len(control_block) - 1:
          return True, None
        return True, get_table_from_cs(control_block[cs_idx + 1])
      else:
        for case in control_statement[1]:
          found, next_table = walk_control_block(case[1], table)
          if found:
            if next_table != None:
              return True, next_table
            elif cs_idx < len(control_block) - 1:
              # table was last within its case; successor is the next
              # statement after this apply-and-select block
              return True, get_table_from_cs(control_block[cs_idx + 1])
            else:
              return True, None
    else:
      # BUGFIX: previously str(type(entry)) -- 'entry' was undefined and
      # raised NameError whenever this branch was reached.
      unsupported("Error: unsupported call_sequence entry type: " \
                  + str(type(control_statement)))
  return False, None
def gen_tmiss_entries(tables, table_to_trep, ingress, numprimitives):
  """Emit the default ('miss') init_program_state entry for every table.

  A miss carries action_ID 0 / match_ID 0 plus the stage and type of the
  next table in the control flow ('0'/'0' when the pipeline ends here).
  """
  commands = []
  for table_name in tables:
    table = tables[table_name]
    trep = table_to_trep[table]
    tname = trep.name
    stage = trep.stage # int
    aname = 'init_program_state'
    mparams = ['[vdev ID]']
    if 'matchless' not in tname:
      # ternary wildcard entry for tables that actually match on a field
      mparams.append('0&&&0')
    # identify next_table so we can look up stage for aparams[0]
    # aparams[0]: 'next_stage' parameter in finish_action (stages.p4/p4t)
    if 'miss' in table.next_:
      next_table = table.next_['miss']
    else:
      found, next_table = walk_control_block(ingress.call_sequence, table)
    if next_table == None:
      next_stage = '0'
      next_table_type = '0'
    else:
      next_stage = str(table_to_trep[next_table].stage)
      next_table_type = table_to_trep[next_table].table_type()
    aparams = ['0', # action_ID
               '0', # match_ID
               next_stage,
               next_table_type]
    # zeros for remaining type / subtype parameters of init_program_state
    for i in range(numprimitives):
      aparams.append('0')
      aparams.append('0')
    if 'matchless' not in tname:
      aparams.append(str(LOWEST_PRIORITY))
    commands.append(HP4_Command(command="table_add",
                                table=tname,
                                action=aname,
                                match_params=mparams,
                                action_params=aparams))
  return commands
# gen_t_checksum_entries(h.calculated_fields)
def gen_t_checksum_entries(calculated_fields, p4_field_list_calculations,
                           field_offsets, vbits):
  """ detect and handle ipv4 checksum

  Scans the calculated fields for an 'update' of a csum16 over a 144-bit
  field list (the IPv4 header) and emits matching t_checksum entries.
  Any other checksum configuration is rejected via unsupported(), and
  more than one checksum aborts.  When no checksum is found, a _no_op
  entry is installed instead.
  """
  commands = []
  cf_none_types = 0   # unconditional checksum updates seen
  cf_valid_types = 0  # 'if valid(hdr)' guarded updates seen
  checksum_detected = False
  for cf in calculated_fields:
    for statement in cf[1]:
      if statement[0] == 'update':
        flc = p4_field_list_calculations[statement[1]]
        for fl in flc.input:
          # total width of the field list, and its last (highest-offset) field
          count = 0
          max_field_offset = 0
          max_field = None
          for field in fl.fields:
            count += field.width
            if field.offset > max_field_offset:
              max_field_offset = field.offset
              max_field = field
          if count == 144:
            if flc.algorithm == 'csum16' and flc.output_width == 16:
              # Calculate rshift_base parameter
              # This is the amount to R-shift extracted.data such
              # that the ipv4 header is right aligned
              key = max_field.instance.name + '.' + max_field.name
              # TODO: remove assumption that extracted.data is 800 bits
              aparam = str(800 - field_offsets[key] - max_field.width)
              if statement[2] == None:
                cf_none_types += 1
                if (cf_none_types + cf_valid_types) > 1:
                  print("ERROR: Unsupported: multiple checksums")
                  exit()
                else:
                  checksum_detected = True
                  commands.append(HP4_Command("table_add",
                                              "t_checksum",
                                              "a_ipv4_csum16",
                                              ['[vdev ID]', '0&&&0'],
                                              [aparam, str(LOWEST_PRIORITY)]))
              else:
                if statement[2].op == 'valid':
                  cf_valid_types += 1
                  if (cf_none_types + cf_valid_types) > 1:
                    print("ERROR: Unsupported: multiple checksums")
                    exit()
                  else:
                    # TODO: reduce entries by isolating relevant bit
                    for key in vbits.keys():
                      if statement[2].right == key[1]:
                        mparams = ['[vdev ID]']
                        # BUGFIX: this line was truncated in the source
                        # ("format(vbits[key], '"); '#x' renders the
                        # validity bit as a 0x-prefixed hex string, like
                        # the other hex mparams in this file -- confirm
                        # against upstream if a fixed width was intended.
                        val = format(vbits[key], '#x')
                        mparams.append(val + '&&&' + val)
                        checksum_detected = True
                        commands.append(HP4_Command("table_add",
                                                    "t_checksum",
                                                    "a_ipv4_csum16",
                                                    mparams,
                                                    [aparam, '0']))
                else:
                  unsupported("ERROR: Unsupported if_cond op " \
                              + "in calculated field: %s" % statement[2].op)
            else:
              unsupported("ERROR: Unsupported checksum (%s, %i)" \
                          % (flc.algorithm, flc.output_width))
          else:
            unsupported("ERROR: Unsupported checksum - field list of %i bits" \
                        % count)
      else:
        unsupported("WARNING: Unsupported update_verify_spec " \
                    + "for calculated field: %s" % statement[0])
  if checksum_detected == False:
    # no checksum in the program: install a no-op default
    commands.append(HP4_Command("table_add",
                                "t_checksum",
                                "_no_op",
                                ['[vdev ID]', '0&&&0'],
                                [str(LOWEST_PRIORITY)]))
  return commands
def gen_t_resize_pr_entries():
  """Placeholder: no t_resize_pr entries are generated yet."""
  # TODO: full implementation once these primitives gain support:
  # add_header | remove_header | truncate | push | pop | copy_header*
  #   * maybe (due to possibility of making previously invalid header
  #     valid)
  # The default entry is handled by the controller.
  return []
def print_processed_parse_tree(pcs, level=0):
  """Pretty-print the parse tree rooted at *pcs*, indenting each level by
  one tab (debug helper).

  Fixed to use the print() function: the original used the Python 2
  print statement, inconsistent with print(...) everywhere else here.
  """
  for line in str(pcs).split('\n'):
    print('\t' * level + line)
  for child in pcs.children:
    print_processed_parse_tree(child, level+1)
def print_commands(commands):
  """Print each generated command on its own line (debug helper)."""
  for cmd in commands:
    print(cmd)
def launch_process_parse_tree_clr(pcs, h):
  """Kick off parse-tree expansion: clone the root's parse state into a
  child (the root stays as the synthetic pc 0) and recurse from there."""
  start_pcs = PC_State(pcs_path=[pcs],
                       parse_state=pcs.parse_state)
  pcs.children.append(start_pcs)
  process_parse_tree_clr(start_pcs, h)
def parse_args(args):
  """Define the command-line interface and parse *args*
  (sys.argv minus the program name)."""
  parser = argparse.ArgumentParser(description='Recursive Parse Tree Processing')
  parser.add_argument('input', help='path for input .p4', type=str)
  parser.add_argument('-o', '--output', help='path for output .hp4t file',
                      type=str, action="store", default='output.hp4t')
  parser.add_argument('-m', '--mt_output', help='path for match template output',
                      type=str, action="store", default='output.hp4mt')
  parser.add_argument('--numprimitives', help='maximum number of primitives \
                       for which HyPer4 is configured',
                      type=int, action="store", default=9)
  return parser.parse_args(args)
def main():
  """CLI entry point: compile the given .p4 program to HP4 entries."""
  args = parse_args(sys.argv[1:])
  hp4c = P4_to_HP4()
  hp4c.compile_to_hp4(args.input, args.output, args.mt_output, args.numprimitives)
if __name__ == '__main__':
  main()
| false | true |
f72a759aa3a375cf215b4b215a8f91d36710c4d8 | 316 | py | Python | checkv/__init__.py | wolfQK/CheckV-fork | 3519bf4bf07fb73806225946a5629ddc542b4252 | [
"BSD-3-Clause-LBNL"
] | null | null | null | checkv/__init__.py | wolfQK/CheckV-fork | 3519bf4bf07fb73806225946a5629ddc542b4252 | [
"BSD-3-Clause-LBNL"
] | null | null | null | checkv/__init__.py | wolfQK/CheckV-fork | 3519bf4bf07fb73806225946a5629ddc542b4252 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from checkv.modules import (
download_database,
update_database,
contamination,
completeness,
complete_genomes,
quality_summary,
end_to_end,
)
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
__version__ = metadata.version("checkv")
| 18.588235 | 41 | 0.740506 | from checkv.modules import (
download_database,
update_database,
contamination,
completeness,
complete_genomes,
quality_summary,
end_to_end,
)
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
__version__ = metadata.version("checkv")
| true | true |
f72a75e5797e4be5ac376e8272c48fbbb2970c1a | 126 | py | Python | incomevis/utils/__init__.py | hieumtran/incomevis | 90adca62803f767d7c96fc879e662d934dcf9123 | [
"MIT"
] | null | null | null | incomevis/utils/__init__.py | hieumtran/incomevis | 90adca62803f767d7c96fc879e662d934dcf9123 | [
"MIT"
] | null | null | null | incomevis/utils/__init__.py | hieumtran/incomevis | 90adca62803f767d7c96fc879e662d934dcf9123 | [
"MIT"
] | null | null | null | from .getColor import *
from .getDecile import *
from .getPercentile import *
from .getStateName import *
from .path import *
| 21 | 28 | 0.761905 | from .getColor import *
from .getDecile import *
from .getPercentile import *
from .getStateName import *
from .path import *
| true | true |
f72a7713a14d8330970b9d00cd1d55249c858748 | 2,944 | py | Python | defusekit/mods/complicatedwires.py | Floozutter/defuse-kit | ef43450f93f71df6a563783da93fdefd45e0c82b | [
"Unlicense"
] | 1 | 2020-01-15T03:57:25.000Z | 2020-01-15T03:57:25.000Z | defusekit/mods/complicatedwires.py | Floozutter/defuse-kit | ef43450f93f71df6a563783da93fdefd45e0c82b | [
"Unlicense"
] | 3 | 2019-11-26T00:23:21.000Z | 2019-11-29T19:07:56.000Z | defusekit/mods/complicatedwires.py | Floozutter/defuse-kit | ef43450f93f71df6a563783da93fdefd45e0c82b | [
"Unlicense"
] | null | null | null | NAME = "complicated-wires"
import curses
from defusekit import wards
from defusekit.kittypes import Window
def get_instruction(red: bool, blue: bool, star: bool, led: bool) -> str:
    """Return the manual's instruction for a complicated wire.

    The four attributes form a 4-bit wire state (red is the most
    significant bit, led the least significant), which selects one of
    the sixteen instructions from the defusal manual.
    """
    state = (int(red) << 3) | (int(blue) << 2) | (int(star) << 1) | int(led)
    cut = "Cut the wire"
    dont = "Do not cut the wire"
    serial = "Cut the wire if serial number's last digit is even"
    parallel = "Cut the wire if the bomb has a parallel port"
    battery = "Cut the wire if the bomb has 2 or more batteries"
    # Instruction table indexed by wire state 0b0000 .. 0b1111.
    table = (
        cut, dont, cut, battery,
        serial, parallel, dont, parallel,
        serial, battery, cut, battery,
        serial, serial, parallel, dont,
    )
    return table[state]
def main(scr: Window):
    """Interactive solver for the Complicated Wires module.

    Renders a four-option toggle menu (red/blue coloring, star symbol,
    LED) in the given curses window and keeps the displayed cut/no-cut
    instruction in sync with the current selection.  Returns when the
    user presses ESC.
    """
    wards.setup(scr)
    wards.print_modulename(scr, NAME)
    wards.print_controls(scr, (
        ("ESC", "Quit the module."),
        ("Q/W/E/R", "Toggle wire options."),
        ("TAB", "Reset all wire options to NO.")
    ))
    scr.addstr("Wire Settings:", curses.color_pair(0))
    # One toggle key and one label per wire attribute, in display order.
    setting_keys = ("Q", "W", "E", "R")
    setting_labels = (
        "Has Red coloring",
        "Has Blue coloring",
        "Has Star symbol",
        "Has LED lit"
    )
    setting_states = [False, False, False, False]  # current YES/NO per option
    setting_yxs = []  # cursor positions where each YES/NO value is drawn
    for i in range(4):
        scr.addstr("\n")
        scr.addstr((setting_keys[i]+" - ").rjust(6))
        scr.addstr(setting_labels[i].ljust(18), curses.color_pair(0))
        scr.addstr(": ", curses.color_pair(0))
        # Remember where the value field begins so it can be redrawn in place.
        setting_yxs.append(scr.getyx())
    scr.addstr("\n\n")
    scr.addstr("Instruction: ")
    instruction_yx = scr.getyx()  # position where the instruction is redrawn
    while True:
        # Show setting states
        for i in range(4):
            scr.move(setting_yxs[i][0], setting_yxs[i][1])
            scr.clrtoeol()
            if setting_states[i]:
                scr.addstr("YES", curses.color_pair(2))
            else:
                scr.addstr("NO", curses.color_pair(1))
        # Show instruction for the current wire state
        scr.move(instruction_yx[0], instruction_yx[1])
        scr.clrtoeol()
        scr.addstr(get_instruction(
            setting_states[0],
            setting_states[1],
            setting_states[2],
            setting_states[3]
        ))
        # Get input (blocking); codes cover upper- and lower-case letters.
        c = scr.getch()
        if c == 27: # Esc
            return
        elif c == 9: # Tab
            setting_states = [False, False, False, False]
        elif c in (81, 113): # Q
            setting_states[0] = not setting_states[0]
        elif c in (87, 119): # W
            setting_states[1] = not setting_states[1]
        elif c in (69, 101): # E
            setting_states[2] = not setting_states[2]
        elif c in (82, 114): # R
            setting_states[3] = not setting_states[3]
| 29.737374 | 73 | 0.53159 | NAME = "complicated-wires"
import curses
from defusekit import wards
from defusekit.kittypes import Window
def get_instruction(red: bool, blue: bool, star: bool, led: bool) -> str:
    """Return the manual's instruction for a complicated wire.

    The four attributes form a 4-bit wire state (red is the most
    significant bit, led the least significant), which selects one of
    the sixteen instructions from the defusal manual.
    """
    state = (int(red) << 3) | (int(blue) << 2) | (int(star) << 1) | int(led)
    cut = "Cut the wire"
    dont = "Do not cut the wire"
    serial = "Cut the wire if serial number's last digit is even"
    parallel = "Cut the wire if the bomb has a parallel port"
    battery = "Cut the wire if the bomb has 2 or more batteries"
    # Instruction table indexed by wire state 0b0000 .. 0b1111.
    table = (
        cut, dont, cut, battery,
        serial, parallel, dont, parallel,
        serial, battery, cut, battery,
        serial, serial, parallel, dont,
    )
    return table[state]
def main(scr: Window):
    """Interactive solver for the Complicated Wires module.

    Renders a four-option toggle menu (red/blue coloring, star symbol,
    LED) in the given curses window and keeps the displayed cut/no-cut
    instruction in sync with the current selection.  Returns when the
    user presses ESC.
    """
    wards.setup(scr)
    wards.print_modulename(scr, NAME)
    wards.print_controls(scr, (
        ("ESC", "Quit the module."),
        ("Q/W/E/R", "Toggle wire options."),
        ("TAB", "Reset all wire options to NO.")
    ))
    scr.addstr("Wire Settings:", curses.color_pair(0))
    # One toggle key and one label per wire attribute, in display order.
    setting_keys = ("Q", "W", "E", "R")
    setting_labels = (
        "Has Red coloring",
        "Has Blue coloring",
        "Has Star symbol",
        "Has LED lit"
    )
    setting_states = [False, False, False, False]  # current YES/NO per option
    setting_yxs = []  # cursor positions where each YES/NO value is drawn
    for i in range(4):
        scr.addstr("\n")
        scr.addstr((setting_keys[i]+" - ").rjust(6))
        scr.addstr(setting_labels[i].ljust(18), curses.color_pair(0))
        scr.addstr(": ", curses.color_pair(0))
        # Remember where the value field begins so it can be redrawn in place.
        setting_yxs.append(scr.getyx())
    scr.addstr("\n\n")
    scr.addstr("Instruction: ")
    instruction_yx = scr.getyx()  # position where the instruction is redrawn
    while True:
        # Show setting states
        for i in range(4):
            scr.move(setting_yxs[i][0], setting_yxs[i][1])
            scr.clrtoeol()
            if setting_states[i]:
                scr.addstr("YES", curses.color_pair(2))
            else:
                scr.addstr("NO", curses.color_pair(1))
        # Show instruction for the current wire state
        scr.move(instruction_yx[0], instruction_yx[1])
        scr.clrtoeol()
        scr.addstr(get_instruction(
            setting_states[0],
            setting_states[1],
            setting_states[2],
            setting_states[3]
        ))
        # Get input (blocking); codes cover upper- and lower-case letters.
        c = scr.getch()
        if c == 27: # Esc
            return
        elif c == 9: # Tab
            setting_states = [False, False, False, False]
        elif c in (81, 113): # Q
            setting_states[0] = not setting_states[0]
        elif c in (87, 119): # W
            setting_states[1] = not setting_states[1]
        elif c in (69, 101): # E
            setting_states[2] = not setting_states[2]
        elif c in (82, 114): # R
            setting_states[3] = not setting_states[3]
| true | true |
f72a784ed40b4a7d44700220e01f993eed20a5e0 | 1,277 | py | Python | Problem 4.py | SuhelMehta9/Project-Euler | dbb06103ea702a137bce4e8644aa07d8913b8bd6 | [
"Unlicense"
] | null | null | null | Problem 4.py | SuhelMehta9/Project-Euler | dbb06103ea702a137bce4e8644aa07d8913b8bd6 | [
"Unlicense"
] | null | null | null | Problem 4.py | SuhelMehta9/Project-Euler | dbb06103ea702a137bce4e8644aa07d8913b8bd6 | [
"Unlicense"
] | null | null | null | # Problem 4: Largest palindrome product
# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
# Project Euler problem 4: largest palindrome that is the product of two
# 3-digit numbers. Both factors run from 999 down to 100, exactly as the
# original manual while-loops did, but with range() instead of hand-rolled
# counters and a string-slice reversal instead of list/reverse/join.
palindrome = []  # every palindromic product found
for num in range(999, 99, -1):
    for num2 in range(999, 99, -1):
        product = num * num2
        # A palindrome reads the same forwards and backwards.
        if str(product) == str(product)[::-1]:
            palindrome.append(product)
print(max(palindrome))  # largest palindrome found
| 55.521739 | 135 | 0.696163 |
# Project Euler problem 4: largest palindrome that is the product of two
# 3-digit numbers. Both factors run from 999 down to 100, exactly as the
# original manual while-loops did, but with range() instead of hand-rolled
# counters and a string-slice reversal instead of list/reverse/join.
palindrome = []  # every palindromic product found
for num in range(999, 99, -1):
    for num2 in range(999, 99, -1):
        product = num * num2
        # A palindrome reads the same forwards and backwards.
        if str(product) == str(product)[::-1]:
            palindrome.append(product)
print(max(palindrome))  # largest palindrome found
| true | true |
f72a78f9746119f4da8555bf50e0ea8d48a5abbe | 13,660 | py | Python | hiddenlayer/graph.py | pyjhzwh/hiddenlayer | 59f84299986d9aed7e0534147a87f7dd491ab08d | [
"MIT"
] | null | null | null | hiddenlayer/graph.py | pyjhzwh/hiddenlayer | 59f84299986d9aed7e0534147a87f7dd491ab08d | [
"MIT"
] | null | null | null | hiddenlayer/graph.py | pyjhzwh/hiddenlayer | 59f84299986d9aed7e0534147a87f7dd491ab08d | [
"MIT"
] | null | null | null | """
HiddenLayer
Implementation of the Graph class. A framework independent directed graph to
represent a neural network.
Written by Waleed Abdulla. Additions by Phil Ferriere.
Licensed under the MIT License
"""
from __future__ import absolute_import, division, print_function
import os
import re
from random import getrandbits
import inspect
import numpy as np
THEMES = {
"basic": {
"background_color": "#FFFFFF",
"fill_color": "#E8E8E8",
"outline_color": "#000000",
"font_color": "#000000",
"font_name": "Times",
"font_size": "10",
"margin": "0,0",
"padding": "1.0,0.5",
},
"blue": {
"background_color": "#FFFFFF",
"fill_color": "#BCD6FC",
"outline_color": "#7C96BC",
"font_color": "#202020",
"font_name": "Verdana",
"font_size": "10",
"margin": "0,0",
"padding": "1.0,0.5",
},
}
###########################################################################
# Utility Functions
###########################################################################
def detect_framework(value):
    """Best-effort guess of the deep-learning framework `value` belongs to.

    Walks the MRO of value's class and returns "torch" or "tensorflow"
    based on the defining module of the first matching base class.
    Returns None when neither framework is recognized.
    """
    for base in inspect.getmro(value.__class__):
        module_name = base.__module__
        if module_name.startswith("torch"):
            return "torch"
        if module_name.startswith("tensorflow"):
            return "tensorflow"
    return None
###########################################################################
# Node
###########################################################################
class Node():
    """A single framework-agnostic layer/operation in a directed graph."""

    def __init__(self, uid, name, op, output_shape=None, params=None):
        """
        uid: unique ID for the layer that doesn't repeat in the computation graph.
        name: Name to display
        op: Framework-agnostic operation name.
        output_shape: optional tuple/list describing the node's output.
        params: optional dict of layer attributes (e.g. kernel_shape, stride).
        """
        self.id = uid
        self.name = name
        self.op = op
        self.repeat = 1
        if output_shape:
            assert isinstance(output_shape, (tuple, list)), \
                "output_shape must be a tuple or list but received {}".format(type(output_shape))
        self.output_shape = output_shape
        self.params = params or {}
        self._caption = ""

    @property
    def title(self):
        """Display title: the name (or op), annotated with kernel and stride."""
        title = self.name or self.op
        if "kernel_shape" in self.params:
            # Append the kernel dimensions, e.g. "Conv" -> "Conv3x3".
            title += "x".join(str(d) for d in self.params["kernel_shape"])
        if "stride" in self.params:
            stride = self.params["stride"]
            # Collapse a uniform per-axis stride to a single scalar.
            if np.unique(stride).size == 1:
                stride = stride[0]
            if stride != 1:
                # Only non-default strides are worth showing, e.g. "/s2".
                title += "/s{}".format(str(stride))
        return title

    @property
    def caption(self):
        """Optional extra text displayed under the title (empty by default)."""
        return self._caption if self._caption else ""

    def __repr__(self):
        pieces = ["<Node: op: {}, name: {}, id: {}, title: {}, repeat: {}".format(
            self.op, self.name, self.id, self.title, self.repeat)]
        if self.output_shape:
            pieces.append(", shape: {}".format(str(self.output_shape)))
        if self.params:
            pieces.append(", params: {}".format(str(self.params)))
        pieces.append(">")
        return "".join(pieces)
###########################################################################
# Graph
###########################################################################
def build_graph(model=None, args=None, input_names=None,
                transforms="default", framework_transforms="default"):
    """Build a Graph from a PyTorch or TensorFlow model.

    Imports the model's computation graph, then applies first the
    framework-specific transforms and afterwards the generic
    simplification transforms.  Passing "default" for either transform
    argument selects the built-in transform lists.
    """
    graph = Graph()

    framework = detect_framework(model)
    if framework == "torch":
        from .pytorch_builder import import_graph, FRAMEWORK_TRANSFORMS
        assert args is not None, "Argument args must be provided for Pytorch models."
        import_graph(graph, model, args)
    elif framework == "tensorflow":
        from .tf_builder import import_graph, FRAMEWORK_TRANSFORMS
        import_graph(graph, model)
    else:
        raise ValueError("`model` input param must be a PyTorch, TensorFlow, or Keras-with-TensorFlow-backend model.")

    # Framework-specific cleanup first, then generic simplifications.
    if framework_transforms:
        if framework_transforms == "default":
            framework_transforms = FRAMEWORK_TRANSFORMS
        for t in framework_transforms:
            graph = t.apply(graph)
    if transforms:
        if transforms == "default":
            from .transforms import SIMPLICITY_TRANSFORMS
            transforms = SIMPLICITY_TRANSFORMS
        for t in transforms:
            graph = t.apply(graph)
    return graph
class Graph():
    """Tracks nodes and edges of a directed graph and supports basic operations on them."""
    def __init__(self, model=None, args=None, input_names=None,
                 transforms="default", framework_transforms="default",
                 meaningful_ids=False):
        """Create an empty graph; optionally import `model` (PyTorch or
        TensorFlow) and apply the given transforms on construction."""
        self.nodes = {}
        self.edges = []
        self.meaningful_ids = meaningful_ids # TODO
        self.theme = THEMES["basic"]
        if model:
            # Detect framework and use the matching importer.
            framework = detect_framework(model)
            if framework == "torch":
                from .pytorch_builder import import_graph, FRAMEWORK_TRANSFORMS
                assert args is not None, "Argument args must be provided for Pytorch models."
                import_graph(self, model, args)
            elif framework == "tensorflow":
                from .tf_builder import import_graph, FRAMEWORK_TRANSFORMS
                import_graph(self, model)
            # Apply framework-specific transforms first, then generic ones.
            if framework_transforms:
                if framework_transforms == "default":
                    framework_transforms = FRAMEWORK_TRANSFORMS
                for t in framework_transforms:
                    t.apply(self)
            if transforms:
                if transforms == "default":
                    from .transforms import SIMPLICITY_TRANSFORMS
                    transforms = SIMPLICITY_TRANSFORMS
                for t in transforms:
                    t.apply(self)
    def id(self, node):
        """Returns a unique node identifier. If the node has an id
        attribute (preferred), it's used. Otherwise, the hash() is returned."""
        return node.id if hasattr(node, "id") else hash(node)
    def add_node(self, node):
        """Insert (or overwrite) `node`, keyed by its identifier."""
        id = self.id(node)
        # assert(id not in self.nodes)
        self.nodes[id] = node
    def add_edge(self, node1, node2, label=None):
        """Add a directed edge node1 -> node2 with an optional label."""
        # If the edge is already present, don't add it again.
        # TODO: If an edge exists with a different label, still don't add it again.
        edge = (self.id(node1), self.id(node2), label)
        if edge not in self.edges:
            self.edges.append(edge)
    def add_edge_by_id(self, vid1, vid2, label=None):
        """Add a directed edge given raw node identifiers (no dedup)."""
        self.edges.append((vid1, vid2, label))
    def outgoing(self, node):
        """Returns nodes connecting out of the given node (or list of nodes)."""
        nodes = node if isinstance(node, list) else [node]
        node_ids = [self.id(n) for n in nodes]
        # Find edges outgoing from this group but not incoming to it
        outgoing = [self[e[1]] for e in self.edges
                    if e[0] in node_ids and e[1] not in node_ids]
        return outgoing
    def incoming(self, node):
        """Returns nodes connecting to the given node (or list of nodes)."""
        nodes = node if isinstance(node, list) else [node]
        node_ids = [self.id(n) for n in nodes]
        # Find edges incoming to this group but not outgoing from it
        incoming = [self[e[0]] for e in self.edges
                    if e[1] in node_ids and e[0] not in node_ids]
        return incoming
    def siblings(self, node):
        """Returns all nodes that share the same parent (incoming node) with
        the given node, including the node itself.
        """
        incoming = self.incoming(node)
        # TODO: Not handling the case of multiple incoming nodes yet
        if len(incoming) == 1:
            incoming = incoming[0]
            siblings = self.outgoing(incoming)
            return siblings
        else:
            return [node]
    def __getitem__(self, key):
        """Look up node(s) by identifier; accepts a single key or a list."""
        if isinstance(key, list):
            return [self.nodes.get(k) for k in key]
        else:
            return self.nodes.get(key)
    def remove(self, nodes):
        """Remove a node and its edges."""
        nodes = nodes if isinstance(nodes, list) else [nodes]
        for node in nodes:
            k = self.id(node)
            self.edges = list(filter(lambda e: e[0] != k and e[1] != k, self.edges))
            del self.nodes[k]
    def replace(self, nodes, node):
        """Replace nodes with node. Edges incoming to nodes[0] are connected to
        the new node, and nodes outgoing from nodes[-1] become outgoing from
        the new node."""
        nodes = nodes if isinstance(nodes, list) else [nodes]
        # Is the new node part of the replace nodes (i.e. want to collapse
        # a group of nodes into one of them)?
        collapse = self.id(node) in self.nodes
        # Add new node and edges
        if not collapse:
            self.add_node(node)
        for in_node in self.incoming(nodes):
            # TODO: check specifically for output_shape is not generic. Consider refactoring.
            self.add_edge(in_node, node, in_node.output_shape if hasattr(in_node, "output_shape") else None)
        for out_node in self.outgoing(nodes):
            self.add_edge(node, out_node, node.output_shape if hasattr(node, "output_shape") else None)
        # Remove the old nodes
        for n in nodes:
            if collapse and n == node:
                continue
            self.remove(n)
    def search(self, pattern):
        """Searches the graph for a sub-graph that matches the given pattern
        and returns the first match it finds.  Returns ([], None) when no
        node matches."""
        for node in self.nodes.values():
            match, following = pattern.match(self, node)
            if match:
                return match, following
        return [], None
    def sequence_id(self, sequence):
        """Make up an ID for a sequence (list) of nodes.
        Note: `getrandbits()` is very uninformative as a "readable" ID. Here, we build a name
        such that when the mouse hovers over the drawn node in Jupyter, one can figure out
        which original nodes make up the sequence. This is actually quite useful.
        """
        if self.meaningful_ids:
            # TODO: This might fail if the ID becomes too long
            return "><".join([node.id for node in sequence])
        else:
            return getrandbits(64)
    def build_dot(self):
        """Generate a GraphViz Dot graph.
        Returns a GraphViz Digraph object.
        """
        from graphviz import Digraph
        # Build GraphViz Digraph; colors/fonts come from the active theme.
        dot = Digraph()
        dot.attr("graph",
                 bgcolor=self.theme["background_color"],
                 color=self.theme["outline_color"],
                 fontsize=self.theme["font_size"],
                 fontcolor=self.theme["font_color"],
                 fontname=self.theme["font_name"],
                 margin=self.theme["margin"],
                 rankdir="LR",
                 pad=self.theme["padding"])
        dot.attr("node", shape="box",
                 style="filled", margin="0,0",
                 fillcolor=self.theme["fill_color"],
                 color=self.theme["outline_color"],
                 fontsize=self.theme["font_size"],
                 fontcolor=self.theme["font_color"],
                 fontname=self.theme["font_name"])
        dot.attr("edge", style="solid",
                 color=self.theme["outline_color"],
                 fontsize=self.theme["font_size"],
                 fontcolor=self.theme["font_color"],
                 fontname=self.theme["font_name"])
        # Each node is rendered as a small HTML-like table: title, optional
        # caption, optional repeat count.
        for k, n in self.nodes.items():
            label = "<tr><td cellpadding='6'>{}</td></tr>".format(n.title)
            if n.caption:
                label += "<tr><td>{}</td></tr>".format(n.caption)
            if n.repeat > 1:
                label += "<tr><td align='right' cellpadding='2'>x{}</td></tr>".format(n.repeat)
            label = "<<table border='0' cellborder='0' cellpadding='0'>" + label + "</table>>"
            dot.node(str(k), label)
        for a, b, label in self.edges:
            # A tuple/list label is an output shape; render it as "AxBxC".
            if isinstance(label, (list, tuple)):
                label = "x".join([str(l or "?") for l in label])
            dot.edge(str(a), str(b), label)
        return dot
    def _repr_svg_(self):
        """Allows Jupyter notebook to render the graph automatically."""
        return self.build_dot()._repr_image_svg_xml()
    def save(self, path, format="pdf"):
        """Render the graph to `path` in the given GraphViz output format."""
        # TODO: assert on acceptable format values
        dot = self.build_dot()
        dot.format = format
        directory, file_name = os.path.split(path)
        # Remove extension from file name. dot.render() adds it.
        file_name = file_name.replace("." + format, "")
        dot.render(file_name, directory=directory, cleanup=True)
| 37.01897 | 119 | 0.553075 | from __future__ import absolute_import, division, print_function
import os
import re
from random import getrandbits
import inspect
import numpy as np
THEMES = {
"basic": {
"background_color": "#FFFFFF",
"fill_color": "#E8E8E8",
"outline_color": "#000000",
"font_color": "#000000",
"font_name": "Times",
"font_size": "10",
"margin": "0,0",
"padding": "1.0,0.5",
},
"blue": {
"background_color": "#FFFFFF",
"fill_color": "#BCD6FC",
"outline_color": "#7C96BC",
"font_color": "#202020",
"font_name": "Verdana",
"font_size": "10",
"margin": "0,0",
"padding": "1.0,0.5",
},
}
| true | true |
f72a79a9ba5f4c1690d21261e6cc829e1ab1cdfe | 17,260 | py | Python | scripts/greaseweazle/usb.py | zxrepo/keirf.Greaseweazle | a023ad364eb813856bb6632da87cde7e513d73d1 | [
"Unlicense"
] | 1 | 2022-03-05T14:50:48.000Z | 2022-03-05T14:50:48.000Z | scripts/greaseweazle/usb.py | zxrepo/keirf.Greaseweazle | a023ad364eb813856bb6632da87cde7e513d73d1 | [
"Unlicense"
] | null | null | null | scripts/greaseweazle/usb.py | zxrepo/keirf.Greaseweazle | a023ad364eb813856bb6632da87cde7e513d73d1 | [
"Unlicense"
] | null | null | null | # greaseweazle/usb.py
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct
import itertools as it
from greaseweazle import version
from greaseweazle import error
from greaseweazle.flux import Flux
## Control-Path command set
class ControlCmd:
    # Magic baud-rate values used as out-of-band control signals on the
    # serial link (see Unit.reset): selecting ClearComms resets the
    # communication state; selecting Normal resumes ordinary traffic.
    ClearComms = 10000
    Normal = 9600
## Command set
class Cmd:
    # Host-to-Greaseweazle command opcodes. Commands are framed as
    # <opcode, total_length, payload...> (see Unit._send_cmd) and are
    # acknowledged by the firmware with <opcode, Ack-code>.
    GetInfo = 0
    Update = 1
    Seek = 2
    Side = 3
    SetParams = 4
    GetParams = 5
    Motor = 6
    ReadFlux = 7
    WriteFlux = 8
    GetFluxStatus = 9
    GetIndexTimes = 10
    SwitchFwMode = 11
    Select = 12
    Deselect = 13
    SetBusType = 14
    SetPin = 15
    Reset = 16
    EraseFlux = 17
    SourceBytes = 18
    SinkBytes = 19
    # Human-readable command names, indexed by opcode (used by CmdError).
    str = {
        GetInfo: "GetInfo",
        Update: "Update",
        Seek: "Seek",
        Side: "Side",
        SetParams: "SetParams",
        GetParams: "GetParams",
        Motor: "Motor",
        ReadFlux: "ReadFlux",
        WriteFlux: "WriteFlux",
        GetFluxStatus: "GetFluxStatus",
        GetIndexTimes: "GetIndexTimes",
        SwitchFwMode: "SwitchFwMode",
        Select: "Select",
        Deselect: "Deselect",
        SetBusType: "SetBusType",
        SetPin: "SetPin",
        Reset: "Reset",
        EraseFlux: "EraseFlux",
        SourceBytes: "SourceBytes",
        SinkBytes: "SinkBytes"
    }
## Command responses/acknowledgements
class Ack:
    # Acknowledgement codes returned by the firmware for every command;
    # any value other than Okay makes Unit._send_cmd raise CmdError.
    Okay = 0
    BadCommand = 1
    NoIndex = 2
    NoTrk0 = 3
    FluxOverflow = 4
    FluxUnderflow = 5
    Wrprot = 6
    NoUnit = 7
    NoBus = 8
    BadUnit = 9
    BadPin = 10
    BadCylinder = 11
    # Human-readable descriptions, indexed by acknowledgement code.
    str = {
        Okay: "Okay",
        BadCommand: "Bad Command",
        NoIndex: "No Index",
        NoTrk0: "Track 0 not found",
        FluxOverflow: "Flux Overflow",
        FluxUnderflow: "Flux Underflow",
        Wrprot: "Disk is Write Protected",
        NoUnit: "No drive unit selected",
        NoBus: "No bus type (eg. Shugart, IBM/PC) specified",
        BadUnit: "Invalid unit number",
        BadPin: "Not a modifiable pin",
        BadCylinder: "Invalid cylinder"
    }
## Cmd.GetInfo indexes
class GetInfo:
    # Sub-index values for Cmd.GetInfo, selecting which info block to fetch.
    Firmware = 0
    BandwidthStats = 1
## Cmd.{Get,Set}Params indexes
class Params:
    # Parameter-block index for Cmd.GetParams / Cmd.SetParams.
    Delays = 0
## Cmd.SetBusType values
class BusType:
    # Floppy bus types accepted by Cmd.SetBusType.
    Invalid = 0
    IBMPC = 1
    Shugart = 2
## Flux read stream opcodes, preceded by 0xFF byte
class FluxOp:
    # Opcodes embedded in the flux sample stream, each preceded by a 0xFF
    # marker byte (see Unit._decode_flux / Unit._encode_flux).
    Index = 1
    Space = 2
    Astable = 3
## CmdError: Encapsulates a command acknowledgement.
class CmdError(Exception):
    """Raised when the Greaseweazle firmware rejects a command.

    Carries the failed command byte-string and the firmware's
    acknowledgement code so a readable message can be produced.
    """

    def __init__(self, cmd, code):
        self.cmd = cmd
        self.code = code

    def cmd_str(self):
        """Name of the failed command (looked up via its opcode byte)."""
        return Cmd.str.get(self.cmd[0], "UnknownCmd")

    def errcode_str(self):
        """Human-readable description of the acknowledgement code."""
        if self.code == Ack.BadCylinder:
            # The offending cylinder is the command's third (signed) byte.
            return Ack.str[Ack.BadCylinder] + " %d" % struct.unpack('2Bb', self.cmd)[2]
        return Ack.str.get(self.code, "Unknown Error (%u)" % self.code)

    def __str__(self):
        return "%s: %s" % (self.cmd_str(), self.errcode_str())
class Unit:
    """Host-side driver for a Greaseweazle unit attached via a serial link.

    Instance variables:
      major, minor: Greaseweazle firmware version number
      max_cmd:      Maximum Cmd number accepted by this unit
      sample_freq:  Resolution of all time values passed to/from this unit
      update_mode:  True iff the Greaseweazle unit is in update mode
    """

    ## Unit(ser):
    ## Accepts a Pyserial instance for Greaseweazle communications.
    def __init__(self, ser):
        self.ser = ser
        self.reset()
        # Copy firmware info to instance variables (see class docstring).
        self._send_cmd(struct.pack("3B", Cmd.GetInfo, 3, GetInfo.Firmware))
        x = struct.unpack("<4BI3B21x", self.ser.read(32))
        (self.major, self.minor, is_main_firmware,
         self.max_cmd, self.sample_freq, self.hw_model,
         self.hw_submodel, self.usb_speed) = x
        # Old firmware doesn't report HW type but runs on STM32F1 only.
        if self.hw_model == 0:
            self.hw_model = 1
        # Check whether firmware is in update mode: limited command set if so.
        self.update_mode = (is_main_firmware == 0)
        if self.update_mode:
            self.update_jumpered = (self.sample_freq & 1)
            del self.sample_freq
            return
        # We are running main firmware: Check whether an update is needed.
        # We can use only the GetInfo command if the firmware is out of date.
        self.update_needed = (version.major != self.major
                              or version.minor != self.minor)
        if self.update_needed:
            return
        # Initialise the delay properties with current firmware values.
        self._send_cmd(struct.pack("4B", Cmd.GetParams, 4, Params.Delays, 10))
        (self._select_delay, self._step_delay,
         self._seek_settle_delay, self._motor_delay,
         self._auto_off_delay) = struct.unpack("<5H", self.ser.read(10))

    ## reset:
    ## Resets communications with Greaseweazle.
    def reset(self):
        self.ser.reset_output_buffer()
        self.ser.baudrate = ControlCmd.ClearComms
        self.ser.baudrate = ControlCmd.Normal
        self.ser.reset_input_buffer()

    ## _send_cmd:
    ## Send given command byte sequence to Greaseweazle.
    ## Raise a CmdError if command fails.
    def _send_cmd(self, cmd):
        self.ser.write(cmd)
        (c,r) = struct.unpack("2B", self.ser.read(2))
        error.check(c == cmd[0], "Command returned garbage (%02x != %02x)"
                    % (c, cmd[0]))
        if r != 0:
            raise CmdError(cmd, r)

    ## seek:
    ## Seek the selected drive's heads to the specified track (cyl, side).
    def seek(self, cyl, side):
        self._send_cmd(struct.pack("2Bb", Cmd.Seek, 3, cyl))
        self._send_cmd(struct.pack("3B", Cmd.Side, 3, side))

    ## set_bus_type:
    ## Set the floppy bus type.
    def set_bus_type(self, type):
        self._send_cmd(struct.pack("3B", Cmd.SetBusType, 3, type))

    ## set_pin:
    ## Set a pin level.
    def set_pin(self, pin, level):
        self._send_cmd(struct.pack("4B", Cmd.SetPin, 4, pin, int(level)))

    ## power_on_reset:
    ## Re-initialise to power-on defaults.
    def power_on_reset(self):
        self._send_cmd(struct.pack("2B", Cmd.Reset, 2))

    ## drive_select:
    ## Select the specified drive unit.
    def drive_select(self, unit):
        self._send_cmd(struct.pack("3B", Cmd.Select, 3, unit))

    ## drive_deselect:
    ## Deselect currently-selected drive unit (if any).
    def drive_deselect(self):
        self._send_cmd(struct.pack("2B", Cmd.Deselect, 2))

    ## drive_motor:
    ## Turn the specified drive's motor on/off.
    def drive_motor(self, unit, state):
        self._send_cmd(struct.pack("4B", Cmd.Motor, 4, unit, int(state)))

    ## switch_fw_mode:
    ## Switch between update bootloader and main firmware.
    def switch_fw_mode(self, mode):
        self._send_cmd(struct.pack("3B", Cmd.SwitchFwMode, 3, int(mode)))

    ## update_firmware:
    ## Update Greaseweazle to the given new firmware.
    def update_firmware(self, dat):
        self._send_cmd(struct.pack("<2BI", Cmd.Update, 6, len(dat)))
        self.ser.write(dat)
        (ack,) = struct.unpack("B", self.ser.read(1))
        return ack

    ## update_bootloader:
    ## Update Greaseweazle with the given new bootloader.
    def update_bootloader(self, dat):
        self._send_cmd(struct.pack("<2B2I", Cmd.Update, 10,
                                   len(dat), 0xdeafbee3))
        self.ser.write(dat)
        (ack,) = struct.unpack("B", self.ser.read(1))
        return ack

    ## _decode_flux:
    ## Decode the Greaseweazle data stream into a list of flux samples.
    def _decode_flux(self, dat):
        flux, index = [], []
        assert dat[-1] == 0
        dat_i = it.islice(dat, 0, len(dat)-1)
        ticks, ticks_since_index = 0, 0
        def _read_28bit():
            # 28-bit value carried in the low 7 bits of four stream bytes.
            val = (next(dat_i) & 254) >> 1
            val += (next(dat_i) & 254) << 6
            val += (next(dat_i) & 254) << 13
            val += (next(dat_i) & 254) << 20
            return val
        try:
            while True:
                i = next(dat_i)
                if i == 255:
                    # 0xFF escapes a FluxOp opcode.
                    opcode = next(dat_i)
                    if opcode == FluxOp.Index:
                        val = _read_28bit()
                        index.append(ticks_since_index + ticks + val)
                        ticks_since_index = -(ticks + val)
                    elif opcode == FluxOp.Space:
                        ticks += _read_28bit()
                    else:
                        raise error.Fatal("Bad opcode in flux stream (%d)"
                                          % opcode)
                else:
                    # Plain flux sample: 1-byte (1..249) or 2-byte (250..) form.
                    if i < 250:
                        val = i
                    else:
                        val = 250 + (i - 250) * 255
                        val += next(dat_i) - 1
                    ticks += val
                    flux.append(ticks)
                    ticks_since_index += ticks
                    ticks = 0
        except StopIteration:
            pass
        return flux, index

    ## _encode_flux:
    ## Convert the given flux timings into an encoded data stream.
    def _encode_flux(self, flux):
        nfa_thresh = round(150e-6 * self.sample_freq) # 150us
        nfa_period = round(1.25e-6 * self.sample_freq) # 1.25us
        dat = bytearray()
        def _write_28bit(x):
            dat.append(1 | (x<<1) & 255)
            dat.append(1 | (x>>6) & 255)
            dat.append(1 | (x>>13) & 255)
            dat.append(1 | (x>>20) & 255)
        for val in flux:
            if val == 0:
                pass
            elif val < 250:
                dat.append(val)
            elif val > nfa_thresh:
                # Long no-flux area: emit a Space followed by an Astable run.
                dat.append(255)
                dat.append(FluxOp.Space)
                _write_28bit(val)
                dat.append(255)
                dat.append(FluxOp.Astable)
                _write_28bit(nfa_period)
            else:
                high = (val-250) // 255
                if high < 5:
                    # Two-byte encoding for medium-sized intervals.
                    dat.append(250 + high)
                    dat.append(1 + (val-250) % 255)
                else:
                    dat.append(255)
                    dat.append(FluxOp.Space)
                    _write_28bit(val - 249)
                    dat.append(249)
        dat.append(0) # End of Stream
        return dat

    ## _read_track:
    ## Private helper which issues command requests to Greaseweazle.
    def _read_track(self, nr_revs):
        # Request and read all flux timings for this track.
        dat = bytearray()
        self._send_cmd(struct.pack("<2BH", Cmd.ReadFlux, 4, nr_revs+1))
        while True:
            dat += self.ser.read(1)
            dat += self.ser.read(self.ser.in_waiting)
            if dat[-1] == 0:
                break
        # Check flux status. An exception is raised if there was an error.
        self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))
        return dat

    ## read_track:
    ## Read and decode flux and index timings for the current track.
    def read_track(self, nr_revs, nr_retries=5):
        retry = 0
        while True:
            try:
                dat = self._read_track(nr_revs)
            except CmdError as err:
                # Bound as 'err' (not 'error') so the imported
                # greaseweazle.error module is not shadowed here.
                # We may retry on transient overflows.
                if err.code == Ack.FluxOverflow and retry < nr_retries:
                    retry += 1
                else:
                    raise
            else:
                # Success!
                break
        # Decode the flux list and read the index-times list.
        flux_list, index_list = self._decode_flux(dat)
        # Clip the initial partial revolution.
        to_index = index_list[0]
        for i in range(len(flux_list)):
            to_index -= flux_list[i]
            if to_index < 0:
                flux_list[i] = -to_index
                flux_list = flux_list[i:]
                break
        if to_index >= 0:
            # We ran out of flux.
            flux_list = []
        index_list = index_list[1:]
        # Success: Return the requested full index-to-index revolutions.
        return Flux(index_list, flux_list, self.sample_freq)

    ## write_track:
    ## Write the given flux stream to the current track via Greaseweazle.
    def write_track(self, flux_list, terminate_at_index, nr_retries=5):
        # Create encoded data stream.
        dat = self._encode_flux(flux_list)
        retry = 0
        while True:
            try:
                # Write the flux stream to the track via Greaseweazle.
                self._send_cmd(struct.pack("3B", Cmd.WriteFlux, 3,
                                           int(terminate_at_index)))
                self.ser.write(dat)
                self.ser.read(1) # Sync with Greaseweazle
                self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))
            except CmdError as err:
                # Bound as 'err' (not 'error') so the imported
                # greaseweazle.error module is not shadowed here.
                # We may retry on transient underflows.
                if err.code == Ack.FluxUnderflow and retry < nr_retries:
                    retry += 1
                else:
                    raise
            else:
                # Success!
                break

    ## erase_track:
    ## Erase the current track via Greaseweazle.
    def erase_track(self, ticks):
        self._send_cmd(struct.pack("<2BI", Cmd.EraseFlux, 6, int(ticks)))
        self.ser.read(1) # Sync with Greaseweazle
        self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))

    ## source_bytes:
    ## Command Greaseweazle to source 'nr' garbage bytes.
    def source_bytes(self, nr):
        self._send_cmd(struct.pack("<2BI", Cmd.SourceBytes, 6, nr))
        while nr > 0:
            self.ser.read(1)
            waiting = self.ser.in_waiting
            self.ser.read(waiting)
            nr -= 1 + waiting

    ## sink_bytes:
    ## Command Greaseweazle to sink 'nr' garbage bytes.
    def sink_bytes(self, nr):
        self._send_cmd(struct.pack("<2BI", Cmd.SinkBytes, 6, nr))
        dat = bytes(1024*1024)
        while nr > len(dat):
            self.ser.write(dat)
            nr -= len(dat)
        self.ser.write(dat[:nr])
        self.ser.read(1) # Sync with Greaseweazle

    ## bw_stats:
    ## Get min/max bandwidth for previous source/sink command. Mbps (float).
    def bw_stats(self):
        self._send_cmd(struct.pack("3B", Cmd.GetInfo, 3,
                                   GetInfo.BandwidthStats))
        min_bytes, min_usecs, max_bytes, max_usecs = struct.unpack(
            "<4I16x", self.ser.read(32))
        min_bw = (8 * min_bytes) / min_usecs
        max_bw = (8 * max_bytes) / max_usecs
        return min_bw, max_bw

    ##
    ## Delay-property public getters and setters:
    ##  select_delay: Delay (usec) after asserting drive select
    ##  step_delay: Delay (usec) after issuing a head-step command
    ##  seek_settle_delay: Delay (msec) after completing a head-seek operation
    ##  motor_delay: Delay (msec) after turning on drive spindle motor
    ##  auto_off_delay: Timeout (msec) since last command upon which all
    ##  drives are deselected and spindle motors turned off
    ##

    def _set_delays(self):
        # Push all five delay parameters to the unit in one SetParams command.
        self._send_cmd(struct.pack("<3B5H", Cmd.SetParams,
                                   3+5*2, Params.Delays,
                                   self._select_delay, self._step_delay,
                                   self._seek_settle_delay,
                                   self._motor_delay, self._auto_off_delay))

    @property
    def select_delay(self):
        return self._select_delay
    @select_delay.setter
    def select_delay(self, select_delay):
        self._select_delay = select_delay
        self._set_delays()

    @property
    def step_delay(self):
        return self._step_delay
    @step_delay.setter
    def step_delay(self, step_delay):
        self._step_delay = step_delay
        self._set_delays()

    @property
    def seek_settle_delay(self):
        return self._seek_settle_delay
    @seek_settle_delay.setter
    def seek_settle_delay(self, seek_settle_delay):
        self._seek_settle_delay = seek_settle_delay
        self._set_delays()

    @property
    def motor_delay(self):
        return self._motor_delay
    @motor_delay.setter
    def motor_delay(self, motor_delay):
        self._motor_delay = motor_delay
        self._set_delays()

    @property
    def auto_off_delay(self):
        return self._auto_off_delay
    @auto_off_delay.setter
    def auto_off_delay(self, auto_off_delay):
        self._auto_off_delay = auto_off_delay
        self._set_delays()
# Local variables:
# python-indent: 4
# End:
| 32.081784 | 79 | 0.554693 |
import struct
import itertools as it
from greaseweazle import version
from greaseweazle import error
from greaseweazle.flux import Flux
arComms = 10000
Normal = 9600
GetInfo = 0
Update = 1
Seek = 2
Side = 3
SetParams = 4
GetParams = 5
Motor = 6
ReadFlux = 7
WriteFlux = 8
GetFluxStatus = 9
GetIndexTimes = 10
SwitchFwMode = 11
Select = 12
Deselect = 13
SetBusType = 14
SetPin = 15
Reset = 16
EraseFlux = 17
SourceBytes = 18
SinkBytes = 19
str = {
GetInfo: "GetInfo",
Update: "Update",
Seek: "Seek",
Side: "Side",
SetParams: "SetParams",
GetParams: "GetParams",
Motor: "Motor",
ReadFlux: "ReadFlux",
WriteFlux: "WriteFlux",
GetFluxStatus: "GetFluxStatus",
GetIndexTimes: "GetIndexTimes",
SwitchFwMode: "SwitchFwMode",
Select: "Select",
Deselect: "Deselect",
SetBusType: "SetBusType",
SetPin: "SetPin",
Reset: "Reset",
EraseFlux: "EraseFlux",
SourceBytes: "SourceBytes",
SinkBytes: "SinkBytes"
}
BadCommand = 1
NoIndex = 2
NoTrk0 = 3
FluxOverflow = 4
FluxUnderflow = 5
Wrprot = 6
NoUnit = 7
NoBus = 8
BadUnit = 9
BadPin = 10
BadCylinder = 11
str = {
Okay: "Okay",
BadCommand: "Bad Command",
NoIndex: "No Index",
NoTrk0: "Track 0 not found",
FluxOverflow: "Flux Overflow",
FluxUnderflow: "Flux Underflow",
Wrprot: "Disk is Write Protected",
NoUnit: "No drive unit selected",
NoBus: "No bus type (eg. Shugart, IBM/PC) specified",
BadUnit: "Invalid unit number",
BadPin: "Not a modifiable pin",
BadCylinder: "Invalid cylinder"
}
irmware = 0
BandwidthStats = 1
= 0
alid = 0
IBMPC = 1
Shugart = 2
= 2
Astable = 3
cmd, code):
self.cmd = cmd
self.code = code
def cmd_str(self):
return Cmd.str.get(self.cmd[0], "UnknownCmd")
def errcode_str(self):
if self.code == Ack.BadCylinder:
s = Ack.str[Ack.BadCylinder]
return s + " %d" % struct.unpack('2Bb', self.cmd)[2]
return Ack.str.get(self.code, "Unknown Error (%u)" % self.code)
def __str__(self):
return "%s: %s" % (self.cmd_str(), self.errcode_str())
class Unit:
self.hw_submodel, self.usb_speed) = x
if self.hw_model == 0:
self.hw_model = 1
# Check whether firmware is in update mode: limited command set if so.
self.update_mode = (is_main_firmware == 0)
if self.update_mode:
self.update_jumpered = (self.sample_freq & 1)
del self.sample_freq
return
# We are running main firmware: Check whether an update is needed.
# We can use only the GetInfo command if the firmware is out of date.
self.update_needed = (version.major != self.major
or version.minor != self.minor)
if self.update_needed:
return
# Initialise the delay properties with current firmware values.
self._send_cmd(struct.pack("4B", Cmd.GetParams, 4, Params.Delays, 10))
(self._select_delay, self._step_delay,
self._seek_settle_delay, self._motor_delay,
self._auto_off_delay) = struct.unpack("<5H", self.ser.read(10))
## reset:
## Resets communications with Greaseweazle.
    def reset(self):
        """Reset communications with the device by toggling the baud rate
        to the special ClearComms value and back, flushing both buffers."""
        self.ser.reset_output_buffer()
        self.ser.baudrate = ControlCmd.ClearComms
        self.ser.baudrate = ControlCmd.Normal
        self.ser.reset_input_buffer()
## _send_cmd:
## Send given command byte sequence to Greaseweazle.
## Raise a CmdError if command fails.
    def _send_cmd(self, cmd):
        """Send a command byte sequence and check the 2-byte (cmd, status)
        reply.  Raises CmdError on a non-zero status code."""
        self.ser.write(cmd)
        (c,r) = struct.unpack("2B", self.ser.read(2))
        # The reply must echo the command byte we sent.
        error.check(c == cmd[0], "Command returned garbage (%02x != %02x)"
                    % (c, cmd[0]))
        if r != 0:
            raise CmdError(cmd, r)
## seek:
## Seek the selected drive's heads to the specified track (cyl, side).
    def seek(self, cyl, side):
        """Seek the selected drive's heads to (cyl, side)."""
        self._send_cmd(struct.pack("2Bb", Cmd.Seek, 3, cyl))
        self._send_cmd(struct.pack("3B", Cmd.Side, 3, side))
self._send_cmd(struct.pack("3B", Cmd.SetBusType, 3, type))
pin, level):
self._send_cmd(struct.pack("4B", Cmd.SetPin, 4, pin, int(level)))
_cmd(struct.pack("2B", Cmd.Reset, 2))
elf._send_cmd(struct.pack("3B", Cmd.Select, 3, unit))
ack("2B", Cmd.Deselect, 2))
lf._send_cmd(struct.pack("4B", Cmd.Motor, 4, unit, int(state)))
## switch_fw_mode:
## Switch between update bootloader and main firmware.
    def switch_fw_mode(self, mode):
        """Switch between the update bootloader and the main firmware."""
        self._send_cmd(struct.pack("3B", Cmd.SwitchFwMode, 3, int(mode)))
## update_firmware:
## Update Greaseweazle to the given new firmware.
    def update_firmware(self, dat):
        """Upload new main firmware; returns the device's ack byte
        (0 on success, per the surrounding Ack codes)."""
        self._send_cmd(struct.pack("<2BI", Cmd.Update, 6, len(dat)))
        self.ser.write(dat)
        (ack,) = struct.unpack("B", self.ser.read(1))
        return ack
## update_bootloader:
## Update Greaseweazle with the given new bootloader.
    def update_bootloader(self, dat):
        """Upload a new bootloader; returns the device's ack byte.

        The extra 0xdeafbee3 word presumably acts as a safety magic for
        the dangerous bootloader-overwrite path -- TODO confirm against
        the firmware's Update command handler.
        """
        self._send_cmd(struct.pack("<2B2I", Cmd.Update, 10,
                                   len(dat), 0xdeafbee3))
        self.ser.write(dat)
        (ack,) = struct.unpack("B", self.ser.read(1))
        return ack
## _decode_flux:
## Decode the Greaseweazle data stream into a list of flux samples.
    def _decode_flux(self, dat):
        """Decode the device's byte stream into (flux, index) tick lists.

        flux: cumulative-interval sample ticks between flux transitions.
        index: ticks between successive index pulses.
        Stream grammar: 1..249 = short interval; 250..254 introduces a
        two-byte medium interval; 255 escapes an opcode (Index/Space)
        followed by a 28-bit value packed 7 bits per byte; trailing 0
        terminates the stream.
        """
        flux, index = [], []
        assert dat[-1] == 0
        dat_i = it.islice(dat, 0, len(dat)-1)
        ticks, ticks_since_index = 0, 0
        def _read_28bit():
            # 28-bit little-endian value, 7 payload bits per byte (LSB is
            # a marker bit and is masked off).
            val = (next(dat_i) & 254) >> 1
            val += (next(dat_i) & 254) << 6
            val += (next(dat_i) & 254) << 13
            val += (next(dat_i) & 254) << 20
            return val
        try:
            while True:
                i = next(dat_i)
                if i == 255:
                    opcode = next(dat_i)
                    if opcode == FluxOp.Index:
                        val = _read_28bit()
                        index.append(ticks_since_index + ticks + val)
                        # Negative so the next flux interval re-anchors the
                        # since-index counter at the pulse position.
                        ticks_since_index = -(ticks + val)
                    elif opcode == FluxOp.Space:
                        ticks += _read_28bit()
                    else:
                        raise error.Fatal("Bad opcode in flux stream (%d)"
                                          % opcode)
                else:
                    if i < 250:
                        val = i
                    else:
                        val = 250 + (i - 250) * 255
                        val += next(dat_i) - 1
                    ticks += val
                    flux.append(ticks)
                    ticks_since_index += ticks
                    ticks = 0
        except StopIteration:
            pass
        return flux, index
## _encode_flux:
## Convert the given flux timings into an encoded data stream.
    def _encode_flux(self, flux):
        """Encode flux tick intervals into the device's write stream.

        Inverse of _decode_flux's grammar.  Intervals longer than
        nfa_thresh (150us) are written as an explicit Space followed by an
        Astable region toggled at nfa_period (1.25us), i.e. a no-flux
        area.  Stream is terminated by a 0 byte.
        """
        nfa_thresh = round(150e-6 * self.sample_freq) # 150us
        nfa_period = round(1.25e-6 * self.sample_freq) # 1.25us
        dat = bytearray()
        def _write_28bit(x):
            # 7 payload bits per byte; LSB set as a marker bit.
            dat.append(1 | (x<<1) & 255)
            dat.append(1 | (x>>6) & 255)
            dat.append(1 | (x>>13) & 255)
            dat.append(1 | (x>>20) & 255)
        for val in flux:
            if val == 0:
                pass
            elif val < 250:
                # Short interval: single byte.
                dat.append(val)
            elif val > nfa_thresh:
                dat.append(255)
                dat.append(FluxOp.Space)
                _write_28bit(val)
                dat.append(255)
                dat.append(FluxOp.Astable)
                _write_28bit(nfa_period)
            else:
                high = (val-250) // 255
                if high < 5:
                    # Medium interval: two bytes.
                    dat.append(250 + high)
                    dat.append(1 + (val-250) % 255)
                else:
                    # Long interval: explicit Space plus a final 249 tick.
                    dat.append(255)
                    dat.append(FluxOp.Space)
                    _write_28bit(val - 249)
                    dat.append(249)
        dat.append(0) # End of Stream
        return dat
## _read_track:
## Private helper which issues command requests to Greaseweazle.
    def _read_track(self, nr_revs):
        """Read the raw flux byte stream for the current track.

        Requests nr_revs+1 index pulses (the extra one bounds the final
        revolution) and reads until the stream's 0 terminator.
        """
        # Request and read all flux timings for this track.
        dat = bytearray()
        self._send_cmd(struct.pack("<2BH", Cmd.ReadFlux, 4, nr_revs+1))
        while True:
            dat += self.ser.read(1)
            dat += self.ser.read(self.ser.in_waiting)
            if dat[-1] == 0:
                break
        # Check flux status. An exception is raised if there was an error.
        self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))
        return dat
## read_track:
## Read and decode flux and index timings for the current track.
def read_track(self, nr_revs, nr_retries=5):
retry = 0
while True:
try:
dat = self._read_track(nr_revs)
except CmdError as error:
# An error occurred. We may retry on transient overflows.
if error.code == Ack.FluxOverflow and retry < nr_retries:
retry += 1
else:
raise error
else:
# Success!
break
# Decode the flux list and read the index-times list.
flux_list, index_list = self._decode_flux(dat)
# Clip the initial partial revolution.
to_index = index_list[0]
for i in range(len(flux_list)):
to_index -= flux_list[i]
if to_index < 0:
flux_list[i] = -to_index
flux_list = flux_list[i:]
break
if to_index >= 0:
# We ran out of flux.
flux_list = []
index_list = index_list[1:]
# Success: Return the requested full index-to-index revolutions.
return Flux(index_list, flux_list, self.sample_freq)
## write_track:
## Write the given flux stream to the current track via Greaseweazle.
def write_track(self, flux_list, terminate_at_index, nr_retries=5):
# Create encoded data stream.
dat = self._encode_flux(flux_list)
retry = 0
while True:
try:
# Write the flux stream to the track via Greaseweazle.
self._send_cmd(struct.pack("3B", Cmd.WriteFlux, 3,
int(terminate_at_index)))
self.ser.write(dat)
self.ser.read(1) # Sync with Greaseweazle
self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))
except CmdError as error:
# An error occurred. We may retry on transient underflows.
if error.code == Ack.FluxUnderflow and retry < nr_retries:
retry += 1
else:
raise error
else:
# Success!
break
## erase_track:
## Erase the current track via Greaseweazle.
    def erase_track(self, ticks):
        """Erase the current track for the given number of sample ticks."""
        self._send_cmd(struct.pack("<2BI", Cmd.EraseFlux, 6, int(ticks)))
        self.ser.read(1) # Sync with Greaseweazle
        self._send_cmd(struct.pack("2B", Cmd.GetFluxStatus, 2))
## source_bytes:
## Command Greaseweazle to source 'nr' garbage bytes.
    def source_bytes(self, nr):
        """Command the device to source 'nr' garbage bytes and drain them
        (used for bandwidth testing together with bw_stats)."""
        self._send_cmd(struct.pack("<2BI", Cmd.SourceBytes, 6, nr))
        while nr > 0:
            # Block for one byte, then drain whatever else has arrived.
            self.ser.read(1)
            waiting = self.ser.in_waiting
            self.ser.read(waiting)
            nr -= 1 + waiting
## sink_bytes:
## Command Greaseweazle to sink 'nr' garbage bytes.
    def sink_bytes(self, nr):
        """Command the device to sink 'nr' garbage bytes, sent in 1MB
        zero-filled chunks (used for bandwidth testing)."""
        self._send_cmd(struct.pack("<2BI", Cmd.SinkBytes, 6, nr))
        dat = bytes(1024*1024)
        while nr > len(dat):
            self.ser.write(dat)
            nr -= len(dat)
        self.ser.write(dat[:nr])
        self.ser.read(1) # Sync with Greaseweazle
## bw_stats:
## Get min/max bandwidth for previous source/sink command. Mbps (float).
    def bw_stats(self):
        """Return (min_bw, max_bw) USB bandwidth in Mbps for the previous
        source_bytes/sink_bytes command.

        The device reports byte/microsecond counter pairs for its slowest
        and fastest measurement windows; bits-per-microsecond equals Mbps.
        """
        self._send_cmd(struct.pack("3B", Cmd.GetInfo, 3,
                                   GetInfo.BandwidthStats))
        # Reply is four little-endian u32 values followed by 16 pad bytes.
        min_bytes, min_usecs, max_bytes, max_usecs = struct.unpack(
            "<4I16x", self.ser.read(32))
        min_bw = (8 * min_bytes) / min_usecs
        max_bw = (8 * max_bytes) / max_usecs
        return min_bw, max_bw
##
## Delay-property public getters and setters:
## select_delay: Delay (usec) after asserting drive select
## step_delay: Delay (usec) after issuing a head-step command
## seek_settle_delay: Delay (msec) after completing a head-seek operation
## motor_delay: Delay (msec) after turning on drive spindle motor
## auto_off_delay: Timeout (msec) since last command upon which all
## drives are deselected and spindle motors turned off
##
    def _set_delays(self):
        """Push all five cached delay parameters to the device in one
        SetParams(Delays) command (payload: five little-endian u16)."""
        self._send_cmd(struct.pack("<3B5H", Cmd.SetParams,
                                   3+5*2, Params.Delays,
                                   self._select_delay, self._step_delay,
                                   self._seek_settle_delay,
                                   self._motor_delay, self._auto_off_delay))
    # Each delay property caches the value locally and immediately writes
    # the full delay set to the device via _set_delays().
    @property
    def select_delay(self):
        return self._select_delay
    @select_delay.setter
    def select_delay(self, select_delay):
        self._select_delay = select_delay
        self._set_delays()
    @property
    def step_delay(self):
        return self._step_delay
    @step_delay.setter
    def step_delay(self, step_delay):
        self._step_delay = step_delay
        self._set_delays()
    @property
    def seek_settle_delay(self):
        return self._seek_settle_delay
    @seek_settle_delay.setter
    def seek_settle_delay(self, seek_settle_delay):
        self._seek_settle_delay = seek_settle_delay
        self._set_delays()
    @property
    def motor_delay(self):
        return self._motor_delay
    @motor_delay.setter
    def motor_delay(self, motor_delay):
        self._motor_delay = motor_delay
        self._set_delays()
    @property
    def auto_off_delay(self):
        return self._auto_off_delay
    @auto_off_delay.setter
    def auto_off_delay(self, auto_off_delay):
        self._auto_off_delay = auto_off_delay
        self._set_delays()
# Local variables:
# python-indent: 4
# End:
| true | true |
f72a79d3d2c56e71938e0ba2cdbc1fa44b6c0c29 | 3,576 | py | Python | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/performance.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 46 | 2017-05-15T11:15:08.000Z | 2018-07-02T03:32:52.000Z | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/performance.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | null | null | null | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/performance.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 24 | 2017-05-17T03:26:17.000Z | 2018-07-09T07:00:50.000Z |
import ops.cmd
import util.ip
DATA_TYPES = ['all', 'browser', 'cache', 'expensive', 'icmp', 'ip', 'jobobject', 'jobobjectdetails', 'logicaldisk', 'memory', 'networkinterface', 'objects', 'pagingfile', 'physicaldisk', 'process', 'processor', 'system', 'tcp', 'telephony', 'terminalservices', 'thread', 'udp']
class PerformanceCommand(ops.cmd.DszCommand, ):
    """Builder/validator for the 'performance' DSZ plugin command line.

    Exactly one of 'data' (a named counter group from DATA_TYPES) or
    'objectNumber' (a raw performance-object index) must be set; see
    validateInput().  Note: this module targets Python 2 (it uses the
    'unicode' built-in).
    """

    def __init__(self, plugin='performance', data=None, objectNumber=None, initialBufferSize=None, bare=False, target=None, **optdict):
        # These assignments go through the property setters below, so the
        # option values are validated at construction time.
        self.data = data
        self.objectNumber = objectNumber
        self.initialBufferSize = initialBufferSize
        self.bare = bare
        self.target = target
        ops.cmd.DszCommand.__init__(self, plugin, **optdict)

    def _getInitialBufferSize(self):
        return self._opt_initial

    def _setInitialBufferSize(self, bufferSize):
        assert ((bufferSize is None) or ((type(bufferSize) is int) and (bufferSize > 0))), 'bufferSize must be an integer greater than zero; or None to clear this option.'
        self._opt_initial = bufferSize
    # Initial result-buffer size (-initial); positive int or None.
    initialBufferSize = property(_getInitialBufferSize, _setInitialBufferSize)

    def _getObjectNumber(self):
        return self._opt_objectNumber

    def _setObjectNumber(self, objectNumber):
        assert ((objectNumber is None) or ((type(objectNumber) is int) and (objectNumber >= 0))), 'Object number must be a positive integer or zero; or None to clear this option.'
        self._opt_objectNumber = objectNumber
    # Raw performance object number (-objectnum); non-negative int or None.
    objectNumber = property(_getObjectNumber, _setObjectNumber)

    def _getData(self):
        return self._opt_data

    def _setData(self, data):
        assert ((type(data) is str) or (type(data) is unicode) or (data is None)), 'Data must be a string value or None to clear this option.'
        assert ((data is None) or (data.lower() in DATA_TYPES)), 'Data must be one of the valid data type queries.'
        self._opt_data = data
    # Named counter group (-data); must be one of DATA_TYPES or None.
    data = property(_getData, _setData)

    def _getBare(self):
        return self._opt_bare

    def _setBare(self, bare):
        assert (type(bare) is bool), 'Bare must be Boolean.'
        self._opt_bare = bare
    # Bare output flag (-bare).
    bare = property(_getBare, _setBare)

    def _getTarget(self):
        return self._opt_target

    def _setTarget(self, target):
        assert ((type(target) is str) or (type(target) is unicode) or (target is None)), 'Target must be a string representation or None to clear.'
        assert ((target is None) or util.ip.validate(target)), 'Target address must be a valid IPv4 or IPv6 address.'
        self._opt_target = target
    # Target host (-target); valid IPv4/IPv6 address string or None.
    target = property(_getTarget, _setTarget)

    def validateInput(self):
        """Return True iff exactly one of data / objectNumber is set."""
        if ((self.data is not None) and (self.objectNumber is not None)):
            return False
        if ((self.data is None) and (self.objectNumber is None)):
            return False
        return True

    def __str__(self):
        """Render the full command line for the plugin."""
        cmdstr = u''
        for prefix in self.prefixes:
            cmdstr += ('%s ' % prefix)
        cmdstr += (self.plugin + ' ')
        if self.initialBufferSize:
            # BUG FIX: previously read 'self.initalBufferSize' (typo),
            # which raised AttributeError whenever this option was set.
            cmdstr += ('-initial %s ' % self.initialBufferSize)
        if self.objectNumber:
            cmdstr += ('-objectnum %s ' % self.objectNumber)
        if self.data:
            cmdstr += ('-data %s ' % self.data)
        if self.bare:
            cmdstr += '-bare '
        if self.target:
            cmdstr += ('-target %s ' % self.target)
        return ops.utf8(cmdstr)
ops.cmd.command_classes['performance'] = PerformanceCommand | 44.148148 | 278 | 0.635906 |
import ops.cmd
import util.ip
DATA_TYPES = ['all', 'browser', 'cache', 'expensive', 'icmp', 'ip', 'jobobject', 'jobobjectdetails', 'logicaldisk', 'memory', 'networkinterface', 'objects', 'pagingfile', 'physicaldisk', 'process', 'processor', 'system', 'tcp', 'telephony', 'terminalservices', 'thread', 'udp']
class PerformanceCommand(ops.cmd.DszCommand, ):
    """Builder/validator for the 'performance' DSZ plugin command line.

    Exactly one of 'data' (a named counter group from DATA_TYPES) or
    'objectNumber' (a raw performance-object index) must be set; see
    validateInput().  Note: this module targets Python 2 (it uses the
    'unicode' built-in).
    """

    def __init__(self, plugin='performance', data=None, objectNumber=None, initialBufferSize=None, bare=False, target=None, **optdict):
        # These assignments go through the property setters below, so the
        # option values are validated at construction time.
        self.data = data
        self.objectNumber = objectNumber
        self.initialBufferSize = initialBufferSize
        self.bare = bare
        self.target = target
        ops.cmd.DszCommand.__init__(self, plugin, **optdict)

    def _getInitialBufferSize(self):
        return self._opt_initial

    def _setInitialBufferSize(self, bufferSize):
        assert ((bufferSize is None) or ((type(bufferSize) is int) and (bufferSize > 0))), 'bufferSize must be an integer greater than zero; or None to clear this option.'
        self._opt_initial = bufferSize
    # Initial result-buffer size (-initial); positive int or None.
    initialBufferSize = property(_getInitialBufferSize, _setInitialBufferSize)

    def _getObjectNumber(self):
        return self._opt_objectNumber

    def _setObjectNumber(self, objectNumber):
        assert ((objectNumber is None) or ((type(objectNumber) is int) and (objectNumber >= 0))), 'Object number must be a positive integer or zero; or None to clear this option.'
        self._opt_objectNumber = objectNumber
    # Raw performance object number (-objectnum); non-negative int or None.
    objectNumber = property(_getObjectNumber, _setObjectNumber)

    def _getData(self):
        return self._opt_data

    def _setData(self, data):
        assert ((type(data) is str) or (type(data) is unicode) or (data is None)), 'Data must be a string value or None to clear this option.'
        assert ((data is None) or (data.lower() in DATA_TYPES)), 'Data must be one of the valid data type queries.'
        self._opt_data = data
    # Named counter group (-data); must be one of DATA_TYPES or None.
    data = property(_getData, _setData)

    def _getBare(self):
        return self._opt_bare

    def _setBare(self, bare):
        assert (type(bare) is bool), 'Bare must be Boolean.'
        self._opt_bare = bare
    # Bare output flag (-bare).
    bare = property(_getBare, _setBare)

    def _getTarget(self):
        return self._opt_target

    def _setTarget(self, target):
        assert ((type(target) is str) or (type(target) is unicode) or (target is None)), 'Target must be a string representation or None to clear.'
        assert ((target is None) or util.ip.validate(target)), 'Target address must be a valid IPv4 or IPv6 address.'
        self._opt_target = target
    # Target host (-target); valid IPv4/IPv6 address string or None.
    target = property(_getTarget, _setTarget)

    def validateInput(self):
        """Return True iff exactly one of data / objectNumber is set."""
        if ((self.data is not None) and (self.objectNumber is not None)):
            return False
        if ((self.data is None) and (self.objectNumber is None)):
            return False
        return True

    def __str__(self):
        """Render the full command line for the plugin."""
        cmdstr = u''
        for prefix in self.prefixes:
            cmdstr += ('%s ' % prefix)
        cmdstr += (self.plugin + ' ')
        if self.initialBufferSize:
            # BUG FIX: previously read 'self.initalBufferSize' (typo),
            # which raised AttributeError whenever this option was set.
            cmdstr += ('-initial %s ' % self.initialBufferSize)
        if self.objectNumber:
            cmdstr += ('-objectnum %s ' % self.objectNumber)
        if self.data:
            cmdstr += ('-data %s ' % self.data)
        if self.bare:
            cmdstr += '-bare '
        if self.target:
            cmdstr += ('-target %s ' % self.target)
        return ops.utf8(cmdstr)
ops.cmd.command_classes['performance'] = PerformanceCommand | true | true |
f72a79de3ba6052a6dc1da390b52adbd516b2242 | 19,851 | py | Python | train/t2m2/run.py | SungbinChoi/traffic4cast2021 | 3d63b7e90ad0d9c7346f2a6c6c89d605849bf49e | [
"Apache-2.0"
] | null | null | null | train/t2m2/run.py | SungbinChoi/traffic4cast2021 | 3d63b7e90ad0d9c7346f2a6c6c89d605849bf49e | [
"Apache-2.0"
] | null | null | null | train/t2m2/run.py | SungbinChoi/traffic4cast2021 | 3d63b7e90ad0d9c7346f2a6c6c89d605849bf49e | [
"Apache-2.0"
] | null | null | null | import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
# ---- Configuration constants for Traffic4cast training ----
target_city = 'ANTWERP'  # City whose 2020 data is used for validation.
other_city_list = ['ANTWERP', 'BANGKOK', 'BARCELONA', 'MOSCOW', 'BERLIN', 'CHICAGO', 'ISTANBUL', 'MELBOURNE', ]
input_train_data_folder_path = '../../0_data/' + target_city + '/' + 'training'
input_static_data_path = '../../0_data/' + target_city + '/' + target_city + "_static.h5"
out_dir = 'output'  # Checkpoints are written here.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
SEED = int(time.time())  # NOTE: wall-clock seed -> runs are not reproducible.
num_train_file = 180
num_frame_per_day = 288   # 5-minute frames per day.
num_frame_before = 12     # Input frames (1 hour).
num_frame_sequence = 24   # Input + label frames per sample.
num_frame_out = 6
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height=495
width =436
num_channel=8             # Per-frame traffic channels (4 directions x vol/speed).
num_channel_out=8
num_channel_static = 9    # Channels in the *_static.h5 map.
visual_input_channels=105  # 12 frames * 8 channels + 9 static = 105.
visual_output_channels=48  # 6 predicted frames * 8 channels.
vector_input_channels=1
num_epoch_to_train = 100000000  # Effectively "until killed".
save_per_iteration = 5000       # Checkpoint/eval cadence in steps.
global_step_start = 0
initial_checkpoint = None            # Path to resume model weights, or None.
initial_checkpoint_optimizer = None  # Path to resume optimizer state, or None.
LEARNING_RATE = 3e-4
batch_size = 2
batch_size_val = 1
num_thread=2  # Background data-loader threads.
num_groups = 8  # GroupNorm groups used by the conv blocks below.
EPS = 1e-12
np.set_printoptions(precision=8)
NUM_INPUT_CHANNEL = visual_input_channels
NUM_OUTPUT_CHANNEL = visual_output_channels
def get_data_filepath_list_by_year(input_data_folder_path):
  """Split the .h5 files in a folder by year prefix.

  Returns a pair of sorted full-path lists: (files starting '2019',
  files starting '2020').  An .h5 file with any other prefix aborts
  the program.
  """
  buckets = {'2019': [], '2020': []}
  for filename in os.listdir(input_data_folder_path):
    # Only .h5 data files are considered; everything else is skipped.
    if filename.split('.')[-1] != 'h5':
      continue
    year = filename[:4]
    if year not in buckets:
      print('Error - Unknown data year\t', filename)
      exit(-1)
    buckets[year].append(os.path.join(input_data_folder_path, filename))
  return sorted(buckets['2019']), sorted(buckets['2020'])
class Deconv3x3Block(nn.Sequential):
    """3x3 transposed conv (stride 2) -> ELU -> GroupNorm.

    NOTE(review): not referenced by NetA below; appears unused in this
    file.  Module names ('deconv', 'elu', 'norm') are state_dict keys --
    do not rename.
    """
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Deconv3x3Block, self).__init__()
        self.add_module('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3, stride=2, padding=1, bias=True))
        self.add_module('elu', nn.ELU(inplace=True))
        self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class Conv1x1Block(nn.Sequential):
    """Plain 1x1 conv (channel projection), no activation or norm."""
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Conv1x1Block, self).__init__()
        self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=1, stride=1, padding=0, bias=True))
class Conv3x3Block(nn.Sequential):
    """3x3 conv (stride 1, 'same' padding) -> ELU -> GroupNorm."""
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Conv3x3Block, self).__init__()
        self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=3, stride=1, padding=1, bias=True))
        self.add_module('elu', nn.ELU(inplace=True))
        self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class AvgBlock(nn.Sequential):
    """Average-pooling wrapper.  NOTE(review): unused in this file."""
    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super(AvgBlock, self).__init__()
        self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class MaxBlock(nn.Sequential):
    """Max-pooling wrapper.  NOTE(review): unused in this file."""
    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super(MaxBlock, self).__init__()
        self.add_module('pool', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class DownBlock(nn.Module):
    """Encoder stage: optional 0.7x bilinear downscale, two densely
    connected Conv3x3Blocks, then a 1x1 projection to out_size channels.

    do_pool=False keeps the input resolution (used for the stem block).
    """
    def __init__(self,
                 in_size: int,
                 h_size: int,
                 out_size: int,
                 do_pool: bool = True):  # annotation fixed: was 'int'
        super(DownBlock, self).__init__()
        self.do_pool = do_pool
        # Dense connectivity: each conv sees the concatenation of the
        # stage input and all previous conv outputs.
        in_size_cum = in_size
        self.conv_1 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
        in_size_cum += h_size
        self.conv_3 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
        in_size_cum += h_size
        self.conv_2 = Conv1x1Block( in_size=in_size_cum, h_size=out_size)
    def forward(self, x):
        # (removed unused local 'batch_size = len(x)')
        if self.do_pool:
            # Fractional (0.7x) downscale instead of a strided pool.
            x = F.interpolate(x, scale_factor=0.7, mode='bilinear', align_corners=False, recompute_scale_factor=None)
        x_list = []
        x_list.append(x)
        x = self.conv_1(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_3(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_2(x)
        return x
    def cuda(self, ):
        # NOTE: redundant -- nn.Module.cuda() already moves submodules
        # recursively; kept for backward compatibility with callers.
        super(DownBlock, self).cuda()
        self.conv_1.cuda()
        self.conv_3.cuda()
        self.conv_2.cuda()
        return self
class UpBlock(nn.Module):
    """Decoder stage: conv the coarse input, bilinearly upsample it to the
    skip tensor's spatial size, concatenate, and conv again.

    Despite the attribute name, 'deconv' is a Conv3x3Block, not a
    transposed convolution; upsampling is done by F.interpolate.
    """
    def __init__(self,
                 in_size: int,
                 in_size_2: int,
                 h_size: int,
                 out_size: int,
                 ):
        super(UpBlock, self).__init__()
        self.deconv = Conv3x3Block( in_size=in_size, h_size=h_size)
        self.out_conv = Conv3x3Block( in_size=h_size + in_size_2, h_size=out_size)
    def forward(self, x1, x2):
        # x1: coarse feature map from the previous decoder stage;
        # x2: skip connection from the matching encoder stage.
        x1 = self.deconv(x1)
        x1 = F.interpolate(x1, size=x2.size()[2:4], scale_factor=None, mode='bilinear', align_corners=False, recompute_scale_factor=None)
        x = torch.cat([x2, x1], dim=1)
        return self.out_conv(x)
    def cuda(self, ):
        # NOTE: redundant -- nn.Module.cuda() already recurses; kept as-is.
        super(UpBlock, self).cuda()
        self.deconv.cuda()
        self.out_conv.cuda()
        return self
class NetA(nn.Module):
    """U-Net-style encoder/decoder.

    Seven DownBlock stages (each shrinking H/W by ~0.7x), a bottleneck
    Conv3x3Block, seven UpBlocks with skip connections to the matching
    encoder outputs, and a 3x3 head conv + sigmoid producing
    NUM_OUTPUT_CHANNEL maps in [0, 1].
    """
    def __init__(self,):
        super(NetA, self).__init__()
        self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)
        self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block7 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block20 = Conv3x3Block(in_size=128, h_size=128)
        self.block16 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        self.out_conv = nn.Sequential(nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True))
        # Weight initialisation.  (Was wrapped in a no-op 'if 1:' block
        # and iterated named_modules() without using the names.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # (removed unused local 'batch_size = len(x)')
        # Encoder path.
        x0 = self.block0(x)
        x1 = self.block1(x0)
        x2 = self.block2(x1)
        x3 = self.block3(x2)
        x4 = self.block4(x3)
        x5 = self.block5(x4)
        x6 = self.block6(x5)
        x7 = self.block7(x6)
        x = self.block20(x7)
        # Decoder path with skip connections to matching encoder outputs.
        x = self.block16(x, x6)
        x = self.block15(x, x5)
        x = self.block14(x, x4)
        x = self.block13(x, x3)
        x = self.block12(x, x2)
        x = self.block11(x, x1)
        x = self.block10(x, x0)
        x = self.out_conv(x)
        x = torch.sigmoid(x)
        return x
    def cuda(self, ):
        # NOTE: redundant -- nn.Module.cuda() already moves all submodules;
        # kept so existing callers keep working unchanged.
        super(NetA, self).cuda()
        self.block0.cuda()
        self.block1.cuda()
        self.block2.cuda()
        self.block3.cuda()
        self.block4.cuda()
        self.block5.cuda()
        self.block6.cuda()
        self.block7.cuda()
        self.block20.cuda()
        self.block16.cuda()
        self.block15.cuda()
        self.block14.cuda()
        self.block13.cuda()
        self.block12.cuda()
        self.block11.cuda()
        self.block10.cuda()
        self.out_conv.cuda()
        return self
if __name__ == '__main__':
  # Resuming from a checkpoint requires a matching non-zero start step.
  if initial_checkpoint == None:
    assert global_step_start == 0
  else:
    assert global_step_start > 0
  # Seed all RNGs (SEED is wall-clock based, so runs are not reproducible).
  random.seed(SEED)
  np.random.seed(SEED)
  torch.manual_seed(SEED)
  torch.cuda.manual_seed_all(SEED)
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = True
  torch.backends.cudnn.deterministic = False
  try:
    if not os.path.exists(out_dir):
      os.makedirs(out_dir)
  except Exception:
    print('out_dir not made')
  # Model, optimizer and loss.
  net = NetA().cuda()
  optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=LEARNING_RATE)
  loss_func2 = nn.MSELoss()
  # Optionally restore model weights and optimizer state.
  if initial_checkpoint is not None:
    print('Loading ', initial_checkpoint)
    state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
    net.load_state_dict(state_dict, strict=True)
    optimizer_state_dict_ = torch.load(initial_checkpoint_optimizer, map_location=lambda storage, loc: storage)
    optimizer_state_dict = optimizer_state_dict_['optimizer']
    optimizer.load_state_dict(optimizer_state_dict)
  # Static (road-map) data for the target city, scaled to [0, 1] with a
  # leading batch axis: shape (1, num_channel_static, H, W).
  static_data = None
  if 1:
    file_path = input_static_data_path
    fr = h5py.File(file_path, 'r')
    a_group_key = list(fr.keys())[0]
    data = np.asarray(fr[a_group_key], np.uint8)
    static_data = data[np.newaxis,:,:,:]
    static_data = static_data.astype(np.float32)
    static_data = static_data / 255.0
  # Static data for every city, indexed per training file below.
  static_data_list = []
  if 1:
    for other_city in other_city_list:
      file_path = '../../0_data/' + other_city + '/' + other_city + "_static.h5"
      fr = h5py.File(file_path, 'r')
      a_group_key = list(fr.keys())[0]
      data = np.asarray(fr[a_group_key], np.uint8)
      static_data_ = data[np.newaxis,:,:,:]
      static_data_ = static_data_.astype(np.float32)
      static_data_ = static_data_ / 255.0
      static_data_list.append(static_data_)
  # Build the training file list: the target city's 2019 files plus every
  # other city's 2019 files; remember which city each file belongs to so
  # the right static map can be attached.  2020 files are kept for
  # validation (target city only).
  train_static_data_index_list = []
  train_data_filepath_list, val_data_filepath_list = get_data_filepath_list_by_year(input_train_data_folder_path)
  target_city_i = other_city_list.index(target_city)
  for _ in range(len(train_data_filepath_list)):
    train_static_data_index_list.append(target_city_i)
  for o, other_city in enumerate(other_city_list):
    if o == target_city_i:
      continue
    train_data_filepath_list_one, _ = get_data_filepath_list_by_year('../../0_data/' + other_city + '/' + 'training')
    for _ in range(len(train_data_filepath_list_one)):
      train_static_data_index_list.append(o)
    train_data_filepath_list += train_data_filepath_list_one
  # Training samples: every (file index, start frame) sliding window.
  train_set = []
  for i in range(len(train_data_filepath_list)):
    for j in range(num_sequence_per_day):
      train_set.append( (i,j) )
  num_iteration_per_epoch = int(len(train_set) / batch_size)
  print('num_iteration_per_epoch:', num_iteration_per_epoch)
  assert num_iteration_per_epoch > 10
  # Validation samples: non-overlapping windows, keeping only every 5th.
  val_set = []
  val_skip_k = 0
  val_skip_ratio = 5
  for i in range(len(val_data_filepath_list)):
    for j in range(0, num_sequence_per_day, num_frame_sequence):
      val_skip_k += 1
      if val_skip_k % val_skip_ratio == 0:
        val_set.append( (i,j) )
  num_val_iteration_per_epoch = int(len(val_set) / batch_size_val)
  print('num_val_iteration_per_epoch:', num_val_iteration_per_epoch)
  # Work queues between the main loop and the loader threads.
  train_input_queue = queue.Queue()
  train_output_queue = queue.Queue()
  def load_train_multithread():
    # Loader-thread body: pull a batch of (file, frame) indices, read the
    # frames from HDF5, build (input, label) numpy arrays, and push them
    # onto train_output_queue (kept at most ~8 batches deep).
    while True:
      if train_input_queue.empty() or train_output_queue.qsize() > 8:
        time.sleep(0.1)
        continue
      i_j_list = train_input_queue.get()
      train_orig_data_batch_list = []
      train_data_batch_list = []
      train_data_mask_list = []
      train_stat_batch_list = []
      train_static_data_batch_list = []
      for train_i_j in i_j_list:
        (i,j) = train_i_j
        file_path = train_data_filepath_list[i]
        train_static_data_batch_list.append(static_data_list[train_static_data_index_list[i]])
        fr = h5py.File(file_path, 'r')
        a_group_key = list(fr.keys())[0]
        data = fr[a_group_key]
        train_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
      train_data_batch = np.concatenate(train_data_batch_list, axis=0)
      train_static_data_batch = np.concatenate(train_static_data_batch_list,axis=0)
      # First 12 frames are input; labels are frames +1h..+3h sampled as
      # the first 3 frames plus every 3rd frame from offset 5.
      input_data = train_data_batch[:,:num_frame_before ,:,:,:]
      orig_label = train_data_batch[:, num_frame_before:,:,:,:num_channel_out]
      true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:] ), axis=1)
      input_data = input_data.astype(np.float32)
      true_label = true_label.astype(np.float32)
      input_data = input_data / 255.0
      true_label = true_label / 255.0
      # Random 180-degree flip augmentation: reverse both spatial axes and
      # swap the direction channel groups (0:4 <-> 4:8; static 1:5 <-> 5:9).
      flip_dr = np.random.randint(0,2)
      if flip_dr == 1:
        input_data_flipped = copy.deepcopy(input_data)
        input_data_flipped[:,:,:,:,4:8] = input_data[:,:,:,:,0:4]
        input_data_flipped[:,:,:,:,0:4] = input_data[:,:,:,:,4:8]
        input_data = input_data_flipped[:,:,::-1,::-1,:]
        true_label_flipped = copy.deepcopy(true_label)
        true_label_flipped[:,:,:,:,4:8] = true_label[:,:,:,:,0:4]
        true_label_flipped[:,:,:,:,0:4] = true_label[:,:,:,:,4:8]
        true_label = true_label_flipped[:,:,::-1,::-1,:]
        train_static_data_batch_flipped = copy.deepcopy(train_static_data_batch)
        train_static_data_batch_flipped[:,5:9,:,:] = train_static_data_batch[:,1:5,:,:]
        train_static_data_batch_flipped[:,1:5,:,:] = train_static_data_batch[:,5:9,:,:]
        train_static_data_batch = train_static_data_batch_flipped[:,:,::-1,::-1]
      # Fold (frame, channel) into a single channel axis and append the
      # static map channels: (B, 12*8 + 9, H, W) in / (B, 6*8, H, W) out.
      input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size, -1, height, width))
      true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size, -1, height, width))
      input_data = np.concatenate((input_data, train_static_data_batch), axis=1)
      train_output_queue.put( (input_data, true_label) )
  thread_list = []
  assert num_thread > 0
  for i in range(num_thread):
    t = threading.Thread(target=load_train_multithread)
    t.start()
  # ---- Training loop ----
  net.train()
  sum_train_loss = 0.0
  sum_train_iter = 0
  global_step = global_step_start
  for epoch in range(num_epoch_to_train):
    np.random.shuffle(train_set)
    # Enqueue the whole epoch's batches for the loader threads.
    for a in range(num_iteration_per_epoch):
      i_j_list = []
      for train_i_j in train_set[a * batch_size : (a+1) * batch_size]:
        i_j_list.append(train_i_j)
      train_input_queue.put(i_j_list)
    for a in range(num_iteration_per_epoch):
      # Periodic checkpoint + validation pass.
      if global_step % save_per_iteration == 0:
        net.eval()
        state_dict_0 = copy.deepcopy(net.state_dict())
        torch.save(state_dict_0, out_dir + '/%09d_model.pth' % (global_step))
        torch.save(
          {
            'optimizer': optimizer.state_dict(),
            'global_step': global_step,
            'epoch': epoch,
          },
          out_dir + '/%09d_optimizer.pth' % (global_step))
        eval_loss_list = list()
        # NOTE(review): seeded with 0, which biases the reported mean
        # eval loss slightly low.
        eval_loss_list = [0]
        with torch.no_grad():
          for a in range(num_val_iteration_per_epoch):
            val_orig_data_batch_list = []
            val_data_batch_list = []
            val_data_mask_list = []
            val_stat_batch_list = []
            for i_j in val_set[a * batch_size_val : (a+1) * batch_size_val]:
              (i,j) = i_j
              file_path = val_data_filepath_list[i]
              fr = h5py.File(file_path, 'r')
              a_group_key = list(fr.keys())[0]
              data = fr[a_group_key]
              val_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
            val_data_batch = np.concatenate(val_data_batch_list, axis=0)
            # Same input/label construction as the training loader, but
            # without flip augmentation.
            input_data = val_data_batch[:,:num_frame_before ,:,:,:]
            orig_label = val_data_batch[:, num_frame_before:,:,:,:num_channel_out]
            true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:]), axis=1)
            input_data = input_data.astype(np.float32)
            true_label = true_label.astype(np.float32)
            input_data = input_data / 255.0
            true_label = true_label / 255.0
            input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size_val, -1, height, width))
            true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size_val, -1, height, width))
            input_data = np.concatenate((input_data,np.repeat(static_data, batch_size_val, axis=0)), axis=1)
            input = torch.from_numpy(input_data).float().cuda()
            target = torch.from_numpy(true_label).float().cuda()
            prediction = net(input)
            loss = loss_func2(prediction, target)
            eval_loss_list.append(loss.item())
        # Report running train loss since the last checkpoint, and the
        # mean validation loss; also append a line to res.txt.
        avg_train_loss = sum_train_loss / (float(sum_train_iter)+EPS)
        sum_train_loss = 0.0
        sum_train_iter = 0
        print('global_step:', global_step, '\t', 'epoch:', epoch, \
              '\t', 'train_loss:', avg_train_loss, \
              '\t', 'eval_loss:', np.mean(eval_loss_list), \
              '\t', datetime.now(), )
        debug_out = open('res.txt', 'a')
        debug_out.write(str(global_step))
        debug_out.write('\t')
        debug_out.write('%.8f' % float(avg_train_loss))
        debug_out.write('\t')
        debug_out.write('%.8f' % float(np.mean(eval_loss_list)))
        debug_out.write('\n')
        debug_out.close()
        net.train()
      # Wait for the loader threads to produce the next batch.
      while train_output_queue.empty():
        time.sleep(0.1)
      (input_data, true_label) = train_output_queue.get()
      # Standard SGD step: forward, MSE loss, backward, update.
      optimizer.zero_grad()
      input = torch.from_numpy(input_data).float().cuda()
      target = torch.from_numpy(true_label).float().cuda()
      prediction = net(input)
      loss = loss_func2(prediction, target)
      sum_train_iter += 1
      sum_train_loss += loss.item()
      loss.backward()
      optimizer.step()
      global_step += 1
| 37.596591 | 137 | 0.622236 | import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
# --- Experiment configuration (hard-coded for the ANTWERP run) -------------
# Paths are relative to the directory the training script is launched from.
target_city = 'ANTWERP'
other_city_list = ['ANTWERP', 'BANGKOK', 'BARCELONA', 'MOSCOW', 'BERLIN', 'CHICAGO', 'ISTANBUL', 'MELBOURNE', ]
input_train_data_folder_path = '../../0_data/' + target_city + '/' + 'training'
input_static_data_path = '../../0_data/' + target_city + '/' + target_city + "_static.h5"
out_dir = 'output'  # model/optimizer checkpoints are written here
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin the run to GPU 0
SEED = int(time.time())  # wall-clock seed: runs are not reproducible by design
num_train_file = 180
num_frame_per_day = 288          # frames stored in each daily .h5 file
num_frame_before = 12            # frames fed to the network as input
num_frame_sequence = 24          # total frames sliced per sample (input + label pool)
num_frame_out = 6
# number of start offsets j such that [j, j + num_frame_sequence) fits in a day
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height=495
width =436
num_channel=8                    # dynamic channels per frame
num_channel_out=8                # channels kept in the prediction target
num_channel_static = 9           # channels in the *_static.h5 city map
# 12 input frames x 8 channels + 9 static channels = 105 (see the reshape/concat
# in load_train_multithread below)
visual_input_channels=105
# 6 target frames x 8 channels = 48 network outputs
visual_output_channels=48
vector_input_channels=1
num_epoch_to_train = 100000000   # effectively "train until killed"
save_per_iteration = 5000        # checkpoint + eval cadence, in global steps
global_step_start = 0
initial_checkpoint = None        # set both of these to resume a previous run
initial_checkpoint_optimizer = None
LEARNING_RATE = 3e-4
batch_size = 2
batch_size_val = 1
num_thread=2                     # background data-loader threads
num_groups = 8                   # GroupNorm groups used by the conv blocks
EPS = 1e-12                      # guards the average-train-loss division
np.set_printoptions(precision=8)
NUM_INPUT_CHANNEL = visual_input_channels
NUM_OUTPUT_CHANNEL = visual_output_channels
def get_data_filepath_list_by_year(input_data_folder_path):
    """Split the .h5 files of a folder into (2019 files, 2020 files).

    Non-.h5 files are ignored.  Each returned list holds full paths and is
    sorted alphabetically.  A .h5 file whose name starts with neither
    '2019' nor '2020' aborts the program (exit code -1), matching the
    original fail-fast behaviour.
    """
    files_2019 = []
    files_2020 = []
    for name in os.listdir(input_data_folder_path):
        # Only .h5 data files are considered; everything else is skipped.
        if name.split('.')[-1] != 'h5':
            continue
        full_path = os.path.join(input_data_folder_path, name)
        if name.startswith('2019'):
            files_2019.append(full_path)
        elif name.startswith('2020'):
            files_2020.append(full_path)
        else:
            print('Error - Unknown data year\t', name)
            exit(-1)
    return sorted(files_2019), sorted(files_2020)
class Deconv3x3Block(nn.Sequential):
    """ConvTranspose2d(3x3, stride 2) -> ELU -> GroupNorm upsampling block.

    Not referenced elsewhere in this file's visible code; kept as-is.
    NOTE: submodule names ('deconv', 'elu', 'norm') and their registration
    order are part of the checkpoint state_dict layout - do not rename.
    """
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Deconv3x3Block, self).__init__()
        self.add_module('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3, stride=2, padding=1, bias=True))
        self.add_module('elu', nn.ELU(inplace=True))
        self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class Conv1x1Block(nn.Sequential):
    """Bare 1x1 convolution used as a channel projection (no activation/norm).

    Used by DownBlock below to squeeze the densely concatenated features
    back to `out_size` channels.
    """
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Conv1x1Block, self).__init__()
        self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=1, stride=1, padding=0, bias=True))
class Conv3x3Block(nn.Sequential):
    """Conv2d(3x3, stride 1, padding 1) -> ELU -> GroupNorm.

    Spatial size is preserved.  NOTE: submodule names and order define the
    checkpoint state_dict keys - do not rename.
    """
    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super(Conv3x3Block, self).__init__()
        self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=3, stride=1, padding=1, bias=True))
        self.add_module('elu', nn.ELU(inplace=True))
        self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class AvgBlock(nn.Sequential):
    """Thin wrapper around nn.AvgPool2d.

    Not referenced elsewhere in this file's visible code; kept for
    compatibility.
    """
    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super(AvgBlock, self).__init__()
        self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class MaxBlock(nn.Sequential):
    """Thin wrapper around nn.MaxPool2d.

    Not referenced elsewhere in this file's visible code; kept for
    compatibility.
    """
    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super(MaxBlock, self).__init__()
        self.add_module('pool', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class DownBlock(nn.Module):
    """Encoder block: optional 0.7x bilinear downscale, two densely
    connected 3x3 conv stages, then a 1x1 projection to `out_size`.

    The two Conv3x3Blocks each see the concatenation of all previous
    feature maps (DenseNet-style), so their input widths grow by `h_size`
    per stage.  NOTE: attribute names (conv_1, conv_3, conv_2) define the
    checkpoint state_dict keys - do not rename or reorder.
    """
    def __init__(self,
                 in_size: int,
                 h_size: int,
                 out_size: int,
                 do_pool: int = True):
        # do_pool is annotated int but used as a boolean flag.
        super(DownBlock, self).__init__()
        self.do_pool = do_pool
        in_size_cum = in_size
        self.conv_1 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
        in_size_cum += h_size
        self.conv_3 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
        in_size_cum += h_size
        self.conv_2 = Conv1x1Block( in_size=in_size_cum, h_size=out_size)
    def forward(self, x):
        # batch_size is computed but never used; kept verbatim.
        batch_size = len(x)
        if self.do_pool:
            # Shrink the map by a fixed factor of 0.7 instead of pooling.
            x = F.interpolate(x, scale_factor=0.7, mode='bilinear', align_corners=False, recompute_scale_factor=None)
        x_list = []
        x_list.append(x)
        x = self.conv_1(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_3(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_2(x)
        return x
    def cuda(self, ):
        # NOTE(review): nn.Module.cuda() already moves child modules; the
        # explicit per-child calls look redundant but are kept verbatim.
        super(DownBlock, self).cuda()
        self.conv_1.cuda()
        self.conv_3.cuda()
        self.conv_2.cuda()
        return self
class UpBlock(nn.Module):
    """Decoder block: conv the low-res features, bilinearly resize them to
    the skip connection's spatial size, concatenate, and conv again.

    forward(x1, x2): x1 is the coarser decoder feature map, x2 the matching
    encoder (skip) map whose height/width set the output resolution.
    NOTE: attribute names (deconv, out_conv) define the checkpoint
    state_dict keys - do not rename.
    """
    def __init__(self,
                 in_size: int,
                 in_size_2: int,
                 h_size: int,
                 out_size: int,
                 ):
        super(UpBlock, self).__init__()
        self.deconv = Conv3x3Block( in_size=in_size, h_size=h_size)
        self.out_conv = Conv3x3Block( in_size=h_size + in_size_2, h_size=out_size)
    def forward(self, x1, x2):
        x1 = self.deconv(x1)
        # Upsample to exactly the skip map's H x W (avoids rounding drift
        # from the repeated 0.7x downscales in the encoder).
        x1 = F.interpolate(x1, size=x2.size()[2:4], scale_factor=None, mode='bilinear', align_corners=False, recompute_scale_factor=None)
        x = torch.cat([x2, x1], dim=1)
        return self.out_conv(x)
    def cuda(self, ):
        # NOTE(review): redundant with nn.Module.cuda(); kept verbatim.
        super(UpBlock, self).cuda()
        self.deconv.cuda()
        self.out_conv.cuda()
        return self
class NetA(nn.Module):
    """U-Net-style encoder/decoder over 128-channel feature maps.

    Encoder: block0 keeps full resolution (do_pool=False); block1..block7
    each shrink the map by a 0.7x bilinear resize.  Bottleneck: block20.
    Decoder: block16..block10 upsample with skip connections to the
    matching encoder outputs.  A final 3x3 conv maps to NUM_OUTPUT_CHANNEL
    channels, squashed with sigmoid (the training targets are scaled to
    [0, 1]).  Output spatial size equals the input's.

    NOTE: attribute names (block0..block20, out_conv) define the checkpoint
    state_dict layout, and the construction order determines how the init
    RNG is consumed - do not rename or reorder.
    """
    def __init__(self,):
        super(NetA, self).__init__()
        self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)
        self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block7 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block20 = Conv3x3Block(in_size=128, h_size=128)
        self.block16 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        self.out_conv = nn.Sequential(nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True))
        # 'if 1:' is experiment scaffolding kept verbatim; it always runs the
        # weight initialisation below (Kaiming for convs, unit/zero for norms).
        if 1:
            for name, m in self.named_modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                    nn.init.kaiming_normal_(m.weight)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.GroupNorm):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Map (B, NUM_INPUT_CHANNEL, H, W) to (B, NUM_OUTPUT_CHANNEL, H, W)
        with values in (0, 1)."""
        # batch_size is computed but never used; kept verbatim.
        batch_size = len(x)
        x0 = self.block0(x)
        x1 = self.block1(x0)
        x2 = self.block2(x1)
        x3 = self.block3(x2)
        x4 = self.block4(x3)
        x5 = self.block5(x4)
        x6 = self.block6(x5)
        x7 = self.block7(x6)
        x = self.block20(x7)
        # NOTE(review): x7 feeds the bottleneck but has no skip connection of
        # its own; block16 skips to x6.
        x = self.block16(x, x6)
        x = self.block15(x, x5)
        x = self.block14(x, x4)
        x = self.block13(x, x3)
        x = self.block12(x, x2)
        x = self.block11(x, x1)
        x = self.block10(x, x0)
        x = self.out_conv(x)
        x = torch.sigmoid(x)
        return x
    def cuda(self, ):
        # NOTE(review): nn.Module.cuda() already moves every child; the
        # explicit per-block calls are redundant but kept verbatim.
        super(NetA, self).cuda()
        self.block0.cuda()
        self.block1.cuda()
        self.block2.cuda()
        self.block3.cuda()
        self.block4.cuda()
        self.block5.cuda()
        self.block6.cuda()
        self.block7.cuda()
        self.block20.cuda()
        self.block16.cuda()
        self.block15.cuda()
        self.block14.cuda()
        self.block13.cuda()
        self.block12.cuda()
        self.block11.cuda()
        self.block10.cuda()
        self.out_conv.cuda()
        return self
if __name__ == '__main__':
    # Starting fresh and resuming from a checkpoint are mutually exclusive
    # with the global step counter: resume implies a positive start step.
    if initial_checkpoint == None:
        assert global_step_start == 0
    else:
        assert global_step_start > 0
    # Seed every RNG from wall-clock time; cudnn.benchmark trades exact
    # reproducibility for autotuned conv speed.
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    # Best-effort creation of the checkpoint directory.
    try:
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
    except Exception:
        print('out_dir not made')
    net = NetA().cuda()
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=LEARNING_RATE)
    loss_func2 = nn.MSELoss()
    # Optionally restore model weights and optimizer state from disk.
    if initial_checkpoint is not None:
        print('Loading ', initial_checkpoint)
        state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
        net.load_state_dict(state_dict, strict=True)
        optimizer_state_dict_ = torch.load(initial_checkpoint_optimizer, map_location=lambda storage, loc: storage)
        optimizer_state_dict = optimizer_state_dict_['optimizer']
        optimizer.load_state_dict(optimizer_state_dict)
    # --- Static city maps ---------------------------------------------------
    # Target city's static map, scaled to [0, 1]; shape (1, C, H, W) after the
    # leading batch axis is added.  NOTE(review): the h5py handles opened here
    # are never closed explicitly.
    static_data = None
    if 1:
        file_path = input_static_data_path
        fr = h5py.File(file_path, 'r')
        a_group_key = list(fr.keys())[0]
        data = np.asarray(fr[a_group_key], np.uint8)
        static_data = data[np.newaxis,:,:,:]
        static_data = static_data.astype(np.float32)
        static_data = static_data / 255.0
    # Static maps for every city, indexed in other_city_list order.
    static_data_list = []
    if 1:
        for other_city in other_city_list:
            file_path = '../../0_data/' + other_city + '/' + other_city + "_static.h5"
            fr = h5py.File(file_path, 'r')
            a_group_key = list(fr.keys())[0]
            data = np.asarray(fr[a_group_key], np.uint8)
            static_data_ = data[np.newaxis,:,:,:]
            static_data_ = static_data_.astype(np.float32)
            static_data_ = static_data_ / 255.0
            static_data_list.append(static_data_)
    # --- Training file pool -------------------------------------------------
    # Training uses the target city's 2019 files plus every other city's 2019
    # files; train_static_data_index_list maps each file back to its city's
    # static map.  Validation uses only the target city's 2020 files.
    train_static_data_index_list = []
    train_data_filepath_list, val_data_filepath_list = get_data_filepath_list_by_year(input_train_data_folder_path)
    target_city_i = other_city_list.index(target_city)
    for _ in range(len(train_data_filepath_list)):
        train_static_data_index_list.append(target_city_i)
    for o, other_city in enumerate(other_city_list):
        if o == target_city_i:
            continue
        train_data_filepath_list_one, _ = get_data_filepath_list_by_year('../../0_data/' + other_city + '/' + 'training')
        for _ in range(len(train_data_filepath_list_one)):
            train_static_data_index_list.append(o)
        train_data_filepath_list += train_data_filepath_list_one
    # A training sample is a (file index, frame offset) pair; every offset of
    # every file is a candidate.
    train_set = []
    for i in range(len(train_data_filepath_list)):
        for j in range(num_sequence_per_day):
            train_set.append( (i,j) )
    num_iteration_per_epoch = int(len(train_set) / batch_size)
    print('num_iteration_per_epoch:', num_iteration_per_epoch)
    assert num_iteration_per_epoch > 10
    # Validation samples stride by whole sequences and then keep only every
    # fifth candidate to keep evaluation cheap.
    val_set = []
    val_skip_k = 0
    val_skip_ratio = 5
    for i in range(len(val_data_filepath_list)):
        for j in range(0, num_sequence_per_day, num_frame_sequence):
            val_skip_k += 1
            if val_skip_k % val_skip_ratio == 0:
                val_set.append( (i,j) )
    num_val_iteration_per_epoch = int(len(val_set) / batch_size_val)
    print('num_val_iteration_per_epoch:', num_val_iteration_per_epoch)
    # Producer/consumer handoff: the main loop enqueues (i, j) batches on
    # train_input_queue; loader threads emit ready (input, label) arrays on
    # train_output_queue.
    train_input_queue = queue.Queue()
    train_output_queue = queue.Queue()
    def load_train_multithread():
        """Loader-thread body: slice .h5 frames, build one training batch,
        randomly flip it, and push (input_data, true_label) to the output
        queue.  Runs forever; backs off while there is no work or more than
        8 prepared batches are already waiting."""
        while True:
            if train_input_queue.empty() or train_output_queue.qsize() > 8:
                time.sleep(0.1)
                continue
            i_j_list = train_input_queue.get()
            train_orig_data_batch_list = []
            train_data_batch_list = []
            train_data_mask_list = []
            train_stat_batch_list = []
            train_static_data_batch_list = []
            for train_i_j in i_j_list:
                (i,j) = train_i_j
                file_path = train_data_filepath_list[i]
                train_static_data_batch_list.append(static_data_list[train_static_data_index_list[i]])
                # NOTE(review): h5py handles are opened per sample and never
                # closed explicitly.
                fr = h5py.File(file_path, 'r')
                a_group_key = list(fr.keys())[0]
                data = fr[a_group_key]
                train_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
            train_data_batch = np.concatenate(train_data_batch_list, axis=0)
            train_static_data_batch = np.concatenate(train_static_data_batch_list,axis=0)
            # First num_frame_before frames are the input; from the remaining
            # 12 frames the label keeps indices 0,1,2 and 5,8,11 (6 frames).
            input_data = train_data_batch[:,:num_frame_before ,:,:,:]
            orig_label = train_data_batch[:, num_frame_before:,:,:,:num_channel_out]
            true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:] ), axis=1)
            input_data = input_data.astype(np.float32)
            true_label = true_label.astype(np.float32)
            input_data = input_data / 255.0
            true_label = true_label / 255.0
            # 50% augmentation: mirror both spatial axes and swap channel
            # halves [0:4] <-> [4:8] (presumably direction-encoded channels;
            # static channels 1:5 <-> 5:9 are swapped to match).
            flip_dr = np.random.randint(0,2)
            if flip_dr == 1:
                input_data_flipped = copy.deepcopy(input_data)
                input_data_flipped[:,:,:,:,4:8] = input_data[:,:,:,:,0:4]
                input_data_flipped[:,:,:,:,0:4] = input_data[:,:,:,:,4:8]
                input_data = input_data_flipped[:,:,::-1,::-1,:]
                true_label_flipped = copy.deepcopy(true_label)
                true_label_flipped[:,:,:,:,4:8] = true_label[:,:,:,:,0:4]
                true_label_flipped[:,:,:,:,0:4] = true_label[:,:,:,:,4:8]
                true_label = true_label_flipped[:,:,::-1,::-1,:]
                train_static_data_batch_flipped = copy.deepcopy(train_static_data_batch)
                train_static_data_batch_flipped[:,5:9,:,:] = train_static_data_batch[:,1:5,:,:]
                train_static_data_batch_flipped[:,1:5,:,:] = train_static_data_batch[:,5:9,:,:]
                train_static_data_batch = train_static_data_batch_flipped[:,:,::-1,::-1]
            # Fold (frames, channels) into one channel axis, then append the
            # static channels: 12*8 + 9 = 105 = visual_input_channels.
            input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size, -1, height, width))
            true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size, -1, height, width))
            input_data = np.concatenate((input_data, train_static_data_batch), axis=1)
            train_output_queue.put( (input_data, true_label) )
    # Start the loader threads (daemonless: the process must be killed to
    # stop them; num_epoch_to_train is effectively infinite anyway).
    thread_list = []
    assert num_thread > 0
    for i in range(num_thread):
        t = threading.Thread(target=load_train_multithread)
        t.start()
    net.train()
    sum_train_loss = 0.0
    sum_train_iter = 0
    global_step = global_step_start
    for epoch in range(num_epoch_to_train):
        np.random.shuffle(train_set)
        # Enqueue the whole epoch's batches up front for the loader threads.
        for a in range(num_iteration_per_epoch):
            i_j_list = []
            for train_i_j in train_set[a * batch_size : (a+1) * batch_size]:
                i_j_list.append(train_i_j)
            train_input_queue.put(i_j_list)
        for a in range(num_iteration_per_epoch):
            # Every save_per_iteration steps: checkpoint, run validation,
            # and log train/eval losses.
            if global_step % save_per_iteration == 0:
                net.eval()
                state_dict_0 = copy.deepcopy(net.state_dict())
                torch.save(state_dict_0, out_dir + '/%09d_model.pth' % (global_step))
                torch.save(
                    {
                        'optimizer': optimizer.state_dict(),
                        'global_step': global_step,
                        'epoch': epoch,
                    },
                    out_dir + '/%09d_optimizer.pth' % (global_step))
                # NOTE(review): seeding with [0] biases np.mean(eval_loss_list)
                # low; the first assignment on the previous line is dead.
                eval_loss_list = list()
                eval_loss_list = [0]
                with torch.no_grad():
                    # NOTE(review): this loop variable shadows the outer 'a';
                    # harmless in Python (the outer for reassigns it) but
                    # confusing.
                    for a in range(num_val_iteration_per_epoch):
                        val_orig_data_batch_list = []
                        val_data_batch_list = []
                        val_data_mask_list = []
                        val_stat_batch_list = []
                        for i_j in val_set[a * batch_size_val : (a+1) * batch_size_val]:
                            (i,j) = i_j
                            file_path = val_data_filepath_list[i]
                            fr = h5py.File(file_path, 'r')
                            a_group_key = list(fr.keys())[0]
                            data = fr[a_group_key]
                            val_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
                        # Same preprocessing as the training loader, without
                        # the flip augmentation.
                        val_data_batch = np.concatenate(val_data_batch_list, axis=0)
                        input_data = val_data_batch[:,:num_frame_before ,:,:,:]
                        orig_label = val_data_batch[:, num_frame_before:,:,:,:num_channel_out]
                        true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:]), axis=1)
                        input_data = input_data.astype(np.float32)
                        true_label = true_label.astype(np.float32)
                        input_data = input_data / 255.0
                        true_label = true_label / 255.0
                        input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size_val, -1, height, width))
                        true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size_val, -1, height, width))
                        input_data = np.concatenate((input_data,np.repeat(static_data, batch_size_val, axis=0)), axis=1)
                        input = torch.from_numpy(input_data).float().cuda()
                        target = torch.from_numpy(true_label).float().cuda()
                        prediction = net(input)
                        loss = loss_func2(prediction, target)
                        eval_loss_list.append(loss.item())
                # Average training loss since the last report; EPS guards the
                # first report where sum_train_iter is still 0.
                avg_train_loss = sum_train_loss / (float(sum_train_iter)+EPS)
                sum_train_loss = 0.0
                sum_train_iter = 0
                print('global_step:', global_step, '\t', 'epoch:', epoch, \
                    '\t', 'train_loss:', avg_train_loss, \
                    '\t', 'eval_loss:', np.mean(eval_loss_list), \
                    '\t', datetime.now(), )
                # Append a tab-separated log line to res.txt.
                debug_out = open('res.txt', 'a')
                debug_out.write(str(global_step))
                debug_out.write('\t')
                debug_out.write('%.8f' % float(avg_train_loss))
                debug_out.write('\t')
                debug_out.write('%.8f' % float(np.mean(eval_loss_list)))
                debug_out.write('\n')
                debug_out.close()
                net.train()
            # Block until a loader thread has a batch ready.
            while train_output_queue.empty():
                time.sleep(0.1)
            (input_data, true_label) = train_output_queue.get()
            optimizer.zero_grad()
            # NOTE(review): 'input' shadows the builtin; kept verbatim.
            input = torch.from_numpy(input_data).float().cuda()
            target = torch.from_numpy(true_label).float().cuda()
            prediction = net(input)
            loss = loss_func2(prediction, target)
            sum_train_iter += 1
            sum_train_loss += loss.item()
            loss.backward()
            optimizer.step()
            global_step += 1
| true | true |
f72a7bae230cca8397b711afd302b87ab9ee0214 | 995 | py | Python | techminer2/co_citation_network_degree_plot.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | techminer2/co_citation_network_degree_plot.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | techminer2/co_citation_network_degree_plot.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | """
Co-citation Network / Degree Plot
===============================================================================
>>> from techminer2 import *
>>> directory = "/workspaces/techminer2/data/"
>>> file_name = "/workspaces/techminer2/sphinx/images/co_citation_network_degree_plot.png"
>>> co_citation_network_degree_plot(directory=directory).savefig(file_name)
.. image:: images/co_citation_network_degree_plot.png
:width: 700px
:align: center
"""
from .co_citation_matrix import co_citation_matrix
from .network import network
from .network_degree_plot import network_degree_plot
def co_citation_network_degree_plot(
    top_n=50,
    clustering_method="louvain",
    figsize=(8, 8),
    directory="./",
):
    """Plot the node-degree distribution of the co-citation network.

    Builds the co-citation matrix for the ``top_n`` most cited items found
    under ``directory``, clusters the resulting network with
    ``clustering_method``, and returns the figure produced by
    :func:`network_degree_plot`.
    """
    cocitation = co_citation_matrix(top_n=top_n, directory=directory)
    clustered = network(matrix=cocitation, clustering_method=clustering_method)
    return network_degree_plot(clustered, figsize=figsize)
| 22.613636 | 90 | 0.650251 |
from .co_citation_matrix import co_citation_matrix
from .network import network
from .network_degree_plot import network_degree_plot
def co_citation_network_degree_plot(
top_n=50,
clustering_method="louvain",
figsize=(8, 8),
directory="./",
):
matrix = co_citation_matrix(
top_n=top_n,
directory=directory,
)
network_ = network(
matrix=matrix,
clustering_method=clustering_method,
)
return network_degree_plot(
network_,
figsize=figsize,
)
| true | true |
f72a7ebb2f409e0799fadb3a1f5eb1b4ad602fb0 | 7,089 | py | Python | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/query_compare_result_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/query_compare_result_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/query_compare_result_req.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QueryCompareResultReq:
    """Request body for querying DRS comparison-task results.

    Auto-generated SDK model; code left untouched, docstrings translated to
    English.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attributes listed here are masked as "****" by to_dict(); empty for
    # this model.
    sensitive_list = []
    openapi_types = {
        'job_id': 'str',
        'object_level_compare_id': 'str',
        'line_compare_id': 'str',
        'content_compare_id': 'str',
        'current_page': 'int',
        'per_page': 'int'
    }
    attribute_map = {
        'job_id': 'job_id',
        'object_level_compare_id': 'object_level_compare_id',
        'line_compare_id': 'line_compare_id',
        'content_compare_id': 'content_compare_id',
        'current_page': 'current_page',
        'per_page': 'per_page'
    }
    def __init__(self, job_id=None, object_level_compare_id=None, line_compare_id=None, content_compare_id=None, current_page=None, per_page=None):
        """QueryCompareResultReq - a model defined in huaweicloud sdk"""
        self._job_id = None
        self._object_level_compare_id = None
        self._line_compare_id = None
        self._content_compare_id = None
        self._current_page = None
        self._per_page = None
        self.discriminator = None
        self.job_id = job_id
        if object_level_compare_id is not None:
            self.object_level_compare_id = object_level_compare_id
        if line_compare_id is not None:
            self.line_compare_id = line_compare_id
        if content_compare_id is not None:
            self.content_compare_id = content_compare_id
        self.current_page = current_page
        self.per_page = per_page
    @property
    def job_id(self):
        """Gets the job_id of this QueryCompareResultReq.

        Task ID.

        :return: The job_id of this QueryCompareResultReq.
        :rtype: str
        """
        return self._job_id
    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this QueryCompareResultReq.

        Task ID.

        :param job_id: The job_id of this QueryCompareResultReq.
        :type: str
        """
        self._job_id = job_id
    @property
    def object_level_compare_id(self):
        """Gets the object_level_compare_id of this QueryCompareResultReq.

        ID of the object-level comparison task whose result is requested.

        :return: The object_level_compare_id of this QueryCompareResultReq.
        :rtype: str
        """
        return self._object_level_compare_id
    @object_level_compare_id.setter
    def object_level_compare_id(self, object_level_compare_id):
        """Sets the object_level_compare_id of this QueryCompareResultReq.

        ID of the object-level comparison task whose result is requested.

        :param object_level_compare_id: The object_level_compare_id of this QueryCompareResultReq.
        :type: str
        """
        self._object_level_compare_id = object_level_compare_id
    @property
    def line_compare_id(self):
        """Gets the line_compare_id of this QueryCompareResultReq.

        ID of the row-comparison task whose result is requested.

        :return: The line_compare_id of this QueryCompareResultReq.
        :rtype: str
        """
        return self._line_compare_id
    @line_compare_id.setter
    def line_compare_id(self, line_compare_id):
        """Sets the line_compare_id of this QueryCompareResultReq.

        ID of the row-comparison task whose result is requested.

        :param line_compare_id: The line_compare_id of this QueryCompareResultReq.
        :type: str
        """
        self._line_compare_id = line_compare_id
    @property
    def content_compare_id(self):
        """Gets the content_compare_id of this QueryCompareResultReq.

        ID of the content-comparison task whose result is requested.

        :return: The content_compare_id of this QueryCompareResultReq.
        :rtype: str
        """
        return self._content_compare_id
    @content_compare_id.setter
    def content_compare_id(self, content_compare_id):
        """Sets the content_compare_id of this QueryCompareResultReq.

        ID of the content-comparison task whose result is requested.

        :param content_compare_id: The content_compare_id of this QueryCompareResultReq.
        :type: str
        """
        self._content_compare_id = content_compare_id
    @property
    def current_page(self):
        """Gets the current_page of this QueryCompareResultReq.

        Current page number for pagination; applies when querying
        comparison-task results.

        :return: The current_page of this QueryCompareResultReq.
        :rtype: int
        """
        return self._current_page
    @current_page.setter
    def current_page(self, current_page):
        """Sets the current_page of this QueryCompareResultReq.

        Current page number for pagination; applies when querying
        comparison-task results.

        :param current_page: The current_page of this QueryCompareResultReq.
        :type: int
        """
        self._current_page = current_page
    @property
    def per_page(self):
        """Gets the per_page of this QueryCompareResultReq.

        Number of records per page; applies when querying comparison-task
        results.

        :return: The per_page of this QueryCompareResultReq.
        :rtype: int
        """
        return self._per_page
    @per_page.setter
    def per_page(self, per_page):
        """Sets the per_page of this QueryCompareResultReq.

        Number of records per page; applies when querying comparison-task
        results.

        :param per_page: The per_page of this QueryCompareResultReq.
        :type: int
        """
        self._per_page = per_page
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QueryCompareResultReq):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.700405 | 147 | 0.616871 |
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QueryCompareResultReq:
sensitive_list = []
openapi_types = {
'job_id': 'str',
'object_level_compare_id': 'str',
'line_compare_id': 'str',
'content_compare_id': 'str',
'current_page': 'int',
'per_page': 'int'
}
attribute_map = {
'job_id': 'job_id',
'object_level_compare_id': 'object_level_compare_id',
'line_compare_id': 'line_compare_id',
'content_compare_id': 'content_compare_id',
'current_page': 'current_page',
'per_page': 'per_page'
}
def __init__(self, job_id=None, object_level_compare_id=None, line_compare_id=None, content_compare_id=None, current_page=None, per_page=None):
self._job_id = None
self._object_level_compare_id = None
self._line_compare_id = None
self._content_compare_id = None
self._current_page = None
self._per_page = None
self.discriminator = None
self.job_id = job_id
if object_level_compare_id is not None:
self.object_level_compare_id = object_level_compare_id
if line_compare_id is not None:
self.line_compare_id = line_compare_id
if content_compare_id is not None:
self.content_compare_id = content_compare_id
self.current_page = current_page
self.per_page = per_page
@property
def job_id(self):
return self._job_id
@job_id.setter
def job_id(self, job_id):
self._job_id = job_id
@property
def object_level_compare_id(self):
return self._object_level_compare_id
@object_level_compare_id.setter
def object_level_compare_id(self, object_level_compare_id):
self._object_level_compare_id = object_level_compare_id
@property
def line_compare_id(self):
return self._line_compare_id
@line_compare_id.setter
def line_compare_id(self, line_compare_id):
self._line_compare_id = line_compare_id
@property
def content_compare_id(self):
return self._content_compare_id
@content_compare_id.setter
def content_compare_id(self, content_compare_id):
self._content_compare_id = content_compare_id
@property
def current_page(self):
return self._current_page
@current_page.setter
def current_page(self, current_page):
self._current_page = current_page
@property
def per_page(self):
return self._per_page
@per_page.setter
def per_page(self, per_page):
self._per_page = per_page
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, QueryCompareResultReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72a7fa08a350d2bd0ecab68ced86f7e2f4dcc7d | 1,205 | py | Python | ch05/solar_ce/polysol.py | jabrena/space-math | 313d66f48ef642d7ce4d2c9765d55bc4bdade17b | [
"Apache-2.0"
] | 1 | 2021-07-26T18:40:15.000Z | 2021-07-26T18:40:15.000Z | ch05/solar_ce/polysol.py | jabrena/space-math | 313d66f48ef642d7ce4d2c9765d55bc4bdade17b | [
"Apache-2.0"
] | 24 | 2021-07-10T15:31:05.000Z | 2022-03-17T06:43:36.000Z | ch05/solar_ce/polysol.py | jabrena/space-math | 313d66f48ef642d7ce4d2c9765d55bc4bdade17b | [
"Apache-2.0"
] | null | null | null | #cas
def get_infos():
    """Return the TI-Nspire (ti_graphics) platform bindings.

    Returns the tuple (screen_w, screen_h, font_h, poly_set_pixel,
    poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_draw_string,
    poly_get_key) used by the portable rendering layer.  All drawing
    primitives offset y by screen_y0 so drawing happens below the top
    status band.
    """
    import ti_graphics, ti_system
    fnop = lambda : None
    # Defaults before the real primitives are defined below; poly_get_key
    # maps to the blocking ti_system.wait_key.
    # Geometry assumes a 320x210 drawable area below a 30-px band and
    # 10x15-px font cells -- TODO confirm against the device docs.
    screen_w, screen_h, screen_y0, font_w, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_get_key, poly_draw_string = 320, 210, 30, 10, 15, fnop, fnop, fnop, fnop, ti_system.wait_key, fnop
    def poly_fill_rect(x, y, w, h, c):
        """Fill a w x h rectangle at (x, y) with color c."""
        ti_graphics.setColor(c)
        ti_graphics.fillRect(x, y + screen_y0, w, h)
    def poly_set_pixel(x, y, c):
        """Set the pixel at (x, y) to color c."""
        ti_graphics.setPixel(x, y + screen_y0, c)
    def poly_draw_ellipse(x, y, rx, ry, c):
        """Draw the outline of an ellipse centered at (x, y) with radii (rx, ry)."""
        ti_graphics.setColor(c)
        x0, y0 = x - rx, y - ry
        # When the top-left corner falls between integer pixels, draw the arc
        # at both neighbouring positions so the outline stays closed.
        for dy in range(1 + (y0 > int(y0))):
            for dx in range(1 + (x0 > int(x0))):
                ti_graphics.drawArc(x0 + dx, y0 + dy + screen_y0, 2 * rx, 2 * ry, 0, 3600)
    def poly_fill_circle(x, y, r, c):
        """Fill a circle of radius r centered at (x, y) with color c."""
        ti_graphics.setColor(c)
        # BUG FIX: the original passed the undefined name 'xx' here, which
        # raised NameError on first use; the circle centre is 'x'.
        ti_graphics.fillCircle(x, y + screen_y0, r)
    def poly_draw_string(s, x, y, cf, cb):
        """Draw string s at (x, y) in foreground cf over a background cell cb.

        NOTE(review): only one font cell (font_w wide) is cleared regardless
        of len(s); kept as-is.
        """
        poly_fill_rect(x, y, font_w, font_h, cb)
        ti_graphics.setColor(cf)
        ti_graphics.drawString(s, x, y + screen_y0)
    return screen_w, screen_h, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_draw_string, poly_get_key
| 41.551724 | 222 | 0.66556 |
def get_infos():
import ti_graphics, ti_system
fnop = lambda : None
screen_w, screen_h, screen_y0, font_w, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_get_key, poly_draw_string = 320, 210, 30, 10, 15, fnop, fnop, fnop, fnop, ti_system.wait_key, fnop
def poly_fill_rect(x, y, w, h, c):
ti_graphics.setColor(c)
ti_graphics.fillRect(x, y + screen_y0, w, h)
def poly_set_pixel(x, y, c):
ti_graphics.setPixel(x, y + screen_y0, c)
def poly_draw_ellipse(x, y, rx, ry, c):
ti_graphics.setColor(c)
x0, y0 = x - rx, y - ry
for dy in range(1 + (y0 > int(y0))):
for dx in range(1 + (x0 > int(x0))):
ti_graphics.drawArc(x0 + dx, y0 + dy + screen_y0, 2 * rx, 2 * ry, 0, 3600)
def poly_fill_circle(x, y, r, c):
ti_graphics.setColor(c)
ti_graphics.fillCircle(xx, y + screen_y0, r)
def poly_draw_string(s, x, y, cf, cb):
poly_fill_rect(x, y, font_w, font_h, cb)
ti_graphics.setColor(cf)
ti_graphics.drawString(s, x, y + screen_y0)
return screen_w, screen_h, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_draw_string, poly_get_key
| true | true |
f72a7fd3a97aca945537b27072920ef5265cb56e | 6,666 | py | Python | QUBEKit/GUI/gui.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | 14 | 2018-10-19T12:32:39.000Z | 2022-01-07T05:06:06.000Z | QUBEKit/GUI/gui.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | null | null | null | QUBEKit/GUI/gui.py | cole-group/QUBEK | 50f8a9c06396f2222a6fe058bf764a6bd7021e38 | [
"MIT"
] | 6 | 2019-02-26T13:32:58.000Z | 2021-06-01T15:11:27.000Z | from QUBEKit.ligand import Ligand
import os
import sys
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView
import qdarkstyle
class MainWindow(QtWidgets.QMainWindow):
    """Top-level QUBEKit window: a tab bar whose first tab is the ligand setup."""

    def __init__(self, molecule=None, parent=None):
        """Build the window; ``molecule`` is an optional structure file name
        forwarded to :class:`LigandTab`."""
        super(MainWindow, self).__init__(parent)
        # Remember the directory the application was launched from.
        self.cwd = os.getcwd()
        # The ligand-setup page lives in the first (and only) tab.
        self.ligand_tab = LigandTab(molecule_file=molecule)
        self.tabs = QtWidgets.QTabWidget()
        self.tabs.addTab(self.ligand_tab, "Ligand")
        # A vertical layout wraps the tab widget.
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.tabs)
        # QMainWindow requires an explicit central widget to host the layout.
        container = QtWidgets.QWidget()
        container.setLayout(self.layout)
        self.setCentralWidget(container)
class LigandTab(QtWidgets.QWidget):
    """Tab for loading a ligand file, viewing it, and changing its 3D representation."""
    def __init__(self, molecule_file=None, parent=None):
        super(LigandTab, self).__init__(parent)
        # Try to load the molecule if we have been passed a name
        if molecule_file is not None:
            self.molecule = Ligand(molecule_file)
        else:
            self.molecule = None
        # Set the main layout
        self.layout = QtWidgets.QVBoxLayout()
        # Add the main label
        self.main_label = QtWidgets.QLabel("QUBEKit Ligand setup")
        self.main_label.setFont(QtGui.QFont("Aerial", 16, QtGui.QFont.Bold))
        if self.molecule is None:
            self.ligand_name = QtWidgets.QLabel('Load molecule .pdb .mol2 file')
        else:
            self.ligand_name = QtWidgets.QLabel(f'{self.molecule.name}')
        # Add the file loader button
        self.file_button = QtWidgets.QPushButton('Load Molecule')
        self.file_button.clicked.connect(self.load_molecule)
        # Put the label and button in their own row
        top_row = QtWidgets.QHBoxLayout()
        top_row.addWidget(self.ligand_name)
        top_row.addWidget(self.file_button)
        if molecule_file is not None:
            self.viewer = Viewer(self.molecule.filename)
        else:
            self.viewer = Viewer()
        # Add representation settings for the ligand
        self.representation_label = QtWidgets.QLabel('Representation')
        self.representation = QtWidgets.QComboBox()
        self.representation.addItems(['Licorice', 'hyperball', 'spheres', 'partialCharge', 'ball+stick', 'spacefill'])
        # Add text change logic
        self.representation.currentTextChanged.connect(self.change_rep)
        # Own row for the representation widgets (local name is misspelled but harmless)
        repersentation = QtWidgets.QHBoxLayout()
        repersentation.addWidget(self.representation_label)
        repersentation.addWidget(self.representation)
        # Surface control widgets are built but never added to the layout below
        self.surface_group = QtWidgets.QGroupBox('Surface Controls')
        self.surface_group.setCheckable(True)
        self.surface_label = QtWidgets.QLabel('Surface file')
        self.surface_file = QtWidgets.QPushButton('Load surface file')
        self.surface_file.clicked.connect(self.load_surface)
        # Set the master layout
        self.layout.addWidget(self.main_label)
        self.layout.addLayout(top_row)
        self.layout.addWidget(self.viewer.view)
        self.layout.addLayout(repersentation)
        self.setLayout(self.layout)
    def load_molecule(self):
        """Load the molecule into the gui and make an instance of the Ligand class."""
        # Open up the file explorer
        filename = self.load_file(['pdb', 'mol2', 'mol', 'sdf'])
        # NOTE(review): 'mol' in filename matches any name containing "mol",
        # not just .mol files — confirm this looseness is intended.
        if '.pdb' in filename or '.mol2' in filename or 'mol' in filename:
            # Instance the QUBEKit class
            self.molecule = Ligand(filename)
            self.viewer.load_molecule(filename)
            self.ligand_name.setText(f'{self.molecule.name}')
    def load_surface(self):
        """Check if we have a valid surface file then load it into the viewer."""
        # NOTE(review): the selected filename is not yet passed to the viewer.
        filename = self.load_file(['cube'])
    def load_file(self, types):
        """Open a file dialog and return the selected file name (empty string if cancelled)."""
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        # Make the file type selector string
        file_types = 'All Files (*);;'
        for file_type in types:
            file_types += f'{str(file_type).upper()} Files (*.{file_type});;'
        # Get the file name from the window
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(None, "QUBEKit File Selector", "", file_types,
                                                            options=options)
        return filename
    def change_rep(self, representation):
        """Change the representation of the ligand."""
        # Pass the representation to the viewer
        self.viewer.change_view(representation)
class Viewer:
    """Wraps a QWebEngineView showing the bundled view.html page and drives it via JavaScript."""
    def __init__(self, molecule_file=None):
        # Molecule to display once the page finishes loading (may be None).
        self.molecule_file = molecule_file
        self.view = QWebEngineView()
        # Custom page so JS console messages are printed to the terminal.
        self.view.setPage(WebEnginePage(self.view))
        self.view.loadFinished.connect(self.on_load_finish)
        self.html_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "view.html"))
        self.local_url = QUrl.fromLocalFile(self.html_path)
        self.view.load(self.local_url)
    def on_load_finish(self, ok):
        # Once the viewer page is ready, load the pending molecule, if any.
        if ok and self.molecule_file is not None:
            file = os.path.abspath(self.molecule_file)
            self.load_molecule(file)
    def ready(self, return_value):
        # Callback for runJavaScript; prints the JS return value for debugging.
        print(return_value)
    def change_view(self, representation):
        """Switch the display style by calling the page's ChangeView() JS function."""
        print(representation)
        self.view.page().runJavaScript(f'ChangeView("{representation}")', self.ready)
    def load_molecule(self, file):
        """Display a molecule file via the page's moleculeLoader() JS function."""
        self.view.page().runJavaScript(f'moleculeLoader("{file}")', self.ready)
def main():
    """Entry point: build the Qt application and show the main window."""
    # Presumably lets the local page load file:// content; appended before
    # QApplication parses argv. TODO confirm the flag is still required.
    sys.argv.append("--disable-web-security")
    app = QtWidgets.QApplication(sys.argv)
    # With no user argument, argv[1] is the flag appended above, so the
    # else branch is taken and no molecule is preloaded.
    if '.pdb' in sys.argv[1] or '.mol2' in sys.argv[1]:
        molecule_name = sys.argv[1]
        gui = MainWindow(molecule=molecule_name)
    else:
        gui = MainWindow()
    gui.setWindowTitle('QUBEKit-gui')
    # TODO make sure this is the correct error; ReferenceError seems unlikely
    # to be raised by qdarkstyle — verify which exception is actually expected.
    try:
        app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    except ReferenceError:
        pass
    gui.show()
    sys.exit(app.exec_())
# The molecule file is the last command-line argument.
# It still needs to be passed into the page's JavaScript viewer.
class WebEnginePage(QWebEnginePage):
    """Overrides the JavaScript console log so messages print to the terminal for debugging."""
    def javaScriptConsoleMessage(self, level, message, lineNumber, sourceID):
        print("javaScriptConsoleMessage: ", level, message, lineNumber, sourceID)
| 34.360825 | 118 | 0.659466 | from QUBEKit.ligand import Ligand
import os
import sys
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView
import qdarkstyle
class MainWindow(QtWidgets.QMainWindow):
    """Top-level QUBEKit window: a tab container currently holding only the Ligand tab."""
    def __init__(self, molecule=None, parent=None):
        # molecule: optional molecule file path forwarded to the Ligand tab.
        super(MainWindow, self).__init__(parent)
        # Remember the launch directory.
        self.cwd = os.getcwd()
        self.layout = QtWidgets.QVBoxLayout()
        self.tabs = QtWidgets.QTabWidget()
        self.ligand_tab = LigandTab(molecule_file=molecule)
        self.tabs.addTab(self.ligand_tab, "Ligand")
        self.layout.addWidget(self.tabs)
        # Central widget that owns the tab layout.
        central = QtWidgets.QWidget()
        central.setLayout(self.layout)
        self.setCentralWidget(central)
class LigandTab(QtWidgets.QWidget):
    """Tab for loading a ligand file, viewing it, and changing its 3D representation."""
    def __init__(self, molecule_file=None, parent=None):
        super(LigandTab, self).__init__(parent)
        # Load the molecule up front if a file name was supplied.
        if molecule_file is not None:
            self.molecule = Ligand(molecule_file)
        else:
            self.molecule = None
        self.layout = QtWidgets.QVBoxLayout()
        self.main_label = QtWidgets.QLabel("QUBEKit Ligand setup")
        self.main_label.setFont(QtGui.QFont("Aerial", 16, QtGui.QFont.Bold))
        # The name label doubles as a prompt when nothing is loaded.
        if self.molecule is None:
            self.ligand_name = QtWidgets.QLabel('Load molecule .pdb .mol2 file')
        else:
            self.ligand_name = QtWidgets.QLabel(f'{self.molecule.name}')
        self.file_button = QtWidgets.QPushButton('Load Molecule')
        self.file_button.clicked.connect(self.load_molecule)
        # Name label and load button share one row.
        top_row = QtWidgets.QHBoxLayout()
        top_row.addWidget(self.ligand_name)
        top_row.addWidget(self.file_button)
        if molecule_file is not None:
            self.viewer = Viewer(self.molecule.filename)
        else:
            self.viewer = Viewer()
        # Representation selector; choices are forwarded to the viewer.
        self.representation_label = QtWidgets.QLabel('Representation')
        self.representation = QtWidgets.QComboBox()
        self.representation.addItems(['Licorice', 'hyperball', 'spheres', 'partialCharge', 'ball+stick', 'spacefill'])
        self.representation.currentTextChanged.connect(self.change_rep)
        # Local name is misspelled but harmless.
        repersentation = QtWidgets.QHBoxLayout()
        repersentation.addWidget(self.representation_label)
        repersentation.addWidget(self.representation)
        # Surface widgets are built but never added to the layout below.
        self.surface_group = QtWidgets.QGroupBox('Surface Controls')
        self.surface_group.setCheckable(True)
        self.surface_label = QtWidgets.QLabel('Surface file')
        self.surface_file = QtWidgets.QPushButton('Load surface file')
        self.surface_file.clicked.connect(self.load_surface)
        # Assemble the master layout.
        self.layout.addWidget(self.main_label)
        self.layout.addLayout(top_row)
        self.layout.addWidget(self.viewer.view)
        self.layout.addLayout(repersentation)
        self.setLayout(self.layout)
    def load_molecule(self):
        """Pick a molecule file, build a Ligand from it, and show it in the viewer."""
        filename = self.load_file(['pdb', 'mol2', 'mol', 'sdf'])
        # NOTE(review): 'mol' in filename matches any name containing "mol" — confirm.
        if '.pdb' in filename or '.mol2' in filename or 'mol' in filename:
            self.molecule = Ligand(filename)
            self.viewer.load_molecule(filename)
            self.ligand_name.setText(f'{self.molecule.name}')
    def load_surface(self):
        """Pick a surface (.cube) file.

        NOTE(review): the selected filename is not yet passed to the viewer.
        """
        filename = self.load_file(['cube'])
    def load_file(self, types):
        """Open a file dialog filtered to *types* and return the chosen name ('' if cancelled)."""
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        # Build the Qt filter string, e.g. 'PDB Files (*.pdb);;'.
        file_types = 'All Files (*);;'
        for file_type in types:
            file_types += f'{str(file_type).upper()} Files (*.{file_type});;'
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(None, "QUBEKit File Selector", "", file_types,
                                                            options=options)
        return filename
    def change_rep(self, representation):
        """Forward the chosen representation name to the viewer."""
        self.viewer.change_view(representation)
class Viewer:
    """Wraps a QWebEngineView showing the bundled view.html page and drives it via JavaScript."""
    def __init__(self, molecule_file=None):
        # Molecule to display once the page finishes loading (may be None).
        self.molecule_file = molecule_file
        self.view = QWebEngineView()
        # Custom page so JS console messages are printed to the terminal.
        self.view.setPage(WebEnginePage(self.view))
        self.view.loadFinished.connect(self.on_load_finish)
        self.html_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "view.html"))
        self.local_url = QUrl.fromLocalFile(self.html_path)
        self.view.load(self.local_url)
    def on_load_finish(self, ok):
        # Once the viewer page is ready, load the pending molecule, if any.
        if ok and self.molecule_file is not None:
            file = os.path.abspath(self.molecule_file)
            self.load_molecule(file)
    def ready(self, return_value):
        # Callback for runJavaScript; prints the JS return value for debugging.
        print(return_value)
    def change_view(self, representation):
        """Switch the display style by calling the page's ChangeView() JS function."""
        print(representation)
        self.view.page().runJavaScript(f'ChangeView("{representation}")', self.ready)
    def load_molecule(self, file):
        """Display a molecule file via the page's moleculeLoader() JS function."""
        self.view.page().runJavaScript(f'moleculeLoader("{file}")', self.ready)
def main():
    """Entry point: build the Qt application and show the main window."""
    # Presumably lets the local page load file:// content — TODO confirm.
    sys.argv.append("--disable-web-security")
    app = QtWidgets.QApplication(sys.argv)
    # With no user argument, argv[1] is the flag appended above, so the
    # else branch is taken and no molecule is preloaded.
    if '.pdb' in sys.argv[1] or '.mol2' in sys.argv[1]:
        molecule_name = sys.argv[1]
        gui = MainWindow(molecule=molecule_name)
    else:
        gui = MainWindow()
    gui.setWindowTitle('QUBEKit-gui')
    # NOTE(review): ReferenceError seems unlikely to come from qdarkstyle —
    # verify which exception is actually expected here.
    try:
        app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    except ReferenceError:
        pass
    gui.show()
    sys.exit(app.exec_())
class WebEnginePage(QWebEnginePage):
    """Overrides the JavaScript console log so messages print to the terminal for debugging."""
    def javaScriptConsoleMessage(self, level, message, lineNumber, sourceID):
        print("javaScriptConsoleMessage: ", level, message, lineNumber, sourceID)
| true | true |
f72a8051d6d4870f84f6443fa5863030117e9922 | 12,663 | py | Python | rmrb/rmrb_play_ctrl/core/dns/renderer.py | fjfhccfkuk/h_s_x_r_m_r_b_python | 46fe249b1b71f1245296c8b2dbd6e7c29dadade4 | [
"Unlicense"
] | null | null | null | rmrb/rmrb_play_ctrl/core/dns/renderer.py | fjfhccfkuk/h_s_x_r_m_r_b_python | 46fe249b1b71f1245296c8b2dbd6e7c29dadade4 | [
"Unlicense"
] | null | null | null | rmrb/rmrb_play_ctrl/core/dns/renderer.py | fjfhccfkuk/h_s_x_r_m_r_b_python | 46fe249b1b71f1245296c8b2dbd6e7c29dadade4 | [
"Unlicense"
] | null | null | null | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Help for building DNS wire format messages"""
from io import BytesIO
import struct
import random
import time
import rmrb_play_ctrl.core.dns.exception
import rmrb_play_ctrl.core.dns.tsig
from ._compat import long
# Message section indices used by Renderer; order mirrors the DNS wire layout.
QUESTION = 0
ANSWER = 1
AUTHORITY = 2
ADDITIONAL = 3
class Renderer(object):
    """Helper class for building DNS wire-format messages.

    Most applications can use the higher-level Message class and its
    to_wire() method to generate wire-format messages.  This class is for
    those applications which need finer control over the generation of
    messages.

    Typical use::

        r = Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(ANSWER, rrset_1)
        r.add_rrset(ANSWER, rrset_2)
        r.add_rrset(AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(ADDITIONAL, ad_rrset_1)
        r.add_rrset(ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()

    @ivar output: where rendering is written
    @type output: BytesIO object
    @ivar id: the message id
    @type id: int
    @ivar flags: the message flags
    @type flags: int
    @ivar max_size: the maximum size of the message
    @type max_size: int
    @ivar origin: the origin to use when rendering relative names
    @type origin: dns name object
    @ivar compress: the compression table
    @type compress: dict
    @ivar section: the section currently being rendered
    @type section: int (QUESTION, ANSWER, AUTHORITY, or ADDITIONAL)
    @ivar counts: list of the number of RRs in each section
    @type counts: int list of length 4
    @ivar mac: the MAC of the rendered message (if TSIG was used)
    @type mac: string
    """
    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer.

        @param id: the message id; a random id is chosen when None
        @param flags: the DNS message flags
        @param max_size: the maximum message size; the default is 65535.
        If rendering results in a message greater than I{max_size},
        then TooBig will be raised.
        @param origin: the origin to use when rendering relative names
        """
        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        # Reserve 12 bytes for the DNS header; it is filled in later by
        # write_header() once the section counts are known.
        self.output.write(b'\x00' * 12)
        self.mac = ''
    def _rollback(self, where):
        """Truncate the output buffer at offset I{where}, and remove any
        compression table entries that pointed beyond the truncation
        point.

        @param where: the offset
        @type where: int
        """
        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]
    def _set_section(self, section):
        """Set the renderer's current section.

        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.

        @param section: the section
        @type section: int
        @raises FormError: an attempt was made to set
        a section value less than the current section.
        """
        if self.section != section:
            if self.section > section:
                raise rmrb_play_ctrl.core.dns.exception.FormError
            self.section = section
    def add_question(self, qname, rdtype, rdclass=rmrb_play_ctrl.core.dns.rdataclass.IN):
        """Add a question to the message.

        @param qname: the question name
        @param rdtype: the question rdata type
        @param rdclass: the question rdata class
        """
        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[QUESTION] += 1
    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param rrset: the rrset
        """
        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[section] += n
    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param name: the owner name
        @param rdataset: the rdataset
        """
        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[section] += n
    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message.

        @param edns: The EDNS level to use.
        @param ednsflags: EDNS flag values.
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @param options: The EDNS options list
        @see: RFC 2671
        """
        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= long(0xFF00FFFF)
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(struct.pack('!BHHIH', 0, rmrb_play_ctrl.core.dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
                # Write a placeholder 0 length, render the option, then seek
                # back and patch in the real 2-byte length.
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            # Backpatch the OPT record's total RDLENGTH.
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[ADDITIONAL] += 1
    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=rmrb_play_ctrl.core.dns.tsig.default_algorithm):
        """Add a TSIG signature to the message.

        @param keyname: the TSIG key name
        @param secret: the secret to use
        @param fudge: TSIG time fudge
        @param id: the message id to encode in the tsig signature
        @param tsig_error: TSIG error code; default is 0.
        @param other_data: TSIG other data.
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @param algorithm: the TSIG algorithm to use
        """
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        # Sign everything rendered so far (header counts must already be
        # written; see write_header()).
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = rmrb_play_ctrl.core.dns.tsig.sign(s,
                                                                        keyname,
                                                                        secret,
                                                                        int(time.time()),
                                                                        fudge,
                                                                        id,
                                                                        tsig_error,
                                                                        other_data,
                                                                        request_mac,
                                                                        algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack('!HHIH', rmrb_play_ctrl.core.dns.rdatatype.TSIG,
                                      rmrb_play_ctrl.core.dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        # Backpatch the TSIG RDLENGTH, then bump the ADDITIONAL count in the
        # already-written header so the TSIG RR is included.
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)
    def write_header(self):
        """Write the DNS message header.

        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """
        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)
    def get_wire(self):
        """Return the wire format message.

        @rtype: string
        """
        return self.output.getvalue()
| 38.372727 | 107 | 0.60657 |
from io import BytesIO
import struct
import random
import time
import rmrb_play_ctrl.core.dns.exception
import rmrb_play_ctrl.core.dns.tsig
from ._compat import long
# Message section indices used by Renderer; order mirrors the DNS wire layout.
QUESTION = 0
ANSWER = 1
AUTHORITY = 2
ADDITIONAL = 3
class Renderer(object):
    """Helper for building DNS wire-format messages section by section.

    Writes into a BytesIO buffer; the 12-byte header is reserved up front and
    backpatched by write_header() once the section counts are known.
    """
    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        # id: message id (random if None); flags: DNS header flags;
        # max_size: rendering beyond this raises TooBig;
        # origin: origin used when rendering relative names.
        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        # Reserve 12 bytes for the header; filled in later by write_header().
        self.output.write(b'\x00' * 12)
        self.mac = ''
    def _rollback(self, where):
        """Truncate the buffer at offset *where* and drop compression entries past it."""
        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]
    def _set_section(self, section):
        """Advance to *section*; raises FormError if it would move backwards."""
        if self.section != section:
            if self.section > section:
                raise rmrb_play_ctrl.core.dns.exception.FormError
            self.section = section
    def add_question(self, qname, rdtype, rdclass=rmrb_play_ctrl.core.dns.rdataclass.IN):
        """Append a question record; rolls back and raises TooBig past max_size."""
        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[QUESTION] += 1
    def add_rrset(self, section, rrset, **kw):
        """Render *rrset* into *section*; extra kwargs go to rrset.to_wire()."""
        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[section] += n
    def add_rdataset(self, section, name, rdataset, **kw):
        """Render *rdataset* into *section* with owner *name*."""
        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[section] += n
    def add_edns(self, edns, ednsflags, payload, options=None):
        """Append an EDNS OPT pseudo-record (RFC 2671) to ADDITIONAL."""
        # Make sure the EDNS version in ednsflags agrees with edns.
        ednsflags &= long(0xFF00FFFF)
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(struct.pack('!BHHIH', 0, rmrb_play_ctrl.core.dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
                # Placeholder 0 length, render the option, then backpatch
                # the real 2-byte length.
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            # Backpatch the OPT record's total RDLENGTH.
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        self.counts[ADDITIONAL] += 1
    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=rmrb_play_ctrl.core.dns.tsig.default_algorithm):
        """Sign everything rendered so far and append the TSIG RR (call after write_header())."""
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = rmrb_play_ctrl.core.dns.tsig.sign(s,
                                                                        keyname,
                                                                        secret,
                                                                        int(time.time()),
                                                                        fudge,
                                                                        id,
                                                                        tsig_error,
                                                                        other_data,
                                                                        request_mac,
                                                                        algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack('!HHIH', rmrb_play_ctrl.core.dns.rdatatype.TSIG,
                                      rmrb_play_ctrl.core.dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise rmrb_play_ctrl.core.dns.exception.TooBig
        # Backpatch the TSIG RDLENGTH, then bump the ADDITIONAL count in the
        # already-written header so the TSIG RR is included.
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)
    def write_header(self):
        """Backpatch the 12-byte DNS header (after sections, before any TSIG)."""
        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)
    def get_wire(self):
        """Return the rendered wire-format message as bytes."""
        return self.output.getvalue()
| true | true |
f72a80ec4e6d319a9bd40ffec2bbee1e64fbb6c7 | 16,207 | py | Python | espei/datasets.py | jwsiegel2510/ESPEI | cb72f676138c96d560d8b83cea6b7ca2da100078 | [
"MIT"
] | null | null | null | espei/datasets.py | jwsiegel2510/ESPEI | cb72f676138c96d560d8b83cea6b7ca2da100078 | [
"MIT"
] | null | null | null | espei/datasets.py | jwsiegel2510/ESPEI | cb72f676138c96d560d8b83cea6b7ca2da100078 | [
"MIT"
] | null | null | null | import fnmatch, warnings, json, os
import numpy as np
from six import string_types
from tinydb.storages import MemoryStorage
from tinydb import where
from espei.utils import PickleableTinyDB
from espei.core_utils import recursive_map
class DatasetError(Exception):
    """Raised when an ESPEI dataset is invalid or internally inconsistent."""
def check_dataset(dataset):
    """Ensure that the dataset is valid and consistent.

    Currently supports the following validation checks:
    * data shape is valid
    * phases and components used match phases and components entered
    * individual shapes of keys, such as ZPF, sublattice configs and site ratios

    Planned validation checks:
    * all required keys are present

    Note that this follows some of the implicit assumptions in ESPEI at the time
    of writing, such that conditions are only P, T, configs for single phase and
    essentially only T for ZPF data.

    Parameters
    ----------
    dataset : dict
        Dictionary of the standard ESPEI dataset.

    Returns
    -------
    None

    Raises
    ------
    DatasetError
        If an error is found in the dataset
    """
    is_activity = dataset['output'].startswith('ACR')
    is_zpf = dataset['output'] == 'ZPF'
    # Anything that is not ZPF or activity data is treated as single phase data.
    is_single_phase = (not is_zpf) and (not is_activity)
    components = dataset['components']
    conditions = dataset['conditions']
    values = dataset['values']
    phases = dataset['phases']
    if is_single_phase:
        solver = dataset['solver']
        sublattice_configurations = solver['sublattice_configurations']
        sublattice_site_ratios = solver['sublattice_site_ratios']
        sublattice_occupancies = solver.get('sublattice_occupancies', None)
        # check for mixing (any sublattice that is itself a list of species)
        is_mixing = any(any(isinstance(subl, list) for subl in config) for config in sublattice_configurations)
        # pad the values of sublattice occupancies if there is no mixing
        if sublattice_occupancies is None and not is_mixing:
            sublattice_occupancies = [None] * len(sublattice_configurations)
        elif sublattice_occupancies is None:
            raise DatasetError('At least one sublattice in the following sublattice configurations is mixing, but the "sublattice_occupancies" key is empty: {}'.format(sublattice_configurations))
    if is_activity:
        conditions = dataset['conditions']
        # accessing the key validates that a reference state was entered
        ref_state = dataset['reference_state']
        comp_conditions = {k: v for k, v in conditions.items() if k.startswith('X_')}
    # check that the shape of conditions match the values
    num_pressure = np.atleast_1d(conditions['P']).size
    num_temperature = np.atleast_1d(conditions['T']).size
    if is_activity:
        values_shape = np.array(values).shape
        # check each composition condition is the same shape
        num_x_conds = [len(v) for _, v in comp_conditions.items()]
        if num_x_conds.count(num_x_conds[0]) != len(num_x_conds):
            raise DatasetError('All compositions in conditions are not the same shape. Note that conditions cannot be broadcast. Composition conditions are {}'.format(comp_conditions))
        conditions_shape = (num_pressure, num_temperature, num_x_conds[0])
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (P, T, compositions): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    elif is_single_phase:
        values_shape = np.array(values).shape
        num_configs = len(dataset['solver']['sublattice_configurations'])
        conditions_shape = (num_pressure, num_temperature, num_configs)
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (P, T, configs): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    elif is_zpf:
        # ZPF data is only conditioned on temperature; compare scalar sizes.
        values_shape = len(values)
        conditions_shape = num_temperature
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (T): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    # check that all of the correct phases are present
    if is_zpf:
        phases_entered = set(phases)
        phases_used = set()
        for zpf in values:
            for tieline in zpf:
                phases_used.add(tieline[0])
        if len(phases_entered - phases_used) > 0:
            raise DatasetError('Phases entered {} do not match phases used {}.'.format(phases_entered, phases_used))
    # check that all of the components used match the components entered
    components_entered = set(components)
    components_used = set()
    if is_single_phase:
        for config in sublattice_configurations:
            for sl in config:
                if isinstance(sl, list):
                    components_used.update(set(sl))
                else:
                    components_used.add(sl)
        comp_dof = 0
    elif is_activity:
        components_used.update({c.split('_')[1] for c in comp_conditions.keys()})
        # mass balance of components
        comp_dof = len(comp_conditions.keys())
    elif is_zpf:
        for zpf in values:
            for tieline in zpf:
                tieline_comps = set(tieline[1])
                components_used.update(tieline_comps)
                if len(components_entered - tieline_comps - {'VA'}) != 1:
                    raise DatasetError('Degree of freedom error for entered components {} in tieline {} of ZPF {}'.format(components_entered, tieline, zpf))
        # handle special case of mass balance in ZPFs
        comp_dof = 1
    if len(components_entered - components_used - {'VA'}) > comp_dof or len(components_used - components_entered) > 0:
        raise DatasetError('Components entered {} do not match components used {}.'.format(components_entered, components_used))
    # check that the ZPF values are formatted properly
    if is_zpf:
        for zpf in values:
            for tieline in zpf:
                phase = tieline[0]
                component_list = tieline[1]
                mole_fraction_list = tieline[2]
                # check that the phase is a string, components a list of strings,
                # and the fractions are a list of float
                # (uses the builtin `str`; six.string_types is (str,) on Python 3)
                if not isinstance(phase, str):
                    raise DatasetError('The first element in the tieline {} for the ZPF point {} should be a string. Instead it is a {} of value {}'.format(tieline, zpf, type(phase), phase))
                if not all([isinstance(comp, str) for comp in component_list]):
                    raise DatasetError('The second element in the tieline {} for the ZPF point {} should be a list of strings. Instead it is a {} of value {}'.format(tieline, zpf, type(component_list), component_list))
                if not all([(isinstance(mole_frac, (int, float)) or mole_frac is None) for mole_frac in mole_fraction_list]):
                    raise DatasetError('The last element in the tieline {} for the ZPF point {} should be a list of numbers. Instead it is a {} of value {}'.format(tieline, zpf, type(mole_fraction_list), mole_fraction_list))
                # check that the shape of components list and mole fractions list is the same
                if len(component_list) != len(mole_fraction_list):
                    raise DatasetError('The length of the components list and mole fractions list in tieline {} for the ZPF point {} should be the same.'.format(tieline, zpf))
                # check that all mole fractions are less than one
                # BUG FIX: np.float was a deprecated alias removed in NumPy 1.24;
                # the builtin float is the documented replacement (None -> nan).
                mf_sum = np.nansum(np.array(mole_fraction_list, dtype=float))
                if any([mf is not None for mf in mole_fraction_list]) and mf_sum > 1.0:
                    raise DatasetError('Mole fractions for tieline {} for the ZPF point {} sum to greater than one.'.format(tieline, zpf))
    # check that the site ratios are valid as well as site occupancies, if applicable
    if is_single_phase:
        nconfigs = len(sublattice_configurations)
        noccupancies = len(sublattice_occupancies)
        if nconfigs != noccupancies:
            raise DatasetError('Number of sublattice configurations ({}) does not match the number of sublattice occupancies ({})'.format(nconfigs, noccupancies))
        for configuration, occupancy in zip(sublattice_configurations, sublattice_occupancies):
            if len(configuration) != len(sublattice_site_ratios):
                raise DatasetError('Sublattice configuration {} and sublattice site ratio {} describe different numbers of sublattices ({} and {}).'.format(configuration, sublattice_site_ratios, len(configuration), len(sublattice_site_ratios)))
            if is_mixing:
                configuration_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in configuration)
                occupancy_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in occupancy)
                if configuration_shape != occupancy_shape:
                    raise DatasetError('The shape of sublattice configuration {} ({}) does not match the shape of occupancies {} ({})'.format(configuration, configuration_shape, occupancy, occupancy_shape))
                # check that sublattice interactions are sorted. Related to sorting in espei.core_utils.get_samples
                for subl in configuration:
                    if isinstance(subl, (list, tuple)) and sorted(subl) != subl:
                        raise DatasetError('Sublattice {} in configuration {} is must be sorted in alphabetic order ({})'.format(subl, configuration, sorted(subl)))
def clean_dataset(dataset):
    """Clean an ESPEI dataset dictionary by coercing numeric fields to floats.

    Parameters
    ----------
    dataset : dict
        Dictionary of the standard ESPEI dataset. Assumed to be valid,
        i.e. it has already passed ``check_dataset``.

    Returns
    -------
    dict
        The same dataset object with expected numeric values converted
        to floats (modified in place and returned for convenience).
    """
    dataset["conditions"] = {cond: recursive_map(float, vals) for cond, vals in dataset["conditions"].items()}
    solver = dataset.get("solver")
    if solver is not None:
        solver["sublattice_site_ratios"] = recursive_map(float, solver["sublattice_site_ratios"])
        occupancies = solver.get("sublattice_occupancies")
        if occupancies is not None:
            solver["sublattice_occupancies"] = recursive_map(float, occupancies)
    if dataset["output"] != "ZPF":
        # Non-ZPF values are purely numeric and can be converted wholesale.
        dataset["values"] = recursive_map(float, dataset["values"])
        return dataset
    # ZPF values are nested [[phase, components, mole fractions], ...] tielines.
    # Only the mole fractions are numeric; "null" tieline points (where every
    # mole fraction is None) are kept untouched.
    cleaned_tielines = []
    for tieline in dataset["values"]:
        cleaned_points = []
        for point in tieline:
            if all(frac is None for frac in point[2]):
                cleaned_points.append(point)
            else:
                cleaned_points.append([point[0], point[1], recursive_map(float, point[2])])
        cleaned_tielines.append(cleaned_points)
    dataset["values"] = cleaned_tielines
    return dataset
def apply_tags(datasets, tags):
    """Modify datasets in place using the tags system.

    Parameters
    ----------
    datasets : PickleableTinyDB
        Datasets to modify.
    tags : dict
        Dictionary of {tag: update_dict}.

    Returns
    -------
    None
        The database documents are updated in place.

    Notes
    -----
    In general, everything replaces or is additive. Update rules:
    1. If the update value is a list, extend the existing list (empty list if key does not exist)
    2. If the update value is scalar, override the previous (deleting any old value, if present)
    3. If the update value is a dict, update the existing dict (empty dict if dict does not exist)
    4. Otherwise, the value is updated, overriding the previous

    Examples
    --------
    >>> from espei.utils import PickleableTinyDB
    >>> from tinydb.storages import MemoryStorage
    >>> ds = PickleableTinyDB(storage=MemoryStorage)
    >>> doc_id = ds.insert({'tags': ['dft'], 'excluded_model_contributions': ['contrib']})
    >>> my_tags = {'dft': {'excluded_model_contributions': ['idmix', 'mag'], 'weight': 5.0}}
    >>> from espei.datasets import apply_tags
    >>> apply_tags(ds, my_tags)
    >>> all_data = ds.all()
    >>> all(d['excluded_model_contributions'] == ['contrib', 'idmix', 'mag'] for d in all_data)
    True
    >>> all(d['weight'] == 5.0 for d in all_data)
    True
    """
    for tag, updates in tags.items():
        tagged_docs = datasets.search(where("tags").test(lambda entry: tag in entry))
        for doc in tagged_docs:
            for key, new_value in updates.items():
                if isinstance(new_value, list):
                    # Rule 1: lists are additive.
                    doc[key] = doc.get(key, []) + new_value
                elif np.isscalar(new_value):
                    # Rule 2: scalars replace the old value.
                    doc[key] = new_value
                elif isinstance(new_value, dict):
                    # Rule 3: dicts merge into the existing dict.
                    merged = doc.get(key, dict())
                    merged.update(new_value)
                    doc[key] = merged
                else:
                    # Rule 4: everything else replaces.
                    doc[key] = new_value
        datasets.write_back(tagged_docs)
def add_ideal_exclusions(datasets):
    """Add 'idmix' exclusions to legacy single phase datasets.

    If there are single phase datasets present and none of them have an
    ``excluded_model_contributions`` key, add ideal mixing exclusions
    automatically and emit a DeprecationWarning that this feature will be
    going away.

    Parameters
    ----------
    datasets : PickleableTinyDB

    Returns
    -------
    PickleableTinyDB
    """
    single_phase_query = where('solver').exists()
    all_single_phase = datasets.search(single_phase_query)
    no_exclusions = datasets.search(single_phase_query & (~where('excluded_model_contributions').exists()))
    # Only trigger the legacy behavior when *every* single phase dataset
    # lacks an exclusions key.
    if len(all_single_phase) > 0 and len(all_single_phase) == len(no_exclusions):
        idmix_warning = (
            "Single phase datasets are present, but there are no specified `excluded_model_contributions` keys present. "
            "'idmix' exclusion will be added automatically for backwards compatibility, but this will go away in ESPEI v0.8. "
            "If you want ideal mixing contributions to be excluded, see the documentation for building datasets: http://espei.org/en/latest/input_data.html"
        )
        warnings.warn(idmix_warning, DeprecationWarning)
        print(idmix_warning)
        import espei
        # Developer tripwire: this shim must be deleted before v0.8 ships.
        major = int(espei.__version__.split('.')[0])
        minor = int(espei.__version__.split('.')[1])
        if minor >= 8 or major > 0:
            raise ValueError("ESPEI developer: remove the automatic addition of ideal mixing exclusions")
        for ds in all_single_phase:
            ds['excluded_model_contributions'] = ['idmix']
        datasets.write_back(all_single_phase)
    return datasets
def load_datasets(dataset_filenames):
    """Create a PickleableTinyDB with the data from a list of filenames.

    Parameters
    ----------
    dataset_filenames : [str]
        List of filenames to load as datasets.

    Returns
    -------
    PickleableTinyDB
        In-memory database of checked and cleaned datasets.

    Raises
    ------
    ValueError
        If a file is not valid JSON (the filename is added to the message).
    DatasetError
        If a file fails dataset validation (the filename is added to the message).
    """
    db = PickleableTinyDB(storage=MemoryStorage)
    for fname in dataset_filenames:
        with open(fname) as dataset_file:
            try:
                candidate = json.load(dataset_file)
                check_dataset(candidate)
                db.insert(clean_dataset(candidate))
            except ValueError as e:
                # Re-raise with the offending filename for easier debugging.
                raise ValueError('JSON Error in {}: {}'.format(fname, e))
            except DatasetError as e:
                raise DatasetError('Dataset Error in {}: {}'.format(fname, e))
    return db
def recursive_glob(start, pattern='*.json'):
    """Recursively glob for the given pattern from the start directory.

    Parameters
    ----------
    start : str
        Path of the directory to walk while globbing for files.
    pattern : str
        Filename pattern to match in the glob (fnmatch syntax).

    Returns
    -------
    [str]
        Sorted list of matched file paths (joined onto their directories).
    """
    matched = []
    for dirpath, _dirnames, filenames in os.walk(start):
        matched.extend(os.path.join(dirpath, name) for name in fnmatch.filter(filenames, pattern))
    return sorted(matched)
| 45.144847 | 244 | 0.649596 | import fnmatch, warnings, json, os
import numpy as np
from six import string_types
from tinydb.storages import MemoryStorage
from tinydb import where
from espei.utils import PickleableTinyDB
from espei.core_utils import recursive_map
class DatasetError(Exception):
pass
def check_dataset(dataset):
    """Ensure that the dataset is internally self-consistent.

    Parameters
    ----------
    dataset : dict
        Dictionary of the standard ESPEI dataset.

    Raises
    ------
    DatasetError
        If any inconsistency is found in the dataset.
    """
    is_activity = dataset['output'].startswith('ACR')
    is_zpf = dataset['output'] == 'ZPF'
    is_single_phase = (not is_zpf) and (not is_activity)
    components = dataset['components']
    conditions = dataset['conditions']
    values = dataset['values']
    phases = dataset['phases']
    if is_single_phase:
        solver = dataset['solver']
        sublattice_configurations = solver['sublattice_configurations']
        sublattice_site_ratios = solver['sublattice_site_ratios']
        sublattice_occupancies = solver.get('sublattice_occupancies', None)
        # A configuration is "mixing" if any sublattice holds multiple species;
        # only mixing configurations require explicit occupancies.
        is_mixing = any([any([isinstance(subl, list) for subl in config]) for config in sublattice_configurations])
        if sublattice_occupancies is None and not is_mixing:
            sublattice_occupancies = [None]*len(sublattice_configurations)
        elif sublattice_occupancies is None:
            raise DatasetError('At least one sublattice in the following sublattice configurations is mixing, but the "sublattice_occupancies" key is empty: {}'.format(sublattice_configurations))
    if is_activity:
        conditions = dataset['conditions']
        # Accessing 'reference_state' validates that the key is present
        # (KeyError propagates if it is missing).
        ref_state = dataset['reference_state']
        comp_conditions = {k: v for k, v in conditions.items() if k.startswith('X_')}
    # Shape checks: the values array must match the conditions grid.
    num_pressure = np.atleast_1d(conditions['P']).size
    num_temperature = np.atleast_1d(conditions['T']).size
    if is_activity:
        values_shape = np.array(values).shape
        num_x_conds = [len(v) for _, v in comp_conditions.items()]
        if num_x_conds.count(num_x_conds[0]) != len(num_x_conds):
            raise DatasetError('All compositions in conditions are not the same shape. Note that conditions cannot be broadcast. Composition conditions are {}'.format(comp_conditions))
        conditions_shape = (num_pressure, num_temperature, num_x_conds[0])
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (P, T, compositions): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    elif is_single_phase:
        values_shape = np.array(values).shape
        num_configs = len(dataset['solver']['sublattice_configurations'])
        conditions_shape = (num_pressure, num_temperature, num_configs)
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (P, T, configs): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    elif is_zpf:
        # ZPF shapes are plain integer counts: one tieline entry per temperature.
        # (The original wrote `(len(values))`, which is not a tuple; use the
        # bare ints to make that explicit without changing behavior.)
        values_shape = len(values)
        conditions_shape = num_temperature
        if conditions_shape != values_shape:
            raise DatasetError('Shape of conditions (T): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
    # Check that every phase entered is actually used by a tieline.
    if is_zpf:
        phases_entered = set(phases)
        phases_used = set()
        for zpf in values:
            for tieline in zpf:
                phases_used.add(tieline[0])
        if len(phases_entered - phases_used) > 0:
            raise DatasetError('Phases entered {} do not match phases used {}.'.format(phases_entered, phases_used))
    # Check that the components used match the components entered, within the
    # degrees of freedom allowed by the data type (VA is always implicit).
    components_entered = set(components)
    components_used = set()
    if is_single_phase:
        for config in sublattice_configurations:
            for sl in config:
                if isinstance(sl, list):
                    components_used.update(set(sl))
                else:
                    components_used.add(sl)
        comp_dof = 0
    elif is_activity:
        components_used.update({c.split('_')[1] for c in comp_conditions.keys()})
        comp_dof = len(comp_conditions.keys())
    elif is_zpf:
        for zpf in values:
            for tieline in zpf:
                tieline_comps = set(tieline[1])
                components_used.update(tieline_comps)
                if len(components_entered - tieline_comps - {'VA'}) != 1:
                    raise DatasetError('Degree of freedom error for entered components {} in tieline {} of ZPF {}'.format(components_entered, tieline, zpf))
        comp_dof = 1
    if len(components_entered - components_used - {'VA'}) > comp_dof or len(components_used - components_entered) > 0:
        raise DatasetError('Components entered {} do not match components used {}.'.format(components_entered, components_used))
    # Structural checks on each ZPF tieline point: [phase, components, fractions].
    if is_zpf:
        for zpf in values:
            for tieline in zpf:
                phase = tieline[0]
                component_list = tieline[1]
                mole_fraction_list = tieline[2]
                if not isinstance(phase, string_types):
                    raise DatasetError('The first element in the tieline {} for the ZPF point {} should be a string. Instead it is a {} of value {}'.format(tieline, zpf, type(phase), phase))
                if not all([isinstance(comp, string_types) for comp in component_list]):
                    raise DatasetError('The second element in the tieline {} for the ZPF point {} should be a list of strings. Instead it is a {} of value {}'.format(tieline, zpf, type(component_list), component_list))
                if not all([(isinstance(mole_frac, (int, float)) or mole_frac is None) for mole_frac in mole_fraction_list]):
                    raise DatasetError('The last element in the tieline {} for the ZPF point {} should be a list of numbers. Instead it is a {} of value {}'.format(tieline, zpf, type(mole_fraction_list), mole_fraction_list))
                if len(component_list) != len(mole_fraction_list):
                    raise DatasetError('The length of the components list and mole fractions list in tieline {} for the ZPF point {} should be the same.'.format(tieline, zpf))
                # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in
                # 1.24; the builtin `float` is the documented drop-in replacement.
                mf_sum = np.nansum(np.array(mole_fraction_list, dtype=float))
                if any([mf is not None for mf in mole_fraction_list]) and mf_sum > 1.0:
                    raise DatasetError('Mole fractions for tieline {} for the ZPF point {} sum to greater than one.'.format(tieline, zpf))
    # Check site ratios and occupancies for single phase (mixing) data.
    if is_single_phase:
        nconfigs = len(sublattice_configurations)
        noccupancies = len(sublattice_occupancies)
        if nconfigs != noccupancies:
            raise DatasetError('Number of sublattice configurations ({}) does not match the number of sublattice occupancies ({})'.format(nconfigs, noccupancies))
        for configuration, occupancy in zip(sublattice_configurations, sublattice_occupancies):
            if len(configuration) != len(sublattice_site_ratios):
                raise DatasetError('Sublattice configuration {} and sublattice site ratio {} describe different numbers of sublattices ({} and {}).'.format(configuration, sublattice_site_ratios, len(configuration), len(sublattice_site_ratios)))
            if is_mixing:
                configuration_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in configuration)
                occupancy_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in occupancy)
                if configuration_shape != occupancy_shape:
                    raise DatasetError('The shape of sublattice configuration {} ({}) does not match the shape of occupancies {} ({})'.format(configuration, configuration_shape, occupancy, occupancy_shape))
                # Sublattice interactions must be sorted; related to sorting in
                # espei.core_utils.get_samples.
                for subl in configuration:
                    # BUGFIX: `sorted()` returns a list, so comparing against a
                    # tuple sublattice was always unequal and raised even for
                    # correctly sorted tuples; compare like types instead.
                    # (Also fixed "is must be" grammar in the message.)
                    if isinstance(subl, (list, tuple)) and sorted(subl) != list(subl):
                        raise DatasetError('Sublattice {} in configuration {} must be sorted in alphabetic order ({})'.format(subl, configuration, sorted(subl)))
def clean_dataset(dataset):
dataset["conditions"] = {k: recursive_map(float, v) for k, v in dataset["conditions"].items()}
solver = dataset.get("solver")
if solver is not None:
solver["sublattice_site_ratios"] = recursive_map(float, solver["sublattice_site_ratios"])
occupancies = solver.get("sublattice_occupancies")
if occupancies is not None:
solver["sublattice_occupancies"] = recursive_map(float, occupancies)
if dataset["output"] == "ZPF":
values = dataset["values"]
new_values = []
for tieline in values:
new_tieline = []
for tieline_point in tieline:
if all([comp is None for comp in tieline_point[2]]):
new_tieline.append(tieline_point)
else:
new_tieline.append([tieline_point[0], tieline_point[1], recursive_map(float, tieline_point[2])])
new_values.append(new_tieline)
dataset["values"] = new_values
else:
dataset["values"] = recursive_map(float, dataset["values"])
return dataset
def apply_tags(datasets, tags):
for tag, update_dict in tags.items():
matching_datasets = datasets.search(where("tags").test(lambda x: tag in x))
for newkey, newval in update_dict.items():
for match in matching_datasets:
if isinstance(newval, list):
match[newkey] = match.get(newkey, []) + newval
elif np.isscalar(newval):
match[newkey] = newval
elif isinstance(newval, dict):
d = match.get(newkey, dict())
d.update(newval)
match[newkey] = d
else:
match[newkey] = newval
datasets.write_back(matching_datasets)
def add_ideal_exclusions(datasets):
all_single_phase = datasets.search(where('solver').exists())
no_exclusions = datasets.search(where('solver').exists() & (~where('excluded_model_contributions').exists()))
if len(all_single_phase) > 0 and len(all_single_phase) == len(no_exclusions):
idmix_warning = "Single phase datasets are present, but there are no specified `excluded_model_contributions` keys present. " + \
"'idmix' exclusion will be added automatically for backwards compatibility, but this will go away in ESPEI v0.8. " + \
"If you want ideal mixing contributions to be excluded, see the documentation for building datasets: http://espei.org/en/latest/input_data.html"
warnings.warn(idmix_warning, DeprecationWarning)
print(idmix_warning)
import espei
if int(espei.__version__.split('.')[1]) >= 8 or int(espei.__version__.split('.')[0]) > 0:
raise ValueError("ESPEI developer: remove the automatic addition of ideal mixing exclusions")
for ds in all_single_phase:
ds['excluded_model_contributions'] = ['idmix']
datasets.write_back(all_single_phase)
return datasets
def load_datasets(dataset_filenames):
ds_database = PickleableTinyDB(storage=MemoryStorage)
for fname in dataset_filenames:
with open(fname) as file_:
try:
d = json.load(file_)
check_dataset(d)
ds_database.insert(clean_dataset(d))
except ValueError as e:
raise ValueError('JSON Error in {}: {}'.format(fname, e))
except DatasetError as e:
raise DatasetError('Dataset Error in {}: {}'.format(fname, e))
return ds_database
def recursive_glob(start, pattern='*.json'):
matches = []
for root, dirnames, filenames in os.walk(start):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return sorted(matches)
| true | true |
f72a8175c066c50bcc168821cbcb8091f9cd439a | 6,618 | py | Python | Collections-a-installer/community-general-2.4.0/plugins/modules/ipa_service.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/ipa_service.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 1 | 2022-03-12T02:25:26.000Z | 2022-03-12T02:25:26.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/ipa_service.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ipa_service
author: Cédric Parent (@cprh)
short_description: Manage FreeIPA service
description:
- Add and delete an IPA service using IPA API.
options:
krbcanonicalname:
description:
- Principal of the service.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
hosts:
description:
- Defines the list of 'ManagedBy' hosts.
required: false
type: list
elements: str
force:
description:
- Force principal name even if host is not in DNS.
required: false
type: bool
state:
description: State to ensure.
required: false
default: present
choices: ["absent", "present"]
type: str
extends_documentation_fragment:
- community.general.ipa.documentation
'''
# NOTE: the third example previously used `host:`, which is not a valid module
# option; the option declared in DOCUMENTATION and argument_spec is `hosts`.
EXAMPLES = r'''
- name: Ensure service is present
  community.general.ipa_service:
    name: http/host01.example.com
    state: present
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret
- name: Ensure service is absent
  community.general.ipa_service:
    name: http/host01.example.com
    state: absent
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret
- name: Changing Managing hosts list
  community.general.ipa_service:
    name: http/host01.example.com
    hosts:
    - host01.example.com
    - host02.example.com
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret
'''
RETURN = r'''
service:
description: Service as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class ServiceIPAClient(IPAClient):
    """Thin client for FreeIPA service objects.

    Each method maps one-to-one onto a FreeIPA JSON-RPC ``service_*`` command;
    all network handling lives in the inherited ``_post_json``.
    """
    def __init__(self, module, host, port, protocol):
        super(ServiceIPAClient, self).__init__(module, host, port, protocol)
    def service_find(self, name):
        # 'all': True asks the API to return every attribute of the match.
        return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
    def service_add(self, name, service):
        """Create the service with the attribute dict ``service``."""
        return self._post_json(method='service_add', name=name, item=service)
    def service_mod(self, name, service):
        """Modify the service, setting the attributes in ``service``."""
        return self._post_json(method='service_mod', name=name, item=service)
    def service_del(self, name):
        """Delete the service."""
        return self._post_json(method='service_del', name=name)
    def service_disable(self, name):
        """Disable the service (revokes its Kerberos key and certificates)."""
        return self._post_json(method='service_disable', name=name)
    def service_add_host(self, name, item):
        """Add ``item`` to the service's list of 'ManagedBy' hosts."""
        return self._post_json(method='service_add_host', name=name, item={'host': item})
    def service_remove_host(self, name, item):
        """Remove ``item`` from the service's list of 'ManagedBy' hosts."""
        return self._post_json(method='service_remove_host', name=name, item={'host': item})
def get_service_dict(force=None, krbcanonicalname=None):
    """Assemble the IPA API attribute dict, omitting parameters left as None.

    Note that falsy-but-set values (e.g. ``force=False``) are kept.
    """
    item = {}
    for key, value in (('force', force), ('krbcanonicalname', krbcanonicalname)):
        if value is not None:
            item[key] = value
    return item
def get_service_diff(client, ipa_host, module_service):
    """Return the keys of ``module_service`` that differ from the IPA entry.

    Keys that cannot be updated via service_mod ('force', the identifying
    'krbcanonicalname') are stripped from ``module_service`` in place before
    the comparison.
    """
    for non_updateable_key in ('force', 'krbcanonicalname'):
        # pop with a default mirrors "del if present"
        module_service.pop(non_updateable_key, None)
    return client.get_diff(ipa_data=ipa_host, module_data=module_service)
def ensure(module, client):
    """Reconcile the IPA service with the requested module parameters.

    Creates/updates/deletes the service and synchronizes its 'ManagedBy'
    hosts. All mutating calls are skipped in check mode, but ``changed``
    is still reported.

    Returns
    -------
    tuple
        (changed, service) where ``service`` is the post-operation
        ``service_find`` result.
    """
    name = module.params['krbcanonicalname']
    state = module.params['state']
    hosts = module.params['hosts']
    ipa_service = client.service_find(name=name)
    module_service = get_service_dict(force=module.params['force'])
    changed = False
    if state in ['present', 'enabled', 'disabled']:
        if not ipa_service:
            # Service does not exist yet: create it.
            changed = True
            if not module.check_mode:
                client.service_add(name=name, service=module_service)
        else:
            # Service exists: push only the attributes that differ.
            diff = get_service_diff(client, ipa_service, module_service)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    data = {}
                    for key in diff:
                        data[key] = module_service.get(key)
                    client.service_mod(name=name, service=data)
        if hosts is not None:
            # NOTE(review): this membership test assumes service_find returns a
            # dict (possibly empty) rather than None when the service is absent
            # — TODO confirm against IPAClient._post_json.
            if 'managedby_host' in ipa_service:
                # Remove hosts no longer requested, then add missing ones.
                for host in ipa_service['managedby_host']:
                    if host not in hosts:
                        if not module.check_mode:
                            client.service_remove_host(name=name, item=host)
                        changed = True
                for host in hosts:
                    if host not in ipa_service['managedby_host']:
                        if not module.check_mode:
                            client.service_add_host(name=name, item=host)
                        changed = True
            else:
                # No managed hosts recorded yet: add every requested host.
                for host in hosts:
                    if not module.check_mode:
                        client.service_add_host(name=name, item=host)
                    changed = True
    else:
        # state == 'absent': delete the service if it exists.
        if ipa_service:
            changed = True
            if not module.check_mode:
                client.service_del(name=name)
    return changed, client.service_find(name=name)
def main():
    """Ansible entry point: parse parameters, log in to IPA and apply state."""
    spec = ipa_argument_spec()
    spec.update(
        krbcanonicalname=dict(type='str', required=True, aliases=['name']),
        force=dict(type='bool', required=False),
        hosts=dict(type='list', required=False, elements='str'),
        state=dict(type='str', required=False, default='present',
                   choices=['present', 'absent']),
    )
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    params = module.params
    ipa_client = ServiceIPAClient(module=module,
                                  host=params['ipa_host'],
                                  port=params['ipa_port'],
                                  protocol=params['ipa_prot'])
    try:
        ipa_client.login(username=params['ipa_user'], password=params['ipa_pass'])
        changed, host = ensure(module, ipa_client)
        module.exit_json(changed=changed, host=host)
    except Exception as e:
        # Report any failure (login, RPC, validation) through Ansible.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
    main()
| 31.665072 | 110 | 0.636144 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ipa_service
author: Cédric Parent (@cprh)
short_description: Manage FreeIPA service
description:
- Add and delete an IPA service using IPA API.
options:
krbcanonicalname:
description:
- Principal of the service.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
type: str
hosts:
description:
- Defines the list of 'ManagedBy' hosts.
required: false
type: list
elements: str
force:
description:
- Force principal name even if host is not in DNS.
required: false
type: bool
state:
description: State to ensure.
required: false
default: present
choices: ["absent", "present"]
type: str
extends_documentation_fragment:
- community.general.ipa.documentation
'''
EXAMPLES = r'''
- name: Ensure service is present
community.general.ipa_service:
name: http/host01.example.com
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
- name: Ensure service is absent
community.general.ipa_service:
name: http/host01.example.com
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
- name: Changing Managing hosts list
community.general.ipa_service:
name: http/host01.example.com
host:
- host01.example.com
- host02.example.com
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = r'''
service:
description: Service as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class ServiceIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(ServiceIPAClient, self).__init__(module, host, port, protocol)
def service_find(self, name):
return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
def service_add(self, name, service):
return self._post_json(method='service_add', name=name, item=service)
def service_mod(self, name, service):
return self._post_json(method='service_mod', name=name, item=service)
def service_del(self, name):
return self._post_json(method='service_del', name=name)
def service_disable(self, name):
return self._post_json(method='service_disable', name=name)
def service_add_host(self, name, item):
return self._post_json(method='service_add_host', name=name, item={'host': item})
def service_remove_host(self, name, item):
return self._post_json(method='service_remove_host', name=name, item={'host': item})
def get_service_dict(force=None, krbcanonicalname=None):
data = {}
if force is not None:
data['force'] = force
if krbcanonicalname is not None:
data['krbcanonicalname'] = krbcanonicalname
return data
def get_service_diff(client, ipa_host, module_service):
non_updateable_keys = ['force', 'krbcanonicalname']
for key in non_updateable_keys:
if key in module_service:
del module_service[key]
return client.get_diff(ipa_data=ipa_host, module_data=module_service)
def ensure(module, client):
name = module.params['krbcanonicalname']
state = module.params['state']
hosts = module.params['hosts']
ipa_service = client.service_find(name=name)
module_service = get_service_dict(force=module.params['force'])
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_service:
changed = True
if not module.check_mode:
client.service_add(name=name, service=module_service)
else:
diff = get_service_diff(client, ipa_service, module_service)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_service.get(key)
client.service_mod(name=name, service=data)
if hosts is not None:
if 'managedby_host' in ipa_service:
for host in ipa_service['managedby_host']:
if host not in hosts:
if not module.check_mode:
client.service_remove_host(name=name, item=host)
changed = True
for host in hosts:
if host not in ipa_service['managedby_host']:
if not module.check_mode:
client.service_add_host(name=name, item=host)
changed = True
else:
for host in hosts:
if not module.check_mode:
client.service_add_host(name=name, item=host)
changed = True
else:
if ipa_service:
changed = True
if not module.check_mode:
client.service_del(name=name)
return changed, client.service_find(name=name)
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(
krbcanonicalname=dict(type='str', required=True, aliases=['name']),
force=dict(type='bool', required=False),
hosts=dict(type='list', required=False, elements='str'),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent']))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
client = ServiceIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, host = ensure(module, client)
module.exit_json(changed=changed, host=host)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| true | true |
f72a82afacbfe8c4a9d386c3c67b25d21ca8d4cb | 3,193 | py | Python | var/spack/repos/builtin/packages/meson/package.py | jjellio/spack | 1fa16c1b9b08119be429821fbd2f3251458d5063 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-12-28T14:38:41.000Z | 2020-12-28T14:38:41.000Z | var/spack/repos/builtin/packages/meson/package.py | jjellio/spack | 1fa16c1b9b08119be429821fbd2f3251458d5063 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2021-02-15T01:32:22.000Z | 2022-03-31T06:06:05.000Z | var/spack/repos/builtin/packages/meson/package.py | karcaw/spack | 2b3ca6c3e39f486c9cf277f2ca8fef12d54300c2 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Meson(PythonPackage):
    """Meson is a portable open source build system meant to be both
    extremely fast, and as user friendly as possible."""
    homepage = "http://mesonbuild.com/"
    url = "https://github.com/mesonbuild/meson/archive/0.49.0.tar.gz"
    maintainers = ['michaelkuhn']
    version('0.57.1', sha256='0c043c9b5350e9087cd4f6becf6c0d10b1d618ca3f919e0dcca2cdf342360d5d')
    version('0.57.0', sha256='fd26a27c1a509240c668ebd29d280649d9239cf8684ead51d5cb499d1e1188bd')
    version('0.56.2', sha256='aaae961c3413033789248ffe6762589e80b6cf487c334d0b808e31a32c48f35f')
    version('0.56.0', sha256='a9ca7adf66dc69fbb7e583f7c7aef16b9fe56ec2874a3d58747e69a3affdf300')
    version('0.55.3', sha256='2b276df50c5b13ccdbfb14d3333141e9e7985aca31b60400b3f3e0be2ee6897e')
    version('0.55.2', sha256='56244896e56c2b619f819d047b6de412ecc5250975ee8717f1e329113d178e06')
    version('0.55.1', sha256='c7ebf2fff5934a974c7edd1aebb5fc9c3e1da5ae3184a29581fde917638eea39')
    version('0.55.0', sha256='9034c943c8cf4d734c0e18e5ba038dd762fcdcc614c45b41703305da8382e90c')
    version('0.54.3', sha256='c25caff342b5368bfe33fab6108f454fcf12e2f2cef70817205872ddef669e8b')
    version('0.54.2', sha256='85cafdc70ae7d1d9d506e7356b917c649c4df2077bd6a0382db37648aa4ecbdb')
    version('0.54.1', sha256='854e8b94ab36e5aece813d2b2aee8a639bd52201dfea50890722ac9128e2f59e')
    version('0.54.0', sha256='95efdbaa7cb3e915ab9a7b26b1412475398fdc3e834842a780f1646c7764f2d9')
    version('0.53.2', sha256='eab4f5d5dde12d002b7ddd958a9a0658589b63622b6cea2715e0235b95917888')
    version('0.49.1', sha256='a944e7f25a2bc8e4ba3502ab5835d8a8b8f2530415c9d6fcffb53e0abaea2ced')
    version('0.49.0', sha256='11bc959e7173e714e4a4e85dd2bd9d0149b0a51c8ba82d5f44cc63735f603c74')
    version('0.42.0', sha256='6c318a2da3859326a37f8a380e3c50e97aaabff6990067218dffffea674ed76f')
    version('0.41.2', sha256='2daf448d3f2479d60e30617451f09bf02d26304dd1bd12ee1de936a53e42c7a4')
    version('0.41.1', sha256='a48901f02ffeb9ff5cf5361d71b1fca202f9cd72998043ad011fc5de0294cf8b')
    # Newer meson releases require a newer minimum Python at build and run time.
    depends_on('python@3.6:', when='@0.57.0:', type=('build', 'run'))
    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    # Meson generates Ninja build files, so ninja is needed whenever meson runs.
    depends_on('ninja', type='run')
    # By default, Meson strips the rpath on installation. This patch disables
    # rpath modification completely to make sure that Spack's rpath changes
    # are not reverted.
    patch('rpath-0.49.patch', when='@0.49:0.53')
    patch('rpath-0.54.patch', when='@0.54:0.55')
    patch('rpath-0.56.patch', when='@0.56')
    # Regex used by `spack external find` to locate an installed meson binary.
    executables = ['^meson$']
    @classmethod
    def determine_version(cls, exe):
        """Return the version string reported by ``meson --version``."""
        return Executable(exe)('--version', output=str, error=str).rstrip()
    def setup_dependent_build_environment(self, env, dependent_spec):
        # https://github.com/pybind/pybind11/issues/595
        # On macOS, plain `strip` breaks loadable modules; `strip -x` removes
        # only local symbols.
        if self.spec.satisfies('platform=darwin'):
            env.set('STRIP', 'strip -x')
| 53.216667 | 96 | 0.757282 |
from spack import *
class Meson(PythonPackage):
homepage = "http://mesonbuild.com/"
url = "https://github.com/mesonbuild/meson/archive/0.49.0.tar.gz"
maintainers = ['michaelkuhn']
version('0.57.1', sha256='0c043c9b5350e9087cd4f6becf6c0d10b1d618ca3f919e0dcca2cdf342360d5d')
version('0.57.0', sha256='fd26a27c1a509240c668ebd29d280649d9239cf8684ead51d5cb499d1e1188bd')
version('0.56.2', sha256='aaae961c3413033789248ffe6762589e80b6cf487c334d0b808e31a32c48f35f')
version('0.56.0', sha256='a9ca7adf66dc69fbb7e583f7c7aef16b9fe56ec2874a3d58747e69a3affdf300')
version('0.55.3', sha256='2b276df50c5b13ccdbfb14d3333141e9e7985aca31b60400b3f3e0be2ee6897e')
version('0.55.2', sha256='56244896e56c2b619f819d047b6de412ecc5250975ee8717f1e329113d178e06')
version('0.55.1', sha256='c7ebf2fff5934a974c7edd1aebb5fc9c3e1da5ae3184a29581fde917638eea39')
version('0.55.0', sha256='9034c943c8cf4d734c0e18e5ba038dd762fcdcc614c45b41703305da8382e90c')
version('0.54.3', sha256='c25caff342b5368bfe33fab6108f454fcf12e2f2cef70817205872ddef669e8b')
version('0.54.2', sha256='85cafdc70ae7d1d9d506e7356b917c649c4df2077bd6a0382db37648aa4ecbdb')
version('0.54.1', sha256='854e8b94ab36e5aece813d2b2aee8a639bd52201dfea50890722ac9128e2f59e')
version('0.54.0', sha256='95efdbaa7cb3e915ab9a7b26b1412475398fdc3e834842a780f1646c7764f2d9')
version('0.53.2', sha256='eab4f5d5dde12d002b7ddd958a9a0658589b63622b6cea2715e0235b95917888')
version('0.49.1', sha256='a944e7f25a2bc8e4ba3502ab5835d8a8b8f2530415c9d6fcffb53e0abaea2ced')
version('0.49.0', sha256='11bc959e7173e714e4a4e85dd2bd9d0149b0a51c8ba82d5f44cc63735f603c74')
version('0.42.0', sha256='6c318a2da3859326a37f8a380e3c50e97aaabff6990067218dffffea674ed76f')
version('0.41.2', sha256='2daf448d3f2479d60e30617451f09bf02d26304dd1bd12ee1de936a53e42c7a4')
version('0.41.1', sha256='a48901f02ffeb9ff5cf5361d71b1fca202f9cd72998043ad011fc5de0294cf8b')
depends_on('python@3.6:', when='@0.57.0:', type=('build', 'run'))
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('ninja', type='run')
# are not reverted.
patch('rpath-0.49.patch', when='@0.49:0.53')
patch('rpath-0.54.patch', when='@0.54:0.55')
patch('rpath-0.56.patch', when='@0.56')
executables = ['^meson$']
@classmethod
def determine_version(cls, exe):
    # Ask the found binary for its version: `meson --version` prints the
    # bare version string (e.g. "0.57.1"), so only a trailing-newline strip
    # is needed.
    # NOTE(review): `Executable` is presumably Spack's
    # spack.util.executable.Executable -- confirm against the full file.
    return Executable(exe)('--version', output=str, error=str).rstrip()
def setup_dependent_build_environment(self, env, dependent_spec):
    # Builds driven by meson strip installed binaries; on macOS a plain
    # `strip` removes symbols that extension modules need at load time.
    # `strip -x` removes only local symbols, keeping dependents loadable.
    # https://github.com/pybind/pybind11/issues/595
    if self.spec.satisfies('platform=darwin'):
        env.set('STRIP', 'strip -x')
| true | true |
f72a82e3656b23a98910f3bc1d1712b42ddd274d | 30,279 | py | Python | neutron/plugins/openvswitch/ovs_neutron_plugin.py | ksshanam/neutron-vrrp | f9fb7f9b41adc0de401cc118a4d97026d3abb6e0 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/ovs_neutron_plugin.py | ksshanam/neutron-vrrp | f9fb7f9b41adc0de401cc118a4d97026d3abb6e0 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/ovs_neutron_plugin.py | ksshanam/neutron-vrrp | f9fb7f9b41adc0de401cc118a4d97026d3abb6e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(n_rpc.RpcCallback,
                      sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Plugin-side endpoint for RPC calls made by the OVS L2 agents."""

    # history
    # 1.0 Initial version
    # 1.1 Support Security Group RPC
    # 1.2 Support get_devices_details_list
    RPC_API_VERSION = '1.2'

    def __init__(self, notifier, tunnel_type):
        # notifier: AgentNotifierApi used to fan events back out to all
        # agents (e.g. on tunnel_sync).
        # tunnel_type: the configured tunnel type echoed to agents.
        super(OVSRpcCallbacks, self).__init__()
        self.notifier = notifier
        self.tunnel_type = tunnel_type

    @classmethod
    def get_port_from_device(cls, device):
        # Hook for the security-group mixin: resolve an agent-reported
        # device id to its port dict; annotate it with the device id so
        # the caller can correlate the answer with its request.
        port = ovs_db_v2.get_port_from_device(device)
        if port:
            port['device'] = device
        return port

    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Returns the port/network binding info an agent needs to wire the
        device, or just ``{'device': device}`` if the port is unknown.
        """
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
            entry = {'device': device,
                     'network_id': port['network_id'],
                     'port_id': port['id'],
                     'admin_state_up': port['admin_state_up'],
                     'network_type': binding.network_type,
                     'segmentation_id': binding.segmentation_id,
                     'physical_network': binding.physical_network}
            # The agent asking for details means it is about to (re)wire
            # the port, so reflect admin_state_up into the status now.
            new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
                          else q_const.PORT_STATUS_DOWN)
            if port['status'] != new_status:
                ovs_db_v2.set_port_status(port['id'], new_status)
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry

    def get_devices_details_list(self, rpc_context, **kwargs):
        # Batched variant of get_device_details (added in RPC API 1.2);
        # 'devices' is popped so the remaining kwargs pass through cleanly.
        return [
            self.get_device_details(
                rpc_context,
                device=device,
                **kwargs
            )
            for device in kwargs.pop('devices', [])
        ]

    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        port = ovs_db_v2.get_port(device)
        LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        if port:
            entry = {'device': device,
                     'exists': True}
            plugin = manager.NeutronManager.get_plugin()
            if (host and
                not plugin.get_port_host(rpc_context, port['id']) == host):
                # Stale report: the port has since been bound to a different
                # host, so leave its status untouched.
                LOG.debug(_("Device %(device)s not bound to the"
                            " agent host %(host)s"),
                          {'device': device, 'host': host})
            elif port['status'] != q_const.PORT_STATUS_DOWN:
                # Set port status to DOWN
                ovs_db_v2.set_port_status(port['id'],
                                          q_const.PORT_STATUS_DOWN)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry

    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        port = ovs_db_v2.get_port(device)
        LOG.debug(_("Device %(device)s up on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        if port:
            if (host and
                not plugin.get_port_host(rpc_context, port['id']) == host):
                # Report from a host the port is not bound to: ignore it.
                LOG.debug(_("Device %(device)s not bound to the"
                            " agent host %(host)s"),
                          {'device': device, 'host': host})
                return
            elif port['status'] != q_const.PORT_STATUS_ACTIVE:
                ovs_db_v2.set_port_status(port['id'],
                                          q_const.PORT_STATUS_ACTIVE)
        else:
            LOG.debug(_("%s can not be found in database"), device)

    def tunnel_sync(self, rpc_context, **kwargs):
        """Update new tunnel.

        Updates the database with the tunnel IP. All listening agents will
        also be notified about the new tunnel IP.
        """
        tunnel_ip = kwargs.get('tunnel_ip')
        # Update the database with the IP
        tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
        tunnels = ovs_db_v2.get_tunnel_endpoints()
        entry = dict()
        entry['tunnels'] = tunnels
        # Notify all other listening agents
        self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                    tunnel.id, self.tunnel_type)
        # Return the list of tunnels IP's to the agent
        return entry
class AgentNotifierApi(n_rpc.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    '''Agent side of the openvswitch rpc API.

    Fanout-casts network/port/tunnel events from the plugin to every
    listening OVS agent.

    API version history:
        1.0 - Initial version.
    '''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Pre-compute the per-event fanout topics once at construction.
        self.topic_network_delete = topics.get_topic_name(
            topic, topics.NETWORK, topics.DELETE)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)
        self.topic_tunnel_update = topics.get_topic_name(
            topic, constants.TUNNEL, topics.UPDATE)

    def network_delete(self, context, network_id):
        # Tell every agent to tear down local state for the network.
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        # Broadcast the updated port together with its network binding so
        # agents can rewire without a follow-up RPC.
        msg = self.make_msg('port_update',
                            port=port,
                            network_type=network_type,
                            segmentation_id=segmentation_id,
                            physical_network=physical_network)
        self.fanout_cast(context, msg, topic=self.topic_port_update)

    def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
        # Announce a newly registered tunnel endpoint to all agents.
        msg = self.make_msg('tunnel_update',
                            tunnel_ip=tunnel_ip,
                            tunnel_id=tunnel_id,
                            tunnel_type=tunnel_type)
        self.fanout_cast(context, msg, topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         external_net_db.External_net_db_mixin,
                         extraroute_db.ExtraRoute_db_mixin,
                         l3_gwmode_db.L3_NAT_db_mixin,
                         sg_db_rpc.SecurityGroupServerRpcMixin,
                         l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                         agentschedulers_db.DhcpAgentSchedulerDbMixin,
                         portbindings_db.PortBindingMixin,
                         extradhcpopt_db.ExtraDhcpOptMixin,
                         addr_pair_db.AllowedAddressPairsMixin):

    """Implement the Neutron abstractions using Open vSwitch.

    Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
    a new VLAN is created for each network. An agent is relied upon to
    perform the actual OVS configuration on each host.

    The provider extension is also supported. As discussed in
    https://bugs.launchpad.net/neutron/+bug/1023156, this class could
    be simplified, and filtering on extended attributes could be
    handled, by adding support for extended attributes to the
    NeutronDbPluginV2 base class. When that occurs, this class should
    be updated to take advantage of it.

    The port binding extension enables an external application relay
    information to and from the plugin.
    """

    # This attribute specifies whether the plugin supports or not
    # bulk/pagination/sorting operations. Name mangling is used in
    # order to ensure it is qualified by class
    __native_bulk_support = True
    __native_pagination_support = True
    __native_sorting_support = True

    _supported_extension_aliases = ["provider", "external-net", "router",
                                    "ext-gw-mode", "binding", "quotas",
                                    "security-group", "agent", "extraroute",
                                    "l3_agent_scheduler",
                                    "dhcp_agent_scheduler",
                                    "extra_dhcp_opt",
                                    "allowed-address-pairs"]

    @property
    def supported_extension_aliases(self):
        # Computed lazily and cached: the security-group alias is dropped
        # when config disables the SG extension (e.g. noop firewall driver).
        if not hasattr(self, '_aliases'):
            aliases = self._supported_extension_aliases[:]
            sg_rpc.disable_security_group_extension_by_config(aliases)
            self._aliases = aliases
        return self._aliases

    # Attach the provider-network fields (network_type, physical_network,
    # segmentation_id) to every network dict the base plugin builds.
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.NETWORKS, ['_extend_network_dict_provider_ovs'])

    def __init__(self, configfile=None):
        super(OVSNeutronPluginV2, self).__init__()
        self.base_binding_dict = {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
            portbindings.VIF_DETAILS: {
                # TODO(rkukura): Replace with new VIF security details
                portbindings.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases,
                portbindings.OVS_HYBRID_PLUG: True}}
        self._parse_network_vlan_ranges()
        ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
        self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
        if self.tenant_network_type not in [svc_constants.TYPE_LOCAL,
                                            svc_constants.TYPE_VLAN,
                                            svc_constants.TYPE_GRE,
                                            svc_constants.TYPE_VXLAN,
                                            svc_constants.TYPE_NONE]:
            LOG.error(_("Invalid tenant_network_type: %s. "
                        "Server terminated!"),
                      self.tenant_network_type)
            sys.exit(1)
        self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
        self.tunnel_type = None
        if self.enable_tunneling:
            # Tunneling enabled without an explicit type defaults to GRE.
            self.tunnel_type = (cfg.CONF.OVS.tunnel_type or
                                svc_constants.TYPE_GRE)
        elif cfg.CONF.OVS.tunnel_type:
            # A configured tunnel_type implies tunneling even if the
            # enable_tunneling flag was left unset.
            self.tunnel_type = cfg.CONF.OVS.tunnel_type
            self.enable_tunneling = True
        self.tunnel_id_ranges = []
        if self.enable_tunneling:
            self._parse_tunnel_id_ranges()
            ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
        elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
            LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
                        "Server terminated!"), self.tenant_network_type)
            sys.exit(1)
        self.setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver
        )

    def setup_rpc(self):
        # RPC support: register the core and L3 service topics and wire up
        # the notifier used to fan events out to the agents.
        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
        self.conn = n_rpc.create_connection(new=True)
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
            l3_rpc_agent_api.L3AgentNotifyAPI()
        )
        self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type),
                          dhcp_rpc.DhcpRpcCallback(),
                          l3_rpc.L3RpcCallback(),
                          agents_db.AgentExtRpcCallback()]
        for svc_topic in self.service_topics.values():
            self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()

    def _parse_network_vlan_ranges(self):
        # Fatal on malformed config: the plugin cannot run without a valid
        # physical_network -> VLAN range mapping.
        try:
            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
                cfg.CONF.OVS.network_vlan_ranges)
        except Exception as ex:
            LOG.error(_("%s. Server terminated!"), ex)
            sys.exit(1)
        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)

    def _parse_tunnel_id_ranges(self):
        # Each entry is "min:max"; anything else (including extra colons or
        # non-integer bounds) is fatal.
        for entry in cfg.CONF.OVS.tunnel_id_ranges:
            entry = entry.strip()
            try:
                tun_min, tun_max = entry.split(':')
                self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
            except ValueError as ex:
                LOG.error(_("Invalid tunnel ID range: "
                            "'%(range)s' - %(e)s. Server terminated!"),
                          {'range': entry, 'e': ex})
                sys.exit(1)
        LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)

    def _extend_network_dict_provider_ovs(self, network, net_db,
                                          net_binding=None):
        # this method used in two cases: when binding is provided explicitly
        # and when it is a part of db model object
        binding = net_db.binding if net_db else net_binding
        network[provider.NETWORK_TYPE] = binding.network_type
        if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
            # Tunnels have no physical network; the tunnel id is the segment.
            network[provider.PHYSICAL_NETWORK] = None
            network[provider.SEGMENTATION_ID] = binding.segmentation_id
        elif binding.network_type == svc_constants.TYPE_FLAT:
            network[provider.PHYSICAL_NETWORK] = binding.physical_network
            network[provider.SEGMENTATION_ID] = None
        elif binding.network_type == svc_constants.TYPE_VLAN:
            network[provider.PHYSICAL_NETWORK] = binding.physical_network
            network[provider.SEGMENTATION_ID] = binding.segmentation_id
        elif binding.network_type == svc_constants.TYPE_LOCAL:
            network[provider.PHYSICAL_NETWORK] = None
            network[provider.SEGMENTATION_ID] = None

    def _process_provider_create(self, context, attrs):
        """Validate provider:* attributes for a network create request.

        Returns (network_type, physical_network, segmentation_id); all None
        when no provider attribute was supplied (tenant network). Raises
        InvalidInput for any inconsistent combination.
        """
        network_type = attrs.get(provider.NETWORK_TYPE)
        physical_network = attrs.get(provider.PHYSICAL_NETWORK)
        segmentation_id = attrs.get(provider.SEGMENTATION_ID)

        network_type_set = attributes.is_attr_set(network_type)
        physical_network_set = attributes.is_attr_set(physical_network)
        segmentation_id_set = attributes.is_attr_set(segmentation_id)

        if not (network_type_set or physical_network_set or
                segmentation_id_set):
            # No provider attributes at all: this is a tenant network.
            return (None, None, None)

        if not network_type_set:
            msg = _("provider:network_type required")
            raise n_exc.InvalidInput(error_message=msg)
        elif network_type == svc_constants.TYPE_FLAT:
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for flat network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                # Flat networks share a sentinel "VLAN id" internally.
                segmentation_id = constants.FLAT_VLAN_ID
        elif network_type == svc_constants.TYPE_VLAN:
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise n_exc.InvalidInput(error_message=msg)
            if not utils.is_valid_vlan_tag(segmentation_id):
                msg = (_("provider:segmentation_id out of range "
                         "(%(min_id)s through %(max_id)s)") %
                       {'min_id': q_const.MIN_VLAN_TAG,
                        'max_id': q_const.MAX_VLAN_TAG})
                raise n_exc.InvalidInput(error_message=msg)
        elif network_type in constants.TUNNEL_NETWORK_TYPES:
            if not self.enable_tunneling:
                msg = _("%s networks are not enabled") % network_type
                raise n_exc.InvalidInput(error_message=msg)
            if physical_network_set:
                msg = _("provider:physical_network specified for %s "
                        "network") % network_type
                raise n_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise n_exc.InvalidInput(error_message=msg)
        elif network_type == svc_constants.TYPE_LOCAL:
            if physical_network_set:
                msg = _("provider:physical_network specified for local "
                        "network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for local "
                        "network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = None
        else:
            msg = _("provider:network_type %s not supported") % network_type
            raise n_exc.InvalidInput(error_message=msg)

        if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]:
            if physical_network_set:
                if physical_network not in self.network_vlan_ranges:
                    msg = _("Unknown provider:physical_network "
                            "%s") % physical_network
                    raise n_exc.InvalidInput(error_message=msg)
            elif 'default' in self.network_vlan_ranges:
                physical_network = 'default'
            else:
                msg = _("provider:physical_network required")
                raise n_exc.InvalidInput(error_message=msg)

        return (network_type, physical_network, segmentation_id)

    def create_network(self, context, network):
        (network_type, physical_network,
         segmentation_id) = self._process_provider_create(context,
                                                          network['network'])

        session = context.session
        # set up default security groups
        tenant_id = self._get_tenant_id_for_create(
            context, network['network'])
        self._ensure_default_security_group(context, tenant_id)

        # Segment reservation, network creation, binding and L3 processing
        # all happen in one transaction so a failure rolls everything back.
        with session.begin(subtransactions=True):
            if not network_type:
                # tenant network
                network_type = self.tenant_network_type
                if network_type == svc_constants.TYPE_NONE:
                    raise n_exc.TenantNetworksDisabled()
                elif network_type == svc_constants.TYPE_VLAN:
                    (physical_network,
                     segmentation_id) = ovs_db_v2.reserve_vlan(session)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    segmentation_id = ovs_db_v2.reserve_tunnel(session)
                # no reservation needed for TYPE_LOCAL
            else:
                # provider network
                if network_type in [svc_constants.TYPE_VLAN,
                                    svc_constants.TYPE_FLAT]:
                    ovs_db_v2.reserve_specific_vlan(session, physical_network,
                                                    segmentation_id)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
                # no reservation needed for TYPE_LOCAL
            net = super(OVSNeutronPluginV2, self).create_network(context,
                                                                 network)
            binding = ovs_db_v2.add_network_binding(session, net['id'],
                                                    network_type,
                                                    physical_network,
                                                    segmentation_id)

            self._process_l3_create(context, net, network['network'])
            # passing None as db model to use binding object
            self._extend_network_dict_provider_ovs(net, None, binding)
            # note - exception will rollback entire transaction
        LOG.debug(_("Created network: %s"), net['id'])
        return net

    def update_network(self, context, id, network):
        # Provider attributes are immutable after create.
        provider._raise_if_updates_provider_attributes(network['network'])

        session = context.session
        with session.begin(subtransactions=True):
            net = super(OVSNeutronPluginV2, self).update_network(context, id,
                                                                 network)
            self._process_l3_update(context, net, network['network'])
        return net

    def delete_network(self, context, id):
        session = context.session
        with session.begin(subtransactions=True):
            binding = ovs_db_v2.get_network_binding(session, id)
            self._process_l3_delete(context, id)
            super(OVSNeutronPluginV2, self).delete_network(context, id)
            # Return the segment to its pool so it can be reallocated.
            if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
                ovs_db_v2.release_tunnel(session, binding.segmentation_id,
                                         self.tunnel_id_ranges)
            elif binding.network_type in [svc_constants.TYPE_VLAN,
                                          svc_constants.TYPE_FLAT]:
                ovs_db_v2.release_vlan(session, binding.physical_network,
                                       binding.segmentation_id,
                                       self.network_vlan_ranges)
            # the network_binding record is deleted via cascade from
            # the network record, so explicit removal is not necessary
        self.notifier.network_delete(context, id)

    def get_network(self, context, id, fields=None):
        # Fetch with all fields inside the transaction (so the provider
        # dict-extend hook sees the binding), then trim to 'fields'.
        session = context.session
        with session.begin(subtransactions=True):
            net = super(OVSNeutronPluginV2, self).get_network(context,
                                                              id, None)
        return self._fields(net, fields)

    def get_networks(self, context, filters=None, fields=None,
                     sorts=None,
                     limit=None, marker=None, page_reverse=False):
        # Same pattern as get_network: full rows first, field-trim after.
        session = context.session
        with session.begin(subtransactions=True):
            nets = super(OVSNeutronPluginV2,
                         self).get_networks(context, filters, None, sorts,
                                            limit, marker, page_reverse)

        return [self._fields(net, fields) for net in nets]

    def create_port(self, context, port):
        # Set port status as 'DOWN'. This will be updated by agent
        port['port']['status'] = q_const.PORT_STATUS_DOWN

        port_data = port['port']
        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            port = super(OVSNeutronPluginV2, self).create_port(context, port)
            self._process_portbindings_create_and_update(context,
                                                         port_data, port)
            self._process_port_create_security_group(context, port, sgids)
            self._process_port_create_extra_dhcp_opts(context, port,
                                                      dhcp_opts)
            port[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, port,
                    port_data.get(addr_pair.ADDRESS_PAIRS)))
        # Notify only after the transaction committed.
        self.notify_security_groups_member_updated(context, port)
        return port

    def update_port(self, context, id, port):
        session = context.session
        need_port_update_notify = False
        with session.begin(subtransactions=True):
            original_port = super(OVSNeutronPluginV2, self).get_port(
                context, id)
            updated_port = super(OVSNeutronPluginV2, self).update_port(
                context, id, port)
            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         updated_port)
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)

        # VRRP fork: remember whether SG membership changed so the delta
        # of security groups can be pushed to agents below.
        secgrp_member_updated = self.is_security_group_member_updated(
            context, original_port, updated_port)
        need_port_update_notify |= secgrp_member_updated
        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True

        if need_port_update_notify:
            binding = ovs_db_v2.get_network_binding(None,
                                                    updated_port['network_id'])
            self.notifier.port_update(context, updated_port,
                                      binding.network_type,
                                      binding.segmentation_id,
                                      binding.physical_network)
        if secgrp_member_updated:
            # Symmetric difference = groups the port joined or left.
            old_set = set(original_port.get(ext_sg.SECURITYGROUPS))
            new_set = set(updated_port.get(ext_sg.SECURITYGROUPS))
            self.notifier.security_groups_member_updated(
                context,
                old_set ^ new_set)
        return updated_port

    def delete_port(self, context, id, l3_port_check=True):
        # if needed, check to see if this is a port owned by
        # and l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)

        session = context.session
        with session.begin(subtransactions=True):
            router_ids = self.disassociate_floatingips(
                context, id, do_notify=False)
            port = self.get_port(context, id)
            self._delete_port_security_group_bindings(context, id)
            super(OVSNeutronPluginV2, self).delete_port(context, id)

        # now that we've left db transaction, we are safe to notify
        self.notify_routers_updated(context, router_ids)
        self.notify_security_groups_member_updated(context, port)
| 46.726852 | 79 | 0.597114 |
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(n_rpc.RpcCallback,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
RPC_API_VERSION = '1.2'
def __init__(self, notifier, tunnel_type):
super(OVSRpcCallbacks, self).__init__()
self.notifier = notifier
self.tunnel_type = tunnel_type
@classmethod
def get_port_from_device(cls, device):
port = ovs_db_v2.get_port_from_device(device)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = ovs_db_v2.get_port(device)
if port:
binding = ovs_db_v2.get_network_binding(None, port['network_id'])
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up'],
'network_type': binding.network_type,
'segmentation_id': binding.segmentation_id,
'physical_network': binding.physical_network}
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
ovs_db_v2.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
return [
self.get_device_details(
rpc_context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def update_device_down(self, rpc_context, **kwargs):
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = ovs_db_v2.get_port(device)
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
if port:
entry = {'device': device,
'exists': True}
plugin = manager.NeutronManager.get_plugin()
if (host and
not plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
elif port['status'] != q_const.PORT_STATUS_DOWN:
ovs_db_v2.set_port_status(port['id'],
q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = ovs_db_v2.get_port(device)
LOG.debug(_("Device %(device)s up on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
if port:
if (host and
not plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
return
elif port['status'] != q_const.PORT_STATUS_ACTIVE:
ovs_db_v2.set_port_status(port['id'],
q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
def tunnel_sync(self, rpc_context, **kwargs):
tunnel_ip = kwargs.get('tunnel_ip')
tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
tunnels = ovs_db_v2.get_tunnel_endpoints()
entry = dict()
entry['tunnels'] = tunnels
self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
tunnel.id, self.tunnel_type)
return entry
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
self.topic_tunnel_update = topics.get_topic_name(topic,
constants.TUNNEL,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
self.fanout_cast(context,
self.make_msg('tunnel_update',
tunnel_ip=tunnel_ip,
tunnel_id=tunnel_id,
tunnel_type=tunnel_type),
topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
addr_pair_db.AllowedAddressPairsMixin):
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler",
"extra_dhcp_opt",
"allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_extend_network_dict_provider_ovs'])
def __init__(self, configfile=None):
super(OVSNeutronPluginV2, self).__init__()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True}}
self._parse_network_vlan_ranges()
ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
if self.tenant_network_type not in [svc_constants.TYPE_LOCAL,
svc_constants.TYPE_VLAN,
svc_constants.TYPE_GRE,
svc_constants.TYPE_VXLAN,
svc_constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Server terminated!"),
self.tenant_network_type)
sys.exit(1)
self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
self.tunnel_type = None
if self.enable_tunneling:
self.tunnel_type = (cfg.CONF.OVS.tunnel_type or
svc_constants.TYPE_GRE)
elif cfg.CONF.OVS.tunnel_type:
self.tunnel_type = cfg.CONF.OVS.tunnel_type
self.enable_tunneling = True
self.tunnel_id_ranges = []
if self.enable_tunneling:
self._parse_tunnel_id_ranges()
ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
"Server terminated!"), self.tenant_network_type)
sys.exit(1)
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
    def setup_rpc(self):
        """Wire up RPC consumers and agent notifiers.

        Creates the RPC connection, registers DHCP/L3 agent notifiers,
        and starts consumers for the core and L3 service topics; all
        topics share the same endpoint list.
        """
        # RPC support
        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
        self.conn = n_rpc.create_connection(new=True)
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
            l3_rpc_agent_api.L3AgentNotifyAPI()
        )
        # One endpoint list serves every consumed topic.
        self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type),
                          dhcp_rpc.DhcpRpcCallback(),
                          l3_rpc.L3RpcCallback(),
                          agents_db.AgentExtRpcCallback()]
        for svc_topic in self.service_topics.values():
            self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()
    def _parse_network_vlan_ranges(self):
        """Parse [OVS] network_vlan_ranges into self.network_vlan_ranges.

        Any parse error is fatal: configuration problems terminate the
        server at startup rather than running misconfigured.
        """
        try:
            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
                cfg.CONF.OVS.network_vlan_ranges)
        except Exception as ex:
            LOG.error(_("%s. Server terminated!"), ex)
            sys.exit(1)
        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _parse_tunnel_id_ranges(self):
for entry in cfg.CONF.OVS.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: "
"'%(range)s' - %(e)s. Server terminated!"),
{'range': entry, 'e': ex})
sys.exit(1)
LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
def _extend_network_dict_provider_ovs(self, network, net_db,
net_binding=None):
# this method used in two cases: when binding is provided explicitly
# and when it is a part of db model object
binding = net_db.binding if net_db else net_binding
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == svc_constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == svc_constants.TYPE_VLAN:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == svc_constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
    def _process_provider_create(self, context, attrs):
        """Validate provider:* attributes from a network create request.

        :returns: tuple ``(network_type, physical_network,
            segmentation_id)``; ``(None, None, None)`` when no provider
            attribute was supplied at all (a plain tenant network).
        :raises n_exc.InvalidInput: for any inconsistent or unsupported
            attribute combination.
        """
        network_type = attrs.get(provider.NETWORK_TYPE)
        physical_network = attrs.get(provider.PHYSICAL_NETWORK)
        segmentation_id = attrs.get(provider.SEGMENTATION_ID)
        network_type_set = attributes.is_attr_set(network_type)
        physical_network_set = attributes.is_attr_set(physical_network)
        segmentation_id_set = attributes.is_attr_set(segmentation_id)
        # No provider attribute given: tenant network, nothing to check.
        if not (network_type_set or physical_network_set or
                segmentation_id_set):
            return (None, None, None)
        if not network_type_set:
            msg = _("provider:network_type required")
            raise n_exc.InvalidInput(error_message=msg)
        elif network_type == svc_constants.TYPE_FLAT:
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for flat network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = constants.FLAT_VLAN_ID
        elif network_type == svc_constants.TYPE_VLAN:
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise n_exc.InvalidInput(error_message=msg)
            if not utils.is_valid_vlan_tag(segmentation_id):
                msg = (_("provider:segmentation_id out of range "
                         "(%(min_id)s through %(max_id)s)") %
                       {'min_id': q_const.MIN_VLAN_TAG,
                        'max_id': q_const.MAX_VLAN_TAG})
                raise n_exc.InvalidInput(error_message=msg)
        elif network_type in constants.TUNNEL_NETWORK_TYPES:
            if not self.enable_tunneling:
                msg = _("%s networks are not enabled") % network_type
                raise n_exc.InvalidInput(error_message=msg)
            if physical_network_set:
                msg = _("provider:physical_network specified for %s "
                        "network") % network_type
                raise n_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise n_exc.InvalidInput(error_message=msg)
        elif network_type == svc_constants.TYPE_LOCAL:
            # Local networks carry neither a physical network nor a
            # segmentation ID.
            if physical_network_set:
                msg = _("provider:physical_network specified for local "
                        "network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for local "
                        "network")
                raise n_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = None
        else:
            msg = _("provider:network_type %s not supported") % network_type
            raise n_exc.InvalidInput(error_message=msg)
        # VLAN/flat networks must map to a configured physical network;
        # fall back to 'default' when that pool exists.
        if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]:
            if physical_network_set:
                if physical_network not in self.network_vlan_ranges:
                    msg = _("Unknown provider:physical_network "
                            "%s") % physical_network
                    raise n_exc.InvalidInput(error_message=msg)
            elif 'default' in self.network_vlan_ranges:
                physical_network = 'default'
            else:
                msg = _("provider:physical_network required")
                raise n_exc.InvalidInput(error_message=msg)
        return (network_type, physical_network, segmentation_id)
    def create_network(self, context, network):
        """Create a network, reserving its segment inside one transaction.

        Tenant networks (no provider attributes) get a segment allocated
        from the configured pools according to tenant_network_type;
        provider networks reserve exactly the segment requested.  The
        binding, L3 attributes and provider-dict extension all happen
        inside the same subtransaction so any failure rolls back the
        whole create.
        """
        (network_type, physical_network,
         segmentation_id) = self._process_provider_create(context,
                                                          network['network'])
        session = context.session
        #set up default security groups
        tenant_id = self._get_tenant_id_for_create(
            context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        with session.begin(subtransactions=True):
            if not network_type:
                # tenant network
                network_type = self.tenant_network_type
                if network_type == svc_constants.TYPE_NONE:
                    raise n_exc.TenantNetworksDisabled()
                elif network_type == svc_constants.TYPE_VLAN:
                    (physical_network,
                     segmentation_id) = ovs_db_v2.reserve_vlan(session)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    segmentation_id = ovs_db_v2.reserve_tunnel(session)
                # no reservation needed for TYPE_LOCAL
            else:
                # provider network
                if network_type in [svc_constants.TYPE_VLAN,
                                    svc_constants.TYPE_FLAT]:
                    ovs_db_v2.reserve_specific_vlan(session, physical_network,
                                                    segmentation_id)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
                # no reservation needed for TYPE_LOCAL
            net = super(OVSNeutronPluginV2, self).create_network(context,
                                                                 network)
            binding = ovs_db_v2.add_network_binding(session, net['id'],
                                                    network_type,
                                                    physical_network,
                                                    segmentation_id)
            self._process_l3_create(context, net, network['network'])
            # passing None as db model to use binding object
            self._extend_network_dict_provider_ovs(net, None, binding)
            # note - exception will rollback entire transaction
        LOG.debug(_("Created network: %s"), net['id'])
        return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
return net
    def delete_network(self, context, id):
        """Delete a network and release its VLAN/tunnel segment.

        The segment is returned to the appropriate allocation pool in
        the same transaction as the delete; the agent notification is
        sent only after the transaction commits.
        """
        session = context.session
        with session.begin(subtransactions=True):
            binding = ovs_db_v2.get_network_binding(session, id)
            self._process_l3_delete(context, id)
            super(OVSNeutronPluginV2, self).delete_network(context, id)
            if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
                ovs_db_v2.release_tunnel(session, binding.segmentation_id,
                                         self.tunnel_id_ranges)
            elif binding.network_type in [svc_constants.TYPE_VLAN,
                                          svc_constants.TYPE_FLAT]:
                ovs_db_v2.release_vlan(session, binding.physical_network,
                                       binding.segmentation_id,
                                       self.network_vlan_ranges)
            # the network_binding record is deleted via cascade from
            # the network record, so explicit removal is not necessary
        self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).get_network(context,
id, None)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None,
limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(OVSNeutronPluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
return [self._fields(net, fields) for net in nets]
    def create_port(self, context, port):
        """Create a port with security groups, extra DHCP options and
        allowed address pairs; notify security-group members post-commit.
        """
        # Set port status as 'DOWN'. This will be updated by agent
        port['port']['status'] = q_const.PORT_STATUS_DOWN
        port_data = port['port']
        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            # 'port' is rebound here from the request body to the
            # created port dict; port_data still holds the request data.
            port = super(OVSNeutronPluginV2, self).create_port(context, port)
            self._process_portbindings_create_and_update(context,
                                                         port_data, port)
            self._process_port_create_security_group(context, port, sgids)
            self._process_port_create_extra_dhcp_opts(context, port,
                                                      dhcp_opts)
            port[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, port,
                    port_data.get(addr_pair.ADDRESS_PAIRS)))
        # Notification is deliberately outside the transaction.
        self.notify_security_groups_member_updated(context, port)
        return port
    def update_port(self, context, id, port):
        """Update a port, notifying the agent only when relevant state
        changed (address pairs, security groups, bindings, extra DHCP
        options, or admin_state_up).
        """
        session = context.session
        need_port_update_notify = False
        with session.begin(subtransactions=True):
            original_port = super(OVSNeutronPluginV2, self).get_port(
                context, id)
            updated_port = super(OVSNeutronPluginV2, self).update_port(
                context, id, port)
            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         updated_port)
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            secgrp_member_updated = self.is_security_group_member_updated(
                context, original_port, updated_port)
            need_port_update_notify |= secgrp_member_updated
        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True
        if need_port_update_notify:
            # Segment details are included so the agent can re-wire
            # the port; notifications happen after the transaction.
            binding = ovs_db_v2.get_network_binding(None,
                                                    updated_port['network_id'])
            self.notifier.port_update(context, updated_port,
                                      binding.network_type,
                                      binding.segmentation_id,
                                      binding.physical_network)
            if secgrp_member_updated:
                # Symmetric difference: the groups the port joined
                # or left, i.e. everyone whose membership changed.
                old_set = set(original_port.get(ext_sg.SECURITYGROUPS))
                new_set = set(updated_port.get(ext_sg.SECURITYGROUPS))
                self.notifier.security_groups_member_updated(
                    context,
                    old_set ^ new_set)
        return updated_port
    def delete_port(self, context, id, l3_port_check=True):
        """Delete a port after detaching floating IPs and SG bindings.

        :param l3_port_check: when True, refuse to delete ports owned by
            an L3 router.
        """
        # if needed, check to see if this is a port owned by
        # and l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        session = context.session
        with session.begin(subtransactions=True):
            router_ids = self.disassociate_floatingips(
                context, id, do_notify=False)
            # Fetch the port before deletion for the post-commit
            # security-group notification below.
            port = self.get_port(context, id)
            self._delete_port_security_group_bindings(context, id)
            super(OVSNeutronPluginV2, self).delete_port(context, id)
        # now that we've left db transaction, we are safe to notify
        self.notify_routers_updated(context, router_ids)
        self.notify_security_groups_member_updated(context, port)
| true | true |
f72a83c49b78622f2d47b7d505eed6ddffe2fe8d | 6,547 | py | Python | monero_glue/xmr/sub/mlsag_hasher.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 20 | 2018-04-05T22:06:10.000Z | 2021-09-18T10:43:44.000Z | monero_glue/xmr/sub/mlsag_hasher.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | null | null | null | monero_glue/xmr/sub/mlsag_hasher.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 5 | 2018-08-06T15:06:04.000Z | 2021-07-16T01:58:43.000Z | from monero_glue.xmr import crypto
from monero_glue.xmr.sub.keccak_hasher import HashWrapper
from monero_serialize import xmrserialize
class PreMlsagHasher(object):
    """
    Iterative construction of the pre_mlsag_hash

    Maintains three hashers: ``kc_master`` (the final digest),
    ``rtcsig_hasher`` (the serialized RctSigBase section) and
    ``rsig_hasher`` (the range-proof section).  A numeric state machine
    enforces call order: 0 fresh, 1 after init(), 2 after type/fee,
    3 pseudo outs, 4 ecdh, 5 out pk, 6 rct base folded into the master,
    8 final digest emitted.  state_save()/state_load() allow suspending
    the hashing across calls.
    """
    def __init__(self, state=None):
        from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
        # Either resume from a saved state tuple or start fresh.
        self.is_simple = state[0] if state else None
        self.state = state[1] if state else 0
        self.kc_master = HashWrapper(state[2] if state else crypto.get_keccak())
        self.rsig_hasher = state[3] if state else crypto.get_keccak()
        self.rtcsig_hasher = None
        if state:
            # rtcsig_hasher is None once rctsig_base_done() has run.
            self.rtcsig_hasher = KeccakXmrArchive(state[4]) if state[4] else None
        else:
            self.rtcsig_hasher = KeccakXmrArchive()
    def state_save(self):
        # Tuple layout mirrors the constructor's ``state`` argument.
        return (
            self.is_simple,
            self.state,
            self.kc_master.ctx,
            self.rsig_hasher,
            self.rtcsig_hasher.ctx() if self.rtcsig_hasher else None,
        )
    def state_load(self, x):
        # Inverse of state_save(): rebuild hashers from raw contexts.
        from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
        self.is_simple = x[0]
        self.state = x[1]
        self.kc_master = HashWrapper(x[2])
        self.rsig_hasher = x[3]
        if x[4]:
            self.rtcsig_hasher = KeccakXmrArchive(x[4])
        else:
            self.rtcsig_hasher = None
    def init(self, is_simple):
        # Transition 0 -> 1; records whether the RctSig is "simple".
        if self.state != 0:
            raise ValueError("State error")
        self.state = 1
        self.is_simple = is_simple
    async def set_message(self, message):
        # The tx message goes straight into the master hash.
        self.kc_master.update(message)
    async def set_type_fee(self, rv_type, fee):
        # Transition 1 -> 2; hashes the first two RctSigBase fields.
        if self.state != 1:
            raise ValueError("State error")
        self.state = 2
        from monero_serialize.xmrtypes import RctSigBase
        rfields = RctSigBase.f_specs()
        await self.rtcsig_hasher.message_field(None, field=rfields[0], fvalue=rv_type)
        await self.rtcsig_hasher.message_field(None, field=rfields[1], fvalue=fee)
    async def set_pseudo_out(self, out):
        # States 2/3 -> 3; may be called once per input.
        if self.state != 2 and self.state != 3:
            raise ValueError("State error")
        self.state = 3
        from monero_serialize.xmrtypes import KeyV
        await self.rtcsig_hasher.field(out, KeyV.ELEM_TYPE)
    async def set_ecdh(self, ecdh, raw=True):
        # States 2/3/4 -> 4; pseudo outs are optional, hence 2 is legal.
        if self.state != 2 and self.state != 3 and self.state != 4:
            raise ValueError("State error")
        self.state = 4
        if raw:
            # Caller pre-serialized the ecdh info.
            await self.rtcsig_hasher.buffer(ecdh)
        else:
            from monero_serialize.xmrtypes import EcdhInfo
            await self.rtcsig_hasher.field(ecdh, EcdhInfo.ELEM_TYPE)
    async def set_out_pk(self, out_pk, mask=None):
        # States 4/5 -> 5; only the mask component is hashed when given.
        if self.state != 4 and self.state != 5:
            raise ValueError("State error")
        self.state = 5
        from monero_serialize.xmrtypes import ECKey
        await self.rtcsig_hasher.field(mask if mask else out_pk, ECKey)
    async def rctsig_base_done(self):
        # Transition 5 -> 6: fold the RctSigBase digest into the master
        # hash and drop the section hasher.
        if self.state != 5:
            raise ValueError("State error")
        self.state = 6
        c_hash = self.rtcsig_hasher.get_digest()
        self.kc_master.update(c_hash)
        self.rtcsig_hasher = None
    async def rsig_val(self, p, bulletproof, raw=False):
        """Hash a range proof (or raw pre-hashed bytes) into rsig_hasher.

        Field order must match get_pre_mlsag_hash() exactly, so the
        digest agrees with the non-incremental construction.
        """
        if self.state == 8:
            raise ValueError("State error")
        if raw:
            if isinstance(p, list):
                for x in p:
                    self.rsig_hasher.update(x)
            else:
                self.rsig_hasher.update(p)
            return
        if bulletproof:
            self.rsig_hasher.update(p.A)
            self.rsig_hasher.update(p.S)
            self.rsig_hasher.update(p.T1)
            self.rsig_hasher.update(p.T2)
            self.rsig_hasher.update(p.taux)
            self.rsig_hasher.update(p.mu)
            for i in range(len(p.L)):
                self.rsig_hasher.update(p.L[i])
            for i in range(len(p.R)):
                self.rsig_hasher.update(p.R[i])
            self.rsig_hasher.update(p.a)
            self.rsig_hasher.update(p.b)
            self.rsig_hasher.update(p.t)
        else:
            # Borromean range signature: 64 s0, 64 s1, ee, 64 Ci.
            for i in range(64):
                self.rsig_hasher.update(p.asig.s0[i])
            for i in range(64):
                self.rsig_hasher.update(p.asig.s1[i])
            self.rsig_hasher.update(p.asig.ee)
            for i in range(64):
                self.rsig_hasher.update(p.Ci[i])
    async def get_digest(self):
        # Transition 6 -> 8: fold the range-proof digest into the
        # master hash and emit the final digest; instance is spent.
        if self.state != 6:
            raise ValueError("State error")
        self.state = 8
        c_hash = self.rsig_hasher.digest()
        self.rsig_hasher = None
        self.kc_master.update(c_hash)
        return self.kc_master.digest()
async def get_pre_mlsag_hash(rv):
    """
    Generates final message for the Ring CT signature

    Hashes three parts in order -- the transaction message, the
    serialized RctSigBase, and the range-proof data (Bulletproof fields
    or Borromean range signatures) -- then folds the two section digests
    into the master hash.  Field order must match
    PreMlsagHasher.rsig_val() so the incremental construction agrees.

    :param rv: Ring CT signature
    :type rv: RctSig
    :return: keccak digest over message + rctsig base + range proofs
    """
    from monero_glue.xmr.sub.keccak_hasher import get_keccak_writer
    from monero_serialize.xmrtypes import RctType

    kc_master = HashWrapper(crypto.get_keccak())
    kc_master.update(rv.message)
    # Input/output counts drive the RctSigBase serialization; the
    # pseudo outs live in different places depending on the sig type.
    # (A dead `is_simple` computation was removed here -- it was never
    # read.)
    outputs = len(rv.ecdhInfo)
    inputs = 0
    if rv.type == RctType.Simple:
        inputs = len(rv.pseudoOuts)
    elif rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
        inputs = len(rv.p.pseudoOuts)

    # Section 1: serialized RctSigBase.
    kwriter = get_keccak_writer()
    ar = xmrserialize.Archive(kwriter, True)
    await rv.serialize_rctsig_base(ar, inputs, outputs)
    c_hash = kwriter.get_digest()
    kc_master.update(c_hash)

    # Section 2: range proofs.
    kc = crypto.get_keccak()
    if rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
        for p in rv.p.bulletproofs:
            kc.update(p.A)
            kc.update(p.S)
            kc.update(p.T1)
            kc.update(p.T2)
            kc.update(p.taux)
            kc.update(p.mu)
            for i in range(len(p.L)):
                kc.update(p.L[i])
            for i in range(len(p.R)):
                kc.update(p.R[i])
            kc.update(p.a)
            kc.update(p.b)
            kc.update(p.t)
    else:
        # Borromean range signature: 64 s0, 64 s1, ee, 64 Ci per proof.
        for r in rv.p.rangeSigs:
            for i in range(64):
                kc.update(r.asig.s0[i])
            for i in range(64):
                kc.update(r.asig.s1[i])
            kc.update(r.asig.ee)
            for i in range(64):
                kc.update(r.Ci[i])

    c_hash = kc.digest()
    kc_master.update(c_hash)
    return kc_master.digest()
| 30.882075 | 86 | 0.590805 | from monero_glue.xmr import crypto
from monero_glue.xmr.sub.keccak_hasher import HashWrapper
from monero_serialize import xmrserialize
class PreMlsagHasher(object):
def __init__(self, state=None):
from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
self.is_simple = state[0] if state else None
self.state = state[1] if state else 0
self.kc_master = HashWrapper(state[2] if state else crypto.get_keccak())
self.rsig_hasher = state[3] if state else crypto.get_keccak()
self.rtcsig_hasher = None
if state:
self.rtcsig_hasher = KeccakXmrArchive(state[4]) if state[4] else None
else:
self.rtcsig_hasher = KeccakXmrArchive()
def state_save(self):
return (
self.is_simple,
self.state,
self.kc_master.ctx,
self.rsig_hasher,
self.rtcsig_hasher.ctx() if self.rtcsig_hasher else None,
)
def state_load(self, x):
from monero_glue.xmr.sub.keccak_hasher import HashWrapper, KeccakXmrArchive
self.is_simple = x[0]
self.state = x[1]
self.kc_master = HashWrapper(x[2])
self.rsig_hasher = x[3]
if x[4]:
self.rtcsig_hasher = KeccakXmrArchive(x[4])
else:
self.rtcsig_hasher = None
def init(self, is_simple):
if self.state != 0:
raise ValueError("State error")
self.state = 1
self.is_simple = is_simple
async def set_message(self, message):
self.kc_master.update(message)
async def set_type_fee(self, rv_type, fee):
if self.state != 1:
raise ValueError("State error")
self.state = 2
from monero_serialize.xmrtypes import RctSigBase
rfields = RctSigBase.f_specs()
await self.rtcsig_hasher.message_field(None, field=rfields[0], fvalue=rv_type)
await self.rtcsig_hasher.message_field(None, field=rfields[1], fvalue=fee)
async def set_pseudo_out(self, out):
if self.state != 2 and self.state != 3:
raise ValueError("State error")
self.state = 3
from monero_serialize.xmrtypes import KeyV
await self.rtcsig_hasher.field(out, KeyV.ELEM_TYPE)
async def set_ecdh(self, ecdh, raw=True):
if self.state != 2 and self.state != 3 and self.state != 4:
raise ValueError("State error")
self.state = 4
if raw:
await self.rtcsig_hasher.buffer(ecdh)
else:
from monero_serialize.xmrtypes import EcdhInfo
await self.rtcsig_hasher.field(ecdh, EcdhInfo.ELEM_TYPE)
async def set_out_pk(self, out_pk, mask=None):
if self.state != 4 and self.state != 5:
raise ValueError("State error")
self.state = 5
from monero_serialize.xmrtypes import ECKey
await self.rtcsig_hasher.field(mask if mask else out_pk, ECKey)
async def rctsig_base_done(self):
if self.state != 5:
raise ValueError("State error")
self.state = 6
c_hash = self.rtcsig_hasher.get_digest()
self.kc_master.update(c_hash)
self.rtcsig_hasher = None
async def rsig_val(self, p, bulletproof, raw=False):
if self.state == 8:
raise ValueError("State error")
if raw:
if isinstance(p, list):
for x in p:
self.rsig_hasher.update(x)
else:
self.rsig_hasher.update(p)
return
if bulletproof:
self.rsig_hasher.update(p.A)
self.rsig_hasher.update(p.S)
self.rsig_hasher.update(p.T1)
self.rsig_hasher.update(p.T2)
self.rsig_hasher.update(p.taux)
self.rsig_hasher.update(p.mu)
for i in range(len(p.L)):
self.rsig_hasher.update(p.L[i])
for i in range(len(p.R)):
self.rsig_hasher.update(p.R[i])
self.rsig_hasher.update(p.a)
self.rsig_hasher.update(p.b)
self.rsig_hasher.update(p.t)
else:
for i in range(64):
self.rsig_hasher.update(p.asig.s0[i])
for i in range(64):
self.rsig_hasher.update(p.asig.s1[i])
self.rsig_hasher.update(p.asig.ee)
for i in range(64):
self.rsig_hasher.update(p.Ci[i])
async def get_digest(self):
if self.state != 6:
raise ValueError("State error")
self.state = 8
c_hash = self.rsig_hasher.digest()
self.rsig_hasher = None
self.kc_master.update(c_hash)
return self.kc_master.digest()
async def get_pre_mlsag_hash(rv):
from monero_glue.xmr.sub.keccak_hasher import get_keccak_writer
from monero_serialize.xmrtypes import RctType
kc_master = HashWrapper(crypto.get_keccak())
kc_master.update(rv.message)
is_simple = rv.type in [RctType.Simple, RctType.Bulletproof, RctType.Bulletproof2]
outputs = len(rv.ecdhInfo)
inputs = 0
if rv.type == RctType.Simple:
inputs = len(rv.pseudoOuts)
elif rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
inputs = len(rv.p.pseudoOuts)
kwriter = get_keccak_writer()
ar = xmrserialize.Archive(kwriter, True)
await rv.serialize_rctsig_base(ar, inputs, outputs)
c_hash = kwriter.get_digest()
kc_master.update(c_hash)
kc = crypto.get_keccak()
if rv.type in [RctType.Bulletproof, RctType.Bulletproof2]:
for p in rv.p.bulletproofs:
kc.update(p.A)
kc.update(p.S)
kc.update(p.T1)
kc.update(p.T2)
kc.update(p.taux)
kc.update(p.mu)
for i in range(len(p.L)):
kc.update(p.L[i])
for i in range(len(p.R)):
kc.update(p.R[i])
kc.update(p.a)
kc.update(p.b)
kc.update(p.t)
else:
for r in rv.p.rangeSigs:
for i in range(64):
kc.update(r.asig.s0[i])
for i in range(64):
kc.update(r.asig.s1[i])
kc.update(r.asig.ee)
for i in range(64):
kc.update(r.Ci[i])
c_hash = kc.digest()
kc_master.update(c_hash)
return kc_master.digest()
| true | true |
f72a84f3844befc8aa1a322a61d1a30d58921cb7 | 5,638 | py | Python | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 3 | 2015-12-09T15:02:03.000Z | 2017-10-05T16:54:14.000Z | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:29:25.000Z | 2019-03-28T03:40:58.000Z | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 3 | 2019-03-28T03:41:01.000Z | 2020-04-29T09:47:21.000Z | import logging
from datetime import datetime, timedelta
from spark.tests import TestCase
from nose.tools import eq_
from geo.continents import (AFRICA, ASIA, EUROPE, NORTH_AMERICA, SOUTH_AMERICA,
OCEANIA, ANTARCTICA)
from users.models import User
from stats.models import SharingHistory
from challenges.challenges import all_challenges
class ChallengesTestCase(TestCase):
    """Exercise challenge-completion logic against fixture users.

    Conventions used below: 'bob' starts with zero shares, 'franck' has
    completed both Boost steps, and 'batman' has child profiles --
    TODO(review): confirm against the boost.json/challenges.json
    fixtures.
    """
    fixtures = ['boost.json', 'challenges.json']
    def get_profile(self, username):
        # Re-fetching via the ORM yields fresh (non-cached) data.
        return User.objects.get(username=username).profile
    def assert_completion(self, profile, challenge_id):
        eq_(True, all_challenges[challenge_id].is_completed_by(profile))
    def assert_non_completion(self, profile, challenge_id):
        eq_(False, all_challenges[challenge_id].is_completed_by(profile))
    def test_complete_1_1(self):
        # Challenge 1_1: completed after the first share.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share(profile)
        # Reload so total_shares reflects the new share.
        profile = self.get_profile('bob')
        eq_(1, profile.total_shares)
        self.assert_completion(profile, '1_1')
    def test_complete_1_2(self):
        # franck has completed Boost 1/2
        profile = self.get_profile('franck')
        self.assert_completion(profile, '1_2')
    def test_complete_1_3(self):
        # franck has completed Boost 2/2
        profile = self.get_profile('franck')
        self.assert_completion(profile, '1_3')
    def test_complete_2_1(self):
        # Challenge 2_1: share via Facebook.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_facebook(profile)
        self.assert_completion(profile, '2_1')
    def test_complete_2_2(self):
        # Challenge 2_2: share via Twitter.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_twitter(profile)
        self.assert_completion(profile, '2_2')
    def test_complete_2_3(self):
        # Challenge 2_3: desktop login flag.
        # NOTE(review): the flag is set but profile.save() is never
        # called -- the challenge check apparently reads the in-memory
        # object; confirm that is intentional.
        profile = self.get_profile('bob')
        eq_(False, profile.login_desktop)
        profile.login_desktop = True
        self.assert_completion(profile, '2_3')
    def test_complete_2_4(self):
        # Challenge 2_4: share via QR code.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_qr_code(profile)
        self.assert_completion(profile, '2_4')
    def test_complete_2_5(self):
        # Challenge 2_5: a boosted child located far from the parent
        # (the near-Paris child does not count; Barcelona does).
        profile = self.get_profile('batman')
        child = profile.children_profiles[0]
        self.assert_non_completion(profile, '2_5')
        # Paris
        profile.boost1_completed = True
        profile.latitude = 48.857487002645485
        profile.longitude = 2.3291015625
        profile.save()
        # Close to Paris (< 100km)
        child.boost1_completed = True
        child.latitude = 48.821332549646634
        child.longitude = 2.4993896484375
        child.save()
        self.assert_non_completion(profile, '2_5')
        # Barcelona
        child.boost1_completed = True
        child.latitude = 41.387917
        child.longitude = 2.169918
        child.save()
        self.assert_completion(profile, '2_5')
    def test_complete_2_6(self):
        # Challenge 2_6: a boosted child in a different country.
        profile = self.get_profile('batman')
        eq_(None, profile.country_code)
        profile.country_code = 'US'
        profile.save()
        self.assert_non_completion(profile, '2_6')
        child = profile.children_profiles[0]
        child.boost1_completed = True
        child.country_code = 'US'
        child.save()
        self.assert_non_completion(profile, '2_6')
        child.country_code = 'MX'
        child.save()
        self.assert_completion(profile, '2_6')
    def test_complete_2_7(self):
        # Challenge 2_7: reach 13 total shares.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        for i in range(13):
            SharingHistory.add_share(profile)
        eq_(13, profile.total_shares)
        self.assert_completion(profile, '2_7')
    def test_complete_3_2(self):
        # Challenge 3_2: share via poster.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_poster(profile)
        self.assert_completion(profile, '3_2')
    def test_complete_3_3(self):
        # Challenge 3_3: a boosted child on a different continent
        # (US/CA are both North America; FR is Europe).
        # NOTE(review): profile changes here are never save()d -- see
        # test_complete_2_3.
        profile = self.get_profile('batman')
        profile.boost1_completed = True
        profile.country_code = 'US'
        eq_(NORTH_AMERICA, profile.continent_code)
        child = profile.children_profiles[0]
        child.boost1_completed = True
        child.country_code = 'CA'
        child.save()
        eq_(NORTH_AMERICA, child.continent_code)
        self.assert_non_completion(profile, '3_3')
        child.country_code = 'FR'
        child.save()
        eq_(EUROPE, child.continent_code)
        self.assert_completion(profile, '3_3')
    def test_complete_3_4(self):
        # Challenge 3_4: four shares within some recent window --
        # three shares at -15h/-8h/-3h are not enough, a fourth at
        # -11h completes it.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        now = datetime.now()
        _create_share(profile, now - timedelta(hours=15))
        _create_share(profile, now - timedelta(hours=8))
        _create_share(profile, now - timedelta(hours=3))
        eq_(3, profile.total_shares)
        self.assert_non_completion(profile, '3_4')
        _create_share(profile, now - timedelta(hours=11))
        eq_(4, profile.total_shares)
        self.assert_completion(profile, '3_4')
    def test_complete_3_5(self):
        # TODO(review): placeholder -- challenge 3_5 completion is
        # currently untested.
        pass
def _create_share(profile, date):
    """Create a SharingHistory row for *profile* backdated to *date*.

    The row is created first, then re-saved with the explicit date --
    presumably because date_shared is auto-populated on creation and
    must be overridden afterwards; TODO(review): confirm against the
    SharingHistory model.
    """
    share = SharingHistory.objects.create(parent=profile)
    share.date_shared = date
    share.save()
| 27.773399 | 79 | 0.631075 | import logging
from datetime import datetime, timedelta
from spark.tests import TestCase
from nose.tools import eq_
from geo.continents import (AFRICA, ASIA, EUROPE, NORTH_AMERICA, SOUTH_AMERICA,
OCEANIA, ANTARCTICA)
from users.models import User
from stats.models import SharingHistory
from challenges.challenges import all_challenges
class ChallengesTestCase(TestCase):
fixtures = ['boost.json', 'challenges.json']
def get_profile(self, username):
return User.objects.get(username=username).profile
def assert_completion(self, profile, challenge_id):
eq_(True, all_challenges[challenge_id].is_completed_by(profile))
def assert_non_completion(self, profile, challenge_id):
eq_(False, all_challenges[challenge_id].is_completed_by(profile))
def test_complete_1_1(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share(profile)
profile = self.get_profile('bob')
eq_(1, profile.total_shares)
self.assert_completion(profile, '1_1')
def test_complete_1_2(self):
profile = self.get_profile('franck')
self.assert_completion(profile, '1_2')
def test_complete_1_3(self):
profile = self.get_profile('franck')
self.assert_completion(profile, '1_3')
def test_complete_2_1(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_facebook(profile)
self.assert_completion(profile, '2_1')
def test_complete_2_2(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_twitter(profile)
self.assert_completion(profile, '2_2')
def test_complete_2_3(self):
profile = self.get_profile('bob')
eq_(False, profile.login_desktop)
profile.login_desktop = True
self.assert_completion(profile, '2_3')
def test_complete_2_4(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_qr_code(profile)
self.assert_completion(profile, '2_4')
def test_complete_2_5(self):
profile = self.get_profile('batman')
child = profile.children_profiles[0]
self.assert_non_completion(profile, '2_5')
profile.boost1_completed = True
profile.latitude = 48.857487002645485
profile.longitude = 2.3291015625
profile.save()
child.boost1_completed = True
child.latitude = 48.821332549646634
child.longitude = 2.4993896484375
child.save()
self.assert_non_completion(profile, '2_5')
child.boost1_completed = True
child.latitude = 41.387917
child.longitude = 2.169918
child.save()
self.assert_completion(profile, '2_5')
def test_complete_2_6(self):
profile = self.get_profile('batman')
eq_(None, profile.country_code)
profile.country_code = 'US'
profile.save()
self.assert_non_completion(profile, '2_6')
child = profile.children_profiles[0]
child.boost1_completed = True
child.country_code = 'US'
child.save()
self.assert_non_completion(profile, '2_6')
child.country_code = 'MX'
child.save()
self.assert_completion(profile, '2_6')
def test_complete_2_7(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
for i in range(13):
SharingHistory.add_share(profile)
eq_(13, profile.total_shares)
self.assert_completion(profile, '2_7')
def test_complete_3_2(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_poster(profile)
self.assert_completion(profile, '3_2')
def test_complete_3_3(self):
profile = self.get_profile('batman')
profile.boost1_completed = True
profile.country_code = 'US'
eq_(NORTH_AMERICA, profile.continent_code)
child = profile.children_profiles[0]
child.boost1_completed = True
child.country_code = 'CA'
child.save()
eq_(NORTH_AMERICA, child.continent_code)
self.assert_non_completion(profile, '3_3')
child.country_code = 'FR'
child.save()
eq_(EUROPE, child.continent_code)
self.assert_completion(profile, '3_3')
def test_complete_3_4(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
now = datetime.now()
_create_share(profile, now - timedelta(hours=15))
_create_share(profile, now - timedelta(hours=8))
_create_share(profile, now - timedelta(hours=3))
eq_(3, profile.total_shares)
self.assert_non_completion(profile, '3_4')
_create_share(profile, now - timedelta(hours=11))
eq_(4, profile.total_shares)
self.assert_completion(profile, '3_4')
def test_complete_3_5(self):
pass
def _create_share(profile, date):
share = SharingHistory.objects.create(parent=profile)
share.date_shared = date
share.save()
| true | true |
f72a8508eb15e1b3e71b5ca8ef252bc825472afa | 15,226 | py | Python | tests/integration-tests/tests/storage/test_ebs.py | eshpc/aws-parallelcluster | 8cc6169a12661ce1c0025c93ebd9019c26e7219e | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/tests/storage/test_ebs.py | eshpc/aws-parallelcluster | 8cc6169a12661ce1c0025c93ebd9019c26e7219e | [
"Apache-2.0"
] | 108 | 2021-10-11T09:12:06.000Z | 2022-03-28T09:28:39.000Z | tests/integration-tests/tests/storage/test_ebs.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import boto3
import pytest
import utils
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from tests.common.schedulers_common import get_scheduler_commands
from tests.storage.kms_key_factory import KMSKeyFactory
from tests.storage.snapshots_factory import EBSSnapshotsFactory
from tests.storage.storage_common import verify_directory_correctly_shared
@pytest.mark.regions(["eu-west-3", "cn-north-1", "us-gov-west-1"])
@pytest.mark.instances(["c4.xlarge", "c5.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("instance")
def test_ebs_single(scheduler, pcluster_config_reader, clusters_factory, kms_key_factory, region, os):
    """Single shared EBS volume: verify mount, cross-node sharing and KMS encryption."""
    relative_mount_dir = "ebs_mount_dir"
    # Encrypt the shared volume with a customer-managed KMS key.
    kms_key_id = kms_key_factory.create_kms_key(region)
    cluster_config = pcluster_config_reader(
        mount_dir=relative_mount_dir, ec2_iam_role=kms_key_factory.iam_role_arn, ebs_kms_key_id=kms_key_id
    )
    cluster = clusters_factory(cluster_config)
    command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, command_executor)
    mount_dir = "/" + relative_mount_dir
    volume_id = get_ebs_volume_ids(cluster, region)[0]
    _test_ebs_correctly_mounted(command_executor, mount_dir, volume_size=35)
    _test_ebs_correctly_shared(command_executor, mount_dir, scheduler_commands)
    _test_ebs_encrypted_with_kms(volume_id, region, encrypted=True, kms_key_id=kms_key_id)
    _test_root_volume_encryption(cluster, os, region, scheduler, encrypted=True)
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "alinux2", "slurm")
@pytest.mark.dimensions("cn-northwest-1", "c4.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_snapshot(
    request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
    """Shared EBS volume restored from a snapshot.

    Verifies that a 21G volume created from a 10G snapshot initially exposes
    the snapshot-sized filesystem (~9.8G), that the underlying volume has the
    requested larger size, that the mount is shared across nodes, and that
    the snapshot's test data survives.
    """
    logging.info("Testing ebs snapshot")
    mount_dir = "ebs_mount_dir"
    volume_size = 21
    # This volume_size is set to be larger than snapshot size(10G), to test create volumes larger than its snapshot size
    logging.info("Creating snapshot")
    # The snapshot is created from a helper instance in the region's public subnet.
    snapshot_id = snapshots_factory.create_snapshot(request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region)
    logging.info("Snapshot id: %s" % snapshot_id)
    cluster_config = pcluster_config_reader(mount_dir=mount_dir, volume_size=volume_size, snapshot_id=snapshot_id)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    mount_dir = "/" + mount_dir
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # The filesystem still reports the snapshot's ~10G size; the volume itself is 21G.
    _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size="9.8")
    _test_ebs_resize(remote_command_executor, mount_dir, volume_size=volume_size)
    _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
    # Checks for test data
    result = remote_command_executor.run_remote_command("cat {}/test.txt".format(mount_dir))
    assert_that(result.stdout.strip()).is_equal_to("hello world")
# cn-north-1 does not support KMS
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "alinux2", "awsbatch")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-2", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("instance")
def test_ebs_multiple(scheduler, pcluster_config_reader, clusters_factory, region, os):
    """Five shared EBS volumes of different types/sizes.

    Checks each volume's mount and cross-node sharing, then verifies
    type, encryption (default-on when unspecified), KMS key and IOPS
    against the cluster config; finally checks root-volume settings.
    """
    mount_dirs = ["/ebs_mount_dir_{0}".format(i) for i in range(0, 5)]
    volume_sizes = [15 + 5 * i for i in range(0, 5)]
    # for volume type sc1 and st1, the minimum volume sizes are 500G
    volume_sizes[3] = 500
    volume_sizes[4] = 500
    cluster_config = pcluster_config_reader(mount_dirs=mount_dirs, volume_sizes=volume_sizes)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    for mount_dir, volume_size in zip(mount_dirs, volume_sizes):
        # for volume size equal to 500G, the filesystem size is only about 492G
        # This is because the file systems use some of the total space available on a device for storing internal
        # structures and data (the file system's metadata). The overhead of the XFS filesystem is around 0.5%.
        # If we test with small volume size(eg: 40G), the number is not large enough to show the gap between the
        # partition size and the filesystem size. For sc1 and st1, the minimum size is 500G, so there will be a size
        # difference.
        _test_ebs_correctly_mounted(
            remote_command_executor, mount_dir, volume_size if volume_size != 500 else "49[0-9]"
        )
        _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)

    # Volumes are named ebs1..ebs5 in the cluster config, in creation order.
    for i, volume_id in enumerate(get_ebs_volume_ids(cluster, region)):
        ebs_settings = _get_ebs_settings_by_name(cluster.config, f"ebs{i+1}")
        expected_volume_type = ebs_settings["VolumeType"]
        actual_volume_type, actual_volume_iops = describe_volume(volume_id, region)
        assert_that(actual_volume_type).is_equal_to(expected_volume_type)
        encrypted = ebs_settings.get("Encrypted")
        if encrypted is None:
            # Default encryption if not specified
            encrypted = True
        _test_ebs_encrypted_with_kms(volume_id, region, encrypted=encrypted, kms_key_id=ebs_settings.get("KmsKeyId"))
        # only io1, io2, gp3 can configure iops
        if expected_volume_type in ["io1", "io2", "gp3"]:
            assert_that(actual_volume_iops).is_equal_to(int(ebs_settings["Iops"]))

    _test_root_volume_encryption(cluster, os, region, scheduler, encrypted=False)
    _assert_root_volume_configuration(cluster, os, region, scheduler)
def _get_ebs_settings_by_name(config, name):
for shared_storage in config["SharedStorage"]:
if shared_storage["Name"] == name:
return shared_storage["EbsSettings"]
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "centos7", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_existing(
    request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
    """Attach a pre-existing EBS volume and verify it outlives the cluster."""
    logging.info("Testing ebs existing")
    logging.info("Creating volume")
    volume_id = snapshots_factory.create_existing_volume(
        request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region
    )
    logging.info("Existing Volume id: %s" % volume_id)
    relative_mount_dir = "existing_mount_dir"
    cluster_config = pcluster_config_reader(volume_id=volume_id, existing_mount_dir=relative_mount_dir)
    cluster = clusters_factory(cluster_config)
    command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, command_executor)
    existing_mount_dir = "/" + relative_mount_dir
    _test_ebs_correctly_mounted(command_executor, existing_mount_dir, volume_size="9.8")
    _test_ebs_correctly_shared(command_executor, existing_mount_dir, scheduler_commands)
    # The volume was pre-populated with test data; make sure it is readable.
    result = command_executor.run_remote_command("cat {}/test.txt".format(existing_mount_dir))
    assert_that(result.stdout.strip()).is_equal_to("hello world")
    # An externally-owned volume must survive cluster deletion, so delete first...
    cluster.delete()
    # ...then confirm the volume is still around and detached ("available").
    _assert_volume_exist(volume_id, region)
def _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size):
    """Verify an ext4 filesystem is mounted at `mount_dir` with the expected size.

    Args:
        remote_command_executor: executor targeting the cluster head node.
        mount_dir: absolute mount point, e.g. "/ebs_mount_dir".
        volume_size: expected size in GiB. May be a number or a regex
            fragment (e.g. "49[0-9]") since it is interpolated into a regex.
    """
    logging.info("Testing ebs {0} is correctly mounted".format(mount_dir))
    # df prints "<size> <mount point>" for each ext4 filesystem; grep narrows
    # the output to the mount dir under test.
    result = remote_command_executor.run_remote_command(
        "df -h -t ext4 | tail -n +2 | awk '{{print $2, $6}}' | grep '{0}'".format(mount_dir)
    )
    assert_that(result.stdout).matches(r"{size}G {mount_dir}".format(size=volume_size, mount_dir=mount_dir))
    # The mount must also be persisted in fstab so it survives reboots
    # (_netdev: wait for the network before mounting).
    result = remote_command_executor.run_remote_command("cat /etc/fstab")
    assert_that(result.stdout).matches(r"UUID=.* {mount_dir} ext4 _netdev 0 0".format(mount_dir=mount_dir))
def _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
    """Verify `mount_dir` is visible and writable from the compute nodes."""
    logging.info("Testing ebs correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
def _test_home_correctly_shared(remote_command_executor, scheduler_commands):
    """Verify the shared /home directory is visible from the compute nodes."""
    logging.info("Testing home dir correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, "/home", scheduler_commands)
def _test_ebs_resize(remote_command_executor, mount_dir, volume_size):
    """Check the block device behind `mount_dir` has the requested volume size.

    This test verifies the following case:
    If the volume is created from a snapshot with a size larger than the snapshot, the size of the volume is correct.

    Args:
        remote_command_executor: executor targeting the cluster head node.
        mount_dir: absolute mount point backed by the resized volume.
        volume_size: expected size in GiB of the underlying disk (not the
            partition/filesystem, which may still have the snapshot's size).
    """
    logging.info("Testing ebs has correct volume size")
    # get the filesystem that the shared_dir is mounted on
    # example output of "df -h -t ext4"
    # Filesystem Size Used Avail Use% Mounted on
    # /dev/nvme1n1p1 9.8G 37M 9.3G 1% /ebs_mount_dir
    # /dev/nvme2n1p1 9.8G 37M 9.3G 1% /existing_mount_dir
    filesystem_name = remote_command_executor.run_remote_command(
        "df -h -t ext4 | tail -n +2 |grep '{mount_dir}' | awk '{{print $1}}'".format(mount_dir=mount_dir)
    ).stdout
    # get the volume name given the filesystem name
    # example input: /dev/nvme1n1p1
    # example output: nvme1n1
    volume_name = remote_command_executor.run_remote_command(
        "lsblk -no pkname {filesystem_name}".format(filesystem_name=filesystem_name)
    ).stdout
    # get the volume size of the volume
    # example output of "lsblk"
    # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    # nvme0n1 259:0 0 25G 0 disk
    # ├─nvme0n1p1 259:1 0 25G 0 part /
    # └─nvme0n1p128 259:2 0 1M 0 part
    # nvme1n1 259:3 0 21G 0 disk
    # └─nvme1n1p1 259:4 0 10G 0 part /ebs_mount_dir
    # nvme2n1 259:5 0 10G 0 disk
    # └─nvme2n1p1 259:6 0 10G 0 part /existing_mount_dir
    # sed -n '1p' keeps only the first match: the disk itself, not its partitions.
    result = remote_command_executor.run_remote_command(
        "lsblk | tail -n +2 | grep {volume_name}| awk '{{print $4}}' | sed -n '1p'''".format(volume_name=volume_name)
    )
    assert_that(result.stdout).matches(r"{size}G".format(size=volume_size))
def get_ebs_volume_ids(cluster, region):
    """Return the configured EBS volume ids, e.g. ['vol-000', 'vol-001']."""
    # The cluster CFN stack exposes the ids as a comma-separated output.
    stack_outputs = utils.retrieve_cfn_outputs(cluster.cfn_name, region)
    return stack_outputs.get("EBSIds").split(",")
def describe_volume(volume_id, region):
    """Return (volume_type, iops) for an EBS volume via the DescribeVolumes API."""
    ec2_client = boto3.client("ec2", region_name=region)
    volume = ec2_client.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    return volume.get("VolumeType"), volume.get("Iops")
def _assert_volume_exist(volume_id, region):
    """Assert the EBS volume still exists and is detached (state "available")."""
    volume_status = (
        boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("State")
    )
    assert_that(volume_status).is_equal_to("available")
def _test_ebs_encrypted_with_kms(volume_id, region, encrypted, kms_key_id=None):
    """Assert the volume's encryption state and, optionally, its KMS key.

    Args:
        volume_id: id of the EBS volume to inspect.
        region: AWS region of the volume.
        encrypted: expected value of the volume's Encrypted flag.
        kms_key_id: if given, the volume's KmsKeyId must match this
            pattern/id.
    """
    logging.info("Getting Encrypted information from DescribeVolumes API.")
    volume_info = boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    # Compare the values directly so a failure reports the actual flag
    # instead of an opaque "expected True but was False" from a pre-computed
    # boolean (previously: assert_that(x == encrypted).is_true()).
    assert_that(volume_info.get("Encrypted")).is_equal_to(encrypted)
    if kms_key_id:
        assert_that(volume_info.get("KmsKeyId")).matches(kms_key_id)
def _test_root_volume_encryption(cluster, os, region, scheduler, encrypted):
    """Assert the expected encryption state of the cluster's root volume(s).

    Args:
        cluster: cluster under test.
        os: cluster OS name, forwarded to get_root_volume_id.
        region: AWS region of the cluster.
        scheduler: "slurm" checks every cluster instance; any other
            scheduler checks only the head node.
        encrypted: expected value of each root volume's Encrypted flag.
    """
    logging.info("Testing root volume encryption.")
    if scheduler == "slurm":
        # If the scheduler is slurm, root volumes both on head and compute can be encrypted
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            _test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
    else:
        # If the scheduler is awsbatch, only the head_node root volume can be encrypted.
        root_volume_id = utils.get_root_volume_id(cluster.cfn_resources["HeadNode"], region, os)
        _test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
def _assert_root_volume_configuration(cluster, os, region, scheduler):
    """Compare root-volume settings (type, iops, ...) against the cluster config.

    Every key present under the config's RootVolume section is compared
    verbatim with the corresponding field of the DescribeVolumes response.
    Head node settings come from HeadNode/LocalStorage/RootVolume; with the
    slurm scheduler, compute nodes are additionally checked against the first
    SlurmQueues entry's ComputeSettings/LocalStorage/RootVolume.
    """
    logging.info("Testing root volume type, iops, throughput.")
    # Test root volume of head node
    head_node = cluster.cfn_resources["HeadNode"]
    if utils.dict_has_nested_key(cluster.config, ("HeadNode", "LocalStorage", "RootVolume")):
        logging.info("Checking head node root volume settings")
        root_volume_id = utils.get_root_volume_id(head_node, region, os)
        expected_settings = cluster.config["HeadNode"]["LocalStorage"]["RootVolume"]
        _assert_volume_configuration(expected_settings, root_volume_id, region)
    if scheduler == "slurm":
        # Only if the scheduler is slurm, root volumes both on compute can be configured
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            if instance == head_node:
                # head node is already checked
                continue
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            if utils.dict_has_nested_key(
                cluster.config, ("Scheduling", "SlurmQueues", 0, "ComputeSettings", "LocalStorage", "RootVolume")
            ):
                logging.info("Checking compute node root volume settings")
                expected_settings = cluster.config["Scheduling"]["SlurmQueues"][0]["ComputeSettings"]["LocalStorage"][
                    "RootVolume"
                ]
                _assert_volume_configuration(expected_settings, root_volume_id, region)
def _assert_volume_configuration(expected_settings, volume_id, region):
    """Assert each expected setting matches the volume's DescribeVolumes data."""
    actual_settings = (
        boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    )
    for key, expected_value in expected_settings.items():
        assert_that(actual_settings[key]).is_equal_to(expected_value)
@pytest.fixture(scope="class")
def snapshots_factory():
    """Yield a per-test-class EBSSnapshotsFactory; release its AWS resources on teardown."""
    factory = EBSSnapshotsFactory()
    yield factory
    # Teardown: delete all snapshots/volumes the factory created.
    factory.release_all()
@pytest.fixture(scope="module")
def kms_key_factory():
    """Yield a per-module KMSKeyFactory; release its keys/roles on teardown."""
    factory = KMSKeyFactory()
    yield factory
    # Teardown: schedule deletion of the KMS keys created during the tests.
    factory.release_all()
| 46.705521 | 120 | 0.732825 |
import logging
import boto3
import pytest
import utils
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from tests.common.schedulers_common import get_scheduler_commands
from tests.storage.kms_key_factory import KMSKeyFactory
from tests.storage.snapshots_factory import EBSSnapshotsFactory
from tests.storage.storage_common import verify_directory_correctly_shared
@pytest.mark.regions(["eu-west-3", "cn-north-1", "us-gov-west-1"])
@pytest.mark.instances(["c4.xlarge", "c5.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("instance")
def test_ebs_single(scheduler, pcluster_config_reader, clusters_factory, kms_key_factory, region, os):
mount_dir = "ebs_mount_dir"
kms_key_id = kms_key_factory.create_kms_key(region)
cluster_config = pcluster_config_reader(
mount_dir=mount_dir, ec2_iam_role=kms_key_factory.iam_role_arn, ebs_kms_key_id=kms_key_id
)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
mount_dir = "/" + mount_dir
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
volume_id = get_ebs_volume_ids(cluster, region)[0]
_test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size=35)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
_test_ebs_encrypted_with_kms(volume_id, region, encrypted=True, kms_key_id=kms_key_id)
_test_root_volume_encryption(cluster, os, region, scheduler, encrypted=True)
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "alinux2", "slurm")
@pytest.mark.dimensions("cn-northwest-1", "c4.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_snapshot(
request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
logging.info("Testing ebs snapshot")
mount_dir = "ebs_mount_dir"
volume_size = 21
logging.info("Creating snapshot")
snapshot_id = snapshots_factory.create_snapshot(request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region)
logging.info("Snapshot id: %s" % snapshot_id)
cluster_config = pcluster_config_reader(mount_dir=mount_dir, volume_size=volume_size, snapshot_id=snapshot_id)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
mount_dir = "/" + mount_dir
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
_test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size="9.8")
_test_ebs_resize(remote_command_executor, mount_dir, volume_size=volume_size)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
result = remote_command_executor.run_remote_command("cat {}/test.txt".format(mount_dir))
assert_that(result.stdout.strip()).is_equal_to("hello world")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "alinux2", "awsbatch")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-2", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("instance")
def test_ebs_multiple(scheduler, pcluster_config_reader, clusters_factory, region, os):
mount_dirs = ["/ebs_mount_dir_{0}".format(i) for i in range(0, 5)]
volume_sizes = [15 + 5 * i for i in range(0, 5)]
volume_sizes[3] = 500
volume_sizes[4] = 500
cluster_config = pcluster_config_reader(mount_dirs=mount_dirs, volume_sizes=volume_sizes)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
for mount_dir, volume_size in zip(mount_dirs, volume_sizes):
# If we test with small volume size(eg: 40G), the number is not large enough to show the gap between the
# partition size and the filesystem size. For sc1 and st1, the minimum size is 500G, so there will be a size
# difference.
_test_ebs_correctly_mounted(
remote_command_executor, mount_dir, volume_size if volume_size != 500 else "49[0-9]"
)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
volume_ids = get_ebs_volume_ids(cluster, region)
for i in range(len(volume_ids)):
# test different volume types
volume_id = volume_ids[i]
ebs_settings = _get_ebs_settings_by_name(cluster.config, f"ebs{i+1}")
volume_type = ebs_settings["VolumeType"]
volume = describe_volume(volume_id, region)
assert_that(volume[0]).is_equal_to(volume_type)
encrypted = ebs_settings.get("Encrypted")
if encrypted is None:
# Default encryption if not specified
encrypted = True
_test_ebs_encrypted_with_kms(volume_id, region, encrypted=encrypted, kms_key_id=ebs_settings.get("KmsKeyId"))
# test different iops
# only io1, io2, gp3 can configure iops
if volume_type in ["io1", "io2", "gp3"]:
volume_iops = ebs_settings["Iops"]
assert_that(volume[1]).is_equal_to(int(volume_iops))
_test_root_volume_encryption(cluster, os, region, scheduler, encrypted=False)
_assert_root_volume_configuration(cluster, os, region, scheduler)
def _get_ebs_settings_by_name(config, name):
for shared_storage in config["SharedStorage"]:
if shared_storage["Name"] == name:
return shared_storage["EbsSettings"]
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "centos7", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_existing(
request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
logging.info("Testing ebs existing")
existing_mount_dir = "existing_mount_dir"
logging.info("Creating volume")
volume_id = snapshots_factory.create_existing_volume(
request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region
)
logging.info("Existing Volume id: %s" % volume_id)
cluster_config = pcluster_config_reader(volume_id=volume_id, existing_mount_dir=existing_mount_dir)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
existing_mount_dir = "/" + existing_mount_dir
_test_ebs_correctly_mounted(remote_command_executor, existing_mount_dir, volume_size="9.8")
_test_ebs_correctly_shared(remote_command_executor, existing_mount_dir, scheduler_commands)
# Checks for test data
result = remote_command_executor.run_remote_command("cat {}/test.txt".format(existing_mount_dir))
assert_that(result.stdout.strip()).is_equal_to("hello world")
# delete the cluster before detaching the EBS volume
cluster.delete()
# check the volume still exists after deleting the cluster
_assert_volume_exist(volume_id, region)
def _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size):
logging.info("Testing ebs {0} is correctly mounted".format(mount_dir))
result = remote_command_executor.run_remote_command(
"df -h -t ext4 | tail -n +2 | awk '{{print $2, $6}}' | grep '{0}'".format(mount_dir)
)
assert_that(result.stdout).matches(r"{size}G {mount_dir}".format(size=volume_size, mount_dir=mount_dir))
result = remote_command_executor.run_remote_command("cat /etc/fstab")
assert_that(result.stdout).matches(r"UUID=.* {mount_dir} ext4 _netdev 0 0".format(mount_dir=mount_dir))
def _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
logging.info("Testing ebs correctly mounted on compute nodes")
verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
def _test_home_correctly_shared(remote_command_executor, scheduler_commands):
logging.info("Testing home dir correctly mounted on compute nodes")
verify_directory_correctly_shared(remote_command_executor, "/home", scheduler_commands)
def _test_ebs_resize(remote_command_executor, mount_dir, volume_size):
logging.info("Testing ebs has correct volume size")
# get the filesystem that the shared_dir is mounted on
# example output of "df -h -t ext4"
# Filesystem Size Used Avail Use% Mounted on
# /dev/nvme1n1p1 9.8G 37M 9.3G 1% /ebs_mount_dir
# /dev/nvme2n1p1 9.8G 37M 9.3G 1% /existing_mount_dir
filesystem_name = remote_command_executor.run_remote_command(
"df -h -t ext4 | tail -n +2 |grep '{mount_dir}' | awk '{{print $1}}'".format(mount_dir=mount_dir)
).stdout
# get the volume name given the filesystem name
# example input: /dev/nvme1n1p1
# example output: nvme1n1
volume_name = remote_command_executor.run_remote_command(
"lsblk -no pkname {filesystem_name}".format(filesystem_name=filesystem_name)
).stdout
# get the volume size of the volume
# example output of "lsblk"
# NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
# nvme0n1 259:0 0 25G 0 disk
# ├─nvme0n1p1 259:1 0 25G 0 part /
# └─nvme0n1p128 259:2 0 1M 0 part
# nvme1n1 259:3 0 21G 0 disk
# └─nvme1n1p1 259:4 0 10G 0 part /ebs_mount_dir
# nvme2n1 259:5 0 10G 0 disk
# └─nvme2n1p1 259:6 0 10G 0 part /existing_mount_dir
result = remote_command_executor.run_remote_command(
"lsblk | tail -n +2 | grep {volume_name}| awk '{{print $4}}' | sed -n '1p'''".format(volume_name=volume_name)
)
assert_that(result.stdout).matches(r"{size}G".format(size=volume_size))
def get_ebs_volume_ids(cluster, region):
# get the list of configured ebs volume ids
# example output: ['vol-000', 'vol-001', 'vol-002']
return utils.retrieve_cfn_outputs(cluster.cfn_name, region).get("EBSIds").split(",")
def describe_volume(volume_id, region):
volume = boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
volume_type = volume.get("VolumeType")
volume_iops = volume.get("Iops")
return volume_type, volume_iops
def _assert_volume_exist(volume_id, region):
volume_status = (
boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("State")
)
assert_that(volume_status).is_equal_to("available")
def _test_ebs_encrypted_with_kms(volume_id, region, encrypted, kms_key_id=None):
logging.info("Getting Encrypted information from DescribeVolumes API.")
volume_info = boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
assert_that(volume_info.get("Encrypted") == encrypted).is_true()
if kms_key_id:
assert_that(volume_info.get("KmsKeyId")).matches(kms_key_id)
def _test_root_volume_encryption(cluster, os, region, scheduler, encrypted):
logging.info("Testing root volume encryption.")
if scheduler == "slurm":
# If the scheduler is slurm, root volumes both on head and compute can be encrypted
instance_ids = cluster.get_cluster_instance_ids()
for instance in instance_ids:
root_volume_id = utils.get_root_volume_id(instance, region, os)
_test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
else:
# If the scheduler is awsbatch, only the head_node root volume can be encrypted.
root_volume_id = utils.get_root_volume_id(cluster.cfn_resources["HeadNode"], region, os)
_test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
def _assert_root_volume_configuration(cluster, os, region, scheduler):
logging.info("Testing root volume type, iops, throughput.")
# Test root volume of head node
head_node = cluster.cfn_resources["HeadNode"]
if utils.dict_has_nested_key(cluster.config, ("HeadNode", "LocalStorage", "RootVolume")):
logging.info("Checking head node root volume settings")
root_volume_id = utils.get_root_volume_id(head_node, region, os)
expected_settings = cluster.config["HeadNode"]["LocalStorage"]["RootVolume"]
_assert_volume_configuration(expected_settings, root_volume_id, region)
if scheduler == "slurm":
# Only if the scheduler is slurm, root volumes both on compute can be configured
instance_ids = cluster.get_cluster_instance_ids()
for instance in instance_ids:
if instance == head_node:
# head node is already checked
continue
root_volume_id = utils.get_root_volume_id(instance, region, os)
if utils.dict_has_nested_key(
cluster.config, ("Scheduling", "SlurmQueues", 0, "ComputeSettings", "LocalStorage", "RootVolume")
):
logging.info("Checking compute node root volume settings")
expected_settings = cluster.config["Scheduling"]["SlurmQueues"][0]["ComputeSettings"]["LocalStorage"][
"RootVolume"
]
_assert_volume_configuration(expected_settings, root_volume_id, region)
def _assert_volume_configuration(expected_settings, volume_id, region):
actual_root_volume_settings = (
boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
)
for key in expected_settings:
assert_that(actual_root_volume_settings[key]).is_equal_to(expected_settings[key])
@pytest.fixture(scope="class")
def snapshots_factory():
factory = EBSSnapshotsFactory()
yield factory
factory.release_all()
@pytest.fixture(scope="module")
def kms_key_factory():
factory = KMSKeyFactory()
yield factory
factory.release_all()
| true | true |
f72a864d02a1615e377a438d9b2868959da2187a | 8,179 | py | Python | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tokenization ops for BERT preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
# Regex alternatives describing where the basic tokenizer splits text.
_DELIM_REGEX = [
    # Runs of whitespace.
    r"\s+",
    # ASCII punctuation ranges plus the general Unicode punctuation class \p{P}.
    r"|".join([
        r"[!-/]",
        r"[:-@]",
        r"[\[-`]",
        r"[{-~]",
        r"[\p{P}]",
    ]),
    # CJK ideograph blocks (Unified Ideographs, Extensions A-E, and the
    # Compatibility Ideographs planes): each ideograph is split into its own
    # token.
    r"|".join([
        r"[\x{4E00}-\x{9FFF}]",
        r"[\x{3400}-\x{4DBF}]",
        r"[\x{20000}-\x{2A6DF}]",
        r"[\x{2A700}-\x{2B73F}]",
        r"[\x{2B740}-\x{2B81F}]",
        r"[\x{2B820}-\x{2CEAF}]",
        r"[\x{F900}-\x{FAFF}]",
        r"[\x{2F800}-\x{2FA1F}]",
    ]),
]

# Pattern the splitter uses to locate delimiters.
_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
# Same delimiters minus whitespace: used as the "keep" pattern so punctuation
# and CJK delimiters are emitted as tokens while whitespace is dropped.
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
  """Splits text on whitespace, punctuation and CJK ideographs.

  Deterministic rules:
    - runs of whitespace delimit tokens (optionally kept as tokens);
    - punctuation characters become individual tokens;
    - Chinese/Japanese/Korean ideographs become individual tokens.

  When `lower_case` is set, the text is first case-folded, NFD-normalized
  and stripped of combining marks (accents). Otherwise an optional
  `normalization_form` is applied via normalize_utf8().
  """

  def __init__(self,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    self._lower_case = lower_case
    # Delimiters re-emitted as tokens: whitespace is only kept on request.
    self._keep_delim_regex_pattern = (
        _DELIM_REGEX_PATTERN
        if keep_whitespace else _KEEP_DELIM_NO_WHITESPACE_PATTERN)
    self._normalization_form = normalization_form

  def tokenize(self, text_input):
    """Same as tokenize_with_offsets(), returning only the tokens."""
    return self.tokenize_with_offsets(text_input)[0]

  def tokenize_with_offsets(self, text_input):
    """Splits untokenized UTF-8 strings into tokens with byte offsets.

    Args:
      text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.

    Returns:
      A `RaggedTensor` of tokenized strings from text_input, plus start/end
      offset tensors.
    """
    if self._lower_case:
      # Case-fold, then strip accents: NFD followed by removal of combining
      # marks (\p{Mn}).
      text_input = case_fold_utf8(text_input)
      text_input = normalize_utf8(text_input, "NFD")
      text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "")
    elif self._normalization_form is not None:
      # utf8 normalization (only when not lower-casing).
      text_input = normalize_utf8(text_input, self._normalization_form)
    # Replace control and format characters with plain spaces.
    text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
    return regex_split_ops.regex_split_with_offsets(
        text_input, _DELIM_REGEX_PATTERN, self._keep_delim_regex_pattern,
        "BertBasicTokenizer")
class BertTokenizer(TokenizerWithOffsets):
  """End-to-end BERT tokenizer: basic tokenization followed by wordpiece.

  A composition of BasicTokenizer (whitespace/punctuation/CJK splitting with
  optional lower-casing and normalization) and WordpieceTokenizer (subword
  lookup against a vocabulary). See those classes for the detailed semantics
  of each argument.

  Attributes:
    vocab_lookup_table: a LookupInterface table holding the subword
      vocabulary, or a string path to a vocab.txt file (a static table with
      one OOV bucket is then built from it).
    suffix_indicator: (optional) characters marking a wordpiece as a suffix
      of another subword. Default '##'.
    max_bytes_per_word: (optional) max size of an input token. Default 100.
    max_chars_per_token: (optional) max subword size, excluding the suffix
      indicator; if known, speeds up decoding of long words.
    token_out_type: (optional) `tf.int64` ids (default) or `tf.string`
      subwords.
    unknown_token: (optional) replacement for out-of-vocabulary tokens,
      default "[UNK]"; converted to an id via the table when
      `token_out_type` is `tf.int64`. `None` leaves OOV tokens unchanged.
    split_unknown_characters: (optional) if True, emit single unknown
      characters as subtokens instead of one unknown token per word.
    lower_case: if True, case-fold, NFD-normalize and strip accents first.
    keep_whitespace: if True, whitespace is preserved as tokens.
    normalization_form: normalization applied when lower_case is False; see
      normalize_utf8() for valid values.
  """

  def __init__(self,
               vocab_lookup_table,
               suffix_indicator="##",
               max_bytes_per_word=100,
               max_chars_per_token=None,
               token_out_type=dtypes.int64,
               unknown_token="[UNK]",
               split_unknown_characters=False,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    if isinstance(vocab_lookup_table, str):
      # A vocab file path was given: build a static id table with a single
      # out-of-vocabulary bucket.
      init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)
      vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(
          init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)
    self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,
                                           normalization_form)
    self._wordpiece_tokenizer = WordpieceTokenizer(
        vocab_lookup_table, suffix_indicator, max_bytes_per_word,
        max_chars_per_token, token_out_type, unknown_token,
        split_unknown_characters)

  def tokenize_with_offsets(self, text_input):
    """Tokenizes into wordpieces with byte offsets into the original text."""
    words, word_begin, _ = self._basic_tokenizer.tokenize_with_offsets(
        text_input)
    wordpieces, piece_begin, piece_end = (
        self._wordpiece_tokenizer.tokenize_with_offsets(words))
    # Wordpiece offsets are relative to their parent word; add each word's
    # start offset to map them back onto the original string.
    word_begin_expanded = array_ops.expand_dims(word_begin, axis=2)
    return (wordpieces,
            word_begin_expanded + piece_begin,
            word_begin_expanded + piece_end)

  def tokenize(self, text_input):
    """Performs untokenized text to wordpiece tokenization for BERT.

    Args:
      text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8
        strings.

    Returns:
      A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string
      contents (or ID in the vocab_lookup_table representing that string)
      of the `jth` token in `input[i1...iN]`
    """
    words = self._basic_tokenizer.tokenize(text_input)
    return self._wordpiece_tokenizer.tokenize(words)
| 40.093137 | 80 | 0.711334 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
# Regex alternatives that delimit basic tokens:
#   1. runs of whitespace,
#   2. ASCII punctuation ranges plus the Unicode punctuation category \p{P},
#   3. CJK ideograph blocks (each ideograph becomes its own token).
_DELIM_REGEX = [
    r"\s+",
    r"|".join([
        r"[!-/]",
        r"[:-@]",
        r"[\[-`]",
        r"[{-~]",
        r"[\p{P}]",
    ]),
    r"|".join([
        r"[\x{4E00}-\x{9FFF}]",
        r"[\x{3400}-\x{4DBF}]",
        r"[\x{20000}-\x{2A6DF}]",
        r"[\x{2A700}-\x{2B73F}]",
        r"[\x{2B740}-\x{2B81F}]",
        r"[\x{2B820}-\x{2CEAF}]",
        r"[\x{F900}-\x{FAFF}]",
        r"[\x{2F800}-\x{2FA1F}]",
    ]),
]
# Full delimiter pattern: whitespace, punctuation, or CJK ideograph.
_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
# Same delimiters minus whitespace -- used when whitespace is stripped but
# punctuation/CJK delimiters are kept as tokens.
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
  """Basic BERT tokenizer: splits on whitespace, punctuation and CJK chars.

  Optionally lower-cases and strips accents (case folding + NFD + removal of
  combining marks), or applies a caller-chosen normalization form when
  `lower_case` is False.
  """

  def __init__(self,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    self._lower_case = lower_case
    # When whitespace is stripped, only punctuation/CJK delimiters are kept
    # as tokens in the split output.
    if not keep_whitespace:
      self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN
    else:
      self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN
    self._normalization_form = normalization_form

  def tokenize(self, text_input):
    """Tokenizes `text_input`, discarding the offset outputs."""
    tokens, _, _ = self.tokenize_with_offsets(text_input)
    return tokens

  def tokenize_with_offsets(self, text_input):
    """Tokenizes `text_input`, also returning begin/end byte offsets."""
    if self._lower_case:
      text_input = case_fold_utf8(text_input)
      text_input = normalize_utf8(text_input, "NFD")
      # Remove combining marks (accents) exposed by NFD decomposition.
      text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "")
    else:
      if self._normalization_form is not None:
        text_input = normalize_utf8(text_input, self._normalization_form)
    # Replace control (Cc) and format (Cf) characters with spaces before
    # splitting on the delimiter pattern.
    text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
    return regex_split_ops.regex_split_with_offsets(
        text_input, _DELIM_REGEX_PATTERN, self._keep_delim_regex_pattern,
        "BertBasicTokenizer")
class BertTokenizer(TokenizerWithOffsets):
  """End-to-end BERT tokenizer: basic tokenization followed by wordpiece.

  See BasicTokenizer and WordpieceTokenizer for the two stages.
  """

  def __init__(self,
               vocab_lookup_table,
               suffix_indicator="##",
               max_bytes_per_word=100,
               max_chars_per_token=None,
               token_out_type=dtypes.int64,
               unknown_token="[UNK]",
               split_unknown_characters=False,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    # Accept either a ready-made lookup table or a vocab-file path; a path is
    # wrapped in a static vocabulary table with a single OOV bucket.
    if isinstance(vocab_lookup_table, str):
      init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)
      vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(
          init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)
    self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,
                                           normalization_form)
    self._wordpiece_tokenizer = WordpieceTokenizer(
        vocab_lookup_table, suffix_indicator, max_bytes_per_word,
        max_chars_per_token, token_out_type, unknown_token,
        split_unknown_characters)

  def tokenize_with_offsets(self, text_input):
    """Tokenizes into wordpieces plus byte offsets into the original text.

    Wordpiece offsets are relative to each basic token; adding the basic
    token's begin offset makes them relative to the input string.
    """
    tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
    wordpieces, wp_begin, wp_end = (
        self._wordpiece_tokenizer.tokenize_with_offsets(tokens))
    begin_expanded = array_ops.expand_dims(begin, axis=2)
    final_begin = begin_expanded + wp_begin
    final_end = begin_expanded + wp_end
    return wordpieces, final_begin, final_end

  def tokenize(self, text_input):
    """Tokenizes untokenized UTF-8 text into wordpieces (IDs or strings)."""
    tokens = self._basic_tokenizer.tokenize(text_input)
    return self._wordpiece_tokenizer.tokenize(tokens)
| true | true |
f72a86580356f7077caa16bc6708c93567dae160 | 6,746 | py | Python | py_algorithms/data_structures/heap.py | rlishtaba/py-algorithms-playground | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | 31 | 2017-09-17T06:29:15.000Z | 2022-03-11T14:45:40.000Z | py_algorithms/data_structures/heap.py | MindaugasVaitkus2/py-algorithms | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | null | null | null | py_algorithms/data_structures/heap.py | MindaugasVaitkus2/py-algorithms | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | 11 | 2017-11-01T20:33:20.000Z | 2022-02-13T16:54:21.000Z | import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from ..utils import test_iterable
class _HeapNode:
def __init__(self, key: Any, value: Any):
self.key = key
self.value = value
self.degree = 0
self.marked = False
self.right = self
self.left = self
self.parent = None
self.child = None
def is_marked(self) -> bool:
return self.marked is True
class Heap:
    """Fibonacci-style heap ordered by a user-supplied binary comparator.

    ``comparator_f2(a, b)`` must return True when key ``a`` should sit
    closer to the top than key ``b`` (e.g. ``operator.lt`` yields a
    min-heap).  Entries are (key, value) pairs; duplicate keys are allowed.
    """

    # Sentinel marking an empty slot in the degree table used during
    # consolidation.
    MAX_MIN = -sys.maxsize

    def __init__(self, comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()):
        """Create a heap; each item of ``xs`` is pushed as both key and value."""
        test_iterable(xs)
        self._size = 0
        self._comparator_f2 = comparator_f2
        self._next = None   # root-list node currently winning the comparator
        self._stored = {}   # key -> list of _HeapNode objects pushed with it
        # default initial values
        for x in xs:
            self.push(x, x)

    @staticmethod
    def _get_by_index(array, index) -> Union[None, Any]:
        """Return ``array[index]``, or None when ``index`` is out of range."""
        try:
            return array[index]
        except IndexError:
            return None

    @classmethod
    def _set_entry_by_index(cls, array, index, value):
        """Store ``value`` at ``index``, growing ``array`` with sentinels.

        NOTE(review): when ``array[index]`` already holds a real node, the
        extend below is a no-op (non-positive count) and this recurses
        forever; callers appear to reset slots to MAX_MIN first -- TODO
        confirm.
        """
        if cls._get_by_index(array, index) == cls.MAX_MIN:
            array[index] = value
            return array
        else:
            array.extend([cls.MAX_MIN] * (index - len(array) + 1))
            return cls._set_entry_by_index(array, index, value)

    @property
    def size(self):
        """Number of entries currently stored."""
        return self._size

    @property
    def next(self) -> Union[Any, None]:
        """Value of the top entry, or None when empty (does not pop)."""
        if self._next:
            return self._next.value
        return None

    @property
    def next_key(self) -> Union[Any, None]:
        """Key of the top entry, or None when empty (does not pop)."""
        if self._next:
            return self._next.key
        return None

    @property
    def is_empty(self) -> bool:
        """True when the heap holds no entries."""
        return self._next is None

    def clear(self) -> 'Heap':
        """Remove every entry and return self for chaining."""
        self._next = None
        self._size = 0
        self._stored = {}
        return self

    def contains_key(self, key) -> bool:
        """Return True when at least one node was pushed under ``key``."""
        # Single lookup; the stored value is a (possibly empty) list.
        return bool(self._stored.get(key))

    def push(self, key: Any, value: Any) -> Any:
        """Insert ``value`` under ``key`` and return ``value``.

        Raises:
            RuntimeError: when ``key`` is None.
        """
        if key is None:
            raise RuntimeError('Could not process heap keys equal to Null.')
        node = _HeapNode(key, value)
        if self._next:
            # Splice the new node into the circular root list just left of
            # the current top; promote it when its key wins the comparator.
            node.right = self._next
            node.left = self._next.left
            node.left.right = node
            self._next.left = node
            if self._comparator_f2(key, self._next.key):
                self._next = node
        else:
            self._next = node
        self._size += 1
        # (A previous revision walked the whole root list here to no
        # effect; that dead O(n) traversal has been removed.)
        self._stored.setdefault(key, []).append(node)
        return value

    def pop(self) -> Any:
        """Remove and return the top value, or None when the heap is empty."""
        if not self._next:
            return None
        popped = self._next
        if self._size == 1:
            self.clear()
            return popped.value
        # Merge the popped node's children into the root list.
        if self._next.child:
            self._next.child.parent = None
            sibling = self._next.child.right
            while not sibling == self._next.child:
                sibling.parent = None
                sibling = sibling.right
            # Merge children into root.
            # If next is a singular root node,
            # make its child pointer the next node
            if self._next.right == self._next:
                self._next = self._next.child
            else:
                next_left, next_right = self._next.left, self._next.right
                current_child = self._next.child
                self._next.right.left = current_child
                self._next.left.right = current_child.right
                current_child.right.left = next_left
                current_child.right = next_right
                self._next = self._next.right
        else:
            # Childless top node: simply unlink it from the root list.
            self._next.left.right = self._next.right
            self._next.right.left = self._next.left
            self._next = self._next.right
        self._consolidate()
        if not self._stored.get(popped.key, None):
            raise RuntimeError("Could not delete a heap entry.")
        # NOTE(review): the popped node is never removed from _stored, so
        # contains_key(popped.key) keeps returning True after the pop --
        # TODO confirm whether that is intended.
        self._size -= 1
        return popped.value

    def _consolidate(self):
        """Merge roots of equal degree and re-locate the top node."""
        roots = []
        root = self._next
        _min = root
        while True:  # find the nodes in the list
            roots.append(root)
            root = root.right
            if root == self._next:
                break
        degrees = []
        for root in roots:
            if self._comparator_f2(root.key, _min.key):
                _min = root
            # check if we need to merge
            if not self._get_by_index(degrees, root.degree):
                self._set_entry_by_index(degrees, root.degree, root)
            else:
                # there is another node(s) with the same degree,
                # we'll try to consolidate them
                degree = root.degree
                while not (self._get_by_index(degrees, degree) in [self.MAX_MIN, None]):
                    other_root_with_degree = degrees[degree]
                    if self._comparator_f2(root.key, other_root_with_degree.key):
                        # determine which node is the parent, which one is the
                        # child
                        smaller, larger = root, other_root_with_degree
                    else:
                        smaller, larger = other_root_with_degree, root
                    self._link_nodes(larger, smaller)
                    degrees[degree] = self.MAX_MIN
                    root = smaller
                    degree += 1
                self._set_entry_by_index(degrees, degree, root)
            # make sure duplicate keys in the right order
            if _min.key == root.key:
                _min = root
        self._next = _min

    @staticmethod
    def _link_nodes(child, parent) -> None:
        """make node a child of a parent"""
        # link the child's siblings
        child.left.right = child.right
        child.right.left = child.left
        child.parent = parent
        # if parent doesn't have children, make new child its only child
        if not parent.child:
            parent.child = child.right = child.left = child
        # otherwise insert new child into parent's children list
        else:
            current_child = parent.child
            child.left = current_child
            child.right = current_child.right
            current_child.right.left = child
            current_child.right = child
        parent.degree += 1
        child.marked = False
| 29.982222 | 88 | 0.546991 | import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from ..utils import test_iterable
class _HeapNode:
def __init__(self, key: Any, value: Any):
self.key = key
self.value = value
self.degree = 0
self.marked = False
self.right = self
self.left = self
self.parent = None
self.child = None
def is_marked(self) -> bool:
return self.marked is True
class Heap:
    """Fibonacci-style heap ordered by a user-supplied binary comparator.

    ``comparator_f2(a, b)`` must return True when key ``a`` should sit
    closer to the top than key ``b`` (e.g. ``operator.lt`` yields a
    min-heap).  Entries are (key, value) pairs; duplicate keys are allowed.
    """

    # Sentinel marking an empty slot in the degree table used during
    # consolidation.
    MAX_MIN = -sys.maxsize

    def __init__(self, comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()):
        """Create a heap; each item of ``xs`` is pushed as both key and value."""
        test_iterable(xs)
        self._size = 0
        self._comparator_f2 = comparator_f2
        self._next = None   # root-list node currently winning the comparator
        self._stored = {}   # key -> list of _HeapNode objects pushed with it
        for x in xs:
            self.push(x, x)

    @staticmethod
    def _get_by_index(array, index) -> Union[None, Any]:
        """Return ``array[index]``, or None when ``index`` is out of range."""
        try:
            return array[index]
        except IndexError:
            return None

    @classmethod
    def _set_entry_by_index(cls, array, index, value):
        """Store ``value`` at ``index``, growing ``array`` with sentinels.

        NOTE(review): when ``array[index]`` already holds a real node, the
        extend below is a no-op (non-positive count) and this recurses
        forever; callers appear to reset slots to MAX_MIN first -- TODO
        confirm.
        """
        if cls._get_by_index(array, index) == cls.MAX_MIN:
            array[index] = value
            return array
        else:
            array.extend([cls.MAX_MIN] * (index - len(array) + 1))
            return cls._set_entry_by_index(array, index, value)

    @property
    def size(self):
        """Number of entries currently stored."""
        return self._size

    @property
    def next(self) -> Union[Any, None]:
        """Value of the top entry, or None when empty (does not pop)."""
        if self._next:
            return self._next.value
        return None

    @property
    def next_key(self) -> Union[Any, None]:
        """Key of the top entry, or None when empty (does not pop)."""
        if self._next:
            return self._next.key
        return None

    @property
    def is_empty(self) -> bool:
        """True when the heap holds no entries."""
        return self._next is None

    def clear(self) -> 'Heap':
        """Remove every entry and return self for chaining."""
        self._next = None
        self._size = 0
        self._stored = {}
        return self

    def contains_key(self, key) -> bool:
        """Return True when at least one node was pushed under ``key``."""
        # Single lookup; the stored value is a (possibly empty) list.
        return bool(self._stored.get(key))

    def push(self, key: Any, value: Any) -> Any:
        """Insert ``value`` under ``key`` and return ``value``.

        Raises:
            RuntimeError: when ``key`` is None.
        """
        if key is None:
            raise RuntimeError('Could not process heap keys equal to Null.')
        node = _HeapNode(key, value)
        if self._next:
            # Splice the new node into the circular root list just left of
            # the current top; promote it when its key wins the comparator.
            node.right = self._next
            node.left = self._next.left
            node.left.right = node
            self._next.left = node
            if self._comparator_f2(key, self._next.key):
                self._next = node
        else:
            self._next = node
        self._size += 1
        # (A previous revision walked the whole root list here to no
        # effect; that dead O(n) traversal has been removed.)
        self._stored.setdefault(key, []).append(node)
        return value

    def pop(self) -> Any:
        """Remove and return the top value, or None when the heap is empty."""
        if not self._next:
            return None
        popped = self._next
        if self._size == 1:
            self.clear()
            return popped.value
        # Merge the popped node's children into the root list.
        if self._next.child:
            self._next.child.parent = None
            sibling = self._next.child.right
            while not sibling == self._next.child:
                sibling.parent = None
                sibling = sibling.right
            # Merge children into root.
            # If next is a singular root node,
            # make its child pointer the next node
            if self._next.right == self._next:
                self._next = self._next.child
            else:
                next_left, next_right = self._next.left, self._next.right
                current_child = self._next.child
                self._next.right.left = current_child
                self._next.left.right = current_child.right
                current_child.right.left = next_left
                current_child.right = next_right
                self._next = self._next.right
        else:
            # Childless top node: simply unlink it from the root list.
            self._next.left.right = self._next.right
            self._next.right.left = self._next.left
            self._next = self._next.right
        self._consolidate()
        if not self._stored.get(popped.key, None):
            raise RuntimeError("Could not delete a heap entry.")
        # NOTE(review): the popped node is never removed from _stored, so
        # contains_key(popped.key) keeps returning True after the pop --
        # TODO confirm whether that is intended.
        self._size -= 1
        return popped.value

    def _consolidate(self):
        """Merge roots of equal degree and re-locate the top node."""
        roots = []
        root = self._next
        _min = root
        while True:  # find the nodes in the list
            roots.append(root)
            root = root.right
            if root == self._next:
                break
        degrees = []
        for root in roots:
            if self._comparator_f2(root.key, _min.key):
                _min = root
            # check if we need to merge
            if not self._get_by_index(degrees, root.degree):
                self._set_entry_by_index(degrees, root.degree, root)
            else:
                # there is another node(s) with the same degree,
                # we'll try to consolidate them
                degree = root.degree
                while not (self._get_by_index(degrees, degree) in [self.MAX_MIN, None]):
                    other_root_with_degree = degrees[degree]
                    if self._comparator_f2(root.key, other_root_with_degree.key):
                        # determine which node is the parent, which one is the
                        # child
                        smaller, larger = root, other_root_with_degree
                    else:
                        smaller, larger = other_root_with_degree, root
                    self._link_nodes(larger, smaller)
                    degrees[degree] = self.MAX_MIN
                    root = smaller
                    degree += 1
                self._set_entry_by_index(degrees, degree, root)
            # make sure duplicate keys in the right order
            if _min.key == root.key:
                _min = root
        self._next = _min

    @staticmethod
    def _link_nodes(child, parent) -> None:
        """Make ``child`` a child of ``parent`` in the heap forest."""
        # Unlink the child from its sibling ring.
        child.left.right = child.right
        child.right.left = child.left
        child.parent = parent
        # If the parent has no children the child forms a singleton ring;
        # otherwise splice it into the existing children ring.
        if not parent.child:
            parent.child = child.right = child.left = child
        else:
            current_child = parent.child
            child.left = current_child
            child.right = current_child.right
            current_child.right.left = child
            current_child.right = child
        parent.degree += 1
        child.marked = False
| true | true |
f72a870c46c2c96ce5f26f8f2ab22a280b9f3442 | 9,272 | py | Python | test-features.py | MartyLake/KLTypeList | 76529f9b494b4474e334958544de99253f878b6c | [
"MIT"
] | null | null | null | test-features.py | MartyLake/KLTypeList | 76529f9b494b4474e334958544de99253f878b6c | [
"MIT"
] | null | null | null | test-features.py | MartyLake/KLTypeList | 76529f9b494b4474e334958544de99253f878b6c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014 Alain Martin
import argparse
import ast
import os
import re
import subprocess
import sys
import tempfile
FEATURE_EXT = '.feature'
REPO_ROOT = os.path.dirname(os.path.abspath(__file__))
def compiler_arg_choices():
    """List the available compiler settings files under compilers/."""
    compilers_dir = os.path.join(REPO_ROOT, 'compilers')
    choices = []
    for entry in os.listdir(compilers_dir):
        choices.append(os.path.basename(entry))
    return choices
def parse_args():
    """Parse command-line arguments for the feature-test runner."""
    def existing_dir_or_file(path):
        # argparse "type" hook: accept only paths that exist on disk.
        if not os.path.exists(path):
            raise argparse.ArgumentTypeError(
                'No such file or directory %s' % os.path.abspath(path))
        return path

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--compiler',
                        required=True,
                        choices=compiler_arg_choices())
    parser.add_argument('input_path',
                        type=existing_dir_or_file,
                        nargs='?',
                        default=os.path.curdir)
    return parser.parse_args(sys.argv[1:])
class Compiler(object):
    """A compiler executable plus its options and environment additions."""

    @staticmethod
    def from_file(compiler_file_path):
        """Build a Compiler from a settings file holding a Python literal dict."""
        with open(compiler_file_path, 'r') as settings_file:
            settings = ast.literal_eval(settings_file.read())
        return Compiler(
            settings['exe'], settings['options'], settings['env'])

    def __init__(self, exe, options, env):
        self.exe = exe
        self.options = options
        self.env = env

    def call_env(self):
        """Return a copy of os.environ with self.env paths appended."""
        merged = os.environ.copy()
        for name in self.env:
            if name not in merged:
                merged[name] = ''
            for entry in self.env[name]:
                merged[name] += os.pathsep + entry
        return merged

    def compile(self, source_file_path):
        """Compile one source file; return (return_code, combined output)."""
        command = [self.exe, source_file_path] + self.options
        environment = self.call_env()
        return_code = 0
        output = ''
        try:
            output = subprocess.check_output(
                command, env=environment, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            return_code = error.returncode
            output = error.output
        return return_code, output
class Status(object):
    # Outcome labels for a single feature test run.
    ERROR = 'ERROR'    # declaration could not be parsed, matched, or compiled
    PASSED = 'PASSED'
    FAILED = 'FAILED'
def get_return_type(result):
    """Classify a feature-test result literal into its declared return type.

    Returns None for strings that match no known category.
    """
    if result is None:
        # A missing result means the test expects compilation to fail.
        return 'DNC'
    checks = (
        ('Boolean', lambda r: r in ('true', 'false')),
        ('Integer', lambda r: r.isdigit()),
        ('TypeList', lambda r: r.startswith('TypeList<')),
        ('Type', lambda r: len(r) == 1),
    )
    for label, matches in checks:
        if matches(result):
            return label
class Feature(object):
    """One feature declaration parsed from a .feature file header line."""

    def __init__(self, line, name, has_arguments, return_type):
        self.line = line
        self.name = name
        # `has_arguments` arrives as the raw regex capture (string or None).
        self.has_arguments = has_arguments is not None
        self.return_type = return_type

    @staticmethod
    def from_declaration(line):
        """Parse 'Name<args> -> ReturnType'; return a Feature or None."""
        feature_declaration_regex = re.compile(r'^(.+?)(?:<(.*)>)? -> (.+)$')
        match = feature_declaration_regex.search(line)
        if match:
            name, has_arguments, return_type = match.groups()
            return Feature(line, name, has_arguments, return_type)

    def run_test(self, feature_test, compiler):
        """Check the test matches this declaration, then run it; return a Status."""
        # A test is an ERROR when its name, argument presence, or result
        # type disagrees with this feature's declaration.
        if (self.name != feature_test.feature_name
                or self.has_arguments != (feature_test.arguments is not None)
                or (self.return_type != get_return_type(feature_test.result)
                    and feature_test.result is not None)):
            print '[ %-6s ] %s\ndoes not match %s' % (
                'ERROR', feature_test.line, self.line)
            return Status.ERROR
        return feature_test.run(self, compiler)
test_code_skeleton = '''
#include "KL/TypeList.hpp"
#include <type_traits>
using A = {A};
using B = {B};
using C = {C};
using namespace KL;
class Test{feature_name}
{{
void test()
{{
{result_type} Result = TypeList<{pack}>::{feature_name}{arguments};
static_assert({assertion}, "!");
}}
}};
'''
def get_result_type(return_type):
    """Map a feature return type to the C++ declaration keyword for tests."""
    cpp_decl = {
        'Boolean': 'const auto',
        'Integer': 'const auto',
        'Type': 'using',
        'TypeList': 'using',
    }
    # Unknown types (e.g. 'DNC') map to None, as before.
    return cpp_decl.get(return_type)
def get_assertion(return_type, result):
    """Build the static_assert condition string for a feature test."""
    if result is None:
        # Does-not-compile tests only need a placeholder that always holds.
        return 'true'
    if return_type in ('Boolean', 'Integer'):
        return '{} == Result'.format(result)
    if return_type in ('TypeList', 'Type'):
        return 'std::is_same<{}, Result>::value'.format(result)
class FeatureTest(object):
    """One test line from a .feature file, compiled against the skeleton."""

    def __init__(self, line, feature_name, pack, arguments, result):
        self.line = line
        self.feature_name = feature_name
        self.pack = pack
        # `arguments` and `result` are raw regex captures; None means the
        # declaration had no <args> / expects a compile failure.
        self.arguments = arguments
        self.result = result

    @staticmethod
    def from_declaration(line):
        """Parse 'TypeList<pack>::Name<args> == result' or '... NOT COMPILE'."""
        feature_test_declaration_regex = re.compile(
            r'^TypeList<(.*)>::(.+?)(?:<(.*)>)?'
            r' (?:NOT COMPILE|== (.+))$')
        match = feature_test_declaration_regex.search(line)
        if match:
            pack, feature_name, arguments, result = match.groups()
            return FeatureTest(line, feature_name, pack, arguments, result)

    def run(self, feature, compiler):
        """Compile the generated test source; return a Status value."""
        # Build the expression suffix: template arguments, plus ::value for
        # value-producing (Boolean/Integer) features.
        arguments = ''
        if feature.has_arguments:
            arguments += '<' + self.arguments + '>'
        if feature.return_type in ('Boolean', 'Integer'):
            arguments += '::value'
        test_code = test_code_skeleton.format(
            feature_name=feature.name,
            result_type=get_result_type(feature.return_type),
            pack=self.pack,
            arguments=arguments,
            assertion=get_assertion(feature.return_type, self.result),
            A='void',
            B='bool',
            C='char',
        )
        # Write the generated source to a temp .cpp file, compile it, and
        # always clean the file up afterwards.
        temp_file_descriptor = None
        temp_file_path = None
        temp_file = None
        return_code = None
        try:
            temp_file_descriptor, temp_file_path = tempfile.mkstemp(
                suffix='.cpp')
            temp_file = os.fdopen(temp_file_descriptor, 'w')
            temp_file.write(test_code)
            temp_file.close()
            return_code, output = compiler.compile(temp_file_path)
        finally:
            if temp_file:
                temp_file.close()
            elif temp_file_descriptor:
                os.close(temp_file_descriptor)
            if temp_file_path:
                os.remove(temp_file_path)
        if return_code is not None:
            # PASS when compilation success matches the expectation: tests
            # with a result must compile, NOT COMPILE tests must fail.
            if (return_code == 0) == (self.result is not None):
                print '[ %-6s ] %s' % ('PASS', self.line)
                return Status.PASSED
            else:
                print '[ %-6s ] %s' % ('FAIL!', self.line)
                print output
                return Status.FAILED
        return Status.ERROR
def test_feature_file(feature_file_path, compiler):
    """Run every test in one .feature file; return the list of Status values."""
    feature = None
    status = []
    with open(feature_file_path, 'r') as feature_file:
        for line in feature_file:
            if not line.isspace():
                line = line.rstrip()
                if not feature:
                    # The first non-blank line declares the feature itself.
                    feature = Feature.from_declaration(line)
                    if feature:
                        print '[--------] %s' % feature.line
                    else:
                        print 'Failed to parse feature "%s" in %s' % (
                            line, feature_file_path)
                        return [Status.ERROR]
                else:
                    # Every following non-blank line is a test declaration.
                    test = FeatureTest.from_declaration(line)
                    if test:
                        status.append(feature.run_test(test, compiler))
                    else:
                        print 'Failed to parse feature test "%s" in %s' % (
                            line, feature_file_path)
                        status.append(Status.ERROR)
    print ('[--------] %s passed' % status.count(Status.PASSED)
           + ', %s failed' % status.count(Status.FAILED)
           + ', %s errored\n' % status.count(Status.ERROR))
    return status
def find_feature_files(path):
    """Yield every *.feature file at or below ``path``."""
    def _is_feature(candidate):
        return os.path.splitext(candidate)[1] == FEATURE_EXT

    if os.path.isfile(path):
        # A direct file argument is yielded only if it is a .feature file.
        if _is_feature(path):
            yield path
        return
    for directory, _, names in os.walk(path):
        for name in names:
            candidate = os.path.join(directory, name)
            if _is_feature(candidate):
                yield candidate
def test_features(compiler, input_path):
    """Run every feature file under ``input_path`` with the named compiler.

    Returns 1 on any ERROR, otherwise the number of failed tests, so the
    value doubles as a process exit status.
    """
    compiler_file_path = os.path.join(REPO_ROOT, 'compilers', compiler)
    compiler = Compiler.from_file(compiler_file_path)
    feature_files = find_feature_files(input_path)
    status = []
    for feature_file_path in feature_files:
        status += test_feature_file(feature_file_path, compiler)
    # 's'[flag:] is a pluralization trick: empty string when the count is 1.
    print '[ TOTAL ] %s error%s, %s failed test%s, %s passed test%s' % (
        status.count(Status.ERROR), 's'[status.count(Status.ERROR) == 1:],
        status.count(Status.FAILED), 's'[status.count(Status.FAILED) == 1:],
        status.count(Status.PASSED), 's'[status.count(Status.PASSED) == 1:])
    return 1 if Status.ERROR in status else status.count(Status.FAILED)
if __name__ == '__main__':
    # Exit code: 1 on any parse/run ERROR, else the number of failed tests.
    sys.exit(test_features(**vars(parse_args())))
| 30.006472 | 77 | 0.574418 |
import argparse
import ast
import os
import re
import subprocess
import sys
import tempfile
FEATURE_EXT = '.feature'
REPO_ROOT = os.path.dirname(os.path.abspath(__file__))
def compiler_arg_choices():
compilers_dir = os.path.join(REPO_ROOT, 'compilers')
return [os.path.basename(file_name)
for file_name in os.listdir(compilers_dir)]
def parse_args():
def existing_dir_or_file(path):
if not os.path.exists(path):
message = 'No such file or directory %s' % os.path.abspath(path)
raise argparse.ArgumentTypeError(message)
return path
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', '--compiler',
required=True,
choices=compiler_arg_choices())
arg_parser.add_argument('input_path',
type=existing_dir_or_file,
nargs='?',
default=os.path.curdir)
return arg_parser.parse_args(sys.argv[1:])
class Compiler(object):
@staticmethod
def from_file(compiler_file_path):
with open(compiler_file_path, 'r') as compiler_file:
settings = ast.literal_eval(compiler_file.read())
return Compiler(
settings['exe'], settings['options'], settings['env'])
def __init__(self, exe, options, env):
self.exe = exe
self.options = options
self.env = env
def call_env(self):
call_env = os.environ.copy()
for key in self.env:
if key not in call_env:
call_env[key] = ''
for path in self.env[key]:
call_env[key] += os.pathsep + path
return call_env
def compile(self, source_file_path):
compiler_cmd = [self.exe, source_file_path] + self.options
call_env = self.call_env()
return_code = 0
output = ''
try:
output = subprocess.check_output(
compiler_cmd, env=call_env, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
return_code = error.returncode
output = error.output
return return_code, output
class Status(object):
ERROR = 'ERROR'
PASSED = 'PASSED'
FAILED = 'FAILED'
def get_return_type(result):
if result is None:
return 'DNC'
if result in ('true', 'false'):
return 'Boolean'
if result.isdigit():
return 'Integer'
if result.startswith('TypeList<'):
return 'TypeList'
if len(result) == 1:
return 'Type'
class Feature(object):
def __init__(self, line, name, has_arguments, return_type):
self.line = line
self.name = name
self.has_arguments = has_arguments is not None
self.return_type = return_type
@staticmethod
def from_declaration(line):
feature_declaration_regex = re.compile(r'^(.+?)(?:<(.*)>)? -> (.+)$')
match = feature_declaration_regex.search(line)
if match:
name, has_arguments, return_type = match.groups()
return Feature(line, name, has_arguments, return_type)
def run_test(self, feature_test, compiler):
if (self.name != feature_test.feature_name
or self.has_arguments != (feature_test.arguments is not None)
or (self.return_type != get_return_type(feature_test.result)
and feature_test.result is not None)):
print '[ %-6s ] %s\ndoes not match %s' % (
'ERROR', feature_test.line, self.line)
return Status.ERROR
return feature_test.run(self, compiler)
test_code_skeleton = '''
#include "KL/TypeList.hpp"
#include <type_traits>
using A = {A};
using B = {B};
using C = {C};
using namespace KL;
class Test{feature_name}
{{
void test()
{{
{result_type} Result = TypeList<{pack}>::{feature_name}{arguments};
static_assert({assertion}, "!");
}}
}};
'''
def get_result_type(return_type):
if return_type in ('Boolean', 'Integer'):
return 'const auto'
if return_type in ('Type', 'TypeList'):
return 'using'
def get_assertion(return_type, result):
if result is None:
return 'true'
if return_type in ('Boolean', 'Integer'):
return '%s == Result' % result
if return_type in ('TypeList', 'Type'):
return 'std::is_same<%s, Result>::value' % result
class FeatureTest(object):
def __init__(self, line, feature_name, pack, arguments, result):
self.line = line
self.feature_name = feature_name
self.pack = pack
self.arguments = arguments
self.result = result
@staticmethod
def from_declaration(line):
feature_test_declaration_regex = re.compile(
r'^TypeList<(.*)>::(.+?)(?:<(.*)>)?'
r' (?:NOT COMPILE|== (.+))$')
match = feature_test_declaration_regex.search(line)
if match:
pack, feature_name, arguments, result = match.groups()
return FeatureTest(line, feature_name, pack, arguments, result)
def run(self, feature, compiler):
arguments = ''
if feature.has_arguments:
arguments += '<' + self.arguments + '>'
if feature.return_type in ('Boolean', 'Integer'):
arguments += '::value'
test_code = test_code_skeleton.format(
feature_name=feature.name,
result_type=get_result_type(feature.return_type),
pack=self.pack,
arguments=arguments,
assertion=get_assertion(feature.return_type, self.result),
A='void',
B='bool',
C='char',
)
temp_file_descriptor = None
temp_file_path = None
temp_file = None
return_code = None
try:
temp_file_descriptor, temp_file_path = tempfile.mkstemp(
suffix='.cpp')
temp_file = os.fdopen(temp_file_descriptor, 'w')
temp_file.write(test_code)
temp_file.close()
return_code, output = compiler.compile(temp_file_path)
finally:
if temp_file:
temp_file.close()
elif temp_file_descriptor:
os.close(temp_file_descriptor)
if temp_file_path:
os.remove(temp_file_path)
if return_code is not None:
if (return_code == 0) == (self.result is not None):
print '[ %-6s ] %s' % ('PASS', self.line)
return Status.PASSED
else:
print '[ %-6s ] %s' % ('FAIL!', self.line)
print output
return Status.FAILED
return Status.ERROR
def test_feature_file(feature_file_path, compiler):
feature = None
status = []
with open(feature_file_path, 'r') as feature_file:
for line in feature_file:
if not line.isspace():
line = line.rstrip()
if not feature:
feature = Feature.from_declaration(line)
if feature:
print '[--------] %s' % feature.line
else:
print 'Failed to parse feature "%s" in %s' % (
line, feature_file_path)
return [Status.ERROR]
else:
test = FeatureTest.from_declaration(line)
if test:
status.append(feature.run_test(test, compiler))
else:
print 'Failed to parse feature test "%s" in %s' % (
line, feature_file_path)
status.append(Status.ERROR)
print ('[--------] %s passed' % status.count(Status.PASSED)
+ ', %s failed' % status.count(Status.FAILED)
+ ', %s errored\n' % status.count(Status.ERROR))
return status
def find_feature_files(path):
if os.path.isfile(path) and os.path.splitext(path)[1] == FEATURE_EXT:
yield path
return
for root, _, file_names in os.walk(path):
for file_name in file_names:
file_path = os.path.join(root, file_name)
if os.path.splitext(file_path)[1] == FEATURE_EXT:
yield file_path
def test_features(compiler, input_path):
compiler_file_path = os.path.join(REPO_ROOT, 'compilers', compiler)
compiler = Compiler.from_file(compiler_file_path)
feature_files = find_feature_files(input_path)
status = []
for feature_file_path in feature_files:
status += test_feature_file(feature_file_path, compiler)
print '[ TOTAL ] %s error%s, %s failed test%s, %s passed test%s' % (
status.count(Status.ERROR), 's'[status.count(Status.ERROR) == 1:],
status.count(Status.FAILED), 's'[status.count(Status.FAILED) == 1:],
status.count(Status.PASSED), 's'[status.count(Status.PASSED) == 1:])
return 1 if Status.ERROR in status else status.count(Status.FAILED)
if __name__ == '__main__':
sys.exit(test_features(**vars(parse_args())))
| false | true |
f72a885aba97b22f715ae7119075a6a258e3e0a9 | 3,885 | py | Python | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
# %%
def _pick(L, ty, path):
    """Load the images from `path` whose filenames carry the prefix `ty`.

    Filenames are expected to look like '<ty>_...'.
    """
    images = []
    for name in L:
        if name.split('_')[0] == ty:
            images.append(cv2.imread(os.path.join(path, name)))
    return images
def _gray(img):
    """Convert a BGR image to single-channel grayscale."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray
def _Pos(img, idx):
    """Show `img` and let the user click two points; save them to ./npy/loc_<idx>.npy."""
    def on_press(event):
        # Record the clicked coordinate as an integer (x, y) pair.
        L.append(np.array([int(event.xdata), int(event.ydata)]))
        if len(L)>=2:
            # Close the window once two points have been selected.
            plt.close()
        # Persist the recorded points.
        np.save('./npy/loc_' + idx + '.npy', np.array(L))
    fig = plt.figure()
    plt.imshow(img, animated= True)
    L = []
    # Use an interactive figure so the user can click the target points.
    fig.canvas.mpl_connect('button_press_event', on_press)
    plt.show()
def _PlotPos(img, idx):
    """Draw the two points saved by _Pos (./npy/loc_<idx>.npy) on a copy of `img`."""
    img_c = np.copy(img)
    # Load the coordinates selected earlier.
    src = np.load('./npy/loc_' + idx + '.npy')
    print('Choose point 1: ({}, {})'.format(src[0, 0], src[0, 1]))
    print('Choose point 2: ({}, {})'.format(src[1, 0], src[1, 1]))
    # Mark both points with small filled circles (BGR color).
    cv2.circle(img_c, (src[0, 0], src[0, 1]), 3, (0, 38, 255), -1)
    cv2.circle(img_c, (src[1, 0], src[1, 1]), 3, (0, 38, 255), -1)
    return img_c
# def _flow(pre_img, nxt_img, pt_x, pt_y, param, init_flow=None):
# XL, YL = [0], [0]
# PX, PY = [pt_x], [pt_y]
# flow = init_flow
# ep = 1000
# i=0
# while ep>1e-2:
# if i==0:
# fg = 0
# else:
# fg = cv2.OPTFLOW_USE_INITIAL_FLOW
# flow = cv2.calcOpticalFlowFarneback(pre_img, nxt_img, flow=flow, flags=fg, **param)
# XL.append(flow[pt_y, pt_x, 0])
# YL.append(flow[pt_y, pt_x, 1])
# PX.append(int(pt_x + flow[pt_y, pt_x, 0]))
# PY.append(int(pt_y + flow[pt_y, pt_x, 1]))
# print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
# print('x = {:.4f}, y = {:.4f}'.format(PX[i], PY[i]))
# print('======================')
# i+=1
# if i>0:
# ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
# return PX, PY
def _LKflow(pre_img, nxt_img, pt_x, pt_y, lk_params):
    """Iteratively track one point from `pre_img` to `nxt_img` with pyramidal LK.

    Repeatedly calls cv2.calcOpticalFlowPyrLK, reusing the previous estimate
    (OPTFLOW_USE_INITIAL_FLOW) from the second iteration onward, until the
    displacement change between consecutive iterations drops below 0.01.
    Returns the lists of x and y positions produced at each iteration
    (index 0 is the starting point).
    """
    p0 = np.array([[pt_x, pt_y]]).astype(np.float32)
    i = 0
    PX, PY = [pt_x], [pt_y]
    XL, YL = [], []
    ep = 1e3
    # Initialize bookkeeping: positions, displacements, convergence measure.
    while ep>1e-2:
        if i==0:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, None, **lk_params)
        else:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, p1, flags=cv2.OPTFLOW_USE_INITIAL_FLOW, **lk_params)
        # Compute the tracked position for this iteration.
        PX.append(p1[0][0])
        PY.append(p1[0][1])
        XL.append(PX[i] - PX[i+1])
        YL.append(PY[i] - PY[i+1])
        # Record the output position and the displacement vector.
        if i>0:
            ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
            # Difference against the previous iteration's displacement;
            # the loop stops once this difference is below 0.01.
        print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
        print('x = {:.4f}, y = {:.4f}'.format(PX[i+1], PY[i+1]))
        print('======================')
        i+=1
    return PX, PY
def _plot(img, PX, PY):
    """Draw the optical-flow iteration track on `img` (in place) and return it.

    Consecutive per-iteration positions are joined with line segments, then
    every position is marked with a filled circle: the start point in
    BGR (0, 38, 255), the final point in BGR (182, 255, 0), and all
    intermediate points in blue (255, 0, 0).
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` gives the same
    # (platform default) integer dtype that `np.int` aliased.
    PX = np.array(PX).astype(int)
    PY = np.array(PY).astype(int)
    for j in range(len(PX)):
        if j != 0:
            cv2.line(img, (PX[j-1], PY[j-1]), (PX[j], PY[j]), (250, 5, 216), 2)
    for k in range(len(PX)):
        if k == 0:
            c = (0, 38, 255)      # starting point
        elif k == len(PX) - 1:
            c = (182, 255, 0)     # final (converged) point
        else:
            c = (255, 0, 0)       # intermediate iterations
        cv2.circle(img, (PX[k], PY[k]), 3, c, -1)
    return img
# param = dict(pyr_scale=0.8,
# levels=25,
# iterations=1,
# winsize=5,
# poly_n=5,
# poly_sigma=1.1)
lk_params = dict(winSize = (15, 15),
maxLevel = 3,
criteria = (cv2.TERM_CRITERIA_COUNT, 1, 0.03)) | 32.107438 | 122 | 0.488288 | import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
def _pick(L, ty, path):
    """Load the images in `path` whose filename prefix (before '_') equals `ty`."""
    L_ = [cv2.imread(os.path.join(path, i)) for i in L if i.split('_')[0]==ty]
    return L_
def _gray(img):
    """Convert a BGR image to single-channel grayscale."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def _Pos(img, idx):
    """Interactively pick two points on `img`; save them to ./npy/loc_<idx>.npy."""
    def on_press(event):
        # Record each clicked pixel coordinate.
        L.append(np.array([int(event.xdata), int(event.ydata)]))
        if len(L)>=2:
            # After the second click: close the window and persist the points.
            plt.close()
            np.save('./npy/loc_' + idx + '.npy', np.array(L))
    fig = plt.figure()
    plt.imshow(img, animated= True)
    L = []
    fig.canvas.mpl_connect('button_press_event', on_press)
    plt.show()
def _PlotPos(img, idx):
    """Overlay the two saved click positions on a copy of `img` and return it."""
    img_c = np.copy(img)
    # Coordinates previously recorded by _Pos.
    src = np.load('./npy/loc_' + idx + '.npy')
    print('Choose point 1: ({}, {})'.format(src[0, 0], src[0, 1]))
    print('Choose point 2: ({}, {})'.format(src[1, 0], src[1, 1]))
    cv2.circle(img_c, (src[0, 0], src[0, 1]), 3, (0, 38, 255), -1)
    cv2.circle(img_c, (src[1, 0], src[1, 1]), 3, (0, 38, 255), -1)
    return img_c
def _LKflow(pre_img, nxt_img, pt_x, pt_y, lk_params):
    """Iteratively track one point with pyramidal Lucas-Kanade optical flow.

    Reuses the previous estimate (OPTFLOW_USE_INITIAL_FLOW) from the second
    iteration on; stops when the displacement change between consecutive
    iterations is below 0.01. Returns the per-iteration x and y positions.
    """
    p0 = np.array([[pt_x, pt_y]]).astype(np.float32)
    i = 0
    PX, PY = [pt_x], [pt_y]
    XL, YL = [], []
    ep = 1e3
    while ep>1e-2:
        if i==0:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, None, **lk_params)
        else:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, p1, flags=cv2.OPTFLOW_USE_INITIAL_FLOW, **lk_params)
        PX.append(p1[0][0])
        PY.append(p1[0][1])
        # Displacement of this iteration relative to the previous position.
        XL.append(PX[i] - PX[i+1])
        YL.append(PY[i] - PY[i+1])
        if i>0:
            # Convergence measure: change in displacement between iterations.
            ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
        print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
        print('x = {:.4f}, y = {:.4f}'.format(PX[i+1], PY[i+1]))
        print('======================')
        i+=1
    return PX, PY
def _plot(img, PX, PY):
    """Draw the iteration track on `img` (in place) and return it.

    Joins consecutive positions with lines, then marks each one: start point
    in BGR (0, 38, 255), final point in BGR (182, 255, 0), others in blue.
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the dtype
    # that the alias referred to.
    PX = np.array(PX).astype(int)
    PY = np.array(PY).astype(int)
    for j in range(len(PX)):
        if j != 0:
            cv2.line(img, (PX[j-1], PY[j-1]), (PX[j], PY[j]), (250, 5, 216), 2)
    for k in range(len(PX)):
        if k == 0:
            c = (0, 38, 255)
        elif k == len(PX) - 1:
            c = (182, 255, 0)
        else:
            c = (255, 0, 0)
        cv2.circle(img, (PX[k], PY[k]), 3, c, -1)
    return img
lk_params = dict(winSize = (15, 15),
maxLevel = 3,
criteria = (cv2.TERM_CRITERIA_COUNT, 1, 0.03)) | true | true |
f72a8a9e767b0990ec36270d87dafab2e37e3e27 | 276,837 | py | Python | mindspore/ops/operations/nn_ops.py | Rossil2012/mindspore | 8a20b5d784b3fec6d32e058581ec56ec553a06a0 | [
"Apache-2.0"
] | 1 | 2021-04-23T06:35:18.000Z | 2021-04-23T06:35:18.000Z | mindspore/ops/operations/nn_ops.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/nn_ops.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for nn."""
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
    """
    Checks whether an argument is a positive int or tuple with 2 or 4(when allow_four is True) positive int elements.
    """

    def _raise_message():
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
                         f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")

    # Must be an int or a tuple before any normalization is attempted.
    validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    # Normalize to either a 2-tuple (h, w) or, when ret_four, a 4-tuple (1, 1, h, w).
    if isinstance(arg_value, int):
        normalized = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
    elif len(arg_value) == 2:
        normalized = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
    elif len(arg_value) == 4 and allow_four:
        normalized = arg_value if ret_four else (arg_value[2], arg_value[3])
    else:
        _raise_message()
    # Every element of the normalized tuple must be a positive int.
    if not all(isinstance(elem, int) and elem > 0 for elem in normalized):
        _raise_message()
    return normalized
class Flatten(PrimitiveWithInfer):
    r"""
    Flattens a tensor without changing its batch size on the 0-th axis.

    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
        the product of the remaining dimension.

    Examples:
        >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
        >>> flatten = P.Flatten()
        >>> output = flatten(input_tensor)
        >>> assert output.shape == (1, 24)
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x):
        # Keep the batch axis; collapse everything after it into one dimension.
        validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
        flattened = 1
        for dim in input_x[1:]:
            flattened *= dim
        return input_x[0], flattened

    def infer_dtype(self, input_x):
        # Flatten is dtype-preserving for any tensor input.
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class Softmax(PrimitiveWithInfer):
    r"""
    Softmax operation.

    Applies the Softmax operation to the input tensor on the specified axis.
    Suppose a slice in the given axis :math:`x` then for each element :math:`x_i`
    the Softmax function is shown as follows:

    .. math::
        \text{output}(x_i) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)},

    where :math:`N` is the length of the tensor.

    Args:
        axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.

    Inputs:
        - **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.

    Outputs:
        Tensor, with the same type and shape as the logits.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> softmax = P.Softmax()
        >>> softmax(input_x)
        [0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type("axis", axis, [int, tuple], self.name)
        # Normalize a scalar axis to a 1-tuple so `self.axis` is always iterable.
        if isinstance(axis, int):
            self.add_prim_attr('axis', (axis,))
        for item in self.axis:
            validator.check_value_type("item of axis", item, [int], self.name)

    def infer_shape(self, logits):
        validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
        rank = len(logits)
        # Each axis must be a valid (possibly negative) dimension index.
        for axis_v in self.axis:
            validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
        # Softmax is shape-preserving.
        return logits

    def infer_dtype(self, logits):
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class LogSoftmax(PrimitiveWithInfer):
    r"""
    Log Softmax activation function.

    Applies the Log Softmax function to the input tensor on the specified axis.
    Suppose a slice in the given axis :math:`x` then for each element :math:`x_i`
    the Log Softmax function is shown as follows:

    .. math::
        \text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),

    where :math:`N` is the length of the Tensor.

    Args:
        axis (int): The axis to do the Log softmax operation. Default: -1.

    Inputs:
        - **logits** (Tensor) - The input of Log Softmax, with float16 or float32 data type.

    Outputs:
        Tensor, with the same type and shape as the logits.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> log_softmax = P.LogSoftmax()
        >>> log_softmax(input_x)
        [-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        validator.check_value_type("axis", axis, [int], self.name)

    def infer_shape(self, logits):
        # `axis` must index an existing dimension; negative indices are allowed.
        validator.check_int_range('axis', self.axis, -len(logits), len(logits), Rel.INC_LEFT, self.name)
        # The operation is shape-preserving.
        return logits

    def infer_dtype(self, logits):
        # Only floating-point tensors are valid inputs.
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class Softplus(PrimitiveWithInfer):
    r"""
    Softplus activation function.

    Softplus is a smooth approximation to the ReLU function.
    The function is shown as follows:

    .. math::
        \text{output} = \log(1 + \exp(\text{input_x})),

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> softplus = P.Softplus()
        >>> softplus(input_x)
        [1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Softplus."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape equals input shape.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Restricted to floating-point inputs.
        validator.check_tensor_type_same({'input_x': x_dtype}, mstype.float_type, self.name)
        return x_dtype
class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation function.

    The function is shown as follows:

    .. math::
        \text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = P.Softsign()
        >>> softsign(input_x)
        [0. -0.5 0.6666667 0.9677419 -0.9677419]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Softsign."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Only float16 and float32 are supported.
        validator.check_tensor_type_same({'input_x': x_dtype}, [mstype.float16, mstype.float32], self.name)
        return x_dtype
class ReLU(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.

    It returns :math:`\max(x,\ 0)` element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> relu = P.ReLU()
        >>> result = relu(input_x)
        [[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLU."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Any numeric tensor type is accepted.
        validator.check_tensor_type_same({'input_x': x_dtype}, mstype.number_type, self.name)
        return x_dtype
class ReLU6(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) upper bounded by 6 of input tensor element-wise.

    It returns :math:`\min(\max(0,x), 6)` element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> relu6 = P.ReLU6()
        >>> result = relu6(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLU6."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Only float16 and float32 are supported.
        validator.check_tensor_type_same({'input_x': x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class ReLUV2(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.

    It returns :math:`\max(x,\ 0)` element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor should be a 4-D tensor.

    Outputs:
        - **output** (Tensor) - Has the same type and shape as the `input_x`.
        - **mask** (Tensor) - A tensor whose data type must be uint8.

    Examples:
        >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
        >>> relu_v2 = P.ReLUV2()
        >>> output = relu_v2(input_x)
        ([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],
         [[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLUV2."""
        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])

    def __infer__(self, input_x):
        input_shape = list(input_x['shape'])
        input_dtype = input_x['dtype']
        mask_shape = []
        if len(input_shape) != 4:
            raise ValueError("The `input_x` should be a 4-D tensor, "
                             f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
        # Build the bitmask shape: the channel dimension is packed in units of
        # 32 for 8-bit dtypes and 16 otherwise, with a matching trailing dim.
        # BUGFIX: the original test was `dtype == uint8 and dtype == int8`,
        # which is always False, so the 8-bit layout branch was unreachable;
        # a membership test is the intended check.
        for i in enumerate(input_shape):
            if i[0] == 1:
                if input_dtype in (mstype.uint8, mstype.int8):
                    mask_shape.append((input_shape[1] + 31) // 32)
                else:
                    mask_shape.append((input_shape[1] + 15) // 16)
            else:
                mask_shape.append(i[1])
        if input_dtype in (mstype.uint8, mstype.int8):
            mask_shape.append(4)
        else:
            mask_shape.append(2)

        output_shape = (input_x['shape'], mask_shape)
        validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
        mask_dtype = mstype.uint8
        output_dtype = (input_dtype, mask_dtype)

        return {'shape': output_shape,
                'dtype': output_dtype,
                'value': None}
class Elu(PrimitiveWithInfer):
    r"""
    Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.
    The data type of input tensor should be float.

    Args:
        alpha (float): The coefficient of negative factor whose type is float,
            only support '1.0' currently. Default: 1.0.

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> elu = P.Elu()
        >>> result = elu(input_x)
        Tensor([[-0.632  4.0   -0.999]
                [2.0    -0.993  9.0  ]], shape=(2, 3), dtype=mindspore.float32)
    """

    @prim_attr_register
    def __init__(self, alpha=1.0):
        """Initialize Elu."""
        validator.check_value_type("alpha", alpha, [float], self.name)
        # Only the default coefficient 1.0 is currently supported.
        validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Restricted to floating-point inputs.
        validator.check_tensor_type_same({'input_x': x_dtype}, mstype.float_type, self.name)
        return x_dtype
class HSwish(PrimitiveWithInfer):
    r"""
    Hard swish activation function.

    Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.

    Hard swish is defined as:

    .. math::
        \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Inputs:
        - **input_data** (Tensor) - The input of HSwish, data type should be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Examples:
        >>> hswish = P.HSwish()
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> result = hswish(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize HSwish."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Only float16 and float32 are supported.
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Sigmoid(PrimitiveWithInfer):
    r"""
    Sigmoid activation function.

    Computes Sigmoid of input element-wise. The Sigmoid function is defined as:

    .. math::
        \text{sigmoid}(x_i) = \frac{1}{1 + exp(-x_i)},

    where :math:`x_i` is the element of the input.

    Inputs:
        - **input_x** (Tensor) - The input of Sigmoid, data type should be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input_x.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> sigmoid = P.Sigmoid()
        >>> sigmoid(input_x)
        [0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Sigmoid."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Only float16 and float32 are supported.
        validator.check_tensor_type_same({"input_x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class HSigmoid(PrimitiveWithInfer):
    r"""
    Hard sigmoid activation function.

    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.

    Hard sigmoid is defined as:

    .. math::
        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Inputs:
        - **input_data** (Tensor) - The input of HSigmoid, data type should be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Examples:
        >>> hsigmoid = P.HSigmoid()
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> result = hsigmoid(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize HSigmoid."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_shape):
        # Element-wise op: shape is unchanged.
        return input_shape

    def infer_dtype(self, input_dtype):
        # Only float16 and float32 are supported.
        validator.check_tensor_type_same({"x": input_dtype}, (mstype.float16, mstype.float32), self.name)
        return input_dtype
class Tanh(PrimitiveWithInfer):
    r"""
    Tanh activation function.

    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:

    .. math::
        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},

    where :math:`x_i` is an element of the input Tensor.

    Inputs:
        - **input_x** (Tensor) - The input of Tanh.

    Outputs:
        Tensor, with the same type and shape as the input_x.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> tanh = P.Tanh()
        >>> tanh(input_x)
        [0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Tanh."""

    def infer_shape(self, x_shape):
        # Element-wise op: shape is unchanged.
        return x_shape

    def infer_dtype(self, x_dtype):
        # Any tensor input is accepted; the dtype passes through.
        validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
        return x_dtype
class FusedBatchNorm(Primitive):
    r"""
    FusedBatchNorm is a BatchNorm that moving mean and moving variance will be computed instead of being loaded.

    Batch Normalization is widely used in convolutional networks. This operation applies
    Batch Normalization over input to avoid internal covariate shift as described in the
    paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    feature using a mini-batch of data and the learned parameters which can be described
    in the following formula.

    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta

    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.

    Args:
        mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.
        momentum (float): The hyper parameter to compute moving average for running_mean and running_var
            (e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be [0, 1]. Default: 0.1.

    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.

    Outputs:
        Tuple of 5 Tensor, the normalized input and the updated parameters.

        - **output_x** (Tensor) - The same type and shape as the `input_x`.
        - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`.

    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> op = P.FusedBatchNorm()
        >>> output = op(input_x, scale, bias, mean, variance)
    """

    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        # mode must be one of {0, 1}; epsilon in (0, 1]; momentum in [0, 1].
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Marks this primitive as updating its Parameter inputs
        # (moving mean/variance) in place.
        self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
    r"""
    FusedBatchNormEx is an extension of FusedBatchNorm, FusedBatchNormEx has one more output(output reserve)
    than FusedBatchNorm, reserve will be used in backpropagation phase. FusedBatchNorm is a BatchNorm that
    moving mean and moving variance will be computed instead of being loaded.

    Batch Normalization is widely used in convolutional networks. This operation applies
    Batch Normalization over input to avoid internal covariate shift as described in the
    paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    feature using a mini-batch of data and the learned parameters which can be described
    in the following formula.

    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta

    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.

    Args:
        mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.
        momentum (float): The hyper parameter to compute moving average for running_mean and running_var
            (e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be [0, 1]. Default: 0.1.

    Inputs:
        - **input_x** (Tensor) - The input of FusedBatchNormEx, Tensor of shape :math:`(N, C)`,
          data type: float16 or float32.
        - **scale** (Tensor) - Parameter scale, same with gamma above-mentioned, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **bias** (Tensor) - Parameter bias, same with beta above-mentioned, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **mean** (Tensor) - mean value, Tensor of shape :math:`(C,)`, data type: float32.
        - **variance** (Tensor) - variance value, Tensor of shape :math:`(C,)`, data type: float32.

    Outputs:
        Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.

        - **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.
        - **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_moving_variance** (Tensor) - Updated variance value, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32.

    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> op = P.FusedBatchNormEx()
        >>> output = op(input_x, scale, bias, mean, variance)
    """
    # scale/bias/mean/variance are writable Parameters sharing one dtype T;
    # input_x has its own dtype T2 (may be float16 while parameters are float32).
    __mindspore_signature__ = (
        sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
        sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
        # mode must be one of {0, 1}; epsilon in (0, 1]; momentum in [0, 1].
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Marks this primitive as updating its Parameter inputs in place.
        self._update_parameter = True
        self.add_prim_attr('data_format', "NCHW")

    def infer_shape(self, input_x, scale, bias, mean, variance):
        # All per-channel parameters must be 1-D vectors matching input channel C.
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
        validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
        validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        # Output: normalized x plus five per-channel tensors shaped like `scale`.
        return (input_x, scale, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        # x may be float16 or float32; all parameters must be float32.
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        valid_types = [mstype.tensor_type(mstype.float32)]
        validator.check_type_same(args_moving, valid_types, self.name)
        return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
    """
    reduce sum at axis [0, 2, 3].

    Inputs:
        - **x** (Tensor)  - Tensor of shape :math:`(N, C)`.

    Outputs:
        - **sum** (Tensor) - Tensor of shape :math:`(C,)`.
        - **square_sum** (Tensor) - Tensor of shape :math:`(C,)`.

    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])

    def infer_shape(self, x_shape):
        # Input must be 4-D (NCHW); both outputs are per-channel vectors.
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        channel = x_shape[1]
        return ([channel], [channel])

    def infer_dtype(self, x_type):
        # Both outputs carry the input dtype.
        return (x_type, x_type)
class BNTrainingUpdate(PrimitiveWithInfer):
    """
    The primitive operator of the register and info descriptor in bn_training_update.

    Consumes the per-channel `sum`/`square_sum` produced by BNTrainingReduce
    together with the affine parameters and moving statistics, and emits the
    normalized output plus updated statistics.
    """
    @prim_attr_register
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        # NOTE(review): `isRef` is accepted but currently unused (check disabled below).
        #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')

    def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
        # Output `y` has the input shape; the four statistics outputs share
        # the per-channel shape of `variance`. (`sum`/`b` shadow builtins but
        # renaming would change the keyword interface.)
        return (x, variance, variance, variance, variance)

    def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
        # Dtypes mirror the shapes: y follows x, statistics follow variance.
        return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
    r"""
    Batch Normalization for input data and updated parameters.

    Batch Normalization is widely used in convolutional neural networks. This operation
    applies Batch Normalization over input to avoid internal covariate shift as described
    in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    features using a mini-batch of data and the learned parameters which can be described
    in the following formula,

    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta

    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.

    Args:
        is_training (bool): If `is_training` is True, `mean` and `variance` are computed during training.
            If `is_training` is False, they're loaded from checkpoint during inference. Default: False.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.

    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
        - **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
        - **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `mean`.

    Outputs:
        Tuple of 5 Tensor, the normalized inputs and the updated parameters.

        - **output_x** (Tensor) - The same type and shape as the input_x. The shape is :math:`(N, C)`.
        - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.
        - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.

    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> batch_norm = P.BatchNorm()
        >>> output = batch_norm(input_x, scale, bias, mean, variance)
    """

    @prim_attr_register
    def __init__(self, is_training=False, epsilon=1e-5):
        validator.check_value_type('is_training', is_training, (bool,), self.name)
        validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
                                outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])

    def infer_shape(self, input_x, scale, bias, mean, variance):
        # scale/bias must be per-channel vectors matching input channel C.
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        # In training mode mean/variance are computed, so their shapes are not
        # validated here; in inference they must match `scale`.
        if not self.is_training:
            validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
            validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
            validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        if self.is_training:
            # None is in the valid set — presumably to allow unset moving
            # statistics while training; verify against framework callers.
            valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
            validator.check_type_same(args_moving, valid_types, self.name)
        else:
            args_moving = {"mean": mean, "variance": variance}
            validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
        return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
    r"""
    2D convolution layer.

    Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
    where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
    :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:

    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,

    where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
    filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
    of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
    :math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
    :math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
    to split the input in the channel dimension.

    If the 'pad_mode' is set to be "valid", the output height and width will be
    :math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
    (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
    :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.

    The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_. More detailed introduction can be found here:
    http://cs231n.github.io/convolutional-networks/.

    Args:
        out_channel (int): The dimension of the output.
        kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 1.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
            top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
            padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
        stride (Union(int, tuple[int])): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.
        group (int): Split input into groups. Default: 1.

    Returns:
        Tensor, the value that applied 2D convolution.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
        - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is
          :math:`(C_{out}, C_{in}, K_1, K_2)`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
        >>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)
        >>> conv2d(input, weight)
    """
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 mode=1,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Normalize and register the convolution attributes; declare IO names."""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # stride/dilation are normalized to 4-tuples laid out for NCHW data.
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # A scalar pad applies equally to top, bottom, left and right.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)
    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Compute the NCHW output shape and register the resolved 'pad_list' attr."""
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        # Grouped convolution: each filter only sees C_in / group input channels.
        validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_h = w_shape[2]
        kernel_size_w = w_shape[3]
        stride_h = self.stride[2]
        stride_w = self.stride[3]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        if self.pad_mode == "valid":
            # No padding: only positions where the (dilated) kernel fully fits.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Output is ceil(input / stride); pad the deficit, extra to bottom/right.
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Explicit per-side padding from the constructor.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
        self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
        out_channel = self.out_channel
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape
    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """Check input/weight dtypes match; int8 inputs produce int32 outputs."""
        args = {'x': x_dtype, 'w': w_dtype}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        if x_dtype.element_type() == mstype.int8:
            # Quantized int8 convolution accumulates into int32.
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
    r"""
    Returns the depth-wise convolution value for the input.

    Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
    Given an input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` where :math:`N` is the batch size and a
    filter tensor with kernel size :math:`(ks_{h}, ks_{w})`, containing :math:`C_{in} * \text{channel_multiplier}`
    convolutional filters of depth 1; it applies different filters to each input channel (channel_multiplier channels
    for each input channel has the default value 1), then concatenates the results together. The output has
    :math:`\text{in_channels} * \text{channel_multiplier}` channels.

    Args:
        channel_multiplier (int): The multiplier for the original output convolution. Its value must be greater
            than 0.
        kernel_size (Union[int, tuple[int]]): The size of the convolution kernel.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 3.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union[int, tuple[int]]): The pad value to be filled. If `pad` is an integer, the paddings of
            top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding
            of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly. Default: 0.
        stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
            Default: 1.
        group (int): Splits input into groups. Default: 1.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
        - **weight** (Tensor) - Set the size of kernel as :math:`(K_1, K_2)`, then the shape is
          :math:`(K, C_{in}, K_1, K_2)`, `K` must be 1.

    Outputs:
        Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`.

    Examples:
        >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
        >>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
        >>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
        >>> output = depthwise_conv2d(input, weight)
        >>> output.shape == (10, 96, 30, 30)
    """
    @prim_attr_register
    def __init__(self,
                 channel_multiplier,
                 kernel_size,
                 mode=3,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Normalize and register the depthwise-conv attributes; declare IO names."""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
        # This op only supports square strides/dilations (same H and W value).
        if self.stride[0] != self.stride[1]:
            raise ValueError("The height and width of stride should be equal,"
                             f"but got height:{self.stride[0]}, width:{self.stride[1]}")
        self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
        if self.dilation[0] != self.dilation[1]:
            raise ValueError("The height and width of dilation should be equal,"
                             f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
        self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # A scalar pad applies equally to top, bottom, left and right.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
                                                          self.name)
        self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)
    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Compute the output shape; channels expand by channel_multiplier."""
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        # Weight layout is (1, C_in, kh, kw): one depth-1 filter per input channel.
        validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
        _, _, stride_h, stride_w = self.stride
        _, _, dilation_h, dilation_w = self.dilation
        if kernel_size_n != 1:
            raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
        if self.pad_mode == "valid":
            # No padding: only positions where the (dilated) kernel fully fits.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Output is ceil(input / stride); pad the deficit, extra to bottom/right.
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Explicit per-side padding from the constructor.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        self.add_prim_attr('pads', self.pad_list)
        out_channel = self.channel_multiplier * x_shape[1]
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape
    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """Check input/weight dtypes match; int8 inputs produce int32 outputs."""
        args = {'x': x_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        if x_dtype.element_type() == mstype.int8:
            # Quantized int8 convolution accumulates into int32.
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class _Pool(PrimitiveWithInfer):
    r"""
    Performs max/avg pooling operation.

    Base class shared by the concrete pooling primitives; it normalizes and
    registers `ksize`/`strides` and implements shape/dtype inference.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple
            of two `int` for height and width. Default: 1.
        strides (Union[int, tuple[int]]): The stride of the window, that should be
            a tuple of two `int` for height and width. Default: 1.
        padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
            Default: "valid".
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('ksize', ksize, [int, tuple], self.name)
        validator.check_value_type('strides', strides, [int, tuple], self.name)
        # padding is normalized to upper case ("VALID"/"SAME") before validation.
        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
        self.add_prim_attr("padding", self.padding)
        # MaxPoolWithArgmax (identified by primitive name) stores ksize/strides
        # as (1, H, W, 1); the other pooling ops use the NCHW layout (1, 1, H, W).
        self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
        if not self.is_maxpoolwithargmax:
            self.add_prim_attr('data_format', "NCHW")
        self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
        self.add_prim_attr("ksize", self.ksize)
        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.strides = (1, self.strides[-2], self.strides[-1], 1)
        self.add_prim_attr("strides", self.strides)
    def infer_shape(self, x_shape):
        """Derive the pooled NCHW output shape from the 4-D input shape."""
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        batch, channel, input_h, input_w = x_shape
        # Unpack kernel/stride according to the layout chosen in __init__.
        if self.is_maxpoolwithargmax:
            _, kernel_h, kernel_w, _ = self.ksize
            _, stride_h, stride_w, _ = self.strides
        else:
            _, _, kernel_h, kernel_w = self.ksize
            _, _, stride_h, stride_w = self.strides
        if self.padding == "VALID":
            # Only fully covered windows contribute.
            out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
            out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
        elif self.padding == "SAME":
            # Output size is the input size divided by stride, rounded up.
            out_h = math.ceil(input_h / stride_h)
            out_w = math.ceil(input_w / stride_w)
        out_shape = [batch, channel, out_h, out_w]
        # A kernel larger than the (padded) input yields a non-positive dim.
        for shape_value in out_shape:
            if shape_value <= 0:
                raise ValueError(f"For '{self.name}' The kernel size is not valid, "
                                 f"please check it if is larger than data's shape size.")
        return out_shape
    def infer_dtype(self, x_dtype):
        validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
        return x_dtype
class MaxPool(_Pool):
    r"""
    Max pooling operation.

    Computes a 2D max pool over an input tensor, typically of shape
    :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, taking the regional maximum over
    the :math:`(H_{in}, W_{in})` plane. Given kernel size
    :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`:

    .. math::
        \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): Size of the window to take the maximum
            over; a single int for a square window, or a (height, width) tuple.
            Default: 1.
        strides (Union[int, tuple[int]]): How far the window moves each step; a
            single int used for both directions, or a (height, width) tuple.
            Default: 1.
        padding (str): Pad mode, "same" or "valid", not case sensitive.
            Default: "valid".

            - same: pads so the output spatial size equals the input size
              divided by stride (rounded up); the padding is split evenly
              between the two sides, with any odd remainder going to the
              bottom and the right.
            - valid: no padding; only positions where the window fully fits
              contribute, extra pixels are discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
        >>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1)
        >>> output_tensor = maxpool_op(input_tensor)
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        # All argument validation and attribute registration lives in _Pool.
        super(MaxPool, self).__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
    r"""
    Performs max pooling on the input Tensor and return both max values and indices.

    Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
    regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
    :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.

    .. math::
        \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value,
            is an int number that represents height and width are both ksize, or a tuple of
            two int numbers that represent height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
            Default: "valid".

            - same: Adopts the way of completion. The height and width of the output will be the same as
              the input. The total number of padding will be calculated in horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible.
              Otherwise, the last extra padding will be done from the bottom and the right side.
            - valid: Adopts the way of discarding. The possible largest height and width of output
              will be returned without padding. Extra pixels will be discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
          Data type should be float16 or float32.

    Outputs:
        Tuple of 2 Tensor, the maxpool result and where max values from.

        - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
        - **mask** (Tensor) - Max values' index represented by the mask.

    Examples:
        >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
        >>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
        >>> output_tensor, argmax = maxpool_arg_op(input_tensor)
    """
    def __init__(self, ksize=1, strides=1, padding="valid"):
        # NOTE(review): unlike the sibling pooling primitives, this __init__ is
        # not decorated with @prim_attr_register — confirm this is intentional.
        super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
        # Cache the backend so shape/dtype inference can specialize per target.
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_gpu = context.get_context("device_target") == "GPU"
    def infer_shape(self, x_shape):
        """Return (pooled output shape, argmax/mask shape)."""
        out_shape = _Pool.infer_shape(self, x_shape)
        _, _, out_h, out_w = out_shape
        _, kernel_h, kernel_w, _ = self.ksize
        argmax_shape = []
        if self.is_tbe:
            # On Ascend the mask tensor replaces dims 2 and 3 with
            # (kh*kw, ceil(out_h*out_w/16)+1) — presumably the TBE kernel's
            # packed 16-element bitmask layout; confirm against the backend.
            for i in range(4):
                if i == 2:
                    dim = kernel_h * kernel_w
                    argmax_shape.append(dim)
                elif i == 3:
                    dim = math.ceil(out_h * out_w / 16) + 1
                    argmax_shape.append(dim)
                else:
                    argmax_shape.append(x_shape[i])
        else:
            # Other backends return indices with the same shape as the output.
            argmax_shape = out_shape
        return out_shape, argmax_shape
    def infer_dtype(self, x_dtype):
        out_dtype = x_dtype
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        # Index dtype differs per backend: int32 on GPU, uint16 elsewhere.
        argmax_dtype = mstype.uint16
        if self.is_gpu:
            argmax_dtype = mstype.int32
        return out_dtype, argmax_dtype
class AvgPool(_Pool):
    r"""
    Average pooling operation.

    Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
    Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool2d outputs
    regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
    :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.

    .. math::
        \text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): The size of kernel used to take the average value,
            is an int number that represents height and width are both ksize, or a tuple
            of two int numbers that represent height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
            Default: "valid".

            - same: Adopts the way of completion. The height and width of the output will be the same as
              the input. The total number of padding will be calculated in horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible.
              Otherwise, the last extra padding will be done from the bottom and the right side.
            - valid: Adopts the way of discarding. The possible largest height and width of output
              will be returned without padding. Extra pixels will be discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1)
        >>>
        >>>     def construct(self, x):
        >>>         result = self.avgpool_op(x)
        >>>         return result
        >>>
        >>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
        >>> net = Net()
        >>> result = net(input_x)
        [[[[ 2.5   3.5   4.5]
           [ 6.5   7.5   8.5]]
          [[ 14.5  15.5  16.5]
           [ 18.5  19.5  20.5]]
          [[ 26.5  27.5  28.5]
           [ 30.5  31.5  32.5]]]]
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        # Record the execution backend before the shared _Pool setup runs —
        # presumably consumed elsewhere for backend-specific kernel selection;
        # confirm against callers.
        if context.get_context("device_target") == "GPU":
            self.target = "GPU"
        elif context.get_context("enable_ge"):
            self.target = "GE"
        else:
            self.target = "OTHER"
        super(AvgPool, self).__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
    """
    Computes the gradients of convolution with respect to the input.

    Args:
        out_channel (int): The dimensionality of the output space.
        kernel_size (Union[int, tuple[int]]): The size of the convolution window.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
            top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
            padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 1.
        stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
            Default: 1.
        group (int): Splits input into groups. Default: 1.

    Returns:
        Tensor, the gradients of convolution.

    Examples:
        >>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
        >>> x = Tensor(np.ones([10, 32, 32, 32]))
        >>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
        >>> conv2d_backprop_input(dout, weight, F.shape(x))
    """
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 pad_mode="valid",
                 pad=0,
                 pad_list=None,
                 mode=1,
                 stride=1,
                 dilation=1,
                 group=1):
        """Normalize and register attributes; declare IO names."""
        self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # Note: stride is kept as a 2-tuple (H, W) here, unlike Conv2D's 4-tuple.
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # A scalar pad applies equally to top, bottom, left and right.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # The pad_mode attr is registered in upper case ("VALID"/"SAME"/"PAD");
        # presumably add_prim_attr also updates self.pad_mode, which __infer__
        # relies on below — confirm against Primitive.add_prim_attr.
        pad_mode = pad_mode.upper()
        self.add_prim_attr('pad_mode', pad_mode)
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        if pad_list:
            for x in pad_list:
                validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
        self.pad_list = pad_list
    def __infer__(self, doutput, w, x_size):
        """Validate dtypes, resolve pad_list, and return x_size as the output shape."""
        x_size_v = x_size['value']
        validator.check_value_type('x_size', x_size_v, [tuple], self.name)
        for i, dim_len in enumerate(x_size_v):
            validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
        args = {'doutput': doutput['dtype'], 'w': w['dtype']}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        # infer shape
        dout_shape = doutput['shape']
        kernel_h = self.kernel_size[0]
        kernel_w = self.kernel_size[1]
        stride_h = self.stride[0]
        stride_w = self.stride[1]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        # default pad mode is valid
        pad_list = (0, 0, 0, 0)
        if self.pad_list:
            # An explicit pad_list from the constructor takes precedence.
            pad_list = tuple(self.pad_list)
        elif self.pad_mode == "SAME":
            # Reconstruct the padding the forward convolution would have used.
            pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
            pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        elif self.pad_mode == 'PAD':
            pad_list = self.padding
        self.add_prim_attr('pad_list', pad_list)
        # The gradient w.r.t. the input has exactly the requested input size.
        out = {
            'value': None,
            'shape': x_size_v,
            'dtype': doutput['dtype'],
        }
        return out
class BiasAdd(PrimitiveWithInfer):
    r"""
    Adds a bias vector to an input tensor.

    The 1-D `bias` tensor is added to `input_x`, broadcasting over every axis
    except the channel axis (the second dimension).

    Inputs:
        - **input_x** (Tensor) - The input tensor. The shape can be 2-4 dimensions.
        - **bias** (Tensor) - The bias tensor, with shape :math:`(C)`. Its
          length must equal the second dimension of `input_x`.

    Outputs:
        Tensor, with the same shape and type as `input_x`.

    Examples:
        >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
        >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
        >>> bias_add = P.BiasAdd()
        >>> bias_add(input_x, bias)
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
        # Bias is applied along the channel axis of NCHW data.
        self.add_prim_attr('data_format', 'NCHW')
    def infer_shape(self, x_shape, b_shape):
        # The input needs at least a channel axis; the bias must be rank-1 and
        # match the input's channel count. Output shape equals the input shape.
        validator.check_integer("x rank", len(x_shape), 2, Rel.GE, self.name)
        validator.check_integer("bias rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
        return x_shape
    def infer_dtype(self, x_type, b_type):
        # Both operands must share one numeric tensor dtype; it is propagated.
        validator.check_tensor_type_same({"input_x": x_type, "bias": b_type}, mstype.number_type, self.name)
        return x_type
class TopK(PrimitiveWithInfer):
    """
    Returns the `k` largest entries, and their indices, along the last dimension.

    Args:
        sorted (bool): If true, the resulting elements will
            be sorted by the values in descending order. Default: False.

    Inputs:
        - **input_x** (Tensor) - The tensor to search; data type should be
          float16, float32 or int32.
        - **k** (int) - How many top elements to take along the last dimension;
          must be a constant.

    Outputs:
        Tuple of 2 Tensor, the values and the indices.

        - **values** (Tensor) - The `k` largest elements of each slice along
          the last dimension.
        - **indices** (Tensor) - Positions of those values within the last
          dimension of the input.

    Examples:
        >>> topk = P.TopK(sorted=True)
        >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
        >>> k = 3
        >>> values, indices = topk(input_x, k)
        >>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)
        >>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)
    """
    @prim_attr_register
    def __init__(self, sorted=False):
        validator.check_value_type("sorted", sorted, [bool], self.name)
        self.init_prim_io_names(inputs=['input', 'k'],
                                outputs=['values', 'indices'])
    def __infer__(self, input_x, k):
        # The input dtype carries through to `values`; `indices` is int32.
        x_dtype = input_x['dtype']
        validator.check_tensor_type_same({'x': x_dtype},
                                         (mstype.int32, mstype.float16, mstype.float32), self.name)
        k_v = k['value']
        validator.check_value_type('k', k_v, (int,), self.name)
        # Both outputs keep the input shape, with the last axis shrunk to k.
        out_shape = list(input_x['shape'])
        out_shape[-1] = k_v
        return {'shape': (out_shape, out_shape),
                'dtype': (x_dtype, mstype.int32),
                'value': None}
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Gets the softmax cross-entropy between logits and one-hot encoded labels.

    Note:
        With input logits `X`, labels `Y` and output `loss`:

        .. math::
            p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}

        .. math::
            loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}

    Inputs:
        - **logits** (Tensor) - Input logits of shape :math:`(N, C)`; dtype float16 or float32.
        - **labels** (Tensor) - Ground truth of shape :math:`(N, C)`, same dtype as `logits`.

    Outputs:
        Tuple of 2 tensors: the per-sample loss of shape `(N,)` and the backprop
        gradient with the same shape as `logits`.

    Examples:
        >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
        >>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
        >>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
        >>> loss, backprop = softmax_cross(logits, labels)
    """

    @prim_attr_register
    def __init__(self):
        """init SoftmaxCrossEntropyWithLogits"""

    def infer_shape(self, logits_shape, labels_shape):
        """Loss shape is (N,); gradient shape equals the logits shape."""
        validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
        return ([logits_shape[0]], logits_shape)

    def infer_dtype(self, logits_type, labels_type):
        """Both outputs carry the logits dtype (float16/float32 only)."""
        validator.check_tensor_type_same({"logits": logits_type, "labels": labels_type},
                                         (mstype.float16, mstype.float32), self.name)
        return (logits_type, logits_type)
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Computes softmax cross-entropy between logits and sparse (index-encoded) labels.

    Note:
        With input logits `X`, labels `Y` and output `loss`:

        .. math::
            p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}

        .. math::
            loss_{ij} = \begin{cases} -ln(p_{ij}), &j = y_i \cr -ln(1 - p_{ij}), & j \neq y_i \end{cases}

        .. math::
            loss = \sum_{ij} loss_{ij}

    Args:
        is_grad (bool): When true the op returns the gradient instead of the loss. Default: False.

    Inputs:
        - **logits** (Tensor) - Logits of shape :math:`(N, C)`; dtype float16 or float32.
        - **labels** (Tensor) - Class indices of shape :math:`(N)`; dtype int32 or int64.

    Outputs:
        Tensor. A scalar loss when `is_grad` is False, otherwise the gradient
        with the same shape as `logits`.

    Examples:
        Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code.
    """

    @prim_attr_register
    def __init__(self, is_grad=False):
        """init SparseSoftmaxCrossEntropyWithLogits"""
        self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
        self.is_grad = is_grad
        self.add_prim_attr('sens', 1.0)

    def infer_shape(self, logits_shape, labels_shape):
        """Scalar loss shape, or the logits shape in gradient mode."""
        validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
        if self.is_grad:
            return logits_shape
        return []

    def infer_dtype(self, logits_type, labels_type):
        """Output dtype follows the logits dtype."""
        validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
        return logits_type
class ApplyMomentum(PrimitiveWithInfer):
    """
    Optimizer that implements the Momentum algorithm.
    Refer to the paper `On the importance of initialization and momentum in deep
    learning <https://dl.acm.org/doi/10.5555/3042817.3043064>`_ for more details.
    Inputs of `variable`, `accumulation` and `gradient` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    Data type conversion of Parameter is not supported. RuntimeError exception will be thrown.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable and accumulation tensors. Default: False.
        use_nesterov (bool): Enable Nesterov momentum. Default: False.
        gradient_scale (float): The scale of the gradient. Default: 1.0.
    Inputs:
        - **variable** (Parameter) - Weights to be updated. data type should be float.
        - **accumulation** (Parameter) - Accumulated gradient value by moment weight.
          Has the same data type with `variable`.
        - **learning_rate** (Union[Number, Tensor]) - The learning rate value, should be a float number or
          a scalar tensor with float data type.
        - **gradient** (Tensor) - Gradients, has the same data type as `variable`.
        - **momentum** (Union[Number, Tensor]) - Momentum, should be a float number or
          a scalar tensor with float data type.
    Outputs:
        Tensor, parameters to be updated.
    Examples:
        Please refer to the usage in nn.ApplyMomentum.
    """
    # variable/accumulation/gradient share dtype T (implicit conversion group);
    # learning_rate and momentum are independently typed scalars.
    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
        sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
    )
    @prim_attr_register
    def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
        """init ApplyMomentum"""
        self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
                                outputs=['output'])
        # The non-GE Ascend (TBE) kernel produces two outputs, see infer_shape/infer_dtype.
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_ge = context.get_context("enable_ge")
    def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
        """Output shape equals the variable shape (doubled on non-GE Ascend)."""
        if not self.is_ge and self.is_tbe:
            return v_shape, v_shape
        return v_shape
    def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
        """Validate float dtypes; output dtype follows the gradient dtype."""
        valid_types = [mstype.float16, mstype.float32, mstype.float64]
        # Skip tensor checks when var/accum come through as RefKeys (Parameters).
        if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
            validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
            validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
        if not self.is_ge and self.is_tbe:
            return g_dtype, g_dtype
        return g_dtype
class SmoothL1Loss(PrimitiveWithInfer):
    r"""
    Computes smooth L1 loss, a robust L1 loss.

    SmoothL1Loss is similar to MSELoss but less sensitive to outliers, as
    described in `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.

    Note:
        With prediction `X`, target `Y` and output `loss`:

        .. math::
            \text{SmoothL1Loss} = \begin{cases} \frac{0.5 x^{2}}{\text{beta}}, &if \left |x \right | < \text{beta} \cr
            \left |x \right|-0.5 \text{beta}, &\text{otherwise}\end{cases}

    Args:
        beta (float): Threshold where the loss switches from quadratic to
            linear. Must be positive. Default: 1.0.

    Inputs:
        - **prediction** (Tensor) - Predicted values; dtype float16 or float32.
        - **target** (Tensor) - Ground truth, same dtype and shape as `prediction`.

    Outputs:
        Tensor, with the same dtype and shape as `prediction`.

    Examples:
        >>> loss = P.SmoothL1Loss()
        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> loss(input_data, target_data)
        [0, 0, 0.5]
    """

    @prim_attr_register
    def __init__(self, beta=1.0):
        """init SmoothL1Loss"""
        validator.check_value_type('beta', beta, [float], self.name)
        validator.check('beta', beta, '', 0, Rel.GT, self.name)
        self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])

    def infer_shape(self, prediction, target):
        """Output shape equals the (matching) prediction/target shape."""
        validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
        return prediction

    def infer_dtype(self, prediction, target):
        """Output dtype equals the (matching) prediction/target dtype."""
        validator.check_tensor_type_same({"prediction": prediction, "target": target},
                                         (mstype.float16, mstype.float32), self.name)
        return prediction
class L2Loss(PrimitiveWithInfer):
    """
    Calculates half of the L2 norm of a tensor without using the `sqrt`.
    Set `input_x` as x and output as loss.
    .. math::
        loss = sum(x ** 2) / 2
    Inputs:
        - **input_x** (Tensor) - A input Tensor. Data type should be float16 or float32.
    Outputs:
        Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
        >>> l2_loss = P.L2Loss()
        >>> l2_loss(input_x)
        7.0
    """
    @prim_attr_register
    def __init__(self):
        """init L2Loss"""
    def infer_shape(self, input_x):
        """Output is a scalar tensor (empty shape)."""
        loss_shape = []
        return loss_shape
    def infer_dtype(self, x_type):
        """Output dtype follows the input dtype (float16/float32 only)."""
        validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
        return x_type
class DataFormatDimMap(PrimitiveWithInfer):
    """
    Returns the dimension index in the destination data format given in the source data format.
    Args:
        src_format (string): An optional value for source data format. Default: 'NHWC'.
        dst_format (string): An optional value for destination data format. Default: 'NCHW'.
    Inputs:
        - **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
          The suggested values are in the range [-4, 4). Its type is int32.
    Outputs:
        Tensor, has the same type as the `input_x`.
    Examples:
        >>> x = Tensor([0, 1, 2, 3], mindspore.int32)
        >>> dfdm = P.DataFormatDimMap()
        >>> dfdm(x)
        [0 3 1 2]
    """
    @prim_attr_register
    def __init__(self, src_format='NHWC', dst_format='NCHW'):
        """init DataFormatDimMap"""
        # Only the two 4-D layouts are supported on either side of the mapping.
        valid_values = ['NHWC', 'NCHW']
        self.src_format = validator.check_string("src_format", src_format, valid_values, self.name)
        self.dst_format = validator.check_string("dst_format", dst_format, valid_values, self.name)
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
    def infer_shape(self, x_shape):
        """Element-wise mapping: output shape equals input shape."""
        return x_shape
    def infer_dtype(self, x_type):
        """Output dtype equals input dtype (int32 only)."""
        validator.check_subclass("x", x_type, mstype.tensor, self.name)
        valid_types = [mstype.int32]
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        return x_type
class RNNTLoss(PrimitiveWithInfer):
    """
    Computes the RNNTLoss and its gradient with respect to the softmax outputs.
    Args:
        blank_label (int): blank label. Default: 0.
    Inputs:
        - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.
        - **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.
        - **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
        - **label_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
    Outputs:
        - **costs** (Tensor) - Tensor of shape :math:`(B,)`, with the same data type as `acts`.
        - **grads** (Tensor) - Has the same shape and data type as `acts`.
    Examples:
        >>> B, T, U, V = 1, 2, 3, 5
        >>> acts = np.random.random((B, T, U, V)).astype(np.float32)
        >>> labels = np.array([[1, 2]]).astype(np.int32)
        >>> input_length = np.array([T] * B).astype(np.int32)
        >>> label_length = np.array([len(l) for l in labels]).astype(np.int32)
        >>> rnnt_loss = P.RNNTLoss(blank_label=0)
        >>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))
    """
    @prim_attr_register
    def __init__(self, blank_label=0):
        """init RNNTLoss"""
        validator.check_value_type('blank_label', blank_label, [int], self.name)
        self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
                                outputs=['costs', 'grads'])
    def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
        """Check the (B, T, U, V) consistency; costs is (B,), grads matches acts."""
        validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
        validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
        validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
        validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
        validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        # Labels carry U-1 symbols per batch element (no blank at position 0).
        validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
        validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        costs_shape = (acts_shape[0],)
        return (costs_shape, acts_shape)
    def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
        """costs and grads both carry the acts dtype; all lengths/labels are int32."""
        validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
        validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
        validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
        validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
        validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
        return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
    """
    Computes stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from On the importance of
    initialization and momentum in deep learning.

    Note:
        For details, please refer to `nn.SGD` source code.

    Args:
        dampening (float): The dampening for momentum. Default: 0.0.
        weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
        nesterov (bool): Enable Nesterov momentum. Default: False.

    Inputs:
        - **parameters** (Tensor) - Parameters to be updated. With float16 or float32 data type.
        - **gradient** (Tensor) - Gradients. With float16 or float32 data type.
        - **learning_rate** (Tensor) - Learning rate, a scalar tensor with float16 or float32 data type.
          e.g. Tensor(0.1, mindspore.float32)
        - **accum** (Tensor) - Accum(velocity) to be updated. With float16 or float32 data type.
        - **momentum** (Tensor) - Momentum, a scalar tensor with float16 or float32 data type.
          e.g. Tensor(0.1, mindspore.float32).
        - **stat** (Tensor) - States to be updated with the same shape as gradient. With float16 or float32 data type.

    Outputs:
        Tensor, parameters to be updated.

    Raises:
        ValueError: If `nesterov` is True and `dampening` is not 0.

    Examples:
        >>> sgd = P.SGD()
        >>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
        >>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
        >>> learning_rate = Tensor(0.01, mindspore.float32)
        >>> accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)
        >>> momentum = Tensor(0.1, mindspore.float32)
        >>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)
        >>> result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)
    """

    @prim_attr_register
    def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
        """init SGD"""
        validator.check_value_type("nesterov", nesterov, [bool], self.name)
        # Nesterov momentum is mathematically incompatible with dampening.
        # (Fix: the old message was an f-string with no placeholders and gave
        # no context about which primitive or value failed.)
        if nesterov and dampening != 0:
            raise ValueError(f"For '{self.name}', 'dampening' must be 0 when 'nesterov' is True, "
                             f"but got dampening={dampening}.")
        self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
                                outputs=['output'])

    def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
                    accum_shape, momentum_shape, stat_shape):
        """Output shape equals the parameters shape; stat must match gradient."""
        # Fix: dropped spurious f-prefixes from constant strings (no placeholders).
        validator.check_integer('parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
        validator.check_integer('gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
        validator.check_integer('learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
        validator.check_integer('accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
        validator.check_integer('momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
        validator.check_integer('stat rank', len(stat_shape), 0, Rel.GE, self.name)
        validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
        return parameters_shape

    def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
                    accum_dtype, momentum_dtype, stat_dtype):
        """All operands must be float16/float32 tensors; output follows parameters."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
        return parameters_dtype
class ApplyRMSProp(PrimitiveWithInfer):
    """
    Optimizer that implements the Root Mean Square prop(RMSProp) algorithm.
    Please refer to the usage in source code of `nn.RMSProp`.
    Note:
        Update `var` according to the RMSProp algorithm.
        ..  math::
            s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
        ..  math::
            m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w)
        ..  math::
            w = w - m_{t}
        where :math:`w` represents `var`, which will be updated.
        :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
        :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
        :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
        :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
        :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
    Inputs:
        - **var** (Tensor) - Weights to be update.
        - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
        - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - Gradients, must have the same type as `var`.
        - **decay** (float) - Decay rate. Only constant value is allowed.
        - **momentum** (float) - Momentum. Only constant value is allowed.
        - **epsilon** (float) - Ridge term. Only constant value is allowed.
    Outputs:
        Tensor, parameters to be update.
    Examples:
        >>> apply_rms = P.ApplyRMSProp()
        >>> input_x = Tensor(1., mindspore.float32)
        >>> mean_square = Tensor(2., mindspore.float32)
        >>> moment = Tensor(1., mindspore.float32)
        >>> grad = Tensor(2., mindspore.float32 )
        >>> learning_rate = Tensor(0.9, mindspore.float32)
        >>> decay = 0.0
        >>> momentum = 1e-10
        >>> epsilon = 0.001
        >>> result = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon)
        (-2.9977674, 0.80999994, 1.9987665)
    """
    @prim_attr_register
    def __init__(self, use_locking=False):
        """init ApplyRMSProp"""
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
                                        'rho', 'momentum', 'epsilon'], outputs=['output'])
        # The non-GE Ascend kernel emits three outputs instead of one; these
        # flags select the matching infer results below.
        self.is_ge = context.get_context("enable_ge")
        self.is_d = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
                    momentum_shape, epsilon_shape):
        """All stateful operands must match `var`; output shape follows `var`."""
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        # On Ascend (non-GE) the kernel returns three tensors, each var-shaped —
        # presumably (var, mean_square, moment); confirm against the kernel.
        if not self.is_ge and self.is_d:
            return var_shape, var_shape, var_shape
        return var_shape
    def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
                    momentum_dtype, epsilon_dtype):
        """Validate numeric dtypes; output dtype follows `var`."""
        args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_decay, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if not self.is_ge and self.is_d:
            return var_dtype, var_dtype, var_dtype
        return var_dtype
    def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
        """Reject non-constant decay/momentum/epsilon; this op never folds to a value."""
        if decay is None or momentum is None or epsilon is None:
            raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
class ApplyCenteredRMSProp(PrimitiveWithInfer):
    """
    Optimizer that implements the centered RMSProp algorithm.
    Please refer to the usage in source code of `nn.RMSProp`.
    Note:
        Update `var` according to the centered RMSProp algorithm.
        ..  math::
            g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w)
        ..  math::
            s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
        ..  math::
            m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w)
        ..  math::
            w = w - m_{t}
        where :math:`w` represents `var`, which will be updated.
        :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`.
        :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
        :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
        :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
        :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
        :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
    Inputs:
        - **var** (Tensor) - Weights to be update.
        - **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`.
        - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
        - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
        - **grad** (Tensor) - Gradients, must have the same type as `var`.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **decay** (float) - Decay rate.
        - **momentum** (float) - Momentum.
        - **epsilon** (float) - Ridge term.
    Outputs:
        Tensor, parameters to be update.
    Examples:
        >>> centered_rms_prop = P.ApplyCenteredRMSProp()
        >>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> learning_rate = Tensor(0.9, mindspore.float32)
        >>> decay = 0.0
        >>> momentum = 1e-10
        >>> epsilon = 0.05
        >>> result = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad,
        >>>                            learning_rate, decay, momentum, epsilon)
        [[[ -6.        -9.024922]
          [-12.049845 -15.074766]
          [-18.09969  -21.124613]]
         [[-24.149532 -27.174456]
          [-30.199379 -33.2243  ]
          [-36.249226 -39.274143]]]
    """
    @prim_attr_register
    def __init__(self, use_locking=False):
        """init ApplyCenteredRMSProp"""
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # The Ascend kernel emits four outputs instead of one; this flag selects
        # the matching infer results below.
        self.is_ascend = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
                    learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
        """All stateful operands must match `var`; output shape follows `var`."""
        validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if self.is_ascend:
            # Ascend returns the updated (var, mean_gradient, mean_square, moment).
            return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
        return var_shape
    def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
                    learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
        """Validate numeric dtypes; output dtype follows `var`."""
        args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
                "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_rho, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if self.is_ascend:
            return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
        return var_dtype
class LayerNorm(Primitive):
    r"""
    Applies the Layer Normalization to the input tensor.
    This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
    `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
    Args:
        begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
            the value should be in [-1, rank(input)). Default: 1.
        begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
            apply LayerNorm, the value should be in [-1, rank(input)). Default: 1.
        epsilon (float): A value added to the denominator for numerical stability. Default: 1e-7.
    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
          The input of LayerNorm.
        - **gamma** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
          The learnable parameter `gamma` as the scale on norm.
        - **beta** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
          The learnable parameter `beta` as the scale on norm.
    Outputs:
        tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
        - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
          The shape is :math:`(N, C)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.
    Examples:
        >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
        >>> gamma = Tensor(np.ones([3]), mindspore.float32)
        >>> beta = Tensor(np.ones([3]), mindspore.float32)
        >>> layer_norm = P.LayerNorm()
        >>> output = layer_norm(input_x, gamma, beta)
        ([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]],
         [[2.], [2.]], [[0.6666667], [0.6666667]])
    """
    @prim_attr_register
    def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
        """init LayerNorm"""
        # Plain Primitive: shape/dtype inference is handled elsewhere; only the
        # attribute types are validated here.
        validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
        validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [float], self.name)
class L2Normalize(PrimitiveWithInfer):
    r"""
    L2 normalization Operator.

    Normalizes the input along the given axis:

    .. math::
        \text{output} = \frac{x}{\sqrt{\text{max}(\text{sum} (\text{input_x}^2), \epsilon)}},

    where :math:`\epsilon` is epsilon.

    Args:
        axis (int): Axis along which to normalize. Default: 0.
        epsilon (float): Small value added for numerical stability. Default: 1e-4.

    Inputs:
        - **input_x** (Tensor) - Input to normalize; dtype float16 or float32.

    Outputs:
        Tensor, with the same dtype and shape as the input.

    Examples:
        >>> l2_normalize = P.L2Normalize()
        >>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)
        >>> result = l2_normalize(input_x)
    """

    @prim_attr_register
    def __init__(self, axis=0, epsilon=1e-4):
        """init L2Normalize"""
        validator.check_value_type('axis', axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [int, float], self.name)

    def infer_shape(self, input_x):
        """Element-wise scaling: output shape equals input shape (after axis check)."""
        rank = len(input_x)
        validator.check_int_range('axis value', self.axis, -rank, rank, Rel.INC_LEFT, self.name)
        return input_x

    def infer_dtype(self, input_x):
        """Output dtype equals input dtype (float16/float32 only)."""
        validator.check_subclass("x", input_x, mstype.tensor, self.name)
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class DropoutGenMask(Primitive):
    """
    Generates the mask value for the input shape.
    Args:
        Seed0 (int): Seed0 value for random generating. Default: 0.
        Seed1 (int): Seed1 value for random generating. Default: 0.
    Inputs:
        - **shape** (tuple[int]) - The shape of target mask.
        - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
          means dropping out 10% of input units.
    Outputs:
        Tensor, the value of generated mask for input shape.
    Examples:
        >>> dropout_gen_mask = P.DropoutGenMask()
        >>> shape = (20, 16, 50)
        >>> keep_prob = Tensor(0.5, mindspore.float32)
        >>> mask = dropout_gen_mask(shape, keep_prob)
    """
    @prim_attr_register
    def __init__(self, Seed0=0, Seed1=0):
        """init DropoutGenMask"""
        self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
        validator.check_value_type("Seed0", Seed0, [int], self.name)
        validator.check_value_type("Seed1", Seed1, [int], self.name)
        # Mark the op as having a random side effect so it is never folded or
        # de-duplicated by the graph optimizer.
        self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
    """
    Applies dropout mask on the input tensor.

    Take the mask output of DropoutGenMask as input, and apply dropout on the input.

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. And the
          shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If input wrong `mask`,
          the output of `DropoutDoMask` are unpredictable.
        - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
          means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
          `DropoutGenMask`.

    Outputs:
        Tensor, the value that applied dropout on.

    Examples:
        >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
        >>> shape = (20, 16, 50)
        >>> keep_prob = Tensor(0.5, mindspore.float32)
        >>> dropout_gen_mask = P.DropoutGenMask()
        >>> dropout_do_mask = P.DropoutDoMask()
        >>> mask = dropout_gen_mask(shape, keep_prob)
        >>> output = dropout_do_mask(x, mask, keep_prob)
        >>> assert output.shape == (20, 16, 50)
    """

    @prim_attr_register
    def __init__(self):
        pass

    def __infer__(self, input_x, mask, keep_prob):
        """Validate operands and describe the output tensor (same shape/dtype as input_x)."""
        input_x_shape = input_x['shape']
        mask_shape = mask['shape']
        keep_prob_shape = keep_prob['shape']
        validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
        size_x = reduce(lambda x, y: x * y, input_x_shape)
        if len(mask_shape) != 1:
            raise ValueError("DropoutDoMask mask shape should be 1-dimension.")
        # Each uint8 mask element packs 8 dropout bits, so a mask of length m
        # covers m * 8 input elements; it must cover the whole input.
        size_y = mask_shape[0] * 8
        if size_x > size_y:
            # Bug fix: the second string literal was missing its `f` prefix, so
            # "{input_x_shape}" and "{mask_shape}" were printed literally
            # instead of the actual shapes (message wording also repaired).
            raise ValueError(f"DropoutDoMask mask does not match input_x shape: "
                             f"{input_x_shape}, mask shape: {mask_shape}.")
        validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
                                         self.name)
        validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
        keep_prob_v = keep_prob['value']
        if keep_prob_v is not None:
            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)

        out = {'shape': input_x_shape,
               'dtype': input_x['dtype'],
               'value': None}
        return out
class ResizeBilinear(PrimitiveWithInfer):
    r"""
    Resizes the image to a certain size using bilinear interpolation.

    Only the two trailing dimensions, which represent height and width, are
    resized. Input images may use different data types, but output images are
    always float32.

    Args:
        size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size for the images.
        align_corners (bool): If it's true, rescale input by `(new_height - 1) / (height - 1)`,
            which exactly aligns the 4 corners of images and resized images. If it's false,
            rescale by `new_height / height`. Default: False.

    Inputs:
        - **input** (Tensor) - Image to be resized. Tensor of shape `(N_i, ..., N_n, height, width)`,
          with data type of float32 or float16.

    Outputs:
        Tensor, resized image. Tensor of shape `(N_i, ..., N_n, new_height, new_width)` in `float32`.

    Examples:
        >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
        >>> resize_bilinear = P.ResizeBilinear((5, 5))
        >>> result = resize_bilinear(tensor)
        >>> assert result.shape == (1, 1, 5, 5)
    """

    @prim_attr_register
    def __init__(self, size, align_corners=False):
        """Init ResizeBilinear; `size`/`align_corners` are registered by the decorator."""
        pass

    def infer_shape(self, input_shape):
        """Keep batch/channel dims; replace the spatial dims with `self.size`."""
        batch, channel, _, _ = list(input_shape)
        return [batch, channel] + [int(dim) for dim in self.size]

    def infer_dtype(self, input_dtype):
        """Input must be float16/float32; output is always float32."""
        validator.check_tensor_type_same({'input_dtype': input_dtype},
                                         [mstype.float16, mstype.float32], self.name)
        return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
    r"""
    Computes a one-hot tensor.

    Makes a new tensor in which the locations selected by `indices` take value
    `on_value` while every other location takes value `off_value`.

    Note:
        If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.

    Args:
        axis (int): Position to insert the value. e.g. If `indices` shape is [n, c], and `axis` is `-1` the output shape
            will be [n, c, depth], If `axis` is `0` the output shape will be [depth, n, c]. Default: -1.

    Inputs:
        - **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
          Data type must be int32.
        - **depth** (int) - A scalar defining the depth of the one hot dimension.
        - **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.
        - **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
          Has the same data type as `on_value`.

    Outputs:
        Tensor, one_hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.

    Examples:
        >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
        >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
        >>> onehot = P.OneHot()
        >>> result = onehot(indices, depth, on_value, off_value)
        [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        """Init OneHot; `axis` selects where the depth dimension is inserted."""
        self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
        validator.check_value_type("axis", axis, [int], self.name)

    def __infer__(self, indices, depth, on_value, off_value):
        """Validate operand types/shapes and derive the output abstract value."""
        # dtype checks
        validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
        validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
        validator.check_tensor_type_same({"on_value": on_value['dtype'], "off_value": off_value['dtype']},
                                         (mstype.float16, mstype.float32), self.name)
        # shape checks: axis may be -1 (append at the end) or any position up to the rank
        indices_shp = indices['shape']
        validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
        depth_val = depth['value']
        validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)
        if self.axis >= 0:
            indices_shp.insert(self.axis, depth_val)
        else:
            indices_shp.append(depth_val)
        return {'shape': indices_shp,
                'dtype': on_value['dtype'],
                'value': None}
class Gelu(PrimitiveWithInfer):
    r"""
    Gaussian Error Linear Units activation function.

    GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
    And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
    <https://arxiv.org/abs/1810.04805>`_.

    Gelu is defined as follows:

    .. math::
        \text{output} = 0.5 * x * (1 + erf(x / \sqrt{2})),

    where :math:`erf` is the "Gauss error function" .

    Inputs:
        - **input_x** (Tensor) - Input to compute the Gelu with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as input.

    Examples:
        >>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> gelu = P.Gelu()
        >>> result = gelu(tensor)
    """

    @prim_attr_register
    def __init__(self):
        """Init GeLU."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise activation: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        # Element-wise activation: dtype passes through after validation.
        valid_dtypes = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"input_x": input_x}, valid_dtypes, self.name)
        return input_x
class GetNext(PrimitiveWithInfer):
    """
    Returns the next element in the dataset queue.

    Note:
        The GetNext operation needs to be associated with network and it also depends on the init_dataset interface,
        it can't be used directly as a single operation.
        For details, please refer to `nn.DataWrapper` source code.

    Args:
        types (list[:class:`mindspore.dtype`]): The type of the outputs.
        shapes (list[tuple[int]]): The dimensionality of the outputs.
        output_num (int): The output number, length of `types` and `shapes`.
        shared_name (str): The queue name of `init_dataset` interface.

    Inputs:
        No inputs.

    Outputs:
        tuple[Tensor], the output of Dataset. The shape is described in `shapes`
        and the type is described is `types`.

    Examples:
        >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
        >>> feature, label = get_next()
    """

    @prim_attr_register
    def __init__(self, types, shapes, output_num, shared_name):
        """Init GetNext; validate the queue output description."""
        for arg_name, arg_value in (("types", types), ("shapes", shapes)):
            validator.check_value_type(arg_name, arg_value, [list, tuple], self.name)
        validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
        validator.check_value_type("output_num", output_num, [int], self.name)

    def infer_shape(self):
        # One shape per dataset column, as declared at construction.
        return tuple(self.shapes)

    def infer_dtype(self):
        # One dtype per dataset column, as declared at construction.
        return tuple(self.types)
class PReLU(PrimitiveWithInfer):
    r"""
    Parametric Rectified Linear Unit activation function.

    PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:

    .. math::
        prelu(x_i)= \max(0, x_i) + \min(0, w * x_i),

    where :math:`x_i` is an element of an channel of the input.

    Note:
        1-dimensional input_x is not supported.

    Inputs:
        - **input_x** (Tensor) - Float tensor, representing the output of the preview layer.
          With data type of float16 or float32.
        - **weight** (Tensor) - Float Tensor, w > 0, there is only two shapes are legitimate,
          1 or the number of channels at input. With data type of float16 or float32.

    Outputs:
        Tensor, with the same type as `input_x`.

    Detailed information, please refer to `nn.PReLU`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.prelu = P.PReLU()
        >>>     def construct(self, input_x, weight):
        >>>         result = self.prelu(input_x, weight)
        >>>         return result
        >>>
        >>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32)
        >>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
        >>> net = Net()
        >>> result = net(input_x, weight)
        [[[-0.1 1. ]
          [ 0. 2. ]
          [0. 0. ]]
         [[-0.2 -0.1 ]
          [2. -1.8000001]
          [0.6 0.6 ]]]
    """

    @prim_attr_register
    def __init__(self):
        """Init PReLU; the primitive has no attributes to register."""
        pass

    def infer_shape(self, input_x_shape, weight_shape):
        """Output shape equals input shape; validate rank and channel constraints."""
        weight_dim = len(weight_shape)
        if len(input_x_shape) == 1:
            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
        if weight_dim != 1:
            raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')
        # weight is either per-channel (matching input_x's channel dim) or a single shared value
        if weight_shape[0] not in (input_x_shape[1], 1):
            raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
                             f' while channel of input_x is {input_x_shape[1]},'
                             f' weight_shape[0] is {weight_shape[0]}.')
        return input_x_shape

    def infer_dtype(self, input_x_dtype, weight_dtype):
        """Both operands must be float16/float32; output keeps input_x's dtype."""
        valid_types = (mstype.float16, mstype.float32)
        for arg_name, arg_dtype in (("input_x", input_x_dtype), ("weight", weight_dtype)):
            validator.check_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        return input_x_dtype
class LSTM(PrimitiveWithInfer):
    """
    Performs the long short term memory(LSTM) on the input.
    Detailed information, please refer to `nn.LSTM`.
    """
    @prim_attr_register
    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
        """Validate and store the LSTM hyper-parameters."""
        self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
        self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
        self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
        self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
        self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
        # First call checks the type, second checks the [0, 1] range; both return the value.
        self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
        self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
    def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
        """Derive output and workspace shapes from the input shapes.

        Expects x as (seq_len, batch, input_size) and h/c as
        (num_layers * num_directions, batch, hidden_size).
        """
        # (seq, batch_size, feature)
        validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
        validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)
        # h and c should be same shape
        validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
        validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)
        # (num_layers * num_directions, batch, hidden_size)
        validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
        validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
        validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)
        y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)
        # set arbitrary shape for reserved space
        # NOTE(review): the sizing below presumably mirrors an MKL-DNN-style RNN
        # workspace layout (gates / states / diff-states segments rounded up to
        # page boundaries) -- confirm against the backing kernel implementation.
        type_size = 4  # bytes per float32 element
        gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
        states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
        self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
        self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_grid_comp_size = 0
        self.page_size = 4096
        # Accumulate segment sizes, rounding each segment start to a page boundary.
        current_offset = 0
        current_offset += self.ws_gates_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_c_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_diff_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_grid_comp_size
        reserved_shape = (current_offset, 1)
        state_shape = (1, 1)
        return (y_shape, h_shape, c_shape, reserved_shape, state_shape)
    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
        """All operands share one float16/float32 dtype; outputs follow x's dtype."""
        args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
        return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)
    def rnd_up(self, current_offset, page_size):
        """Round `current_offset` up to the next multiple of `page_size`."""
        return ((current_offset + page_size - 1) // page_size) * page_size
    def get_good_ld(self, dim, type_size):
        """Round `dim` up to a 64-byte-aligned element count (leading dimension).

        NOTE(review): `ld * 256 == 0` only holds when ld == 0, which cannot occur
        after rnd_up of a positive dim; this looks like `ld % 256 == 0` was
        intended (avoid power-of-two leading dimensions). Confirm before changing,
        since it alters the reserved workspace size.
        """
        ld = self.rnd_up(dim, 64 // type_size)
        if ld * 256 == 0:
            return ld + 64 // type_size
        return ld
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Uses the given logits to compute sigmoid cross entropy.

    Note:
        Sets input logits as `X`, input label as `Y`, output as `loss`. Then,

        .. math::
            p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}}

        .. math::
            loss_{ij} = -[Y_{ij} * ln(p_{ij}) + (1 - Y_{ij})ln(1 - p_{ij})]

    Inputs:
        - **logits** (Tensor) - Input logits.
        - **label** (Tensor) - Ground truth label.

    Outputs:
        Tensor, with the same shape and type as input `logits`.

    Examples:
        >>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))
        >>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))
        >>> sigmoid = P.SigmoidCrossEntropyWithLogits()
        >>> sigmoid(logits, labels)
    """

    @prim_attr_register
    def __init__(self):
        """Init SigmoidCrossEntropyWithLogits."""
        self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])

    def infer_shape(self, x_shape, y_shape):
        # Element-wise loss: logits and labels must agree, output follows logits.
        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_dtype, y_dtype):
        # Both operands share one numeric dtype; output follows logits.
        validator.check_tensor_type_same({"x_dtype": x_dtype, "y_dtype": y_dtype},
                                         mstype.number_type, self.name)
        return x_dtype
class Pad(PrimitiveWithInfer):
    """
    Pads input tensor according to the paddings.

    Args:
        paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
            paddings are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be
            extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to
            be extended behind of the input tensor in the `D` th dimension.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, the tensor after padding.

    Examples:
        >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> pad_op = P.Pad(((1, 2), (2, 1)))
        >>> output_tensor = pad_op(input_tensor)
        >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
        >>>                                          [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
        >>>                                          [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ],
        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
    """

    @prim_attr_register
    def __init__(self, paddings):
        """Init Pad; `paddings` must be a tuple of (before, after) pairs."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        if not isinstance(paddings, tuple):
            raise TypeError('Paddings must be tuple type.')
        if any(len(item) != 2 for item in paddings):
            raise ValueError('The shape of paddings must be (n, 2).')
        self.paddings = paddings

    def infer_shape(self, x):
        """Each output dim is the input dim plus its before/after padding."""
        paddings = np.array(self.paddings)
        validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)
        if not np.all(paddings >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        return tuple(x[i] + paddings[i, 0] + paddings[i, 1] for i in range(paddings.size // 2))

    def infer_dtype(self, x):
        # Padding preserves the input dtype.
        validator.check_subclass("input_x", x, mstype.tensor, self.name)
        return x
class MirrorPad(PrimitiveWithInfer):
    """
    Pads the input tensor according to the paddings and mode.

    Args:
        mode (str): Specifies padding mode. The optional values are "REFLECT", "SYMMETRIC".
            Default: "REFLECT".

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix(list),
          and its shape is (N, 2). N is the rank of input data. All elements of paddings
          are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be
          extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to
          be extended behind of the input tensor in the `D` th dimension.

    Outputs:
        Tensor, the tensor after padding.

        - If `mode` is "REFLECT", it uses a way of symmetrical copying throught the axis of symmetry to fill in.
          If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the
          Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]].
        - If `mode` is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied
          according to the symmetry axis, except that it includes the symmetry axis. If the `input_x`
          is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
          [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].

    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.pad = P.MirrorPad(mode="REFLECT")
        >>>     def construct(self, x, paddings):
        >>>         return self.pad(x, paddings)
        >>> x = np.random.random(size=(2, 3)).astype(np.float32)
        >>> paddings = Tensor([[1,1],[2,2]])
        >>> pad = Net()
        >>> ms_output = pad(Tensor(x), paddings)
    """

    @prim_attr_register
    def __init__(self, mode='REFLECT'):
        """Init MirrorPad; `paddings` (input 1) must be a compile-time constant."""
        validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
        self.mode = mode
        self.set_const_input_indexes([1])

    def __infer__(self, input_x, paddings):
        """Validate paddings against the input rank/mode and compute the padded shape."""
        validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
        validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
        x_shape = list(input_x['shape'])
        paddings_value = paddings['value'].asnumpy()
        paddings_size = paddings_value.size
        validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)
        if not np.all(paddings_value >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # SYMMETRIC includes the edge itself, so it allows one extra unit of padding.
        adjust = 1 if self.mode == 'SYMMETRIC' else 0
        y_shape = ()
        for dim in range(paddings_size // 2):
            before, after = paddings_value[dim, 0], paddings_value[dim, 1]
            if before >= x_shape[dim] + adjust or after >= x_shape[dim] + adjust:
                raise ValueError('At least one dim has too high a padding value for this input and mode')
            y_shape += (x_shape[dim] + before + after,)
        return {'shape': y_shape,
                'dtype': input_x['dtype'],
                'value': None}
class ROIAlign(PrimitiveWithInfer):
    """
    Computes Region of Interest (RoI) Align operator.

    The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the
    feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling
    points. The details of (RoI) Align operator are described in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.

    Args:
        pooled_height (int): The output features' height.
        pooled_width (int): The output features' width.
        spatial_scale (float): A scaling factor that maps the raw image coordinates to the input
            feature map coordinates. Suppose the height of a RoI is `ori_h` in the raw image and `fea_h` in the
            input feature map, the `spatial_scale` should be `fea_h / ori_h`.
        sample_num (int): Number of sampling points. Default: 2.
        roi_end_mode (int): Number must be 0 or 1. Default: 1.

    Inputs:
        - **features** (Tensor) - The input features, whose shape should be `(N, C, H, W)`.
        - **rois** (Tensor) - The shape is `(rois_n, 5)`. With data type of float16 or float32.
          `rois_n` represents the number of RoI. The size of the second dimension should be `5` and the `5` colunms
          are `(image_index, top_left_x, top_left_y, bottom_right_x, bottom_right_y)`. `image_index` represents the
          index of image. `top_left_x` and `top_left_y` represent the `x, y` coordinates of the top left corner
          of corresponding RoI, respectively. `bottom_right_x` and `bottom_right_y` represent the `x, y`
          coordinates of the bottom right corner of corresponding RoI, respectively.

    Outputs:
        Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`.

    Examples:
        >>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)
        >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
        >>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
        >>> output_tensor = roi_align(input_tensor, rois)
        >>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)
    """

    @prim_attr_register
    def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
        """Init ROIAlign: validate attribute types and ranges, then record them."""
        validator.check_value_type("pooled_height", pooled_height, [int], self.name)
        validator.check_value_type("pooled_width", pooled_width, [int], self.name)
        validator.check_value_type("spatial_scale", spatial_scale, [float], self.name)
        validator.check_value_type("sample_num", sample_num, [int], self.name)
        validator.check_value_type("roi_end_mode", roi_end_mode, [int], self.name)
        validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
        self.pooled_height, self.pooled_width = pooled_height, pooled_width
        self.spatial_scale = spatial_scale
        self.sample_num, self.roi_end_mode = sample_num, roi_end_mode

    def infer_shape(self, inputs_shape, rois_shape):
        """Output is (rois_n, C, pooled_height, pooled_width)."""
        num_rois = rois_shape[0]
        channels = inputs_shape[1]
        return [num_rois, channels, self.pooled_height, self.pooled_width]

    def infer_dtype(self, inputs_type, rois_type):
        """Both operands must be float16/float32; output keeps the feature dtype."""
        for arg_name, arg_type in (("inputs_type", inputs_type), ("rois_type", rois_type)):
            validator.check_tensor_type_same({arg_name: arg_type}, (mstype.float16, mstype.float32), self.name)
        return inputs_type
class Adam(PrimitiveWithInfer):
    r"""
    Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
    The updating formulas are as follows,
    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}
    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.
    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.
    Inputs:
        - **var** (Tensor) - Weights to be updated.
        - **m** (Tensor) - The 1st moment vector in the updating formula, has the same type as `var`.
        - **v** (Tensor) - the 2nd moment vector in the updating formula.
          Mean square gradients with the same type as `var`.
        - **beta1_power** (float) - :math:`beta_1^t` in the updating formula.
        - **beta2_power** (float) - :math:`beta_2^t` in the updating formula.
        - **lr** (float) - :math:`l` in the updating formula.
        - **beta1** (float) - The exponential decay rate for the 1st moment estimations.
        - **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
        - **epsilon** (float) - Term added to the denominator to improve numerical stability.
        - **gradient** (Tensor) - Gradients, has the same type as `var`.
    Outputs:
        Tuple of 3 Tensor, the updated parameters.
        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adam = P.Adam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
        >>>         out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
        >>>                               epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
    """
    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Init Adam; both flags must be plain bools."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        """var, m, v and grad must share one shape; outputs mirror var/m/v."""
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        """Tensors share one numeric dtype; hyper-parameters are float16/float32 scalars or tensors."""
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        return var_dtype, m_dtype, v_dtype
class FusedSparseAdam(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
    algorithm. This operator is used when the gradient is sparse.
    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
    The updating formulas are as follows,
    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}
    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.
    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.
    Inputs:
        - **var** (Parameter) - Parameters to be updated with float32 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
          float32 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
          `var` with float32 data type.
        - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
        - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
        - **lr** (Tensor) - :math:`l` in the updating formula. With float32 data type.
        - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
        - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
        - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
        - **gradient** (Tensor) - Gradient value with float32 data type.
        - **indices** (Tensor) - Gradient indices with int32 data type.
    Outputs:
        Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
        - **var** (Tensor) - A Tensor with shape (1,).
        - **m** (Tensor) - A Tensor with shape (1,).
        - **v** (Tensor) - A Tensor with shape (1,).
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_adam = P.FusedSparseAdam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
        >>>         out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
        >>>                                      epsilon, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> beta1_power = Tensor(0.9, mstype.float32)
        >>> beta2_power = Tensor(0.999, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.999, mstype.float32)
        >>> epsilon = Tensor(1e-8, mstype.float32)
        >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
        >>> indices = Tensor([0, 1], mstype.int32)
        >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
    """
    # Signature: var/m/v are written in place; all inputs except indices share one dtype group T.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Init FusedSparseAdam; both flags must be plain bools."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        """Check var/m/v agree and grad rows match indices; outputs are placeholder (1,) tensors."""
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # NOTE(review): the message below says "updates" but refers to `grad`.
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # Parameters are updated in place; the declared outputs are dummies.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        """Tensors share one numeric dtype; hyper-parameters are float16/float32; indices are int32."""
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseLazyAdam(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
    algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
    original Adam algorithm, as only the current indices parameters will be updated.
    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
    The updating formulas are as follows,
    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}
    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.
    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.
    Inputs:
        - **var** (Parameter) - Parameters to be updated with float32 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
          float32 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
          `var` with float32 data type.
        - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
        - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
        - **lr** (Tensor) - :math:`l` in the updating formula with float32 data type.
        - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
        - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
        - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
        - **gradient** (Tensor) - Gradient value with float32 data type.
        - **indices** (Tensor) - Gradient indices with int32 data type.
    Outputs:
        Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
        - **var** (Tensor) - A Tensor with shape (1,).
        - **m** (Tensor) - A Tensor with shape (1,).
        - **v** (Tensor) - A Tensor with shape (1,).
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
        >>>         out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1,
        >>>                                          beta2, epsilon, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> beta1_power = Tensor(0.9, mstype.float32)
        >>> beta2_power = Tensor(0.999, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.999, mstype.float32)
        >>> epsilon = Tensor(1e-8, mstype.float32)
        >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
        >>> indices = Tensor([0, 1], mstype.int32)
        >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
    """
    # var/m/v are read-write parameters; all numeric inputs share dtype group T
    # (implicit conversion applies), while indices has its own dtype group T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        # Both flags must be plain bools; @prim_attr_register records them as
        # primitive attributes for the backend kernel.
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        # var, m and v must share one shape; indices is a 1-D vector whose
        # length equals grad's leading dimension.
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # For multi-dimensional var, grad must cover the trailing dims of var
        # for each indexed row: grad_shape == indices_shape + var_shape[1:].
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # The op updates var/m/v in place; the declared outputs are dummy (1,) tensors.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        # Tensor operands share one numeric dtype; hyper-parameters may be
        # scalars or tensors of float16/float32; indices must be int32.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseFtrl(PrimitiveWithInfer):
    """
    Merge the duplicate value of the gradient and then update relevant entries according to the FTRL-proximal scheme.
    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): Use locks for updating operation if True . Default: False.
    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The shape
          of `indices` must be the same as `grad` in first dimension. The type must be int32.
    Outputs:
        Tuple of 3 Tensor, this operator will update the input parameters directly, the outputs are useless.
        - **var** (Tensor) - A Tensor with shape (1,).
        - **accum** (Tensor) - A Tensor with shape (1,).
        - **linear** (Tensor) - A Tensor with shape (1,).
    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlNet, self).__init__()
        >>>         self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
        >>>         return out
        >>>
        >>> net = SparseApplyFtrlNet()
        >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
        >>> indices = Tensor(np.array([0, 1]).astype(np.int32))
        >>> output = net(grad, indices)
    """
    # var/accum/linear are read-write parameters sharing dtype group T;
    # indices uses its own dtype group T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        # NOTE(review): outputs is declared as a single 'output' here although
        # infer_shape/infer_dtype return a 3-tuple — confirm against the kernel.
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
                                outputs=['output'])
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        # lr must be strictly positive; l1/l2 non-negative; lr_power <= 0.
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        # var, accum and linear share one shape; grad matches var in trailing
        # dims and indices (a 1-D vector) in the leading dim.
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # In-place update; the declared outputs are dummy (1,) tensors.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        # All tensor operands must be float32; indices must be int32.
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update relevant entries according to the proximal adagrad
    algorithm.
    .. math::
        accum += grad * grad
    .. math::
        \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
    .. math::
        var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Args:
        use_locking (bool): If true, the variable and accumulation tensors will be protected from being updated.
            Default: False.
    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. The data type must be float32.
        - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Tensor) - The learning rate value. The data type must be float32.
        - **l1** (Tensor) - l1 regularization strength. The data type must be float32.
        - **l2** (Tensor) - l2 regularization strength. The data type must be float32.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient. The data type must be float32.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The data type
          must be int32.
    Outputs:
        Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.
        - **var** (Tensor) - A Tensor with shape (1,).
        - **accum** (Tensor) - A Tensor with shape (1,).
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
        >>>         self.lr = Tensor(0.01, mstype.float32)
        >>>         self.l1 = Tensor(0.0, mstype.float32)
        >>>         self.l2 = Tensor(0.0, mstype.float32)
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
        >>>                                                  self.l2, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
        >>> indices = Tensor(np.array([0, 1]).astype(np.int32))
        >>> output = net(grad, indices)
    """
    # var/accum are read-write parameters; all float inputs share dtype group T,
    # indices uses dtype group T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('l1', dtype=sig.sig_dtype.T),
        sig.make_sig('l2', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False):
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        # Only the rank of indices is validated here; the op updates var/accum
        # in place and returns dummy (1,) outputs.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        return [1], [1]
    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
        # NOTE(review): the class docstring says indices must be int32, but this
        # accepts int16/int32/int64 and unsigned variants — confirm which is intended.
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype
class KLDivLoss(PrimitiveWithInfer):
    r"""
    Computes the Kullback-Leibler divergence between the target and the output.
    Note:
        Sets input as :math:`x`, input label as :math:`y`, output as :math:`\ell(x, y)`.
        Let,
        .. math::
            L = \{l_1,\dots,l_N\}^\top, \quad
            l_n = y_n \cdot (\log y_n - x_n)
        Then,
        .. math::
            \ell(x, y) = \begin{cases}
            L, & \text{if reduction} = \text{`none';}\\
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
            \end{cases}
    Args:
        reduction (str): Specifies the reduction to be applied to the output.
            Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.
    Inputs:
        - **input_x** (Tensor) - The input Tensor. The data type must be float16 or float32.
        - **input_y** (Tensor) - The label Tensor which has the same shape as `input_x`. The data type must be
          float16 or float32.
    Outputs:
        Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.
        Otherwise it is a scalar.
    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.kldiv_loss = P.KLDivLoss()
        >>>     def construct(self, x, y):
        >>>         result = self.kldiv_loss(x, y)
        >>>         return result
        >>>
        >>> net = Net()
        >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
        >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
        >>> result = net(input_x, input_y)
    """
    @prim_attr_register
    def __init__(self, reduction='mean'):
        # reduction is validated against the closed set of supported modes.
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
    def infer_shape(self, x_shape, y_shape):
        # x and y must share one shape; 'mean'/'sum' collapse the output to a scalar.
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        if self.reduction in ('mean', 'sum'):
            shape = []
        else:
            shape = x_shape
        return shape
    def infer_dtype(self, x_type, y_type):
        # Both inputs must share a float16/float32 dtype; output keeps x's dtype.
        args = {'x': x_type, 'y': y_type}
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same(args, valid_types, self.name)
        return x_type
class BinaryCrossEntropy(PrimitiveWithInfer):
    r"""
    Computes the binary cross entropy between the output and the target.
    Note:
        With input :math:`x`, label :math:`y` and result :math:`\ell(x, y)`, define
        .. math::
            L = \{l_1,\dots,l_N\}^\top, \quad
            l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
        so that
        .. math::
            \ell(x, y) = \begin{cases}
            L, & \text{if reduction} = \text{`none';}\\
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
            \end{cases}
    Args:
        reduction (str): The reduction applied to the output; one of 'none', 'mean'
            or 'sum'. Default: 'mean'.
    Inputs:
        - **input_x** (Tensor) - The input tensor, of type float16 or float32.
        - **input_y** (Tensor) - The label tensor, with the same shape and data type as `input_x`.
        - **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch
          element; same shape and data type as `input_x`. Default: None.
    Outputs:
        Tensor or Scalar. With `reduction` set to 'none' the output is a tensor shaped like
        `input_x`; otherwise it is a scalar.
    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.binary_cross_entropy = P.BinaryCrossEntropy()
        >>>     def construct(self, x, y, weight):
        >>>         return self.binary_cross_entropy(x, y, weight)
        >>>
        >>> net = Net()
        >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
        >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
        >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> result = net(input_x, input_y, weight)
        0.38240486
    """
    @prim_attr_register
    def __init__(self, reduction='mean'):
        # Only the closed set of reduction modes is accepted.
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
    def infer_shape(self, x_shape, y_shape, weight_shape):
        # x and y must agree in shape; a non-empty weight must match as well.
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        if weight_shape:
            validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
        # Reduced modes produce a scalar; 'none' keeps the input shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape
    def infer_dtype(self, x_type, y_type, weight_type):
        # All participating tensors must share a float16/float32 dtype.
        float_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({'x': x_type, 'y': y_type}, float_types, self.name)
        if weight_type:
            validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, float_types, self.name)
        return x_type
class ApplyAdaMax(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adamax scheme.
    The updating formulas are as follows,
    .. math::
        \begin{array}{ll} \\
            m_{t} = \beta_1 * m_{t-1} + (1 - \beta_1) * g \\
            v_{t} = \max(\beta_2 * v_{t-1}, \left| g \right|) \\
            var = var - \frac{l}{1 - \beta_1^t} * \frac{m_{t}}{v_{t} + \epsilon}
        \end{array}
    :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
    is the last momentent of :math:`m_{t}`, :math:`v` represents the 2nd moment vector, :math:`v_{t-1}`
    is the last momentent of :math:`v_{t}`, :math:`l` represents scaling factor `lr`,
    :math:`g` represents `grad`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`beta_1^t` represents `beta1_power`, :math:`var` represents the variable to be updated,
    :math:`\epsilon` represents `epsilon`.
    Inputs of `var`, `m`, `v` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Inputs:
        - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients
          with the same shape and type as `var`. With float32 or float16 data type.
        - **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, should be scalar.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - Learning rate, :math:`l` in the updating formula, should be scalar.
          With float32 or float16 data type.
        - **beta1** (Union[Number, Tensor]) - The exponential decay rate for the 1st moment estimations,
          should be scalar. With float32 or float16 data type.
        - **beta2** (Union[Number, Tensor]) - The exponential decay rate for the 2nd moment estimations,
          should be scalar. With float32 or float16 data type.
        - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - A tensor for gradient, has the same shape and type as `var`.
          With float32 or float16 data type.
    Outputs:
        Tuple of 3 Tensor, the updated parameters.
        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_ada_max = P.ApplyAdaMax()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad):
        >>>         out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> beta1_power =Tensor(0.9, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.99, mstype.float32)
        >>> epsilon = Tensor(1e-10, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad)
    """
    # var/m/v are read-write and share dtype group T; each hyper-parameter has
    # its own dtype group so scalars and tensors may mix freely.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
        sig.make_sig('lr', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """init ApplyAdaMax"""
    def _check_scalar_shape(self, rank_arg_name, elem_arg_name, shape):
        """Check that `shape` describes a scalar: rank <= 1, and a rank-1 shape has a single element.

        `rank_arg_name`/`elem_arg_name` are the argument names used in the error
        messages raised by the validator, kept identical to the historical ones.
        """
        validator.check_integer(rank_arg_name, len(shape), 1, Rel.LE, self.name)
        if len(shape) == 1:
            validator.check_integer(elem_arg_name, shape[0], 1, Rel.EQ, self.name)
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        # m, v and grad must all match var's shape.
        validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter must be a scalar (rank 0, or rank 1 with one element).
        self._check_scalar_shape("beta1 power's rank", "beta1_power_shape[0]", beta1_power_shape)
        self._check_scalar_shape("lr's rank", "lr_shape[0]", lr_shape)
        self._check_scalar_shape("beta1's rank", "beta1_shape[0]", beta1_shape)
        self._check_scalar_shape("beta2's rank", "beta2_shape[0]", beta2_shape)
        self._check_scalar_shape("epsilon's rank", "epsilon_shape[0]", epsilon_shape)
        return var_shape, m_shape, v_shape
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        # Tensor operands share one float16/float32 dtype; hyper-parameters may
        # be scalars or tensors of the same float types.
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, m_dtype, v_dtype
class ApplyAdadelta(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adadelta scheme.
    .. math::
        accum = \rho * accum + (1 - \rho) * grad^2
    .. math::
        \text{update} = \sqrt{\text{accum_update} + \epsilon} * \frac{grad}{\sqrt{accum + \epsilon}}
    .. math::
        \text{accum_update} = \rho * \text{accum_update} + (1 - \rho) * update^2
    .. math::
        var -= lr * update
    Inputs of `var`, `accum`, `accum_update` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Inputs:
        - **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
        - **accum** (Parameter) - Accumulation to be updated, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **accum_update** (Parameter) - Accum_update to be updated, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - Learning rate, should be scalar. With float32 or float16 data type.
        - **rho** (Union[Number, Tensor]) - Decay rate, should be scalar. With float32 or float16 data type.
        - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - Gradients, has the same shape and type as `var`. With float32 or float16 data type.
    Outputs:
        Tuple of 3 Tensor, the updated parameters.
        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.
        - **accum_update** (Tensor) - The same shape and data type as `accum_update`.
    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adadelta = P.ApplyAdadelta()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update")
        >>>     def construct(self, lr, rho, epsilon, grad):
        >>>         out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> rho = Tensor(0.0, mstype.float32)
        >>> epsilon = Tensor(1e-6, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, rho, epsilon, grad)
    """
    # var/accum/accum_update are read-write and share dtype group T; each
    # hyper-parameter has its own dtype group so scalars and tensors may mix.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('rho', dtype=sig.sig_dtype.T2),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """init ApplyAdadelta"""
    def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
                    epsilon_shape, grad_shape):
        # accum, accum_update and grad must all match var's shape.
        validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # lr, rho and epsilon must each be scalars (rank 0, or rank 1 with one element).
        lr_shp_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
        if lr_shp_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        rho_shp_len = len(rho_shape)
        validator.check_integer("rho's rank", rho_shp_len, 1, Rel.LE, self.name)
        if rho_shp_len == 1:
            validator.check_integer("rho_shape[0]", rho_shape[0], 1, Rel.EQ, self.name)
        epsilon_shp_len = len(epsilon_shape)
        # Fixed error-message typo: previously read "lepsilon's rank".
        validator.check_integer("epsilon's rank", epsilon_shp_len, 1, Rel.LE, self.name)
        if epsilon_shp_len == 1:
            validator.check_integer("epsilon_shape[0]", epsilon_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape, accum_update_shape
    def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
                    epsilon_dtype, grad_dtype):
        # Tensor operands share one float16/float32 dtype; hyper-parameters may
        # be scalars or tensors of the same float types.
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype, accum_update_dtype
class ApplyAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
        accum += grad * grad

    .. math::
        var -= lr * grad * \frac{1}{\sqrt{accum}}

    The inputs `var`, `accum` and `grad` follow the implicit type conversion rules
    so that the participating data types become consistent: when they differ, the
    lower priority data type is converted to the relatively highest priority one,
    and a RuntimeError exception is thrown if that conversion would have to change
    the data type of a Parameter.

    Args:
        update_slots (bool): If `True`, `accum` will be updated. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. With float32 or float16 data type.
        - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
          With float32 or float16 data type.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.apply_adagrad = P.ApplyAdagrad()
        >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>> def construct(self, lr, grad):
        >>> out = self.apply_adagrad(self.var, self.accum, lr, grad)
        >>> return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, grad)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, update_slots=True):
        """Init ApplyAdagrad: validate that `update_slots` is a bool."""
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        """`accum` and `grad` must match `var`'s shape; `lr` must be a scalar."""
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        lr_rank = len(lr_shape)
        validator.check_integer("lr's rank", lr_rank, 1, Rel.LE, self.name)
        if lr_rank == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        """All tensor inputs share one float16/float32 dtype; `lr` may be a
        Number or Tensor of the same float type."""
        permitted = [mstype.float16, mstype.float32]
        tensor_args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(tensor_args, permitted, self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, permitted, self.name)
        return var_dtype, accum_dtype
class ApplyAdagradV2(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagradv2 scheme.

    .. math::
        accum += grad * grad

    .. math::
        var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        epsilon (float): A small value added for numerical stability.
        update_slots (bool): If `True`, `accum` will be updated. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
          With float16 or float32 data type.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
          With float16 or float32 data type.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
        >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>> def construct(self, lr, grad):
        >>> out = self.apply_adagrad_v2(self.var, self.accum, lr, grad)
        >>> return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, grad)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, epsilon, update_slots=True):
        """Init ApplyAdagradV2: validate `epsilon` (float) and `update_slots` (bool)."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        """`accum` and `grad` must match `var`'s shape; `lr` must be a scalar."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
        lr_shp_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
        if lr_shp_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        """All tensor inputs share one float16/float32 dtype; `lr` may be a
        Number or Tensor of the same float type."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)
        return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
        accum += grad * grad

    .. math::
        var -= lr * grad * (1 / sqrt(accum))

    The inputs `var`, `accum` and `grad` follow the implicit type conversion rules
    so that the participating data types become consistent: when they differ, the
    lower priority data type is converted to the relatively highest priority one,
    and a RuntimeError exception is thrown if that conversion would have to change
    the data type of a Parameter.

    Args:
        lr (float): Learning rate.
        update_slots (bool): If `True`, `accum` will be updated. Default: True.
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
        - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
          Has the same data type as `var`.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
        >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
        >>> def construct(self, grad, indices):
        >>> out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
        >>> return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> indices = Tensor([0, 1, 2], mstype.int32)
        >>> result = net(grad, indices)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, update_slots=True, use_locking=False):
        """Init SparseApplyAdagrad: `lr` must be a finite float; the bool flags
        are type-checked."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
        validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        """`accum` matches `var`; `grad` matches `var` except along dim 0, where
        it must agree with the 1-D `indices` vector."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        var_rank = len(var_shape)
        validator.check('len of var shape', var_rank, 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if var_rank > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        """Float tensors share one float16/float32 dtype; `indices` must be int32."""
        float_args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(float_args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class SparseApplyAdagradV2(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
        accum += grad * grad

    .. math::
        var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): Learning rate.
        epsilon (float): A small value added for numerical stability.
        use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
            Default: False.
        update_slots (bool): If `True`, `accum` will be updated. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
        - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
          Has the same data type as `var`.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
        >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
        >>>
        >>> def construct(self, grad, indices):
        >>> out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
        >>> return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> indices = Tensor([0, 1, 2], mstype.int32)
        >>> result = net(grad, indices)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
        """Init SparseApplyAdagradV2: validate and store the attributes."""
        self.lr = validator.check_value_type("lr", lr, [float], self.name)
        self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
        # Bug fix: the two assignments below were previously swapped, so
        # self.use_locking held the update_slots value and vice versa.
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.update_slots = validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        """`accum` matches `var`; `grad` matches `var` except along dim 0, where
        it must agree with the 1-D `indices` vector."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        """Float tensors share one float16/float32 dtype; `indices` must be int32."""
        args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class ApplyProximalAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the proximal adagrad algorithm.

    .. math::
        accum += grad * grad

    .. math::
        \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}

    .. math::
        var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)

    The inputs `var`, `accum` and `grad` follow the implicit type conversion rules
    so that the participating data types become consistent: when they differ, the
    lower priority data type is converted to the relatively highest priority one,
    and a RuntimeError exception is thrown if that conversion would have to change
    the data type of a Parameter.

    Args:
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type should be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. Must has the same shape and dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. The data type should be
          float16 or float32.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar. The data type should be
          float16 or float32.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar. The data type should be
          float16 or float32.
        - **grad** (Tensor) - Gradient with the same shape and dtype as `var`.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
        >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>> self.lr = 0.01
        >>> self.l1 = 0.0
        >>> self.l2 = 0.0
        >>> def construct(self, grad):
        >>> out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
        >>> return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(grad)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Init ApplyProximalAdagrad: register IO names and validate `use_locking`."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
        """`accum` and `grad` must match `var`'s shape; `lr`, `l1` and `l2` must
        each be scalars (rank 0, or rank 1 with one element)."""
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # One loop over the three scalar hyper-parameters instead of three
        # copy-pasted rank checks.
        for scalar_name, scalar_shape in (('lr', lr_shape), ('l1', l1_shape), ('l2', l2_shape)):
            scalar_rank = len(scalar_shape)
            validator.check_integer(scalar_name + "'s rank", scalar_rank, 1, Rel.LE, self.name)
            if scalar_rank == 1:
                validator.check_integer(scalar_name + "_shape[0]", scalar_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
        """All tensor inputs share one float16/float32 dtype; each scalar may be
        a Number or Tensor of the same float type."""
        permitted = [mstype.float16, mstype.float32]
        tensor_args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(tensor_args, permitted, self.name)
        for scalar_name, scalar_dtype in (("lr", lr_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({scalar_name: scalar_dtype}, permitted, self.name)
        return var_dtype, accum_dtype
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
    r"""
    Update relevant entries according to the proximal adagrad algorithm. Compared with ApplyProximalAdagrad,
    an additional index tensor is input.

    .. math::
        accum += grad * grad

    .. math::
        \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}

    .. math::
        var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
        >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>> self.lr = 0.01
        >>> self.l1 = 0.0
        >>> self.l2 = 0.0
        >>> def construct(self, grad, indices):
        >>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
        >>> self.l2, grad, indices)
        >>> return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones((3,), np.int32))
        >>> output = net(grad, indices)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T4),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Init SparseApplyProximalAdagrad: register IO names and validate `use_locking`."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        """Only `indices` is constrained here: it must be a 1-D vector."""
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)

    def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        """Float tensors share one float16/float32 dtype; scalars must be the same
        float type; `indices` may be any (u)int16/32/64 type."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
class ApplyAddSign(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the AddSign algorithm.

    .. math::
        \begin{array}{ll} \\
            m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
            \text{update} = (\alpha + \text{sign_decay} * sign(g) * sign(m)) * g \\
            var = var - lr_{t} * \text{update}
        \end{array}

    :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
    is the last momentent of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.

    The inputs `var`, `accum` and `grad` follow the implicit type conversion rules
    so that the participating data types become consistent: when they differ, the
    lower priority data type is converted to the relatively highest priority one,
    and a RuntimeError exception is thrown if that conversion would have to change
    the data type of a Parameter.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
          With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>> def __init__(self):
        >>> super(Net, self).__init__()
        >>> self.apply_add_sign = P.ApplyAddSign()
        >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        >>> self.lr = 0.001
        >>> self.alpha = 1.0
        >>> self.sign_decay = 0.99
        >>> self.beta = 0.9
        >>> def construct(self, grad):
        >>> out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
        >>> return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(grad)
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Init ApplyAddSign; the primitive takes no attributes to validate."""

    def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
        """`m` and `grad` must match `var`'s shape; each scalar hyper-parameter
        must have rank 0, or rank 1 with a single element."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        # One loop over the four scalar hyper-parameters instead of four
        # copy-pasted rank checks.
        scalar_shapes = (("lr", lr_shape), ("alpha", alpha_shape),
                         ("sign_decay", sign_decay_shape), ("beta", beta_shape))
        for scalar_name, scalar_shape in scalar_shapes:
            scalar_rank = len(scalar_shape)
            validator.check_integer(scalar_name + "'s rank", scalar_rank, 1, Rel.LE, self.name)
            if scalar_rank == 1:
                validator.check_integer(scalar_name + "_shape[0]", scalar_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape

    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All tensor inputs share one float16/float32 dtype; each scalar may be
        a Number or Tensor of the same float type."""
        permitted = [mstype.float16, mstype.float32]
        tensor_args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(tensor_args, permitted, self.name)
        scalar_dtypes = (("lr", lr_dtype), ("alpha", alpha_dtype),
                         ("sign_decay", sign_decay_dtype), ("beta", beta_dtype))
        for scalar_name, scalar_dtype in scalar_dtypes:
            validator.check_scalar_or_tensor_type_same({scalar_name: scalar_dtype}, permitted, self.name)
        return var_dtype, m_dtype
class ApplyPowerSign(PrimitiveWithInfer):
r"""
Update relevant entries according to the AddSign algorithm.
.. math::
\begin{array}{ll} \\
m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
\text{update} = \exp(\text{logbase} * \text{sign_decay} * sign(g) * sign(m)) * g \\
var = var - lr_{t} * \text{update}
\end{array}
:math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last momentent of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.
All of inputs comply with the implicit type conversion rules to make the data types consistent.
If `lr`, `logbase`, `sign_decay` or `beta` is a number, the number is automatically converted to Tensor,
and the data type is consistent with the Tensor data type involved in the operation.
If inputs are tensors and have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
If data type of `var` is float16, all inputs must have the same data type as `var`.
- **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
With float32 or float16 data type.
- **logbase** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_power_sign = P.ApplyPowerSign()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.lr = 0.001
>>> self.logbase = np.e
>>> self.sign_decay = 0.99
>>> self.beta = 0.9
>>> def construct(self, grad):
>>> out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
self.sign_decay, self.beta, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('logbase', dtype=sig.sig_dtype.T),
sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
sig.make_sig('beta', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
    @prim_attr_register
    def __init__(self):
        """Init ApplyPowerSign."""
def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
lr_shape_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
if lr_shape_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
logbase_shape_len = len(logbase_shape)
validator.check_integer("logbase's rank", logbase_shape_len, 1, Rel.LE, self.name)
if logbase_shape_len == 1:
validator.check_integer("logbase_shape[0]", logbase_shape[0], 1, Rel.EQ, self.name)
sign_decay_shape_len = len(sign_decay_shape)
validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
if sign_decay_shape_len == 1:
validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
beta_shape_len = len(beta_shape)
validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
if beta_shape_len == 1:
validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape
def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"logbase": logbase_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
return var_dtype, m_dtype
class ApplyGradientDescent(PrimitiveWithInfer):
    r"""
    Update `var` by subtracting a scaled delta.

    .. math::
        var = var - \alpha * \delta

    Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to the
    relatively highest priority data type. A RuntimeError exception will be thrown when the data type
    conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
        - **delta** (Tensor) - A tensor for the change, has the same type as `var`.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_gradient_descent = P.ApplyGradientDescent()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.alpha = 0.001
        >>>     def construct(self, delta):
        >>>         return self.apply_gradient_descent(self.var, self.alpha, delta)
        >>> net = Net()
        >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(delta)
    """
    # `var` is written in place; `alpha` may carry its own dtype group (T1).
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Init ApplyGradientDescent."""

    def infer_shape(self, var_shape, alpha_shape, delta_shape):
        """Output shape equals `var`; `alpha` must be a scalar (rank 0 or shape (1,))."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        alpha_rank = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_rank, 1, Rel.LE, self.name)
        if alpha_rank == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        return var_shape

    def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
        """`var`/`delta` must share a float16/float32 dtype; `alpha` must match it."""
        permitted = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'delta': delta_dtype}, permitted, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, permitted, self.name)
        return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the FOBOS (Forward Backward Splitting) algorithm.

    .. math::
        \text{prox_v} = var - \alpha * \delta

    .. math::
        var = \frac{sign(\text{prox_v})}{1 + \alpha * l2} * \max(\left| \text{prox_v} \right| - alpha * l1, 0)

    Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar.
          With float32 or float16 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar.
          With float32 or float16 data type.
        - **delta** (Tensor) - A tensor for the change, has the same type as `var`.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.alpha = 0.001
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>     def construct(self, delta):
        >>>         out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
        >>>         return out
        >>> net = Net()
        >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(delta)
    """
    # `var` is written in place; the scalar hyper-parameters each carry their own dtype group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        # Fixed: docstring previously said "init ApplyGradientDescent" (copy-paste from sibling class).
        """Init ApplyProximalGradientDescent."""

    def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
        """Output shape equals `var`; `alpha`, `l1` and `l2` must be scalars (rank 0 or shape (1,))."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        for arg_name, arg_shape in (('alpha', alpha_shape), ('l1', l1_shape), ('l2', l2_shape)):
            rank = len(arg_shape)
            validator.check_integer(arg_name + "'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(arg_name + "_shape[0]", arg_shape[0], 1, Rel.EQ, self.name)
        return var_shape

    def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
        """`var`/`delta` must share a float16/float32 dtype; the scalar inputs must match it."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'delta': delta_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        for arg_name, arg_dtype in (("alpha", alpha_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        return var_dtype
class LARSUpdate(PrimitiveWithInfer):
    """
    Conduct lars (layer-wise adaptive rate scaling) update on the square sum of gradient.

    Args:
        epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.
        hyperpara (float): Trust coefficient for calculating the local learning rate. Default: 0.001.
        use_clip (bool): Whether to use clip operation for calculating the local learning rate. Default: False.

    Inputs:
        - **weight** (Tensor) - The weight to be updated.
        - **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight.
        - **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight.
        - **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient.
        - **weight_decay** (Union[Number, Tensor]) - Weight decay. It should be a scalar tensor or number.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. It should be a scalar tensor or number.

    Outputs:
        Tensor, represents the new gradient.

    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> from mindspore.ops import functional as F
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.lars = P.LARSUpdate()
        >>>         self.reduce = P.ReduceSum()
        >>>     def construct(self, weight, gradient):
        >>>         w_square_sum = self.reduce(F.square(weight))
        >>>         grad_square_sum = self.reduce(F.square(gradient))
        >>>         return self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
        >>> weight = np.random.random(size=(2, 3)).astype(np.float32)
        >>> gradient = np.random.random(size=(2, 3)).astype(np.float32)
        >>> net = Net()
        >>> ms_output = net(Tensor(weight), Tensor(gradient))
    """

    @prim_attr_register
    def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
        """Init LARSUpdate; validate the hyper-parameter types."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("hyperpara", hyperpara, [float], self.name)
        validator.check_value_type("use_clip", use_clip, [bool], self.name)

    def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
                    learning_rate_shape):
        """Output shape equals `weight`; decay and learning rate must be scalars (rank 0 or shape (1,))."""
        validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
        validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
                        self.name)
        scalar_checks = (("weight decay", "weight_decay_shape[0]", weight_decay_shape),
                         ("learning rate", "learning_rate_shape[0]", learning_rate_shape))
        for label, elem_label, shape in scalar_checks:
            rank = len(shape)
            validator.check_integer(label + "'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(elem_label, shape[0], 1, Rel.EQ, self.name)
        return weight_shape

    def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
                    weight_decay_dtype, learning_rate_dtype):
        """Tensor inputs must share a float/int dtype; scalar inputs must be floating point."""
        tensor_args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype,
                       "norm weight dtype": norm_weight_dtype, "norm gradient dtype": norm_gradient_dtype}
        validator.check_tensor_type_same(tensor_args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32],
                                         self.name)
        float_types = [mstype.float16, mstype.float32, mstype.float64]
        validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype}, float_types, self.name)
        validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype}, float_types, self.name)
        return weight_dtype
class ApplyFtrl(PrimitiveWithInfer):
    """
    Update relevant entries according to the FTRL scheme.

    Args:
        use_locking (bool): Use locks for updating operation if True . Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type should be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - Gradient. The data type should be float16 or float32.
        - **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: 0.001.
          It should be a float number or a scalar tensor with float16 or float32 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be greater than or equal to zero.
          Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, must be greater than or equal to zero.
          Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
        - **lr_power** (Union[Number, Tensor]) - Learning rate power controls how the learning rate decreases
          during training, must be less than or equal to zero. Use fixed learning rate if lr_power is zero.
          Default: -0.5. It should be a float number or a scalar tensor with float16 or float32 data type.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter, Tensor
        >>> from mindspore.ops import operations as P
        >>> class ApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(ApplyFtrlNet, self).__init__()
        >>>         self.apply_ftrl = P.ApplyFtrl()
        >>>         self.lr = 0.001
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>         self.lr_power = -0.5
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>     def construct(self, grad):
        >>>         return self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1,
        >>>                                self.l2, self.lr_power)
        >>> net = ApplyFtrlNet()
        >>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)
        >>> result = net(input_x)
        [[0.67455846   0.14630564   0.160499  ]
         [0.16329421   0.00415689   0.05202988]
         [0.18672481   0.17418946   0.36420345]]
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Init ApplyFtrl."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # The TBE (Ascend) kernel exposes three outputs (var, accum, linear); other backends expose one.
        self.is_tbe = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
                    lr_power_shape):
        """`accum` and `linear` must match `var`'s shape; output shape equals `var`."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        return (var_shape, var_shape, var_shape) if self.is_tbe else var_shape

    def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
        """Tensors must share a float16/float32 dtype; scalar hyper-parameters must match it."""
        permitted = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_type, 'accum': accum_type,
                                          'linear': linear_type, 'grad': grad_type}, permitted, self.name)
        for scalar_name, scalar_type in (("lr", lr_type), ("l1", l1_type),
                                         ("l2", l2_type), ("lr_power", lr_power_type)):
            validator.check_scalar_or_tensor_type_same({scalar_name: scalar_type}, permitted, self.name)
        return (var_type, var_type, var_type) if self.is_tbe else var_type
class SparseApplyFtrl(PrimitiveWithCheck):
    """
    Update relevant entries according to the FTRL-proximal scheme.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to the
    relatively highest priority data type. A RuntimeError exception will be thrown when the data type
    conversion of Parameter is required.

    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): Use locks for updating operation if True . Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.

    Outputs:
        - **var** (Tensor) - Tensor, has the same shape and type as `var`.
        - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
        - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter, Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlNet, self).__init__()
        >>>         self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>     def construct(self, grad, indices):
        >>>         return self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
        >>> net = SparseApplyFtrlNet()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones([3]), mindspore.int32)
        >>> output = net(grad, indices)
    """
    # `var`, `accum` and `linear` are updated in place; `indices` keeps its own dtype group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Init SparseApplyFtrl; validate hyper-parameter types and value ranges."""
        for attr_name, attr_value in (("lr", lr), ("l1", l1), ("l2", l2), ("lr_power", lr_power)):
            validator.check_value_type(attr_name, attr_value, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """`accum`/`linear` match `var`; `grad` rows are selected by a 1-D `indices` vector."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)

    def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Tensors must share a float16/float32 dtype; `indices` must be int32."""
        tensor_args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                       "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(tensor_args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
    """
    Update relevant entries according to the FTRL-proximal scheme.
    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.
    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        l2_shrinkage (float): L2 shrinkage regularization.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
            Default: False.
    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
    Outputs:
        Tuple of 3 Tensor, the updated parameters.
        - **var** (Tensor) - Tensor, has the same shape and type as `var`.
        - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
        - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlV2Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlV2Net, self).__init__()
        >>>         self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
        >>>                                                         l2_shrinkage=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
        >>>         return out
        >>>
        >>> net = SparseApplyFtrlV2Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones([3]), mindspore.int32)
        >>> output = net(grad, indices)
    """
    # `var`, `accum` and `linear` are updated in place; `indices` keeps its own dtype group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
        """Init SparseApplyFtrlV2; validate hyper-parameter types and value ranges."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """`accum`/`linear` must match `var`; `grad` rows are selected by a 1-D `indices` vector."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape, linear_shape
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Tensors must share a float16/float32 dtype; `indices` must be int32."""
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
    """
    `output0` is the dot product result of input0 and input1.
    `output1` is the dot product result of input0 and input1, then apply the reducesum operation on it.
    Args:
        axis (Union[int, tuple[int], list[int]]): The dimensions to reduce.
            Default:(), reduce all dimensions. Only constant value is allowed.
        keep_dims (bool):
            - If true, keep these reduced dimensions and the length as 1.
            - If false, don't keep these dimensions. Default:False.
    Inputs:
        - **input_0** (Tensor) - The input Tensor.
        - **input_1** (Tensor) - The input Tensor.
        - **input_2** (Tensor) - The input Tensor.
    Outputs:
        - **output_0** (Tensor) - The same shape as `input0`.
        - **output_1** (Tensor)
            - If axis is (), and keep_dims is false, the output is a 0-D array representing
              the sum of all elements in the input array.
            - If axis is int, set as 2, and keep_dims is false,
              the shape of output is :math:`(x_1,x_3,...,x_R)`.
            - If axis is tuple(int), set as (2,3), and keep_dims is false,
              the shape of output is :math:`(x_1,x_4,...x_R)`.
    Examples:
        >>> confusion_mul_grad = P.ConfusionMulGrad()
        >>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
        >>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)
        >>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)
        >>> output_0, output_1 = confusion_mul_grad(input_0, input_1, input_2)
        output_0:
            [[ 3.   1.   0.]
             [-6.   2.  -2.]]
        output_1:
            -3.0
    """
    @prim_attr_register
    def __init__(self, axis=(), keep_dims=False):
        """Init ConfusionMulGrad; validate `axis` and `keep_dims` attribute types."""
        self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
        self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
    def infer_shape(self, input0_shape, input1_shape, input2_shape):
        """Output 0 keeps input0's shape; output 1 is input1's shape reduced over `axis`."""
        outshape0 = input0_shape
        # Reduction semantics (axis handling, keep_dims) are delegated to the shared helper.
        outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
        return outshape0, outshape1
    def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
        """All three inputs must be tensors; outputs keep input0's and input1's dtypes."""
        validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
        validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
        validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
        return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
    """
    During training, randomly zeroes some of the elements of the input tensor with probability.

    Args:
        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
            means dropping out 10% of input units. Default: 0.5.

    Inputs:
        - **input** (Tensor) - The input tensor, with float16 or float32 data type and rank of at least 1.

    Outputs:
        - **output** (Tensor) - with the same shape and data type as the input.
        - **mask** (Tensor) - with the same shape and data type as the input.

    Examples:
        >>> dropout = P.Dropout(keep_prob=0.5)
        >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
        >>> output, mask = dropout(x)
    """
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        # keep_prob must lie in (0, 1]: keeping with probability 0 would drop everything.
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, x_shape):
        """Both outputs (result and mask) share the input's shape; input must have rank >= 1."""
        validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
        mask_shape = x_shape
        return x_shape, mask_shape
    def infer_dtype(self, x_dtype):
        """Input must be a float16/float32 tensor; both outputs share its dtype."""
        valid_types = (mstype.float16, mstype.float32)
        validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x_dtype": x_dtype}, valid_types, self.name)
        return x_dtype, x_dtype
class DropoutGrad(PrimitiveWithInfer):
    """
    The gradient of Dropout. During training, randomly zeroes some of the elements
    of the input tensor with probability.

    Args:
        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
            means dropping out 10% of input units. Default: 0.5.

    Inputs:
        - **dy** (Tensor) - The gradient flowing back, with float16 or float32 data type.
        - **mask** (Tensor) - The mask produced by the forward Dropout.

    Outputs:
        Tensor, the gradient of the Dropout input, with the same shape and data type as `dy`.

    Examples:
        >>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
        >>> output = dropout_grad(dy, mask)
    """
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        # keep_prob must lie in (0, 1], mirroring the forward Dropout's constraint.
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, dy_shape, mask_shape):
        """Output gradient has the same shape as `dy`."""
        return dy_shape
    def infer_dtype(self, dy_dtype, mask_dtype):
        """`dy` must be a float16/float32 tensor; `mask` must be a tensor; output keeps `dy`'s dtype."""
        valid_types = (mstype.float16, mstype.float32)
        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
        return dy_dtype
class CTCLoss(PrimitiveWithInfer):
"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Args:
preprocess_collapse_repeated (bool): If true, repeated labels will be collapsed prior to the CTC calculation.
Default: False.
ctc_merge_repeated (bool): If false, during CTC calculation, repeated non-blank labels will not be merged
and these labels will be interpreted as individual ones. This is a simplfied
version of CTC. Default: True.
ignore_longer_outputs_than_inputs (bool): If True, sequences with longer outputs than inputs will be ignored.
Default: False.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float16, float32 or float64.
- **labels_indices** (Tensor) - The indices of labels. `labels_indices[i, :] == [b, t]` means `labels_values[i]`
stores the id for `(batch b, time t)`. The type must be int64 and rank must be 2.
- **labels_values** (Tensor) - A `1-D` input tensor. The values are associated with the given batch and time.
The type must be int32. `labels_values[i]` must in the range of `[0, num_classes)`.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`. The tensor has
the same type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
>>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLoss()
>>> output = ctc_loss(inputs, labels_indices, labels_values, sequence_length)
"""
    @prim_attr_register
    def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
                 ignore_longer_outputs_than_inputs=False):
        """Init CTCLoss; validate and record the three boolean behaviour flags."""
        self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
                                outputs=["loss", "gradient"])
        validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
        self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
        self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
                                                              [bool], self.name)
        validator.check_value_type("ignore_longer_outputs_than_inputs",
                                   ignore_longer_outputs_than_inputs, [bool], self.name)
        self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
    def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
        """Validate input ranks/sizes; return shapes for (loss, gradient).

        `inputs` is (max_time, batch_size, num_classes); loss is (batch_size,)
        and the gradient has the same shape as `inputs`.
        """
        validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
        validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
        validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
        validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
        # Each (batch, time) index pair must pair with exactly one label value.
        validator.check('labels_indices size', labels_indices[0], 'labels_values size',
                        labels_values[0], Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
                        sequence_length[0], Rel.EQ, self.name)
        batch_size = []
        batch_size.append(inputs[1])
        return batch_size, inputs
    def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
        """Check dtypes (float inputs, int64 indices, int32 values/lengths).

        Both outputs (loss and gradient) share the dtype of `inputs`.
        """
        valid_dtype = [mstype.float16, mstype.float32, mstype.double]
        validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
        validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
        validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
        return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
    """
    Performs greedy decoding on the logits given in inputs.

    Args:
        merge_repeated (bool): If True, merge repeated classes in output. Default: True.

    Inputs:
        - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
          :math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
          indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
          Data type must be float32 or float64.
        - **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.

    Outputs:
        - **decoded_indices** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs, 2)`.
          Data type is int64.
        - **decoded_values** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs)`,
          it stores the decoded classes. Data type is int64.
        - **decoded_shape** (Tensor) - The value of tensor is :math:`[batch_size, max_decoded_legth]`.
          Data type is int64.
        - **log_probability** (Tensor) - A tensor with shape of :math:`(batch_size, 1)`,
          containing sequence log-probability, has the same type as `inputs`.

    Examples:
        >>> class CTCGreedyDecoderNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(CTCGreedyDecoderNet, self).__init__()
        >>>         self.ctc_greedy_decoder = P.CTCGreedyDecoder()
        >>>         self.assert_op = P.Assert(300)
        >>>
        >>>     def construct(self, inputs, sequence_length):
        >>>         out = self.ctc_greedy_decoder(inputs,sequence_length)
        >>>         self.assert_op(True, (out[0], out[1], out[2], out[3]))
        >>>         return out[2]
        >>>
        >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
        >>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
        >>> net = CTCGreedyDecoderNet()
        >>> output = net(inputs, sequence_length)
    """

    @prim_attr_register
    def __init__(self, merge_repeated=True):
        """Initialize CTCGreedyDecoder: validate the `merge_repeated` flag."""
        self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)

    def infer_shape(self, inputs_shape, sequence_length_shape):
        """Check ranks/batch agreement; return the four output shapes.

        The number of decoded outputs is data-dependent, so -1 marks the
        unknown leading dimension of the index/value outputs.
        """
        validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
                        sequence_length_shape[0], Rel.EQ, self.name)
        total_decoded_outputs = -1
        decoded_indices_shape = [total_decoded_outputs, 2]
        decoded_values = [total_decoded_outputs]
        decoded_shape = [2]
        log_probability_shape = [inputs_shape[1], 1]
        return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape

    def infer_dtype(self, inputs_dtype, sequence_length_dtype):
        """Check dtypes; decoded outputs are int64, log-probability keeps the input dtype."""
        validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
        decoded_type = mstype.tensor_type(mstype.int64)
        return decoded_type, decoded_type, decoded_type, inputs_dtype
class BasicLSTMCell(PrimitiveWithInfer):
    r"""
    Applies the long short-term memory (LSTM) to the input.

    .. math::
        \begin{array}{ll} \\
            i_t = \sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\
            f_t = \sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\
            \tilde{c}_t = \tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\
            o_t = \sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\
            c_t = f_t * c_{(t-1)} + i_t * \tilde{c}_t \\
            h_t = o_t * \tanh(c_t) \\
        \end{array}

    Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
    are learnable weights between the output and the input in the formula. For instance,
    :math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.
    Details can be found in paper `LONG SHORT-TERM MEMORY
    <https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and
    `Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling
    <https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.

    Args:
        keep_prob (float): If not 1.0, append `Dropout` layer on the outputs of each
            LSTM layer except the last layer. Default 1.0. The range of dropout is [0.0, 1.0].
        forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.
        state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is
            a tensor and it needs to be split first. Default: True.
        activation (str): Activation. Default: "tanh". Only "tanh" is currently supported.

    Inputs:
        - **x** (Tensor) - Current words. Tensor of shape (`batch_size`, `input_size`).
          The data type must be float16 or float32.
        - **h** (Tensor) - Hidden state last moment. Tensor of shape (`batch_size`, `hidden_size`).
          The data type must be float16 or float32.
        - **c** (Tensor) - Cell state last moment. Tensor of shape (`batch_size`, `hidden_size`).
          The data type must be float16 or float32.
        - **w** (Tensor) - Weight. Tensor of shape (`input_size + hidden_size`, `4 x hidden_size`).
          The data type must be float16 or float32.
        - **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
          The data type must be the same as `c`.

    Outputs:
        - **ct** (Tensor) - Forward :math:`c_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ht** (Tensor) - Cell output. Tensor of shape (`batch_size`, `hidden_size`). With data type of float16.
        - **it** (Tensor) - Forward :math:`i_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **jt** (Tensor) - Forward :math:`j_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ft** (Tensor) - Forward :math:`f_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ot** (Tensor) - Forward :math:`o_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **tanhct** (Tensor) - Forward :math:`tanh c_t` cache at moment `t`.
          Tensor of shape (`batch_size`, `hidden_size`), has the same type with input `c`.

    Examples:
        >>> x = Tensor(np.random.rand(1, 32).astype(np.float16))
        >>> h = Tensor(np.random.rand(1, 64).astype(np.float16))
        >>> c = Tensor(np.random.rand(1, 64).astype(np.float16))
        >>> w = Tensor(np.random.rand(96, 256).astype(np.float16))
        >>> b = Tensor(np.random.rand(256, ).astype(np.float16))
        >>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
        >>> lstm(x, h, c, w, b)
    """

    @prim_attr_register
    def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
        """Initialize BasicLSTMCell: validate attrs; only 'tanh' activation is accepted."""
        self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
        self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
        self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
        self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
        self.add_prim_attr("io_format", "ND")

    def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
        """Cross-check x/h/c/w/b shapes; all seven outputs have the shape of `c`."""
        validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
        validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
        validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
        validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
        validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
        # Weight/bias pack the four gates: last dim must be 4 * hidden_size.
        validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        ct_shape = c_shape
        ht_shape = c_shape
        it_shape = c_shape
        jt_shape = c_shape
        ft_shape = c_shape
        ot_shape = c_shape
        tanhct_shape = c_shape
        return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)

    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
        """Check float16/float32 dtypes; `ht` is always float16, the rest follow `c`."""
        validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
        args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
    r"""
    Whether the targets are in the top `k` predictions.

    Args:
        k (int): Specify the number of top elements to be used for computing precision.

    Inputs:
        - **x1** (Tensor) - A 2D Tensor defines the predictions of a batch of samples with float16 or float32 data type.
        - **x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type.

    Outputs:
        Tensor has 1 dimension of type bool and the same shape with `x2`. For labeling sample `i` in `x2`,
        if the label in the first `k` predictions for sample `i` is in `x1`, then the value is True, otherwise False.

    Examples:
        >>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
        >>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
        >>> in_top_k = P.InTopK(3)
        >>> result = in_top_k(x1, x2)
        [True  False]
    """

    @prim_attr_register
    def __init__(self, k):
        """Init InTopK"""
        self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
        validator.check_value_type("k", k, [int], self.name)

    def infer_dtype(self, x1_dtype, x2_dtype):
        """Check dtypes (float predictions, int32 labels); output is a bool tensor."""
        validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
        validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
        return mstype.tensor_type(mstype.bool_)

    def infer_shape(self, x1_shape, x2_shape):
        """Check 2-D predictions vs 1-D labels with matching batch; output shape is x2's."""
        validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
        validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
        validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
        return x2_shape
class LRN(PrimitiveWithInfer):
    r"""
    Local Response Normalization

    Args:
        depth_radius (int): Half-width of the 1-D normalization window. Shape of 0-D.
        bias (float): An offset (usually positive to avoid dividing by 0).
        alpha (float): A scale factor, usually positive.
        beta (float): An exponent.
        norm_region (str): Specify normalization region. Options: "ACROSS_CHANNELS". Default: "ACROSS_CHANNELS".

    Inputs:
        - **x** (Tensor) - A 4D Tensor with float16 or float32 data type.

    Outputs:
        Tensor, With shape and data type same as the input tensor.

    Examples:
        >>> x = Tensor(np.random.rand(1, 10, 4, 4), mindspore.float32)
        >>> lrn = P.LRN()
        >>> lrn(x)
    """

    @prim_attr_register
    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        """Init LRN"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type("depth_radius", depth_radius, [int], self.name)
        validator.check_value_type("bias", bias, [float], self.name)
        validator.check_value_type("alpha", alpha, [float], self.name)
        validator.check_value_type("beta", beta, [float], self.name)
        validator.check_value_type("norm_region", norm_region, [str], self.name)
        # Only cross-channel normalization is supported; radius must be >= 0.
        validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
        validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)

    def infer_dtype(self, x_dtype):
        """Require float16/float32; dtype passes through."""
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        """Require a 4-D input; shape passes through."""
        validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
        return x_shape
class CTCLossV2(PrimitiveWithInfer):
    r"""
    Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.

    Note:
        - Cudnn uses label value of 0 for the `blank`.

    Inputs:
        - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
          :math:`(max_time, batch_size, num_class)`. `num_class` should be `num_labels + 1` classes, `num_labels`
          indicates the number of actual labels. Blank labels are reserved.
        - **labels** (Tensor) - The labels Tensor should be a `1-D` tensor whose shape is
          :math:`(\sigma{label_lengths})`
          or `2-D` tensor whose shape is
          :math:`(max_time, max{label_lengths})`
          The type must be int32.
        - **input_lengths** (Tensor) - A `1-D` input tensor whose shape is
          :math:`(batch_size,)`. The values should be batch. The type must be int32.
        - **label_lengths** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.

    Outputs:
        - **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`, has the same
          type with `inputs`.
        - **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.

    Examples:
        >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
        >>> labels = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
        >>> input_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> label_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> ctc_loss = P.CTCLossV2()
        >>> output = ctc_loss(inputs, labels, input_lengths, label_lengths)
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
        """Check dtypes (float32 inputs, int32 labels/lengths); outputs are float32."""
        validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
        validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
        return mstype.float32, mstype.float32

    def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
        """Check ranks and batch-size agreement; return (loss, gradient) shapes."""
        validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
        validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
        validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
        # input_shape[1] is batch_size; both length vectors must match it.
        validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
        return (input_shape[1],), input_shape
| 48.807652 | 120 | 0.628525 |
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
    """Validate an int-or-tuple primitive attribute and normalize its form.

    Accepts a positive int, a 2-tuple, or (only when `allow_four`) a 4-tuple of
    positive ints. Returns a 4-tuple ``(1, 1, h, w)`` when `ret_four`, otherwise
    a 2-tuple ``(h, w)``. Raises ValueError for any other form or for
    non-positive / non-int entries.
    """
    def _raise_message():
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
                         f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")

    def _get_return_value():
        # Normalize each accepted input form to the requested tuple length.
        if isinstance(arg_value, int):
            ret = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
        elif len(arg_value) == 2:
            ret = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
        elif len(arg_value) == 4:
            if not allow_four:
                _raise_message()
            ret = arg_value if ret_four else (arg_value[2], arg_value[3])
        else:
            _raise_message()
        return ret

    validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    ret_value = _get_return_value()
    # Every normalized entry must be a positive int.
    for item in ret_value:
        if isinstance(item, int) and item > 0:
            continue
        _raise_message()
    return ret_value
class Flatten(PrimitiveWithInfer):
    """Flattens a tensor to 2-D: dim 0 is kept, all remaining dims are collapsed."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x):
        """Return (dim0, product of trailing dims); input must have rank >= 1."""
        validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
        # The initializer 1 covers the rank-1 case (empty tail -> 1).
        trailing = reduce(operator.mul, input_x[1:], 1)
        return input_x[0], trailing

    def infer_dtype(self, input_x):
        """Dtype passes through; input must be a tensor."""
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class Softmax(PrimitiveWithInfer):
    """Softmax along one or more axes; shape and dtype pass through unchanged."""

    @prim_attr_register
    def __init__(self, axis=-1):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type("axis", axis, [int, tuple], self.name)
        # Normalize a scalar axis to a 1-tuple so self.axis is always iterable.
        if isinstance(axis, int):
            self.add_prim_attr('axis', (axis,))
        for item in self.axis:
            validator.check_value_type("item of axis", item, [int], self.name)

    def infer_shape(self, logits):
        """Check every axis lies in [-rank, rank); shape passes through."""
        validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
        rank = len(logits)
        for axis_v in self.axis:
            validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
        return logits

    def infer_dtype(self, logits):
        """Require a float tensor; dtype passes through."""
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class LogSoftmax(PrimitiveWithInfer):
    """Log-Softmax along `axis` (default -1); shape and dtype pass through."""

    @prim_attr_register
    def __init__(self, axis=-1):
        """Validate that `axis` is an int."""
        validator.check_value_type("axis", axis, [int], self.name)

    def infer_shape(self, logits):
        """Check that axis indexes a real dimension: [-rank, rank)."""
        dims = len(logits)
        validator.check_int_range('axis', self.axis, -dims, dims, Rel.INC_LEFT, self.name)
        return logits

    def infer_dtype(self, logits):
        """Require a float tensor; dtype passes through."""
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class Softplus(PrimitiveWithInfer):
    """Softplus activation; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Only float tensors are supported.
        args = {'input_x': input_x}
        validator.check_tensor_type_same(args, mstype.float_type, self.name)
        return input_x
class Softsign(PrimitiveWithInfer):
    """Softsign activation; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Restricted to half/single precision floats.
        args = {'input_x': input_x}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        return input_x
class ReLU(PrimitiveWithInfer):
    """Rectified linear unit; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Any numeric tensor type is accepted.
        args = {'input_x': input_x}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        return input_x
class ReLU6(PrimitiveWithInfer):
    """ReLU clipped at 6; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Restricted to half/single precision floats.
        args = {'input_x': input_x}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return input_x
class ReLUV2(PrimitiveWithInfer):
    """ReLU that additionally emits a packed uint8 mask output.

    The input must be a 4-D tensor. The mask shape replaces the channel
    dimension (axis 1) with ``ceil(C / pack)`` and appends a trailing packing
    dimension: pack is 32 (trailing 4) for (u)int8 inputs, 16 (trailing 2)
    for all other dtypes.
    """

    @prim_attr_register
    def __init__(self):
        """Register I/O names: one input, two outputs (activation and mask)."""
        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])

    def __infer__(self, input_x):
        """Infer (output, mask) shapes and dtypes from the single input."""
        input_shape = list(input_x['shape'])
        input_dtype = input_x['dtype']
        if len(input_shape) != 4:
            raise ValueError("The `input_x` should be a 4-D tensor, "
                             f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
        # Bug fix: this test previously used `and` between the two dtype
        # comparisons, which is always False, so the 8-bit packing branch was
        # unreachable. (u)int8 packs 32 lanes per mask element, others 16.
        is_8bit = input_dtype in (mstype.uint8, mstype.int8)
        pack = 32 if is_8bit else 16
        mask_shape = []
        for idx, dim in enumerate(input_shape):
            if idx == 1:
                # Channel axis is packed: ceil(C / pack).
                mask_shape.append((input_shape[1] + pack - 1) // pack)
            else:
                mask_shape.append(dim)
        # Trailing packing dimension: 4 bytes for 8-bit dtypes, 2 otherwise.
        mask_shape.append(4 if is_8bit else 2)
        output_shape = (input_x['shape'], mask_shape)
        validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
        mask_dtype = mstype.uint8
        output_dtype = (input_dtype, mask_dtype)
        return {'shape': output_shape,
                'dtype': output_dtype,
                'value': None}
class Elu(PrimitiveWithInfer):
    """Exponential linear unit; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self, alpha=1.0):
        """Validate `alpha`: must be a float and exactly 1.0."""
        validator.check_value_type("alpha", alpha, [float], self.name)
        # Only the canonical ELU (alpha == 1.0) is accepted by this primitive.
        validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Only float tensors are supported.
        args = {'input_x': input_x}
        validator.check_tensor_type_same(args, mstype.float_type, self.name)
        return input_x
class HSwish(PrimitiveWithInfer):
    """Hard swish activation; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, xshape):
        return xshape

    def infer_dtype(self, x_dtype):
        # Restricted to half/single precision floats.
        args = {"x": x_dtype}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Sigmoid(PrimitiveWithInfer):
    """Sigmoid activation; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Restricted to half/single precision floats.
        args = {"input_x": input_x}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return input_x
class HSigmoid(PrimitiveWithInfer):
    """Hard sigmoid activation; element-wise, so shape and dtype pass through."""

    @prim_attr_register
    def __init__(self):
        """Register the operator's input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        # Restricted to half/single precision floats.
        args = {"x": x_dtype}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Tanh(PrimitiveWithInfer):
    """Hyperbolic tangent activation; shape and dtype pass through unchanged."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        # Any tensor subtype is accepted; no dtype restriction here.
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class FusedBatchNorm(Primitive):
    """Fused batch normalization primitive.

    mode must be 0 or 1, epsilon in (0, 1], momentum in [0, 1]. Outputs the
    normalized result plus running/saved statistics.
    """

    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Flags that this op updates its parameter inputs (mean/variance) in place.
        self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
    """Extended fused batch normalization with an extra reserve output.

    The scale/bias/mean/variance inputs are declared read-write via the
    signature below, so the op updates those parameters in place.
    """

    # RW_WRITE marks parameters this primitive mutates; T2 lets input_x's
    # dtype differ from the float32 statistics.
    __mindspore_signature__ = (
        sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
        sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        """Initialize FusedBatchNormEx: validate mode/epsilon/momentum ranges."""
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        self._update_parameter = True
        self.add_prim_attr('data_format', "NCHW")

    def infer_shape(self, input_x, scale, bias, mean, variance):
        """Check per-channel parameter shapes against NCHW input; return output shapes."""
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        # Per-channel parameters must match the channel dim (NCHW axis 1).
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
        validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
        validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        """Check dtypes: float16/32 input, float32 statistics; return output dtypes."""
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        valid_types = [mstype.tensor_type(mstype.float32)]
        validator.check_type_same(args_moving, valid_types, self.name)
        return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
    """First phase of split batch-norm training: per-channel sum and square-sum."""

    @prim_attr_register
    def __init__(self):
        """Register I/O names: one input, two reduction outputs."""
        self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])

    def infer_shape(self, x_shape):
        """Require 4-D input; both outputs are 1-D over the channel dim (axis 1)."""
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        return [x_shape[1]], [x_shape[1]]

    def infer_dtype(self, x_type):
        """Both outputs keep the input dtype."""
        return x_type, x_type
class BNTrainingUpdate(PrimitiveWithInfer):
    """Second phase of split batch-norm training: normalize and update statistics."""

    @prim_attr_register
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        # NOTE(review): `isRef` is accepted but not validated or stored here
        # beyond what prim_attr_register does.
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')

    def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
        # `sum`/`b` shadow builtins, but parameter names are part of the
        # interface and are kept unchanged.
        return (x, variance, variance, variance, variance)

    def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
        # Output dtypes mirror (x, variance, ...) with no validation performed.
        return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
    """Batch normalization over NCHW input.

    In inference mode (is_training=False) the running mean/variance must be
    provided and shape-checked; in training mode they may be None-typed.
    """

    @prim_attr_register
    def __init__(self, is_training=False, epsilon=1e-5):
        validator.check_value_type('is_training', is_training, (bool,), self.name)
        validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
                                outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])

    def infer_shape(self, input_x, scale, bias, mean, variance):
        """Check per-channel parameter shapes; mean/variance only when not training."""
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        # Per-channel parameters must match the channel dim (NCHW axis 1).
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        if not self.is_training:
            validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
            validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
            validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        """Check dtypes; training mode additionally accepts None-typed statistics."""
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        if self.is_training:
            valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
            validator.check_type_same(args_moving, valid_types, self.name)
        else:
            args_moving = {"mean": mean, "variance": variance}
            validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
        return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 mode=1,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Initialize Conv2D: normalize kernel/stride/dilation, validate padding.

        Padding may only be non-zero when pad_mode == 'pad'. Data format is
        fixed to NCHW.
        """
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # stride/dilation are normalized to NCHW 4-tuples (1, 1, h, w).
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # Scalar pad applies to all four sides (top, bottom, left, right).
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)

        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)

        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_h = w_shape[2]
kernel_size_w = w_shape[3]
stride_h = self.stride[2]
stride_w = self.stride[3]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
out_channel = self.out_channel
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
    """Depthwise 2D convolution (NCHW): each input channel is convolved with
    its own set of `channel_multiplier` filters.

    Weight layout is (1, in_channel, kernel_h, kernel_w); the leading weight
    dimension must be 1 (checked in infer_shape).
    """

    @prim_attr_register
    def __init__(self,
                 channel_multiplier,
                 kernel_size,
                 mode=3,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Validate attributes and register them on the primitive."""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
        # Only square strides/dilations are supported (H must equal W).
        if self.stride[0] != self.stride[1]:
            raise ValueError("The height and width of stride should be equal,"
                             f"but got height:{self.stride[0]},  width:{self.stride[1]}")
        # NOTE(review): add_prim_attr presumably also rebinds self.stride /
        # self.dilation to these 4-tuples, which the 4-way unpack in
        # infer_shape relies on — confirm against Primitive.add_prim_attr.
        self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
        if self.dilation[0] != self.dilation[1]:
            raise ValueError("The height and width of dilation should be equal,"
                             f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
        self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # A scalar pad applies to all four sides.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding is only meaningful when pad_mode == 'pad'.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
                                                          self.name)
        self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)

    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Infer the NCHW output shape; out_channel = channel_multiplier * C_in."""
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
        _, _, stride_h, stride_w = self.stride
        _, _, dilation_h, dilation_w = self.dilation
        if kernel_size_n != 1:
            raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
        if self.pad_mode == "valid":
            # No padding: only fully-covered windows contribute.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Pad so that out = ceil(in / stride); any odd pad goes to bottom/right.
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Caller-supplied (possibly asymmetric) padding.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        self.add_prim_attr('pads', self.pad_list)
        out_channel = self.channel_multiplier * x_shape[1]
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape

    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """x and w must share a numeric dtype; int8 input promotes to int32 output."""
        args = {'x': x_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        if x_dtype.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class _Pool(PrimitiveWithInfer):
    """Shared base for 2D pooling primitives (MaxPool / AvgPool / MaxPoolWithArgmax).

    Normalizes ksize/strides to 4-tuples and infers the NCHW output shape
    for 'VALID' and 'SAME' padding.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Validate and register the pooling window, strides and padding mode."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('ksize', ksize, [int, tuple], self.name)
        validator.check_value_type('strides', strides, [int, tuple], self.name)
        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
        self.add_prim_attr("padding", self.padding)
        # MaxPoolWithArgmax stores ksize/strides as (1, h, w, 1) and does not
        # register a data_format attribute.
        self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
        if not self.is_maxpoolwithargmax:
            self.add_prim_attr('data_format', "NCHW")
        self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
        self.add_prim_attr("ksize", self.ksize)
        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.strides = (1, self.strides[-2], self.strides[-1], 1)
        self.add_prim_attr("strides", self.strides)

    def infer_shape(self, x_shape):
        """Infer [N, C, H_out, W_out] for the configured padding mode."""
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        batch, channel, input_h, input_w = x_shape
        # The kernel/stride 4-tuples are laid out differently per subclass.
        if self.is_maxpoolwithargmax:
            _, kernel_h, kernel_w, _ = self.ksize
            _, stride_h, stride_w, _ = self.strides
        else:
            _, _, kernel_h, kernel_w = self.ksize
            _, _, stride_h, stride_w = self.strides
        if self.padding == "VALID":
            out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
            out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
        elif self.padding == "SAME":
            out_h = math.ceil(input_h / stride_h)
            out_w = math.ceil(input_w / stride_w)
        out_shape = [batch, channel, out_h, out_w]
        # A kernel larger than the (padded) input yields a non-positive dim.
        for shape_value in out_shape:
            if shape_value <= 0:
                raise ValueError(f"For '{self.name}' The kernel size is not valid, "
                                 f"please check it if is larger than data's shape size.")
        return out_shape

    def infer_dtype(self, x_dtype):
        """Any tensor dtype is accepted and passed through unchanged."""
        validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
        return x_dtype
class MaxPool(_Pool):
    """Max pooling over an NCHW input; all behavior is inherited from _Pool."""

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Forward the pooling configuration to the shared _Pool initializer."""
        super().__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
    """Max pooling that additionally returns an argmax tensor.

    NOTE(review): unlike MaxPool/AvgPool, this __init__ is NOT decorated with
    @prim_attr_register — confirm whether that is intentional.
    """

    def __init__(self, ksize=1, strides=1, padding="valid"):
        super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
        # Cache the backend once; it selects the argmax layout and dtype below.
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_gpu = context.get_context("device_target") == "GPU"

    def infer_shape(self, x_shape):
        """Return (pooled shape, backend-specific argmax shape)."""
        out_shape = _Pool.infer_shape(self, x_shape)
        _, _, out_h, out_w = out_shape
        _, kernel_h, kernel_w, _ = self.ksize
        argmax_shape = []
        if self.is_tbe:
            # Ascend/TBE packed layout: dim2 = window size, dim3 = ceil(out
            # elements / 16) + 1 — presumably a 16-element alignment required
            # by the TBE kernel; confirm against the kernel implementation.
            for i in range(4):
                if i == 2:
                    dim = kernel_h * kernel_w
                    argmax_shape.append(dim)
                elif i == 3:
                    dim = math.ceil(out_h * out_w / 16) + 1
                    argmax_shape.append(dim)
                else:
                    argmax_shape.append(x_shape[i])
        else:
            # Other backends: argmax mirrors the pooled output shape.
            argmax_shape = out_shape
        return out_shape, argmax_shape

    def infer_dtype(self, x_dtype):
        """Float input required; argmax is uint16 (int32 on GPU)."""
        out_dtype = x_dtype
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        argmax_dtype = mstype.uint16
        if self.is_gpu:
            argmax_dtype = mstype.int32
        return out_dtype, argmax_dtype
class AvgPool(_Pool):
    """Average pooling; records the execution backend before _Pool setup."""

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Detect the backend (GPU / GE / other) and delegate to _Pool."""
        if context.get_context("device_target") == "GPU":
            backend = "GPU"
        elif context.get_context("enable_ge"):
            backend = "GE"
        else:
            backend = "OTHER"
        self.target = backend
        super().__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
    """Gradient of Conv2D with respect to its input.

    Given the output gradient, the filter and the original input sizes,
    the inferred output shape is exactly `input_sizes`.
    """

    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 pad_mode="valid",
                 pad=0,
                 pad_list=None,
                 mode=1,
                 stride=1,
                 dilation=1,
                 group=1):
        """Validate attributes and register them on the primitive."""
        self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # stride stays a 2-tuple (H, W) here; dilation becomes a 4-tuple.
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # A scalar pad applies to all four sides.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # NOTE(review): registering the upper-cased mode presumably also
        # rebinds self.pad_mode (add_prim_attr side effect); the 'SAME'/'PAD'
        # comparisons in __infer__ rely on this — confirm.
        pad_mode = pad_mode.upper()
        self.add_prim_attr('pad_mode', pad_mode)
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        if pad_list:
            for x in pad_list:
                validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
        self.pad_list = pad_list

    def __infer__(self, doutput, w, x_size):
        """Validate dtypes, derive the effective pad_list and return x_size as shape."""
        x_size_v = x_size['value']
        validator.check_value_type('x_size', x_size_v, [tuple], self.name)
        for i, dim_len in enumerate(x_size_v):
            validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
        args = {'doutput': doutput['dtype'], 'w': w['dtype']}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        # infer shape
        dout_shape = doutput['shape']
        kernel_h = self.kernel_size[0]
        kernel_w = self.kernel_size[1]
        stride_h = self.stride[0]
        stride_w = self.stride[1]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        # default pad mode is valid
        pad_list = (0, 0, 0, 0)
        if self.pad_list:
            # An explicit pad_list overrides every other padding source.
            pad_list = tuple(self.pad_list)
        elif self.pad_mode == "SAME":
            # Re-derive the forward pass's SAME padding (odd pad -> bottom/right).
            pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
            pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        elif self.pad_mode == 'PAD':
            pad_list = self.padding
        self.add_prim_attr('pad_list', pad_list)
        out = {
            'value': None,
            'shape': x_size_v,
            'dtype': doutput['dtype'],
        }
        return out
class BiasAdd(PrimitiveWithInfer):
    """Adds a 1-D bias to the channel dimension (axis 1) of an NCHW input."""

    @prim_attr_register
    def __init__(self):
        """Register input/output names and fix the data format to NCHW."""
        self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
        self.add_prim_attr('data_format', 'NCHW')

    def infer_shape(self, x_shape, b_shape):
        """Output shape equals input shape; the bias must match the channel count."""
        x_rank = len(x_shape)
        b_rank = len(b_shape)
        validator.check_integer("x rank", x_rank, 2, Rel.GE, self.name)
        validator.check_integer("bias rank", b_rank, 1, Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, b_type):
        """Input and bias must share a numeric tensor dtype; output keeps it."""
        type_args = {"input_x": x_type, "bias": b_type}
        validator.check_tensor_type_same(type_args, mstype.number_type, self.name)
        return x_type
class TopK(PrimitiveWithInfer):
    """Returns the k largest entries along the last axis, plus their indices."""

    @prim_attr_register
    def __init__(self, sorted=False):
        """`sorted` controls whether the returned values are ordered."""
        validator.check_value_type("sorted", sorted, [bool], self.name)
        self.init_prim_io_names(inputs=['input', 'k'],
                                outputs=['values', 'indices'])

    def __infer__(self, input_x, k):
        """Output shape replaces the last dimension of the input with k."""
        x_dtype = input_x['dtype']
        validator.check_tensor_type_same({'x': x_dtype},
                                         (mstype.int32, mstype.float16, mstype.float32),
                                         self.name)
        k_value = k['value']
        validator.check_value_type('k', k_value, (int,), self.name)
        out_shape = list(input_x['shape'])
        out_shape[-1] = k_value
        return {'shape': (out_shape, out_shape),
                'dtype': (x_dtype, mstype.int32),
                'value': None}
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    """Softmax cross-entropy; outputs per-sample loss and dlogits."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, logits_shape, labels_shape):
        """Logits and labels must match; the loss has one value per batch row."""
        validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
        return ([logits_shape[0]], logits_shape)

    def infer_dtype(self, logits_type, labels_type):
        """Both inputs share a float16/float32 dtype; both outputs keep it."""
        validator.check_tensor_type_same({"logits": logits_type, "labels": labels_type},
                                         (mstype.float16, mstype.float32), self.name)
        return (logits_type, logits_type)
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    """Sparse softmax cross-entropy with integer class labels."""

    @prim_attr_register
    def __init__(self, is_grad=False):
        """`is_grad` switches the output from scalar loss to the gradient."""
        self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
        self.is_grad = is_grad
        self.add_prim_attr('sens', 1.0)

    def infer_shape(self, logits_shape, labels_shape):
        """Scalar loss shape, or the logits shape when computing the gradient."""
        validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
        if self.is_grad:
            return logits_shape
        return []

    def infer_dtype(self, logits_type, labels_type):
        """Float logits, integer labels; the output dtype follows the logits."""
        validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
        return logits_type
class ApplyMomentum(PrimitiveWithInfer):
    """Momentum optimizer update, applied in-place to variable/accumulation.

    The signature marks variable/accumulation as read-write and ties their
    dtypes together (group T); learning_rate and momentum use separate groups.
    On Ascend TBE (non-GE) the primitive reports two outputs, otherwise one.
    """

    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
        sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
    )

    @prim_attr_register
    def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
        self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
                                outputs=['output'])
        # Cached backend flags; they select the single/double output form below.
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_ge = context.get_context("enable_ge")

    def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
        """Output shape mirrors the variable (doubled on Ascend TBE, non-GE)."""
        if not self.is_ge and self.is_tbe:
            return v_shape, v_shape
        return v_shape

    def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
        """All inputs must be fp16/fp32/fp64; output follows the gradient dtype."""
        valid_types = [mstype.float16, mstype.float32, mstype.float64]
        # RefKey inputs (parameter references) skip the tensor dtype check.
        if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
            validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
            validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
        if not self.is_ge and self.is_tbe:
            return g_dtype, g_dtype
        return g_dtype
class SmoothL1Loss(PrimitiveWithInfer):
    """Element-wise smooth L1 loss; `beta` sets the quadratic/linear switch point."""

    @prim_attr_register
    def __init__(self, beta=1.0):
        """`beta` must be a strictly positive float."""
        validator.check_value_type('beta', beta, [float], self.name)
        validator.check('beta', beta, '', 0, Rel.GT, self.name)
        self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])

    def infer_shape(self, prediction, target):
        """Element-wise op: output shape matches the (equal) input shapes."""
        validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
        return prediction

    def infer_dtype(self, prediction, target):
        """Prediction and target must share a float16/float32 dtype."""
        dtype_args = {"prediction": prediction, "target": target}
        validator.check_tensor_type_same(dtype_args, (mstype.float16, mstype.float32), self.name)
        return prediction
class L2Loss(PrimitiveWithInfer):
    """Reduces a float tensor to a scalar loss (output shape []).

    Per the op name this presumably computes sum(x ** 2) / 2 at runtime; only
    the scalar output shape is visible here — confirm against the kernel.
    """

    @prim_attr_register
    def __init__(self):
        """No configurable attributes.

        BUG FIX: the original __init__ had an empty body, which is a
        SyntaxError in Python; a docstring/no-op body is required.
        """

    def infer_shape(self, input_x):
        # The loss is a scalar, expressed as an empty shape list.
        loss_shape = []
        return loss_shape

    def infer_dtype(self, x_type):
        """Input must be a float16/float32 tensor; the dtype passes through."""
        validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
        return x_type
class DataFormatDimMap(PrimitiveWithInfer):
    """Maps dimension indices from src_format to dst_format (NHWC/NCHW)."""

    @prim_attr_register
    def __init__(self, src_format='NHWC', dst_format='NCHW'):
        """Both formats must be one of 'NHWC' or 'NCHW'."""
        allowed_formats = ['NHWC', 'NCHW']
        self.src_format = validator.check_string("src_format", src_format, allowed_formats, self.name)
        self.dst_format = validator.check_string("dst_format", dst_format, allowed_formats, self.name)
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])

    def infer_shape(self, x_shape):
        """The mapping is element-wise, so the shape is unchanged."""
        return x_shape

    def infer_dtype(self, x_type):
        """Only int32 tensors are supported; the dtype passes through."""
        validator.check_subclass("x", x_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x": x_type}, [mstype.int32], self.name)
        return x_type
class RNNTLoss(PrimitiveWithInfer):
    """RNN-Transducer loss: returns per-batch costs and gradients w.r.t. acts.

    Shape contract enforced below: acts is rank-4, labels is rank-2 with
    labels.shape[1] == acts.shape[2] - 1, and both length vectors match the
    batch dimension acts.shape[0].
    """

    @prim_attr_register
    def __init__(self, blank_label=0):
        """`blank_label` is the integer id of the blank symbol."""
        validator.check_value_type('blank_label', blank_label, [int], self.name)
        self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
                                outputs=['costs', 'grads'])

    def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
        """Validate rank and batch-size consistency; grads share acts' shape."""
        validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
        validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
        validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
        validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
        validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        # The label axis excludes one symbol (presumably the blank), hence -1.
        validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
        validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        costs_shape = (acts_shape[0],)
        return (costs_shape, acts_shape)

    def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
        """acts must be float16/float32; labels and both lengths must be int32."""
        validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
        validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
        validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
        validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
        validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
        return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
    """Stochastic gradient descent update with optional Nesterov momentum."""

    @prim_attr_register
    def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
        """Nesterov momentum requires zero dampening."""
        validator.check_value_type("nesterov", nesterov, [bool], self.name)
        if nesterov and dampening != 0:
            raise ValueError(f"Nesterov need zero dampening!")
        self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
                                outputs=['output'])

    def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
                    accum_shape, momentum_shape, stat_shape):
        """parameters/accum must be non-scalar; stat must match the gradient."""
        validator.check_integer(f'parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
        validator.check_integer(f'gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
        validator.check_integer(f'learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
        validator.check_integer(f'accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
        validator.check_integer(f'momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
        validator.check_integer(f'stat rank', len(stat_shape), 0, Rel.GE, self.name)
        validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
        return parameters_shape

    def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
                    accum_dtype, momentum_dtype, stat_dtype):
        """All six inputs must be float16/float32 tensors; output follows parameters."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
        return parameters_dtype
class ApplyRMSProp(PrimitiveWithInfer):
    """RMSProp optimizer update applied in-place to var/mean_square/moment.

    On Ascend (non-GE) the primitive reports three outputs, otherwise one.
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
                                        'rho', 'momentum', 'epsilon'], outputs=['output'])
        # Cached backend flags; they select the single/triple output form below.
        self.is_ge = context.get_context("enable_ge")
        self.is_d = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
                    momentum_shape, epsilon_shape):
        """All state tensors must match var's shape."""
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if not self.is_ge and self.is_d:
            return var_shape, var_shape, var_shape
        return var_shape

    def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
                    momentum_dtype, epsilon_dtype):
        """State tensors share a numeric dtype; hyper-parameters are fp16/fp32."""
        args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_decay, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if not self.is_ge and self.is_d:
            return var_dtype, var_dtype, var_dtype
        return var_dtype

    def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
        """Never constant-folds; only enforces that the hyper-parameters are const."""
        if decay is None or momentum is None or epsilon is None:
            raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
class ApplyCenteredRMSProp(PrimitiveWithInfer):
    """Centered RMSProp update; additionally tracks the mean gradient.

    On Ascend the primitive reports four outputs, otherwise one.
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # Cached backend flag; it selects the single/quadruple output form below.
        self.is_ascend = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
                    learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
        """All state tensors must match var's shape."""
        validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if self.is_ascend:
            return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
        return var_shape

    def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
                    learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
        """State tensors share a numeric dtype; hyper-parameters are fp16/fp32."""
        args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
                "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_rho, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if self.is_ascend:
            return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
        return var_dtype
class LayerNorm(Primitive):
    """Layer normalization primitive; only argument validation happens here."""

    @prim_attr_register
    def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
        """Check that both axis arguments are ints and epsilon is a float."""
        for arg_name, arg_value in (('begin_norm_axis', begin_norm_axis),
                                    ('begin_params_axis', begin_params_axis)):
            validator.check_value_type(arg_name, arg_value, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [float], self.name)
class L2Normalize(PrimitiveWithInfer):
    """L2-normalizes the input along `axis`; `epsilon` guards the division."""

    @prim_attr_register
    def __init__(self, axis=0, epsilon=1e-4):
        validator.check_value_type('axis', axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [int, float], self.name)

    def infer_shape(self, input_x):
        """Shape passes through; axis must be a valid dimension index."""
        dim = len(input_x)
        # NOTE(review): self.axis is presumably registered from the ctor
        # argument by @prim_attr_register — confirm.
        validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT, self.name)
        return input_x

    def infer_dtype(self, input_x):
        """Input must be a float16/float32 tensor; the dtype passes through."""
        validator.check_subclass("x", input_x, mstype.tensor, self.name)
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class DropoutGenMask(Primitive):
    """Generates the packed random dropout mask consumed by DropoutDoMask."""

    @prim_attr_register
    def __init__(self, Seed0=0, Seed1=0):
        """Seed0/Seed1 are integer RNG seeds; the op is flagged as random."""
        self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
        for seed_name, seed_value in (("Seed0", Seed0), ("Seed1", Seed1)):
            validator.check_value_type(seed_name, seed_value, [int], self.name)
        # Mark the primitive as having a random side effect.
        self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
    """Applies a packed dropout mask (from DropoutGenMask) to the input tensor.

    Inputs: input_x (float16/float32/int32 tensor), mask (1-D uint8 tensor
    packing 8 dropout bits per byte) and keep_prob (scalar in [0, 1]).
    The output mirrors input_x's shape and dtype.
    """

    @prim_attr_register
    def __init__(self):
        pass

    def __infer__(self, input_x, mask, keep_prob):
        """Validate mask capacity, input dtypes and the keep_prob range."""
        input_x_shape = input_x['shape']
        mask_shape = mask['shape']
        keep_prob_shape = keep_prob['shape']
        validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
        # Total element count of input_x. The initializer 1 makes a scalar
        # (empty) shape well-defined instead of raising TypeError from reduce.
        size_x = reduce(lambda x, y: x * y, input_x_shape, 1)
        if len(mask_shape) != 1:
            raise ValueError("DropoutDoMask mask shape should be 1-dimension.")
        # Each uint8 mask element packs 8 dropout bits.
        size_y = mask_shape[0] * 8
        if size_x > size_y:
            # BUG FIX: the second literal was missing its f prefix, so the
            # message printed "{input_x_shape}" literally instead of the shapes.
            raise ValueError(f"DropoutDoMask y mask do not match input input_x shape: "
                             f"{input_x_shape}, mask shape: {mask_shape}.")
        validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
                                         self.name)
        validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
        keep_prob_v = keep_prob['value']
        if keep_prob_v is not None:
            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
        out = {'shape': input_x_shape,
               'dtype': input_x['dtype'],
               'value': None}
        return out
class ResizeBilinear(PrimitiveWithInfer):
    """Bilinear resize of an NCHW image to the configured `size` (H, W)."""

    @prim_attr_register
    def __init__(self, size, align_corners=False):
        # Attributes are presumably registered by @prim_attr_register
        # (infer_shape reads self.size) — nothing else to do here.
        pass

    def infer_shape(self, input_shape):
        """Keep N and C, replace the spatial dims with the target size."""
        input_shape = list(input_shape)
        batch, channel, _, _ = input_shape
        out_shape = [batch, channel]
        for i in self.size:
            out_shape.append(int(i))
        return out_shape

    def infer_dtype(self, input_dtype):
        """Accepts float16/float32 input; the output is always float32."""
        validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)
        return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
    """One-hot encodes `indices`, inserting a `depth`-sized axis at `axis`."""

    @prim_attr_register
    def __init__(self, axis=-1):
        """axis == -1 appends the new dimension at the end."""
        self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
        validator.check_value_type("axis", axis, [int], self.name)

    def __infer__(self, indices, depth, on_value, off_value):
        """Validate inputs and build the output shape with the inserted axis."""
        validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
        validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
        args = {"on_value": on_value['dtype'], "off_value": off_value['dtype']}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        indices_shp = indices['shape']
        validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
        # depth must be a compile-time constant (its 'value' is required here).
        depth_val = depth['value']
        validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)
        # Mutates the shape list in place: insert at axis, or append for -1.
        _ = indices_shp.insert(self.axis, depth_val) if self.axis >= 0 else indices_shp.append(depth_val)
        return {'shape': indices_shp,
                'dtype': on_value['dtype'],
                'value': None}
class Gelu(PrimitiveWithInfer):
    """Gaussian Error Linear Unit activation, applied element-wise."""

    @prim_attr_register
    def __init__(self):
        """Register the single input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        """Element-wise op: the shape passes through unchanged."""
        return input_x

    def infer_dtype(self, input_x):
        """Only float16/float32 tensors are accepted; the dtype passes through."""
        validator.check_tensor_type_same({"input_x": input_x},
                                         (mstype.float16, mstype.float32),
                                         self.name)
        return input_x
class GetNext(PrimitiveWithInfer):
    """Fetches the next element from the dataset queue named `shared_name`."""

    @prim_attr_register
    def __init__(self, types, shapes, output_num, shared_name):
        """`types`/`shapes` describe each output; their lengths must agree."""
        validator.check_value_type("types", types, [list, tuple], self.name)
        validator.check_value_type("shapes", shapes, [list, tuple], self.name)
        validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
        validator.check_value_type("output_num", output_num, [int], self.name)

    def infer_shape(self):
        # NOTE(review): self.shapes / self.types are presumably registered
        # from the ctor arguments by @prim_attr_register — confirm.
        return tuple(self.shapes)

    def infer_dtype(self):
        return tuple(self.types)
class PReLU(PrimitiveWithInfer):
    """Parametric ReLU: `weight` holds per-channel slopes (or a single shared one)."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x_shape, weight_shape):
        """weight must be 1-D with length 1 or the channel count of input_x."""
        input_x_dim = len(input_x_shape)
        weight_dim = len(weight_shape)
        # Rank-1 inputs have no channel axis to apply the slope along.
        if input_x_dim == 1:
            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
        if weight_dim != 1:
            raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')
        if weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:
            raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
                             f' while channel of input_x is {input_x_shape[1]},'
                             f' weight_shape[0] is {weight_shape[0]}.')
        return input_x_shape

    def infer_dtype(self, input_x_dtype, weight_dtype):
        """Both tensors must be float16/float32; the output follows input_x."""
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"input_x": input_x_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"weight": weight_dtype}, valid_types, self.name)
        return input_x_dtype
class LSTM(PrimitiveWithInfer):
    """Fused LSTM primitive.

    Validates input/state shapes and dtypes, and derives the output shape
    plus the size of the workspace ("reserved") buffer used by the kernel.
    """
    @prim_attr_register
    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
        # All sizes must be strictly positive; dropout lies in [0, 1].
        self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
        self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
        self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
        self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
        self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
        self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
        self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
        # A bidirectional LSTM runs a forward and a backward pass per layer.
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
    def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
        """Check (x, h, c) consistency and return output + workspace shapes.

        x is (seq_len, batch, input_size); h and c are
        (num_layers * num_directions, batch, hidden_size).
        NOTE(review): w_shape is accepted but never validated here — confirm
        the weight layout is checked elsewhere.
        """
        validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
        validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)
        validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
        validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)
        validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
        validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
        validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)
        y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)
        # Workspace sizing below mirrors the kernel's internal buffers:
        # gates, hidden states, cell states and gradient scratch, each
        # rounded up to a padded leading dimension and a page boundary.
        # type_size = 4 presumably assumes float32 elements — TODO confirm.
        type_size = 4
        gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
        states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
        self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
        self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_grid_comp_size = 0
        self.page_size = 4096
        # Lay the buffers out back-to-back, page-aligning each section.
        current_offset = 0
        current_offset += self.ws_gates_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_c_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_diff_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_grid_comp_size
        reserved_shape = (current_offset, 1)
        state_shape = (1, 1)
        return (y_shape, h_shape, c_shape, reserved_shape, state_shape)
    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
        # All four inputs must share one float16/float32 dtype; all five
        # outputs take the input dtype.
        args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
        return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)
    def rnd_up(self, current_offset, page_size):
        # Round current_offset up to the next multiple of page_size.
        return ((current_offset + page_size - 1) // page_size) * page_size
    def get_good_ld(self, dim, type_size):
        # Pad a leading dimension to a 64-byte multiple (in elements).
        ld = self.rnd_up(dim, 64 // type_size)
        # NOTE(review): `ld * 256 == 0` is only true when ld == 0; this may
        # be a typo for `ld % 256 == 0` (avoiding power-of-two strides) —
        # confirm against the kernel before changing.
        if ld * 256 == 0:
            return ld + 64 // type_size
        return ld
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
    """Sigmoid cross-entropy between logits and targets, without reduction."""

    @prim_attr_register
    def __init__(self):
        """Register IO names: (predict, target) -> loss."""
        self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])

    def infer_shape(self, x_shape, y_shape):
        # Element-wise loss: predict and target must match, loss keeps the shape.
        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_dtype, y_dtype):
        validator.check_tensor_type_same({"x_dtype": x_dtype, "y_dtype": y_dtype},
                                         mstype.number_type, self.name)
        return x_dtype
class Pad(PrimitiveWithInfer):
    """Pad a tensor according to a per-dimension `paddings` attribute."""

    @prim_attr_register
    def __init__(self, paddings):
        """`paddings` is a tuple of (before, after) pairs, one per input dim."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        if not isinstance(paddings, tuple):
            raise TypeError('Paddings must be tuple type.')
        for pair in paddings:
            if len(pair) != 2:
                raise ValueError('The shape of paddings must be (n, 2).')
        self.paddings = paddings

    def infer_shape(self, x):
        paddings = np.array(self.paddings)
        # One (before, after) pair per input dimension, all non-negative.
        validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)
        if not np.all(paddings >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # Each output dim grows by the sum of its pair of padding amounts.
        return tuple(x[i] + paddings[i, 0] + paddings[i, 1]
                     for i in range(paddings.size // 2))

    def infer_dtype(self, x):
        validator.check_subclass("input_x", x, mstype.tensor, self.name)
        return x
class MirrorPad(PrimitiveWithInfer):
    """Pad a tensor by mirroring its borders.

    Mode 'REFLECT' excludes the border element from the mirror while
    'SYMMETRIC' includes it, which is why SYMMETRIC tolerates padding
    one element more per side (see `adjust` below).
    """
    @prim_attr_register
    def __init__(self, mode='REFLECT'):
        validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
        self.mode = mode
        # paddings (input index 1) must be a compile-time constant so the
        # output shape can be inferred here.
        self.set_const_input_indexes([1])
    def __infer__(self, input_x, paddings):
        """Validate paddings against input_x and compute the padded shape."""
        validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
        validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
        x_shape = list(input_x['shape'])
        # Constant input: the concrete value is available at infer time.
        paddings_value = paddings['value'].asnumpy()
        paddings_size = paddings_value.size
        validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)
        if not np.all(paddings_value >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # SYMMETRIC may mirror up to the full dim; REFLECT only up to dim - 1.
        adjust = 0
        if self.mode == 'SYMMETRIC':
            adjust = 1
        for i in range(0, int(paddings_size / 2)):
            if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust):
                raise ValueError('At least one dim has too high a padding value for this input and mode')
        # Each output dim grows by the sum of its (before, after) padding.
        y_shape = ()
        for i in range(0, int(paddings_size / 2)):
            y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),)
        return {'shape': y_shape,
                'dtype': input_x['dtype'],
                'value': None}
class ROIAlign(PrimitiveWithInfer):
    """Region-of-Interest Align pooling primitive.

    Produces one (channels, pooled_height, pooled_width) patch per ROI.
    """

    @prim_attr_register
    def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
        """Validate and store the pooling attributes.

        Args:
            pooled_height (int): output height per ROI.
            pooled_width (int): output width per ROI.
            spatial_scale (float): scale from ROI coords to the feature map.
            sample_num (int): sampling points per output bin. Default: 2.
            roi_end_mode (int): 0 or 1, ROI end-coordinate handling. Default: 1.
        """
        # Validated in declaration order so the first bad argument raises.
        for arg_name, arg_value, kinds in (("pooled_height", pooled_height, [int]),
                                           ("pooled_width", pooled_width, [int]),
                                           ("spatial_scale", spatial_scale, [float]),
                                           ("sample_num", sample_num, [int]),
                                           ("roi_end_mode", roi_end_mode, [int])):
            validator.check_value_type(arg_name, arg_value, kinds, self.name)
        validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
        self.pooled_height = pooled_height
        self.pooled_width = pooled_width
        self.spatial_scale = spatial_scale
        self.sample_num = sample_num
        self.roi_end_mode = roi_end_mode

    def infer_shape(self, inputs_shape, rois_shape):
        # One pooled patch per ROI, channel count taken from the features.
        return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]

    def infer_dtype(self, inputs_type, rois_type):
        float_dtypes = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"inputs_type": inputs_type}, float_dtypes, self.name)
        validator.check_tensor_type_same({"rois_type": rois_type}, float_dtypes, self.name)
        return inputs_type
class Adam(PrimitiveWithInfer):
    """Dense Adam optimizer update primitive.

    Updates (var, m, v) in place; m, v and grad must share var's shape.
    """

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Both flags must be plain bools."""
        for flag_name, flag_value in (("use_locking", use_locking),
                                      ("use_nesterov", use_nesterov)):
            validator.check_value_type(flag_name, flag_value, [bool], self.name)

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        # var, m, v and grad must all share one shape.
        for other_name, other_shape in (("m_shape", m_shape),
                                        ("v_shape", v_shape),
                                        ("grad_shape", grad_shape)):
            validator.check("var_shape", var_shape, other_name, other_shape, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        tensor_args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(tensor_args, mstype.number_type, self.name)
        # Hyper-parameters may be scalars or tensors, but must be float16/32.
        scalar_args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                       "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(scalar_args, [mstype.float16, mstype.float32], self.name, True)
        return var_dtype, m_dtype, v_dtype
class FusedSparseAdam(PrimitiveWithInfer):
    """Fused sparse Adam optimizer.

    Updates (var, m, v) in place from a sparse gradient given as the pair
    (grad, indices), where indices selects the rows of var being updated.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """use_locking: lock-protect updates; use_nesterov: Nesterov momentum."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        """m and v match var; indices is a vector pairing rows of grad to var."""
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # grad must be a stack of row-slices of var, one per index.
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # The fused kernel updates in place; outputs are 1-element placeholders.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        # State and grad share one numeric dtype; hyper-parameters must be
        # float16/float32 scalars or tensors; indices must be int32.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseLazyAdam(PrimitiveWithInfer):
    """Fused sparse "lazy" Adam optimizer.

    Same interface and inference rules as FusedSparseAdam; presumably the
    kernel only updates the rows named by `indices` — confirm against the
    kernel implementation.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """use_locking: lock-protect updates; use_nesterov: Nesterov momentum."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])
    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        """m and v match var; indices is a vector pairing rows of grad to var."""
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # grad must be a stack of row-slices of var, one per index.
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # In-place update; outputs are 1-element placeholders.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        # State and grad share one numeric dtype; hyper-parameters must be
        # float16/float32 scalars or tensors; indices must be int32.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseFtrl(PrimitiveWithInfer):
    """Fused sparse FTRL-proximal optimizer.

    Updates (var, accum, linear) in place from a sparse gradient given as
    the pair (grad, indices).
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Constraints: lr > 0; l1 >= 0; l2 >= 0; lr_power <= 0."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
                                outputs=['output'])
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """accum/linear match var; grad rows are selected by the indices vector."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # In-place update; outputs are 1-element placeholders.
        return [1], [1], [1]
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        # State and grad must all be float32; indices must be int32.
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
    """Fused sparse proximal Adagrad optimizer.

    Updates (var, accum) in place from a sparse gradient given as the pair
    (grad, indices).
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('l1', dtype=sig.sig_dtype.T),
        sig.make_sig('l2', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, use_locking=False):
        """use_locking: protect the in-place update with a lock."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        # Only constraint checked here: indices is a vector.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        # In-place update; outputs are 1-element placeholders.
        return [1], [1]
    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        # var/accum/grad must be float32; lr/l1/l2 float32 scalar or tensor;
        # indices may be any (u)int16/32/64 type.
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype
class KLDivLoss(PrimitiveWithInfer):
    """Kullback-Leibler divergence loss primitive."""

    @prim_attr_register
    def __init__(self, reduction='mean'):
        """`reduction` selects the output form: 'none', 'mean' or 'sum'."""
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape):
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        # 'mean'/'sum' collapse the loss to a scalar; 'none' keeps the shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type):
        validator.check_tensor_type_same({'x': x_type, 'y': y_type},
                                         (mstype.float16, mstype.float32), self.name)
        return x_type
class BinaryCrossEntropy(PrimitiveWithInfer):
    """Binary cross-entropy loss primitive with optional element weights."""

    @prim_attr_register
    def __init__(self, reduction='mean'):
        """`reduction` selects the output form: 'none', 'mean' or 'sum'."""
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape, weight_shape):
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        if weight_shape:
            # A non-empty weight must match the target element-wise.
            validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
        # 'mean'/'sum' collapse the loss to a scalar; 'none' keeps the shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type, weight_type):
        float_dtypes = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({'x': x_type, 'y': y_type}, float_dtypes, self.name)
        if weight_type:
            validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, float_dtypes, self.name)
        return x_type
class ApplyAdaMax(PrimitiveWithInfer):
    """AdaMax optimizer update primitive.

    Updates (var, m, v) in place. m, v and grad must share var's shape,
    and every hyper-parameter (beta1_power, lr, beta1, beta2, epsilon)
    must be a scalar: rank 0, or rank 1 with a single element.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
        sig.make_sig('lr', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAdaMax."""
        # Fix: the original `def __init__(self):` had no body at all (its
        # docstring was evidently lost), which is a SyntaxError. The
        # docstring above restores a valid, empty initializer.

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        """State/grad tensors match var; hyper-parameters are scalars."""
        validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter may be rank 0 or a length-1 vector; the loop
        # keeps the original per-argument order and error-message strings.
        scalar_shapes = (("beta1 power", "beta1_power_shape", beta1_power_shape),
                         ("lr", "lr_shape", lr_shape),
                         ("beta1", "beta1_shape", beta1_shape),
                         ("beta2", "beta2_shape", beta2_shape),
                         ("epsilon", "epsilon_shape", epsilon_shape))
        for display_name, shape_name, shape in scalar_shapes:
            rank = len(shape)
            validator.check_integer(f"{display_name}'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(f"{shape_name}[0]", shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        """All inputs must be float16/float32; state tensors share one dtype."""
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, m_dtype, v_dtype
class ApplyAdadelta(PrimitiveWithInfer):
    """Adadelta optimizer update primitive.

    Updates (var, accum, accum_update) in place. accum, accum_update and
    grad must share var's shape; lr, rho and epsilon are scalars: rank 0,
    or rank 1 with a single element.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('rho', dtype=sig.sig_dtype.T2),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAdadelta."""
        # Fix: the original `def __init__(self):` had no body at all (its
        # docstring was evidently lost), which is a SyntaxError. The
        # docstring above restores a valid, empty initializer.

    def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
                    epsilon_shape, grad_shape):
        """State/grad tensors match var; lr/rho/epsilon are scalars."""
        validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter may be rank 0 or a length-1 vector.
        # Fix: the epsilon rank check previously said "lepsilon's rank" (typo).
        for scalar_name, scalar_shape in (("lr", lr_shape), ("rho", rho_shape), ("epsilon", epsilon_shape)):
            rank = len(scalar_shape)
            validator.check_integer(f"{scalar_name}'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(f"{scalar_name}_shape[0]", scalar_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape, accum_update_shape

    def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
                    epsilon_dtype, grad_dtype):
        """All inputs must be float16/float32; state tensors share one dtype."""
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype, accum_update_dtype
class ApplyAdagrad(PrimitiveWithInfer):
    """Dense Adagrad optimizer update primitive."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, update_slots=True):
        """`update_slots` controls whether the accumulator is updated."""
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # lr is a scalar: rank 0, or rank 1 with a single element.
        lr_rank = len(lr_shape)
        validator.check_integer("lr's rank", lr_rank, 1, Rel.LE, self.name)
        if lr_rank == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        valid_dtypes = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype},
                                         valid_dtypes, self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, valid_dtypes, self.name)
        return var_dtype, accum_dtype
class ApplyAdagradV2(PrimitiveWithInfer):
    """Dense Adagrad-v2 optimizer update primitive (adds an epsilon attr)."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self, epsilon, update_slots=True):
        """epsilon: stability constant; update_slots: whether accum is updated."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        # accum and grad must match var; lr is a scalar (rank 0 or length-1).
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
        lr_shp_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
        if lr_shp_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape
    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        # var/accum/grad share one float16/float32 dtype; lr may be a
        # scalar or tensor of the same valid dtypes.
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)
        return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
    """Sparse Adagrad optimizer update primitive."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, update_slots=True, use_locking=False):
        """lr: learning rate; update_slots: update accum; use_locking: lock updates."""
        validator.check_value_type("lr", lr, [float], self.name)
        # NOTE(review): the (-inf, inf) INC_NEITHER range only rejects
        # infinite lr; zero and negative values pass — confirm intended.
        validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        # accum matches var; grad rows are row-slices of var selected by
        # the 1-D indices vector.
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape
    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        # var/accum/grad share one float16/float32 dtype; indices are int32.
        args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class SparseApplyAdagradV2(PrimitiveWithInfer):
    """Sparse Adagrad-v2 optimizer update primitive."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
        """Validate and store the hyper-parameters.

        Args:
            lr (float): learning rate.
            epsilon (float): stability constant added to the denominator.
            use_locking (bool): protect the in-place update with a lock.
            update_slots (bool): whether the accumulator is updated.
        """
        self.lr = validator.check_value_type("lr", lr, [float], self.name)
        self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
        # Fix: use_locking/update_slots were swapped — each attribute was
        # assigned from (and validated under the name of) the other parameter.
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.update_slots = validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        # accum matches var; grad rows are row-slices of var selected by
        # the 1-D indices vector.
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        # var/accum/grad share one float16/float32 dtype; indices are int32.
        args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class ApplyProximalAdagrad(PrimitiveWithInfer):
    """Dense proximal Adagrad optimizer update primitive."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register IO names and validate the locking flag."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # lr, l1 and l2 are scalars: rank 0, or rank 1 with a single
        # element. Loop preserves the original per-argument check order
        # and error-message strings.
        for scalar_name, scalar_shape in (("lr", lr_shape), ("l1", l1_shape), ("l2", l2_shape)):
            rank = len(scalar_shape)
            validator.check_integer(f"{scalar_name}'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(f"{scalar_name}_shape[0]", scalar_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype},
                                         valid_types, self.name)
        for scalar_name, scalar_dtype in (("lr", lr_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({scalar_name: scalar_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
    """Declaration of the SparseApplyProximalAdagrad primitive.

    As a PrimitiveWithCheck it only validates inputs (no shape inference);
    var/accum are updated in place for the rows selected by `indices`.
    """
    # var/accum are writable (RW_WRITE) and share dtype group T with grad;
    # lr/l1/l2/indices may each carry their own dtype group (T1..T4).
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T4),
    )
    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register I/O names and validate the boolean use_locking flag."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        """indices must be a 1-D tensor."""
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        """var/accum/grad share a float16/float32 dtype; lr/l1/l2 may be scalar
        or tensor of those types; indices must be an integer tensor."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
class ApplyAddSign(PrimitiveWithInfer):
    """Shape/dtype inference for the ApplyAddSign optimizer primitive.

    `var` and `m` are updated in place and returned; lr/alpha/sign_decay/beta
    must be scalar-like (rank 0, or rank 1 with exactly one element).

    Fix: the original `__init__` had an empty body (its docstring had been
    stripped), which is a SyntaxError; the docstring body is restored.  The
    four identical scalar checks are deduplicated into loops that raise the
    same error messages in the same order.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAddSign; the primitive has no configurable attributes."""
    def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
        """m and grad must match var; scalar args are checked in declaration order."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        # Scalar-like arguments: rank <= 1, and exactly one element when rank 1.
        for arg_name, arg_shape in (('lr', lr_shape), ('alpha', alpha_shape),
                                    ('sign_decay', sign_decay_shape), ('beta', beta_shape)):
            rank = len(arg_shape)
            validator.check_integer("%s's rank" % arg_name, rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer("%s_shape[0]" % arg_name, arg_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; outputs mirror (var, m)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        for arg_name, arg_dtype in (("lr", lr_dtype), ("alpha", alpha_dtype),
                                    ("sign_decay", sign_decay_dtype), ("beta", beta_dtype)):
            validator.check_scalar_or_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyPowerSign(PrimitiveWithInfer):
    """Shape/dtype inference for the ApplyPowerSign optimizer primitive.

    `var` and `m` are updated in place and returned; all seven inputs share
    one dtype group (T); lr/logbase/sign_decay/beta must be scalar-like.

    Fix: the original `__init__` had an empty body (its docstring had been
    stripped), which is a SyntaxError; the docstring body is restored.  The
    repeated scalar checks are deduplicated into loops that raise the same
    error messages in the same order.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('logbase', dtype=sig.sig_dtype.T),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
        sig.make_sig('beta', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyPowerSign; the primitive has no configurable attributes."""
    def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
        """m and grad must match var; scalar args are checked in declaration order."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        # Scalar-like arguments: rank <= 1, and exactly one element when rank 1.
        for arg_name, arg_shape in (('lr', lr_shape), ('logbase', logbase_shape),
                                    ('sign_decay', sign_decay_shape), ('beta', beta_shape)):
            rank = len(arg_shape)
            validator.check_integer("%s's rank" % arg_name, rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer("%s_shape[0]" % arg_name, arg_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; outputs mirror (var, m)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        for arg_name, arg_dtype in (("lr", lr_dtype), ("logbase", logbase_dtype),
                                    ("sign_decay", sign_decay_dtype), ("beta", beta_dtype)):
            validator.check_scalar_or_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyGradientDescent(PrimitiveWithInfer):
    """Shape/dtype inference for the ApplyGradientDescent primitive.

    `var` is updated in place and returned; `delta` must match `var` and
    `alpha` must be scalar-like (rank 0, or rank 1 with one element).

    Fix: the original `__init__` had an empty body (its docstring had been
    stripped), which is a SyntaxError; the docstring body is restored.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyGradientDescent; the primitive has no configurable attributes."""
    def infer_shape(self, var_shape, alpha_shape, delta_shape):
        """delta must match var; alpha must be scalar-like. Returns var's shape."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        alpha_rank = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_rank, 1, Rel.LE, self.name)
        if alpha_rank == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
        """var/delta share a float16/float32 tensor dtype; alpha may be scalar or tensor."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'delta': delta_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
    """Shape/dtype inference for the ApplyProximalGradientDescent primitive.

    `var` is updated in place and returned; `delta` must match `var` and
    alpha/l1/l2 must be scalar-like (rank 0, or rank 1 with one element).

    Fix: the original `__init__` had an empty body (its docstring had been
    stripped), which is a SyntaxError; the docstring body is restored.  The
    repeated scalar checks are deduplicated into loops with identical
    error messages and ordering.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyProximalGradientDescent; no configurable attributes."""
    def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
        """delta must match var; alpha/l1/l2 scalar-like. Returns var's shape."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # Scalar-like arguments: rank <= 1, and exactly one element when rank 1.
        for arg_name, arg_shape in (('alpha', alpha_shape), ('l1', l1_shape), ('l2', l2_shape)):
            rank = len(arg_shape)
            validator.check_integer("%s's rank" % arg_name, rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer("%s_shape[0]" % arg_name, arg_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
        """var/delta share a float16/float32 tensor dtype; scalars likewise."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'delta': delta_dtype}, valid_types, self.name)
        for arg_name, arg_dtype in (("alpha", alpha_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        return var_dtype
class LARSUpdate(PrimitiveWithInfer):
    """Shape/dtype inference for the LARSUpdate primitive."""
    @prim_attr_register
    def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
        """Validate the scalar attributes epsilon, hyperpara and use_clip."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("hyperpara", hyperpara, [float], self.name)
        validator.check_value_type("use_clip", use_clip, [bool], self.name)
    def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
                    learning_rate_shape):
        """weight/gradient and their norms must pair up; weight_decay and
        learning_rate must be scalar-like. Returns weight's shape."""
        validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
        validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
                        self.name)
        shp_len = len(weight_decay_shape)
        validator.check_integer("weight decay's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("weight_decay_shape[0]", weight_decay_shape[0], 1, Rel.EQ, self.name)
        shp_len = len(learning_rate_shape)
        validator.check_integer("learning rate's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("learning_rate_shape[0]", learning_rate_shape[0], 1, Rel.EQ, self.name)
        return weight_shape
    def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
                    weight_decay_dtype, learning_rate_dtype):
        """The four tensor inputs share one dtype; the two scalar inputs may
        additionally be float64. Returns weight's dtype."""
        args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype, "norm weight dtype": norm_weight_dtype,
                "norm gradient dtype": norm_gradient_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)
        validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        return weight_dtype
class ApplyFtrl(PrimitiveWithInfer):
    """Shape/dtype inference for the ApplyFtrl primitive.

    On Ascend (TBE) the kernel produces three outputs; elsewhere a single one.
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register I/O names, validate use_locking, and record the backend."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # TBE (Ascend) kernels emit three outputs instead of one.
        self.is_tbe = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
                    lr_power_shape):
        """accum and linear must match var; output shape mirrors var."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        return (var_shape, var_shape, var_shape) if self.is_tbe else var_shape

    def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
        """Tensor inputs share a float16/float32 dtype; the four hyper-parameter
        inputs may be scalars or tensors of those types."""
        permitted = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_type, 'accum': accum_type,
                                          'linear': linear_type, 'grad': grad_type}, permitted, self.name)
        for arg_name, arg_type in (("lr", lr_type), ("l1", l1_type),
                                   ("l2", l2_type), ("lr_power", lr_power_type)):
            validator.check_scalar_or_tensor_type_same({arg_name: arg_type}, permitted, self.name)
        return (var_type, var_type, var_type) if self.is_tbe else var_type
class SparseApplyFtrl(PrimitiveWithCheck):
    """Declaration of the SparseApplyFtrl primitive.

    As a PrimitiveWithCheck it only validates hyper-parameters and input
    shapes/dtypes; var/accum/linear are updated in place.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Validate hyper-parameters: lr > 0, l1 >= 0, l2 >= 0, lr_power <= 0."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """var/accum/linear must match; grad rows are selected by 1-D indices."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Float16/float32 tensors for var/accum/linear/grad; int32 indices."""
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
    """Shape/dtype inference for the SparseApplyFtrlV2 primitive (FTRL with an
    additional l2_shrinkage hyper-parameter); var/accum/linear updated in place."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
        """Validate hyper-parameters: lr > 0, l1 >= 0, l2 >= 0, lr_power <= 0."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        # NOTE(review): l2_shrinkage is only type-checked, not range-checked,
        # unlike the other hyper-parameters - confirm this is intentional.
        self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """var/accum/linear must match; grad rows are selected by 1-D indices."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape, linear_shape
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Float16/float32 tensors for var/accum/linear/grad; int32 indices."""
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
    """Two-output primitive: output0 keeps input0's shape/dtype, output1 is
    input1 reduced over `axis` (honoring keep_dims) with input1's dtype."""
    @prim_attr_register
    def __init__(self, axis=(), keep_dims=False):
        """Register I/O names and validate the reduction attributes."""
        self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
        self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
    def infer_shape(self, input0_shape, input1_shape, input2_shape):
        """output0 mirrors input0; output1 is input1 reduced over self.axis_."""
        outshape0 = input0_shape
        outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
        return outshape0, outshape1
    def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
        """All three inputs must be tensors; outputs take input0/input1 dtypes."""
        validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
        validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
        validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
        return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
    """Shape/dtype inference for the Dropout primitive; emits (output, mask)."""

    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        # keep_prob must lie in (0, 1] (right-inclusive range check).
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)

    def infer_shape(self, x_shape):
        """Input must be at least rank 1; output and mask both keep its shape."""
        validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
        return x_shape, x_shape

    def infer_dtype(self, x_dtype):
        """x must be a float16/float32 tensor; both outputs keep its dtype."""
        validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x_dtype": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype, x_dtype
class DropoutGrad(PrimitiveWithInfer):
    """Shape/dtype inference for the Dropout gradient primitive."""

    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        # keep_prob must lie in (0, 1] (right-inclusive range check).
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)

    def infer_shape(self, dy_shape, mask_shape):
        """The output gradient mirrors dy's shape."""
        return dy_shape

    def infer_dtype(self, dy_dtype, mask_dtype):
        """dy must be a float16/float32 tensor; mask must be a tensor."""
        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, (mstype.float16, mstype.float32), self.name)
        return dy_dtype
class CTCLoss(PrimitiveWithInfer):
    """Shape/dtype inference for the CTCLoss primitive with sparse labels
    (labels given as an (indices, values) pair)."""
    @prim_attr_register
    def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
                 ignore_longer_outputs_than_inputs=False):
        """Register I/O names and validate the three boolean attributes."""
        self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
                                outputs=["loss", "gradient"])
        validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
        self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
        self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
                                                              [bool], self.name)
        validator.check_value_type("ignore_longer_outputs_than_inputs",
                                   ignore_longer_outputs_than_inputs, [bool], self.name)
        self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
    def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
        """inputs is rank 3 with the batch at dim 1 (checked against
        sequence_length). Returns ([batch] loss shape, gradient shape = inputs)."""
        validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
        validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
        validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
        validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
        validator.check('labels_indices size', labels_indices[0], 'labels_values size',
                        labels_values[0], Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
                        sequence_length[0], Rel.EQ, self.name)
        batch_size = []
        batch_size.append(inputs[1])
        return batch_size, inputs
    def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
        """inputs: float tensor; labels_indices: int64; values/lengths: int32.
        Both outputs take the input dtype."""
        valid_dtype = [mstype.float16, mstype.float32, mstype.double]
        validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
        validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
        validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
        return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
    """Shape/dtype inference for the CTCGreedyDecoder primitive."""
    @prim_attr_register
    def __init__(self, merge_repeated=True):
        """Validate the boolean merge_repeated flag."""
        self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)
    def infer_shape(self, inputs_shape, sequence_length_shape):
        """inputs must be rank 3 with dim 1 equal to sequence_length's batch
        size; decoded sizes are dynamic (-1) until runtime."""
        validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
                        sequence_length_shape[0], Rel.EQ, self.name)
        total_decoded_outputs = -1  # unknown until runtime
        decoded_indices_shape = [total_decoded_outputs, 2]
        decoded_values = [total_decoded_outputs]
        decoded_shape = [2]
        log_probability_shape = [inputs_shape[1], 1]
        return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape
    def infer_dtype(self, inputs_dtype, sequence_length_dtype):
        """inputs: float32/double; sequence_length: int32; the three decoded
        outputs are int64 tensors and log_probability keeps the input dtype."""
        validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
        decoded_type = mstype.tensor_type(mstype.int64)
        return decoded_type, decoded_type, decoded_type, inputs_dtype
class BasicLSTMCell(PrimitiveWithInfer):
    """Shape/dtype inference for the BasicLSTMCell primitive."""
    @prim_attr_register
    def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
        """Validate attributes; only the 'tanh' activation is accepted and
        keep_prob must lie in [0, 1]."""
        self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
        self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
        self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
        self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
        self.add_prim_attr("io_format", "ND")
    def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
        """Check consistency of x/h/c/w/b; w and b sizes are tied to
        4*h_shape[1]. All seven outputs share c's shape."""
        validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
        validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
        validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
        validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
        validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        ct_shape = c_shape
        ht_shape = c_shape
        it_shape = c_shape
        jt_shape = c_shape
        ft_shape = c_shape
        ot_shape = c_shape
        tanhct_shape = c_shape
        return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)
    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
        """x/h/w each must be float16/float32; c and b must share one dtype."""
        validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
        args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        # NOTE(review): the second output dtype is hard-coded to float16
        # regardless of the input dtypes - confirm this is intentional.
        return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
    """Shape/dtype inference for the InTopK primitive: x1 is a 2-D float
    tensor, x2 a 1-D int32 tensor with one entry per row of x1; the output
    is a boolean tensor shaped like x2."""
    @prim_attr_register
    def __init__(self, k):
        """Register I/O names and validate that k is an int."""
        self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
        validator.check_value_type("k", k, [int], self.name)
    def infer_dtype(self, x1_dtype, x2_dtype):
        """x1: float16/float32; x2: int32. Output is a bool tensor."""
        validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
        validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
        return mstype.tensor_type(mstype.bool_)
    def infer_shape(self, x1_shape, x2_shape):
        """x1 must be rank 2, x2 rank 1 with x2_shape[0] == x1_shape[0]."""
        validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
        validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
        validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
        return x2_shape
class LRN(PrimitiveWithInfer):
    """Shape/dtype inference for Local Response Normalization; input and
    output share a 4-D shape and a float16/float32 dtype."""
    @prim_attr_register
    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        """Validate attribute types; only 'ACROSS_CHANNELS' is supported and
        depth_radius must be non-negative."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type("depth_radius", depth_radius, [int], self.name)
        validator.check_value_type("bias", bias, [float], self.name)
        validator.check_value_type("alpha", alpha, [float], self.name)
        validator.check_value_type("beta", beta, [float], self.name)
        validator.check_value_type("norm_region", norm_region, [str], self.name)
        validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
        validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)
    def infer_dtype(self, x_dtype):
        """x must be a float16/float32 tensor; the output keeps its dtype."""
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
        return x_dtype
    def infer_shape(self, x_shape):
        """x must be 4-D; the output keeps its shape."""
        validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
        return x_shape
class CTCLossV2(PrimitiveWithInfer):
    """Shape/dtype inference for CTCLossV2 (dense int32 labels, float32 input)."""
    @prim_attr_register
    def __init__(self):
        pass
    def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
        """float32 input; int32 labels and length vectors. Outputs are float32."""
        validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
        validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
        return mstype.float32, mstype.float32
    def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
        """input is rank 3 with the batch at dim 1; both length vectors are
        1-D of batch size. Returns ([batch] loss shape, gradient shape = input)."""
        validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
        validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
        validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
        return (input_shape[1],), input_shape
| true | true |
f72a8af8ff8184287d32ace234a39d44ce65d605 | 25,935 | py | Python | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | import errno
import json
import os
import subprocess
import sys
import time
import uuid
import zipfile
import shutil
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.MetagenomeUtilsClient import MetagenomeUtils
from installed_clients.ReadsUtilsClient import ReadsUtils
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    newline = '\n' if prefix_newline else ''
    timestamp = '{0:.2f}'.format(time.time())
    print(newline + timestamp + ': ' + str(message))
class DASToolUtil:
DASTOOL_THREADS=2
BINNER_RESULT_DIRECTORY = 'das_tool_output_dir'
BINNER_BIN_RESULT_DIR = 'das_tool_output_dir_DASTool_bins'
    def __init__(self, config):
        """Cache service endpoints from *config* and construct the KBase SDK
        clients (DataFileUtil, ReadsUtils, AssemblyUtil, MetagenomeUtils)."""
        self.callback_url = config['SDK_CALLBACK_URL']
        self.scratch = config['scratch']
        self.shock_url = config['shock-url']
        self.ws_url = config['workspace-url']
        self.dfu = DataFileUtil(self.callback_url)
        self.ru = ReadsUtils(self.callback_url)
        self.au = AssemblyUtil(self.callback_url)
        self.mgu = MetagenomeUtils(self.callback_url)
    def validate_run_das_tool_params(self, params):
        """
        validate_run_das_tool_params:
                validates params passed to the run_kb_das_tool method;
                raises ValueError if a required key is missing
        """
        log('Start validating run_kb_das_tool params')

        # check for required parameters
        for p in ['assembly_ref', 'input_binned_contig_names', 'output_binned_contig_name', 'workspace_name']:
            if p not in params:
                raise ValueError('"{}" parameter is required, but missing'.format(p))
def mkdir_p(self, path):
"""
mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def run_command(self, command):
"""
run_command: run command and print result
"""
#os.chdir(self.scratch)
log('Start executing command:\n{}'.format(command))
log('Command is running from:\n{}'.format(self.scratch))
pipe = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output,stderr = pipe.communicate()
exitCode = pipe.returncode
if (exitCode == 0):
log('Executed command:\n{}\n'.format(command) +
'Exit Code: {}\n'.format(exitCode))
else:
error_msg = 'Error running command:\n{}\n'.format(command)
error_msg += 'Exit Code: {}\nOutput:\n{}\nStderr:\n{}'.format(exitCode, output, stderr)
raise ValueError(error_msg)
sys.exit(1)
return (output,stderr)
    def get_contig_file(self, assembly_ref):
        """
        get_contig_file: download the contig FASTA for a Genome/Assembly object
        via AssemblyUtil, unpack it into scratch, and return the local path
        """
        contig_file = self.au.get_assembly_as_fasta({'ref': assembly_ref}).get('path')
        sys.stdout.flush()
        contig_file = self.dfu.unpack_file({'file_path': contig_file})['file_path']
        return contig_file
    def retrieve_and_clean_assembly(self, task_params):
        """Fetch the assembly FASTA (from local scratch if already present,
        otherwise via AssemblyUtil) and reformat its headers with BBMap's
        reformat.sh; returns the path of the cleaned FASTA."""
        if os.path.exists(task_params['contig_file_path']):
            assembly = task_params['contig_file_path']
            print("FOUND ASSEMBLY ON LOCAL SCRATCH")
        else:
            # we are on njsw so lets copy it over to scratch
            assembly = self.get_contig_file(task_params['assembly_ref'])
        # remove spaces from fasta headers because that breaks bedtools
        # NOTE(review): splitting on '.fa' truncates at the first occurrence
        # anywhere in the absolute path (including '.fasta') - confirm inputs.
        assembly_clean = os.path.abspath(assembly).split('.fa')[0] + "_clean.fa"
        command = '/bin/bash reformat.sh in={} out={} addunderscore'.format(assembly,assembly_clean)
        log('running reformat command: {}'.format(command))
        out,err = self.run_command(command)
        return assembly_clean
def generate_output_file_list(self, result_directory):
"""
generate_output_file_list: zip result files and generate file_links for report
"""
log('Start packing result files')
output_files = list()
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self.mkdir_p(output_directory)
result_file = os.path.join(output_directory, 'das_tool_result.zip')
report_file = None
with zipfile.ZipFile(result_file, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as zip_file:
# grab all files we want to zip
for dirname, subdirs, files in os.walk(result_directory):
for file in files:
if (file.endswith('.sam') or
file.endswith('.bam') or
file.endswith('.bai') or
file.endswith('.summary')):
continue
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
continue
zip_file.write(os.path.join(dirname, file), file)
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
baseDir = os.path.basename(dirname)
for file in files:
full = os.path.join(dirname, file)
zip_file.write(full, os.path.join(baseDir, file))
output_files.append({'path': result_file,
'name': os.path.basename(result_file),
'label': os.path.basename(result_file),
'description': 'Files generated by kb_das_tool App'})
return output_files
    def generate_html_report(self, result_directory, assembly_ref, binned_contig_obj_ref):
        """
        generate_html_report: generate html summary report

        Fills the report_template.html that ships next to this module with
        overview counts and embedded DAS_Tool PDF plots, copies the PDFs
        alongside it, and uploads the whole directory to shock.

        Returns a one-element list of html_link dicts for KBaseReport.
        """
        log('Start generating html report')
        #html_report = list()
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self.mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'report.html')
        # get summary data from existing assembly object and bins_objects
        Summary_Table_Content = ''
        Overview_Content = ''
        (binned_contig_count, input_contig_count,
         total_bins_count) = self.generate_overview_info(assembly_ref,
                                                         binned_contig_obj_ref,
                                                         result_directory)
        # get pdfs produced by DAS_Tool (run with --create_plots)
        # NOTE(review): assumes DAS_Tool always emits exactly two PDFs —
        # confirm this holds for every search engine / option combination.
        pdf_filename_l = [f for f in os.listdir(self.BINNER_RESULT_DIRECTORY) if f.endswith('.pdf')]
        assert len(pdf_filename_l) == 2
        Overview_Content += '<p>Binned contigs: {}</p>'.format(binned_contig_count)
        Overview_Content += '<p>Input contigs: {}</p>'.format(input_contig_count)
        Overview_Content += '<p>Number of bins: {}</p>'.format(total_bins_count)
        for pdf_filename in pdf_filename_l:
            Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
            # NOTE(review): the identical embed line below puts each PDF into
            # the report twice — looks like accidental duplication; confirm
            # before removing.
            Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                # substitute the two placeholder markers in the template
                report_template = report_template.replace('<p>Overview_Content</p>',
                                                          Overview_Content)
                report_template = report_template.replace('Summary_Table_Content',
                                                          Summary_Table_Content)
                result_file.write(report_template)
        # copy pdfs into html dir so the <embed> relative paths resolve
        for pdf_filename in pdf_filename_l:
            shutil.copyfile(os.path.join(self.BINNER_RESULT_DIRECTORY, pdf_filename), os.path.join(output_directory, pdf_filename))
        # save html dir to shock
        def dir_to_shock(dir_path, name, description):
            '''
            Upload a directory (regular or html) to shock as a zip.

            name - for regular directories: the name of the flat (zip) file
                   returned to the ui; for html directories: the name of the
                   html file.
            '''
            dfu_fileToShock_ret = self.dfu.file_to_shock({
                'file_path': dir_path,
                'make_handle': 0,
                'pack': 'zip',
                })
            dir_shockInfo = {
                'shock_id': dfu_fileToShock_ret['shock_id'],
                'name': name,
                'description': description
                }
            return dir_shockInfo
        html_shockInfo = dir_to_shock(output_directory, 'report.html', 'Report html for DAS tool')
        # NOTE(review): the string literal below is a no-op left over from the
        # previous (path-based, non-shock) report flow; kept pending cleanup.
        """
        html_report.append({'path': result_file_path,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for kb_concoct App'})
        return html_report
        """
        return [html_shockInfo]
def generate_overview_info(self, assembly_ref, binned_contig_obj_ref, result_directory):
"""
_generate_overview_info: generate overview information from assembly and binnedcontig
"""
# get assembly and binned_contig objects that already have some data populated in them
assembly = self.dfu.get_objects({'object_refs': [assembly_ref]})['data'][0]
binned_contig = self.dfu.get_objects({'object_refs': [binned_contig_obj_ref]})['data'][0]
input_contig_count = assembly.get('data').get('num_contigs')
bins_directory = os.path.join(self.scratch, result_directory, self.BINNER_BIN_RESULT_DIR)
binned_contig_count = 0
total_bins_count = 0
total_bins = binned_contig.get('data').get('bins')
total_bins_count = len(total_bins)
for bin in total_bins:
binned_contig_count += len(bin.get('contigs'))
return (binned_contig_count, input_contig_count, total_bins_count)
def generate_report(self, binned_contig_obj_ref, params):
"""
generate_report: generate summary report
"""
log('Generating report')
params['result_directory'] = self.BINNER_RESULT_DIRECTORY
output_files = self.generate_output_file_list(params['result_directory'])
output_html_files = self.generate_html_report(params['result_directory'],
params['assembly_ref'],
binned_contig_obj_ref)
report_params = {
'message': '',
'workspace_name': params.get('workspace_name'),
'file_links': output_files,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 266,
'report_object_name': 'kb_das_tool_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def rename_and_standardize_bin_names(self):
"""
generate_command: generate renamed bins
"""
log("\n\nRunning rename_and_standardize_bin_names")
path_to_result_bins = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fa'):
os.rename(os.path.abspath(path_to_result_bins) + '/' +
file, os.path.abspath(path_to_result_bins) + '/bin.' +
file.split('.')[-2].zfill(3) + '.fasta') # need to change to 4 digits
def make_binned_contig_summary_file_for_binning_apps(self, task_params):
"""
generate_command: generate binned contig summary command
"""
log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
result_directory = task_params['result_directory']
path_to_result_bins = '{}/{}/'.format(result_directory, task_params['bin_result_directory'])
path_to_summary_file = path_to_result_bins + 'binned_contig.summary'
with open(path_to_summary_file, 'w+') as f:
f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fasta'):
genome_bin_fna_file = os.path.join(path_to_result_bins, file)
bbstats_output_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY,
genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
bbstats_output = self.generate_stats_for_genome_bins(task_params,
genome_bin_fna_file,
bbstats_output_file)
f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
bbstats_output['contig_bp'],
bbstats_output['gc_avg']))
f.close()
log('Finished make_binned_contig_summary_file_for_binning_apps function')
#
# def make_binned_contig_summary_file_for_binning_apps(self, task_params):
# """
# generate_command: generate binned contig summary command
# """
# log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
# path_to_result = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
# path_to_summary_file = path_to_result + '/binned_contig.summary'
# with open(path_to_summary_file, 'w+') as f:
# f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
# for dirname, subdirs, files in os.walk(path_to_result):
# for file in files:
# if file.endswith('.fasta'):
# genome_bin_fna_file = os.path.join(path_to_result, file)
# bbstats_output_file = os.path.join(path_to_result,
# genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
# bbstats_output = self.generate_stats_for_genome_bins(task_params,
# genome_bin_fna_file,
# bbstats_output_file)
# f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
# bbstats_output['contig_bp'],
# bbstats_output['gc_avg']))
# f.close()
# log('Finished make_binned_contig_summary_file_for_binning_apps function')
#
def generate_stats_for_genome_bins(self, task_params, genome_bin_fna_file, bbstats_output_file):
"""
generate_command: bbtools stats.sh command
"""
log("running generate_stats_for_genome_bins on {}".format(genome_bin_fna_file))
genome_bin_fna_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, genome_bin_fna_file)
command = '/bin/bash stats.sh in={} format=3 > {}'.format(genome_bin_fna_file, bbstats_output_file)
self.run_command(command)
bbstats_output = open(bbstats_output_file, 'r').readlines()[1]
n_scaffolds = bbstats_output.split('\t')[0]
n_contigs = bbstats_output.split('\t')[1]
scaf_bp = bbstats_output.split('\t')[2]
contig_bp = bbstats_output.split('\t')[3]
gap_pct = bbstats_output.split('\t')[4]
scaf_N50 = bbstats_output.split('\t')[5]
scaf_L50 = bbstats_output.split('\t')[6]
ctg_N50 = bbstats_output.split('\t')[7]
ctg_L50 = bbstats_output.split('\t')[8]
scaf_N90 = bbstats_output.split('\t')[9]
scaf_L90 = bbstats_output.split('\t')[10]
ctg_N90 = bbstats_output.split('\t')[11]
ctg_L90 = bbstats_output.split('\t')[12]
scaf_max = bbstats_output.split('\t')[13]
ctg_max = bbstats_output.split('\t')[14]
scaf_n_gt50K = bbstats_output.split('\t')[15]
scaf_pct_gt50K = bbstats_output.split('\t')[16]
gc_avg = float(bbstats_output.split('\t')[17]) * 100 # need to figure out if correct
gc_std = float(bbstats_output.split('\t')[18]) * 100 # need to figure out if correct
log('Generated generate_stats_for_genome_bins command: {}'.format(command))
return {'n_scaffolds': n_scaffolds,
'n_contigs': n_contigs,
'scaf_bp': scaf_bp,
'contig_bp': contig_bp,
'gap_pct': gap_pct,
'scaf_N50': scaf_N50,
'scaf_L50': scaf_L50,
'ctg_N50': ctg_N50,
'ctg_L50': ctg_L50,
'scaf_N90': scaf_N90,
'scaf_L90': scaf_L90,
'ctg_N90': ctg_N90,
'ctg_L90': ctg_L90,
'scaf_max': scaf_max,
'ctg_max': ctg_max,
'scaf_n_gt50K': scaf_n_gt50K,
'scaf_pct_gt50K': scaf_pct_gt50K,
'gc_avg': gc_avg,
'gc_std': gc_std
}
def generate_das_tool_input_files_and_commands_from_binned_contigs(self, params):
#params['binned_contig_list_file'] = binned_contig_list_file
binned_contig_names = params['input_binned_contig_names']
trimmed_binned_contig_name_list = []
contig_to_bin_file_name_list = []
for input_ref in binned_contig_names:
# next line needed for testing
# binned_contig = self.dfu.get_objects({'object_refs': [input_ref['binned_contig_obj_ref']]})['data'][0]
# next line needed in production only
binned_contig = self.dfu.get_objects({'object_refs': [input_ref]})['data'][0]
binned_contig_name = binned_contig.get('info')[1]
binned_contig_data = binned_contig.get('data')
bins = binned_contig_data.get('bins')
trimmed_binned_contig_name = binned_contig_name.split(".BinnedContig")[0]
trimmed_binned_contig_name_list.append(trimmed_binned_contig_name)
contig_to_bin_file_name = "{}_contigs_to_bins.tsv".format(trimmed_binned_contig_name)
contig_to_bin_file_name_list.append(contig_to_bin_file_name)
f = open(contig_to_bin_file_name, "w+")
for bin in bins:
bin_id = bin.get('bid')
trimmed_bin_id = bin_id.split(".fasta")[0]
contigs = bin.get('contigs')
for contig_id, contig_value in contigs.items():
f.write("{}\t{}.{}\n".format(contig_id, trimmed_binned_contig_name, trimmed_bin_id))
f.close()
#contig_to_bin_file_name_list = self.BINNER_RESULT_DIRECTORY + contig_to_bin_file_name
# temp = str(self.BINNER_RESULT_DIRECTORY) + '/'
# contig_to_bin_file_name_list = [temp + s for s in contig_to_bin_file_name_list]
return (trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
def generate_das_tool_command(self, params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list):
"""
generate_command: generate concoct params
"""
print("\n\nRunning generate_das_tool_command")
command = 'DAS_Tool '
command += '-i {} '.format(contig_to_bin_file_name_list)
command += '-l {} '.format(trimmed_binned_contig_name_list)
command += '-c {} '.format(params.get('contig_file_path'))
command += '-o {} '.format(self.BINNER_RESULT_DIRECTORY)
command += '--search_engine {} '.format(params.get('search_engine'))
command += '--score_threshold {} '.format(params.get('score_threshold'))
command += '--duplicate_penalty {} '.format(params.get('duplicate_penalty'))
command += '--megabin_penalty {} '.format(params.get('megabin_penalty'))
command += '--write_bin_evals {} '.format(params.get('write_bin_evals'))
command += '--create_plots {} '.format(params.get('create_plots'))
command += '--write_bins 1 '
command += '--write_unbinned 0 '
command += '-t {}'.format(self.DASTOOL_THREADS)
log('Generated das_tool command: {}'.format(command))
return command
    def run_das_tool(self, params):
        """
        run_das_tool: DAS_Tool app

        Orchestrates the whole run: stages the assembly, writes the
        contigs-to-bins TSVs, invokes DAS_Tool, standardizes the output
        bin names, builds the BinnedContig object and the report.

        required params:
            assembly_ref: Metagenome assembly object reference
            input_binned_contig_names: list of BinnedContig objects
            output_binned_contig_name: output BinnedContig object name
            workspace_name: the name of the workspace it gets saved to.
        optional params:
            search_engine; default diamond
            score_threshold; default 0.5
            duplicate_penalty; default 0.6
            megabin_penalty; default 0.5
            write_bin_evals; default 1
            create_plots; default 1
            write_bins; default 1
            write_unbinned; default 0
        ref: https://github.com/cmks/DAS_Tool
        """
        log('--->\nrunning DASToolUtil.run_das_tool\n' +
            'params:\n{}'.format(json.dumps(params, indent=1)))
        self.validate_run_das_tool_params(params)
        print("\n\nFinished running validate_run_das_tool_params")
        #
        # stage the assembly FASTA locally and remember where everything goes
        contig_file = self.get_contig_file(params.get('assembly_ref'))
        params['contig_file_path'] = contig_file
        result_directory = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY)
        params['result_directory'] = result_directory
        self.mkdir_p(result_directory)
        # NOTE(review): cwd is captured but never restored or used afterwards
        cwd = os.getcwd()
        log('Changing working dir to {}'.format(result_directory))
        os.chdir(result_directory)
        # the TSVs are written into the current working directory, which is
        # why the chdir above must happen first
        (trimmed_binned_contig_name_list, contig_to_bin_file_name_list) = self.generate_das_tool_input_files_and_commands_from_binned_contigs(params)
        # DAS_Tool takes comma-separated lists for -i and -l
        comma_symbol = ','
        trimmed_binned_contig_name_list = comma_symbol.join(trimmed_binned_contig_name_list)
        contig_to_bin_file_name_list = comma_symbol.join(contig_to_bin_file_name_list)
        log(os.listdir(result_directory))
        log("trimmed_binned_contig_name_list {}".format(trimmed_binned_contig_name_list))
        log("contig_to_bin_file_name_list {}".format(contig_to_bin_file_name_list))
        # binned_contig_to_file_params = {
        #     'input_ref': input_ref['binned_contig_obj_ref'],
        #     'save_to_shock': 1,
        #     'bin_file_directory': '{}/bin_set_{}/'.format(result_directory, i),
        #     'workspace_name': params.get('workspace_name'),
        # }
        #
        # self.mgu.binned_contigs_to_file(binned_contig_to_file_params)  # returns "binned_contig_obj_ref" of type "obj_ref" (An X/Y/Z style reference)
        #shutil.copytree(bin_file_directory, os.path.join(result_directory, bin_file_directory))
        #print('\n\n\n result: {}'.format(self.mgu.binned_contigs_to_file(binned_contig_to_file_params)))
        # run DAS_Tool itself
        command = self.generate_das_tool_command(params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
        log('\nWorking dir is {}'.format(result_directory))
        log('\nWorking dir is {}'.format(os.getcwd()))
        log('Changing working dir to {}'.format(result_directory))
        os.chdir(result_directory)
        self.run_command(command)
        self.rename_and_standardize_bin_names()
        os.chdir(self.scratch)
        task_params = {}
        task_params['result_directory'] = os.path.join(self.scratch)
        # relative path; resolved against scratch because of the chdir above
        task_params['bin_result_directory'] = os.path.join(self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins")
        # check to make sure bins were generated, otherwise no need to run the rest
        if not os.path.exists(task_params['bin_result_directory']):
            raise AssertionError('No bins produced - skipping the creation of a new BinnedContig object')
        self.make_binned_contig_summary_file_for_binning_apps(task_params)
        # build the output BinnedContig object from the standardized bin dir
        generate_binned_contig_param = {
            'file_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins"),
            'assembly_ref': params.get('assembly_ref'),
            'binned_contig_name': params.get('output_binned_contig_name'),
            'workspace_name': params.get('workspace_name')
        }
        binned_contig_obj_ref = self.mgu.file_to_binned_contigs(
            generate_binned_contig_param).get('binned_contig_obj_ref')
        reportVal = self.generate_report(binned_contig_obj_ref, params)
        returnVal = {
            'result_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY),
            'binned_contig_obj_ref': binned_contig_obj_ref
        }
        returnVal.update(reportVal)
        return returnVal
| 44.409247 | 154 | 0.609794 | import errno
import json
import os
import subprocess
import sys
import time
import uuid
import zipfile
import shutil
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.MetagenomeUtilsClient import MetagenomeUtils
from installed_clients.ReadsUtilsClient import ReadsUtils
def log(message, prefix_newline=False):
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
class DASToolUtil:
DASTOOL_THREADS=2
BINNER_RESULT_DIRECTORY = 'das_tool_output_dir'
BINNER_BIN_RESULT_DIR = 'das_tool_output_dir_DASTool_bins'
def __init__(self, config):
self.callback_url = config['SDK_CALLBACK_URL']
self.scratch = config['scratch']
self.shock_url = config['shock-url']
self.ws_url = config['workspace-url']
self.dfu = DataFileUtil(self.callback_url)
self.ru = ReadsUtils(self.callback_url)
self.au = AssemblyUtil(self.callback_url)
self.mgu = MetagenomeUtils(self.callback_url)
def validate_run_das_tool_params(self, params):
log('Start validating run_kb_das_tool params')
for p in ['assembly_ref', 'input_binned_contig_names', 'output_binned_contig_name', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def mkdir_p(self, path):
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def run_command(self, command):
log('Start executing command:\n{}'.format(command))
log('Command is running from:\n{}'.format(self.scratch))
pipe = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output,stderr = pipe.communicate()
exitCode = pipe.returncode
if (exitCode == 0):
log('Executed command:\n{}\n'.format(command) +
'Exit Code: {}\n'.format(exitCode))
else:
error_msg = 'Error running command:\n{}\n'.format(command)
error_msg += 'Exit Code: {}\nOutput:\n{}\nStderr:\n{}'.format(exitCode, output, stderr)
raise ValueError(error_msg)
sys.exit(1)
return (output,stderr)
def get_contig_file(self, assembly_ref):
contig_file = self.au.get_assembly_as_fasta({'ref': assembly_ref}).get('path')
sys.stdout.flush()
contig_file = self.dfu.unpack_file({'file_path': contig_file})['file_path']
return contig_file
def retrieve_and_clean_assembly(self, task_params):
if os.path.exists(task_params['contig_file_path']):
assembly = task_params['contig_file_path']
print("FOUND ASSEMBLY ON LOCAL SCRATCH")
else:
assembly = self.get_contig_file(task_params['assembly_ref'])
assembly_clean = os.path.abspath(assembly).split('.fa')[0] + "_clean.fa"
command = '/bin/bash reformat.sh in={} out={} addunderscore'.format(assembly,assembly_clean)
log('running reformat command: {}'.format(command))
out,err = self.run_command(command)
return assembly_clean
def generate_output_file_list(self, result_directory):
log('Start packing result files')
output_files = list()
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self.mkdir_p(output_directory)
result_file = os.path.join(output_directory, 'das_tool_result.zip')
report_file = None
with zipfile.ZipFile(result_file, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as zip_file:
for dirname, subdirs, files in os.walk(result_directory):
for file in files:
if (file.endswith('.sam') or
file.endswith('.bam') or
file.endswith('.bai') or
file.endswith('.summary')):
continue
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
continue
zip_file.write(os.path.join(dirname, file), file)
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
baseDir = os.path.basename(dirname)
for file in files:
full = os.path.join(dirname, file)
zip_file.write(full, os.path.join(baseDir, file))
output_files.append({'path': result_file,
'name': os.path.basename(result_file),
'label': os.path.basename(result_file),
'description': 'Files generated by kb_das_tool App'})
return output_files
def generate_html_report(self, result_directory, assembly_ref, binned_contig_obj_ref):
log('Start generating html report')
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self.mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'report.html')
Summary_Table_Content = ''
Overview_Content = ''
(binned_contig_count, input_contig_count,
total_bins_count) = self.generate_overview_info(assembly_ref,
binned_contig_obj_ref,
result_directory)
pdf_filename_l = [f for f in os.listdir(self.BINNER_RESULT_DIRECTORY) if f.endswith('.pdf')]
assert len(pdf_filename_l) == 2
Overview_Content += '<p>Binned contigs: {}</p>'.format(binned_contig_count)
Overview_Content += '<p>Input contigs: {}</p>'.format(input_contig_count)
Overview_Content += '<p>Number of bins: {}</p>'.format(total_bins_count)
for pdf_filename in pdf_filename_l:
Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Overview_Content</p>',
Overview_Content)
report_template = report_template.replace('Summary_Table_Content',
Summary_Table_Content)
result_file.write(report_template)
for pdf_filename in pdf_filename_l:
shutil.copyfile(os.path.join(self.BINNER_RESULT_DIRECTORY, pdf_filename), os.path.join(output_directory, pdf_filename))
def dir_to_shock(dir_path, name, description):
dfu_fileToShock_ret = self.dfu.file_to_shock({
'file_path': dir_path,
'make_handle': 0,
'pack': 'zip',
})
dir_shockInfo = {
'shock_id': dfu_fileToShock_ret['shock_id'],
'name': name,
'description': description
}
return dir_shockInfo
html_shockInfo = dir_to_shock(output_directory, 'report.html', 'Report html for DAS tool')
return [html_shockInfo]
def generate_overview_info(self, assembly_ref, binned_contig_obj_ref, result_directory):
assembly = self.dfu.get_objects({'object_refs': [assembly_ref]})['data'][0]
binned_contig = self.dfu.get_objects({'object_refs': [binned_contig_obj_ref]})['data'][0]
input_contig_count = assembly.get('data').get('num_contigs')
bins_directory = os.path.join(self.scratch, result_directory, self.BINNER_BIN_RESULT_DIR)
binned_contig_count = 0
total_bins_count = 0
total_bins = binned_contig.get('data').get('bins')
total_bins_count = len(total_bins)
for bin in total_bins:
binned_contig_count += len(bin.get('contigs'))
return (binned_contig_count, input_contig_count, total_bins_count)
def generate_report(self, binned_contig_obj_ref, params):
log('Generating report')
params['result_directory'] = self.BINNER_RESULT_DIRECTORY
output_files = self.generate_output_file_list(params['result_directory'])
output_html_files = self.generate_html_report(params['result_directory'],
params['assembly_ref'],
binned_contig_obj_ref)
report_params = {
'message': '',
'workspace_name': params.get('workspace_name'),
'file_links': output_files,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 266,
'report_object_name': 'kb_das_tool_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def rename_and_standardize_bin_names(self):
log("\n\nRunning rename_and_standardize_bin_names")
path_to_result_bins = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fa'):
os.rename(os.path.abspath(path_to_result_bins) + '/' +
file, os.path.abspath(path_to_result_bins) + '/bin.' +
file.split('.')[-2].zfill(3) + '.fasta')
def make_binned_contig_summary_file_for_binning_apps(self, task_params):
log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
result_directory = task_params['result_directory']
path_to_result_bins = '{}/{}/'.format(result_directory, task_params['bin_result_directory'])
path_to_summary_file = path_to_result_bins + 'binned_contig.summary'
with open(path_to_summary_file, 'w+') as f:
f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fasta'):
genome_bin_fna_file = os.path.join(path_to_result_bins, file)
bbstats_output_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY,
genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
bbstats_output = self.generate_stats_for_genome_bins(task_params,
genome_bin_fna_file,
bbstats_output_file)
f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
bbstats_output['contig_bp'],
bbstats_output['gc_avg']))
f.close()
log('Finished make_binned_contig_summary_file_for_binning_apps function')
# generate_command: generate binned contig summary command
# """
def generate_stats_for_genome_bins(self, task_params, genome_bin_fna_file, bbstats_output_file):
log("running generate_stats_for_genome_bins on {}".format(genome_bin_fna_file))
genome_bin_fna_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, genome_bin_fna_file)
command = '/bin/bash stats.sh in={} format=3 > {}'.format(genome_bin_fna_file, bbstats_output_file)
self.run_command(command)
bbstats_output = open(bbstats_output_file, 'r').readlines()[1]
n_scaffolds = bbstats_output.split('\t')[0]
n_contigs = bbstats_output.split('\t')[1]
scaf_bp = bbstats_output.split('\t')[2]
contig_bp = bbstats_output.split('\t')[3]
gap_pct = bbstats_output.split('\t')[4]
scaf_N50 = bbstats_output.split('\t')[5]
scaf_L50 = bbstats_output.split('\t')[6]
ctg_N50 = bbstats_output.split('\t')[7]
ctg_L50 = bbstats_output.split('\t')[8]
scaf_N90 = bbstats_output.split('\t')[9]
scaf_L90 = bbstats_output.split('\t')[10]
ctg_N90 = bbstats_output.split('\t')[11]
ctg_L90 = bbstats_output.split('\t')[12]
scaf_max = bbstats_output.split('\t')[13]
ctg_max = bbstats_output.split('\t')[14]
scaf_n_gt50K = bbstats_output.split('\t')[15]
scaf_pct_gt50K = bbstats_output.split('\t')[16]
gc_avg = float(bbstats_output.split('\t')[17]) * 100
gc_std = float(bbstats_output.split('\t')[18]) * 100
log('Generated generate_stats_for_genome_bins command: {}'.format(command))
return {'n_scaffolds': n_scaffolds,
'n_contigs': n_contigs,
'scaf_bp': scaf_bp,
'contig_bp': contig_bp,
'gap_pct': gap_pct,
'scaf_N50': scaf_N50,
'scaf_L50': scaf_L50,
'ctg_N50': ctg_N50,
'ctg_L50': ctg_L50,
'scaf_N90': scaf_N90,
'scaf_L90': scaf_L90,
'ctg_N90': ctg_N90,
'ctg_L90': ctg_L90,
'scaf_max': scaf_max,
'ctg_max': ctg_max,
'scaf_n_gt50K': scaf_n_gt50K,
'scaf_pct_gt50K': scaf_pct_gt50K,
'gc_avg': gc_avg,
'gc_std': gc_std
}
def generate_das_tool_input_files_and_commands_from_binned_contigs(self, params):
binned_contig_names = params['input_binned_contig_names']
trimmed_binned_contig_name_list = []
contig_to_bin_file_name_list = []
for input_ref in binned_contig_names:
binned_contig = self.dfu.get_objects({'object_refs': [input_ref]})['data'][0]
binned_contig_name = binned_contig.get('info')[1]
binned_contig_data = binned_contig.get('data')
bins = binned_contig_data.get('bins')
trimmed_binned_contig_name = binned_contig_name.split(".BinnedContig")[0]
trimmed_binned_contig_name_list.append(trimmed_binned_contig_name)
contig_to_bin_file_name = "{}_contigs_to_bins.tsv".format(trimmed_binned_contig_name)
contig_to_bin_file_name_list.append(contig_to_bin_file_name)
f = open(contig_to_bin_file_name, "w+")
for bin in bins:
bin_id = bin.get('bid')
trimmed_bin_id = bin_id.split(".fasta")[0]
contigs = bin.get('contigs')
for contig_id, contig_value in contigs.items():
f.write("{}\t{}.{}\n".format(contig_id, trimmed_binned_contig_name, trimmed_bin_id))
f.close()
return (trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
def generate_das_tool_command(self, params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list):
print("\n\nRunning generate_das_tool_command")
command = 'DAS_Tool '
command += '-i {} '.format(contig_to_bin_file_name_list)
command += '-l {} '.format(trimmed_binned_contig_name_list)
command += '-c {} '.format(params.get('contig_file_path'))
command += '-o {} '.format(self.BINNER_RESULT_DIRECTORY)
command += '--search_engine {} '.format(params.get('search_engine'))
command += '--score_threshold {} '.format(params.get('score_threshold'))
command += '--duplicate_penalty {} '.format(params.get('duplicate_penalty'))
command += '--megabin_penalty {} '.format(params.get('megabin_penalty'))
command += '--write_bin_evals {} '.format(params.get('write_bin_evals'))
command += '--create_plots {} '.format(params.get('create_plots'))
command += '--write_bins 1 '
command += '--write_unbinned 0 '
command += '-t {}'.format(self.DASTOOL_THREADS)
log('Generated das_tool command: {}'.format(command))
return command
def run_das_tool(self, params):
log('--->\nrunning DASToolUtil.run_das_tool\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self.validate_run_das_tool_params(params)
print("\n\nFinished running validate_run_das_tool_params")
contig_file = self.get_contig_file(params.get('assembly_ref'))
params['contig_file_path'] = contig_file
result_directory = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY)
params['result_directory'] = result_directory
self.mkdir_p(result_directory)
cwd = os.getcwd()
log('Changing working dir to {}'.format(result_directory))
os.chdir(result_directory)
(trimmed_binned_contig_name_list, contig_to_bin_file_name_list) = self.generate_das_tool_input_files_and_commands_from_binned_contigs(params)
comma_symbol = ','
trimmed_binned_contig_name_list = comma_symbol.join(trimmed_binned_contig_name_list)
contig_to_bin_file_name_list = comma_symbol.join(contig_to_bin_file_name_list)
log(os.listdir(result_directory))
log("trimmed_binned_contig_name_list {}".format(trimmed_binned_contig_name_list))
log("contig_to_bin_file_name_list {}".format(contig_to_bin_file_name_list))
l_command(params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
log('\nWorking dir is {}'.format(result_directory))
log('\nWorking dir is {}'.format(os.getcwd()))
log('Changing working dir to {}'.format(result_directory))
os.chdir(result_directory)
self.run_command(command)
self.rename_and_standardize_bin_names()
os.chdir(self.scratch)
task_params = {}
task_params['result_directory'] = os.path.join(self.scratch)
task_params['bin_result_directory'] = os.path.join(self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins")
if not os.path.exists(task_params['bin_result_directory']):
raise AssertionError('No bins produced - skipping the creation of a new BinnedContig object')
self.make_binned_contig_summary_file_for_binning_apps(task_params)
generate_binned_contig_param = {
'file_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins"),
'assembly_ref': params.get('assembly_ref'),
'binned_contig_name': params.get('output_binned_contig_name'),
'workspace_name': params.get('workspace_name')
}
binned_contig_obj_ref = self.mgu.file_to_binned_contigs(
generate_binned_contig_param).get('binned_contig_obj_ref')
reportVal = self.generate_report(binned_contig_obj_ref, params)
returnVal = {
'result_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY),
'binned_contig_obj_ref': binned_contig_obj_ref
}
returnVal.update(reportVal)
return returnVal
| true | true |
f72a8b1d41dcd8162bc15ea1ac9f0f974c941910 | 801 | py | Python | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | 1 | 2019-07-28T05:32:10.000Z | 2019-07-28T05:32:10.000Z | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | 4 | 2021-06-08T20:08:26.000Z | 2022-03-11T23:54:16.000Z | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | null | null | null | #!D:\School\UMD\INST326\Group Project\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
# Launcher for f2py (the Fortran-to-Python interface generator).
# It scans sys.argv for a "--<mode>" flag selecting which f2py backend to run,
# strips that flag, imports the matching backend's main(), and invokes it.
from __future__ import division, print_function
import os
import sys
# Find the first recognised "--<mode>" flag and remove it from sys.argv so the
# backend never sees it.  If no flag is present, `mode` is simply left at the
# final loop value, "2e-numpy", which is therefore the default backend.
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
# Tell the backend not to import scipy during startup.
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    # NOTE(review): "\\n" writes a literal backslash-n rather than a newline;
    # upstream f2py uses "\n" here -- confirm against the installed original.
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    # Numarray support is selected by passing -DNUMARRAY through to the backend.
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
# Dispatch to the selected backend's entry point.
main()
| 27.62069 | 67 | 0.645443 |
# f2py launcher: pick a backend from a "--<mode>" command-line flag, strip the
# flag from sys.argv, import that backend's main(), and run it.
from __future__ import division, print_function
import os
import sys
# Remove the first recognised "--<mode>" flag; if none is given, `mode` keeps
# the last loop value "2e-numpy" (the default backend).
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
# Prevent the backend from importing scipy at startup.
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    # NOTE(review): "\\n" emits a literal backslash-n; upstream uses "\n".
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    # Numarray support is requested via the -DNUMARRAY define.
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
# Run the selected backend.
main()
| true | true |
f72a8d3c2c03ab88dbb2873eded7eee4dbec4655 | 1,951 | py | Python | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | 1 | 2021-05-27T07:59:17.000Z | 2021-05-27T07:59:17.000Z | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | import unittest
from python_oop.testing.exercise.vehicle.project.vehicle import Vehicle
# from project.vehicle import Vehicle
class VehicleTest(unittest.TestCase):
    """Unit tests for the Vehicle model: construction, driving, refuelling."""

    def setUp(self):
        # Fresh instance per test so state never leaks between cases.
        self.vehicle = Vehicle(50.0, 300.0)

    def test_vehicle__init_method(self):
        """Constructor seeds fuel, capacity, horse power and default consumption."""
        vehicle = self.vehicle
        self.assertEqual(50.0, vehicle.fuel)
        self.assertEqual(50.0, vehicle.capacity)
        self.assertEqual(300.0, vehicle.horse_power)
        self.assertEqual(vehicle.DEFAULT_FUEL_CONSUMPTION, vehicle.fuel_consumption)

    def test_vehicle__fuel_capacity_if_fuel_changed(self):
        """Capacity must stay fixed even when the fuel level is mutated."""
        self.assertEqual(50.0, self.vehicle.capacity)
        self.vehicle.fuel = 20.0
        self.assertEqual(50.0, self.vehicle.capacity)

    def test_vehicle__str_method(self):
        """__str__ reports horse power, remaining fuel and fuel consumption."""
        vehicle = self.vehicle
        expected = (
            f"The vehicle has {vehicle.horse_power} "
            f"horse power with {vehicle.fuel} fuel left and {vehicle.fuel_consumption} fuel consumption"
        )
        self.assertEqual(expected, str(vehicle))

    def test_vehicle__drive_method_success(self):
        """drive(5) burns 6.25 units of fuel (50.0 -> 43.75)."""
        self.vehicle.drive(5)
        self.assertEqual(43.75, self.vehicle.fuel)

    def test_vehicle__drive_method__expect_exception(self):
        """Driving farther than the fuel allows raises 'Not enough fuel'."""
        with self.assertRaises(Exception) as ctx:
            self.vehicle.drive(100)
        self.assertEqual("Not enough fuel", str(ctx.exception))

    def test_vehicle__refuel_method_success(self):
        """Refuelling the exact amount burnt restores the tank to 50.0."""
        self.vehicle.drive(5)
        self.vehicle.refuel(6.25)
        self.assertEqual(50.0, self.vehicle.fuel)

    def test_vehicle__refuel_method__expect_exception(self):
        """Refuelling beyond capacity raises 'Too much fuel'."""
        with self.assertRaises(Exception) as ctx:
            self.vehicle.refuel(100)
        self.assertEqual("Too much fuel", str(ctx.exception))
# Allow running this test module directly with `python`, not just via a runner.
if __name__ == '__main__':
    unittest.main()
| 36.811321 | 128 | 0.708355 | import unittest
from python_oop.testing.exercise.vehicle.project.vehicle import Vehicle
class VehicleTest(unittest.TestCase):
def setUp(self):
self.vehicle = Vehicle(50.0, 300.0)
def test_vehicle__init_method(self):
self.assertEqual(50.0, self.vehicle.fuel)
self.assertEqual(50.0, self.vehicle.capacity)
self.assertEqual(300.0, self.vehicle.horse_power)
self.assertEqual(self.vehicle.DEFAULT_FUEL_CONSUMPTION, self.vehicle.fuel_consumption)
def test_vehicle__fuel_capacity_if_fuel_changed(self):
self.assertEqual(50.0, self.vehicle.capacity)
self.vehicle.fuel = 20.0
self.assertEqual(50.0, self.vehicle.capacity)
def test_vehicle__str_method(self):
expected_result = f"The vehicle has {self.vehicle.horse_power} " \
f"horse power with {self.vehicle.fuel} fuel left and {self.vehicle.fuel_consumption} fuel consumption"
actual_result = self.vehicle.__str__()
self.assertEqual(expected_result, actual_result)
def test_vehicle__drive_method_success(self):
self.vehicle.drive(5)
self.assertEqual(43.75, self.vehicle.fuel)
def test_vehicle__drive_method__expect_exception(self):
expected_result = "Not enough fuel"
with self.assertRaises(Exception) as context:
self.vehicle.drive(100)
self.assertEqual(expected_result, str(context.exception))
def test_vehicle__refuel_method_success(self):
self.vehicle.drive(5)
self.vehicle.refuel(6.25)
self.assertEqual(50.0, self.vehicle.fuel)
def test_vehicle__refuel_method__expect_exception(self):
expected_result = "Too much fuel"
with self.assertRaises(Exception) as context:
self.vehicle.refuel(100)
self.assertEqual(expected_result, str(context.exception))
if __name__ == '__main__':
unittest.main()
| true | true |
f72a8f6331ddd325a61a09e43d5a54e5309a6648 | 1,760 | py | Python | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all users.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Page through every user in the network and print each ID and name.

  Args:
    client: An initialized ad_manager.AdManagerClient.
  """
  user_service = client.GetService('UserService', version='v201911')
  statement = ad_manager.StatementBuilder(version='v201911')

  # Fetch one page of users per iteration until a page comes back empty.
  while True:
    response = user_service.getUsersByStatement(statement.ToStatement())
    if 'results' not in response or not len(response['results']):
      break
    for user in response['results']:
      print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
                                                              user['name']))
    statement.offset += statement.limit

  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads locally stored credentials before running the example.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
| 35.2 | 78 | 0.702841 |
from googleads import ad_manager
def main(client):
  """Page through all users via UserService and print each ID and name.

  Args:
    client: An initialized ad_manager.AdManagerClient.
  """
  user_service = client.GetService('UserService', version='v201911')
  statement = ad_manager.StatementBuilder(version='v201911')
  # Keep fetching pages until a response comes back without results.
  while True:
    response = user_service.getUsersByStatement(statement.ToStatement())
    if 'results' in response and len(response['results']):
      for user in response['results']:
        print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
                                                                user['name']))
      # Advance to the next page.
      statement.offset += statement.limit
    else:
      break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # LoadFromStorage reads locally stored credentials before running the example.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
| true | true |
f72a8ffa64b566ce6eec8c0c16ba2850ac0a95f6 | 2,245 | py | Python | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | null | null | null | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | 1 | 2021-08-16T09:16:55.000Z | 2021-08-16T09:16:55.000Z | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for running the Language Modelling example with LM1B.
This file is intentionally kept short. The majority for logic is in libraries
than can be easily tested and imported in Colab.
"""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
# Command-line interface: --workdir (output directory) and --config (an
# ml_collections config file, locked so training cannot mutate it).
FLAGS = flags.FLAGS
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
    'config',
    'configs/default.py',
    'File path to the training hyperparameter configuration.',
    lock_config=True)
# Both flags must be supplied explicitly on the command line.
flags.mark_flags_as_required(['config', 'workdir'])
def main(argv):
  """Entry point: validates CLI args, configures devices, runs training.

  Args:
    argv: Remaining command-line arguments after absl flag parsing; only the
      program name is expected here.

  Raises:
    app.UsageError: If extra positional command-line arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
  # it unavailable to JAX.
  tf.config.experimental.set_visible_devices([], 'GPU')
  logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())
  logging.info('JAX local devices: %r', jax.local_devices())
  # Add a note so that we can tell which task is which JAX host.
  # (Depending on the platform task 0 is not guaranteed to be host 0)
  platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '
                                       f'process_count: {jax.process_count()}')
  platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                       FLAGS.workdir, 'workdir')
  train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
  # Register JAX's own flags with absl before handing control to app.run.
  jax.config.parse_flags_with_absl()
  app.run(main)
| 34.015152 | 80 | 0.728285 |
from absl import app
from absl import flags
from absl import logging
from clu import platform
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
# Command-line interface: --workdir (output directory) and --config (an
# ml_collections config file, locked so training cannot mutate it).
FLAGS = flags.FLAGS
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
    'config',
    'configs/default.py',
    'File path to the training hyperparameter configuration.',
    lock_config=True)
# Both flags must be supplied explicitly on the command line.
flags.mark_flags_as_required(['config', 'workdir'])
def main(argv):
  """Entry point: validates CLI args, configures devices, runs training.

  Args:
    argv: Remaining command-line arguments after absl flag parsing; only the
      program name is expected here.

  Raises:
    app.UsageError: If extra positional command-line arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  # Hide GPUs from TensorFlow so TF cannot reserve memory needed by JAX.
  tf.config.experimental.set_visible_devices([], 'GPU')
  logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())
  logging.info('JAX local devices: %r', jax.local_devices())
  # Tag the work unit so each JAX host/task can be identified later.
  platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '
                                       f'process_count: {jax.process_count()}')
  platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                       FLAGS.workdir, 'workdir')
  train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
  # Register JAX's own flags with absl before handing control to app.run.
  jax.config.parse_flags_with_absl()
  app.run(main)
| true | true |
f72a91102600de8d03a0b64c5ee35b9767a86fd4 | 4,373 | py | Python | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class RangeSlider(Component):
"""A RangeSlider component.
A double slider with two handles.
Used for specifying a range of numerical values.
Keyword arguments:
- id (string; optional)
- marks (optional): Marks on the slider.
The key determines the position,
and the value determines what will show.
If you want to set the style of a specific mark point,
the value should be an object which
contains style and label properties.. marks has the following type: dict containing keys 'number'.
Those keys have the following types:
- number (optional): . number has the following type: string | dict containing keys 'style', 'label'.
Those keys have the following types:
- style (dict; optional)
- label (string; optional)
- value (list; optional): The value of the input
- allowCross (boolean; optional): allowCross could be set as true to allow those handles to cross.
- className (string; optional): Additional CSS class for the root DOM node
- count (number; optional): Determine how many ranges to render, and multiple handles
will be rendered (number + 1).
- disabled (boolean; optional): If true, the handles can't be moved.
- dots (boolean; optional): When the step value is greater than 1,
you can set the dots to true if you want to
render the slider with dots.
- included (boolean; optional): If the value is true, it means a continuous
value is included. Otherwise, it is an independent value.
- min (number; optional): Minimum allowed value of the slider
- max (number; optional): Maximum allowed value of the slider
- pushable (boolean | number; optional): pushable could be set as true to allow pushing of
surrounding handles when moving an handle.
When set to a number, the number will be the
minimum ensured distance between handles.
- step (number; optional): Value by which increments or decrements are made
- vertical (boolean; optional): If true, the slider will be vertical
- updatemode (a value equal to: 'mouseup', 'drag'; optional): Determines when the component should update
its value. If `mouseup`, then the slider
will only trigger its value when the user has
finished dragging the slider. If `drag`, then
the slider will update its value continuously
as it is being dragged.
Only use `drag` if your updates are fast.
- loading_state (optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading"""
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, value=Component.UNDEFINED, allowCross=Component.UNDEFINED, className=Component.UNDEFINED, count=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, pushable=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, updatemode=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        """Instantiate the RangeSlider component.

        Every prop defaults to ``Component.UNDEFINED`` so that only arguments
        the caller explicitly passed (recorded by ``@_explicitize_args`` in
        ``kwargs['_explicit_args']``) are forwarded to the React layer.
        """
        # Props understood by the underlying React RangeSlider component.
        self._prop_names = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self._type = 'RangeSlider'
        self._namespace = 'dash_core_components'
        # This component declares no wildcard (e.g. ``data-*``) attributes.
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self.available_wildcard_properties = []
        # Keep only the arguments the caller actually supplied.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # Generated required-argument guard: this component has no required
        # arguments, so the list is empty and the loop body never runs.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(RangeSlider, self).__init__(**args)
| 59.094595 | 490 | 0.732678 |
from dash.development.base_component import Component, _explicitize_args
class RangeSlider(Component):
    """Dash wrapper for the React RangeSlider: a double slider with two
    handles used to specify a range of numerical values.

    Auto-generated component shim; all behavior lives in the React layer.
    """

    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, value=Component.UNDEFINED, allowCross=Component.UNDEFINED, className=Component.UNDEFINED, count=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, pushable=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, updatemode=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        """Instantiate the component; only explicitly passed props (tracked
        via ``@_explicitize_args``) are forwarded to the React layer."""
        # Props understood by the underlying React component.
        self._prop_names = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self._type = 'RangeSlider'
        self._namespace = 'dash_core_components'
        # No wildcard (e.g. ``data-*``) attributes are declared.
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self.available_wildcard_properties = []
        # Keep only the arguments the caller actually supplied.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # Generated guard: no required arguments, so this loop never runs.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(RangeSlider, self).__init__(**args)
| true | true |
f72a9127d88cdefba5bdb5fb1446f97866851501 | 1,697 | py | Python | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 95 | 2021-06-04T02:22:36.000Z | 2022-03-25T03:19:51.000Z | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 3 | 2021-06-23T08:57:00.000Z | 2021-10-14T10:44:43.000Z | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 15 | 2021-06-04T03:09:12.000Z | 2022-03-30T08:23:05.000Z | import torch
import numpy as np
class ScheduledOptim:
    """Thin wrapper around Adam that tracks a warmup/anneal LR schedule.

    NOTE(review): ``_update_learning_rate`` applies the constant ``init_lr``
    every step; the Noam-style factor computed by ``_get_lr_scale`` is never
    used by it -- confirm whether that is intentional.
    """

    def __init__(self, model, train_config, model_config, current_step):
        opt_cfg = train_config["optimizer"]
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=opt_cfg["betas"],
            eps=opt_cfg["eps"],
            weight_decay=opt_cfg["weight_decay"],
        )
        # Schedule parameters (model_config is accepted for interface
        # compatibility but not read here).
        self.n_warmup_steps = opt_cfg["warm_up_step"]
        self.anneal_steps = opt_cfg["anneal_steps"]
        self.anneal_rate = opt_cfg["anneal_rate"]
        self.current_step = current_step
        self.init_lr = opt_cfg["init_lr"]

    def step_and_update_lr(self):
        """Advance the schedule bookkeeping, then apply one optimizer step."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        """Clear accumulated gradients on the wrapped optimizer."""
        self._optimizer.zero_grad()

    def load_state_dict(self, path):
        """Restore the wrapped optimizer's state dict."""
        self._optimizer.load_state_dict(path)

    def _get_lr_scale(self):
        """Return the Noam-style scale: linear warmup then inverse-sqrt decay,
        multiplied by ``anneal_rate`` for each anneal boundary already passed.
        """
        warmup_scale = np.power(self.n_warmup_steps, -1.5) * self.current_step
        decay_scale = np.power(self.current_step, -0.5)
        scale = np.min([decay_scale, warmup_scale])
        for boundary in self.anneal_steps:
            if self.current_step > boundary:
                scale = scale * self.anneal_rate
        return scale

    def _update_learning_rate(self):
        """Increment the step counter and assign ``init_lr`` to every group."""
        self.current_step += 1
        for group in self._optimizer.param_groups:
            group["lr"] = self.init_lr
| 32.634615 | 73 | 0.592222 | import torch
import numpy as np
class ScheduledOptim:
    """Wraps Adam with step bookkeeping for a warmup/anneal LR schedule."""

    def __init__(self, model, train_config, model_config, current_step):
        # model_config is accepted for interface compatibility but not read.
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=train_config["optimizer"]["betas"],
            eps=train_config["optimizer"]["eps"],
            weight_decay=train_config["optimizer"]["weight_decay"],
        )
        # Schedule parameters pulled from the training config.
        self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
        self.anneal_steps = train_config["optimizer"]["anneal_steps"]
        self.anneal_rate = train_config["optimizer"]["anneal_rate"]
        self.current_step = current_step
        self.init_lr = train_config["optimizer"]["init_lr"]

    def step_and_update_lr(self):
        """Advance the schedule bookkeeping, then take one optimizer step."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        """Clear accumulated gradients on the wrapped optimizer."""
        self._optimizer.zero_grad()

    def load_state_dict(self, path):
        """Restore the wrapped optimizer's state dict."""
        self._optimizer.load_state_dict(path)

    def _get_lr_scale(self):
        """Noam-style scale: min of inverse-sqrt decay and linear warmup,
        multiplied by ``anneal_rate`` for each anneal boundary passed."""
        lr = np.min(
            [
                np.power(self.current_step, -0.5),
                np.power(self.n_warmup_steps, -1.5) * self.current_step,
            ]
        )
        for s in self.anneal_steps:
            if self.current_step > s:
                lr = lr * self.anneal_rate
        return lr

    def _update_learning_rate(self):
        """Increment the step counter and assign the learning rate.

        NOTE(review): the rate applied is the constant ``init_lr``; the
        warmup/anneal factor from ``_get_lr_scale`` is not used here --
        confirm whether that is intentional.
        """
        self.current_step += 1
        lr = self.init_lr
        for param_group in self._optimizer.param_groups:
            param_group["lr"] = lr
| true | true |
f72a913c6611cb848d4d5714cbbb1562b72dda22 | 14,624 | py | Python | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ResumeSubscriptionAmendment(object):
    """BillForward amendment that resumes a paused subscription.

    NOTE: This class was originally produced by the swagger code generator;
    the generated public interface (constructor signature, properties,
    ``swagger_types`` / ``attribute_map``) is preserved exactly so that
    (de)serialization keeps working.
    """

    def __init__(self, created=None, changed_by=None, updated=None, type=None,
                 id=None, organization_id=None, subscription_id=None,
                 amendment_type=None, actioning_time=None, actioned_time=None,
                 state=None, deleted=False):
        """Build a ResumeSubscriptionAmendment model.

        All arguments are optional; values are stored on private backing
        fields, so ``__init__`` itself performs no validation (the property
        setters validate `type`, `amendment_type` and `state`).
        """
        # Maps attribute name -> swagger type name (consumed by to_dict()).
        self.swagger_types = {
            'created': 'datetime',
            'changed_by': 'str',
            'updated': 'datetime',
            'type': 'str',
            'id': 'str',
            'organization_id': 'str',
            'subscription_id': 'str',
            'amendment_type': 'str',
            'actioning_time': 'datetime',
            'actioned_time': 'datetime',
            'state': 'str',
            'deleted': 'bool'
        }

        # Maps attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'created': 'created',
            'changed_by': 'changedBy',
            'updated': 'updated',
            'type': '@type',
            'id': 'id',
            'organization_id': 'organizationID',
            'subscription_id': 'subscriptionID',
            'amendment_type': 'amendmentType',
            'actioning_time': 'actioningTime',
            'actioned_time': 'actionedTime',
            'state': 'state',
            'deleted': 'deleted'
        }

        self._created = created
        self._changed_by = changed_by
        self._updated = updated
        self._type = type
        self._id = id
        self._organization_id = organization_id
        self._subscription_id = subscription_id
        self._amendment_type = amendment_type
        self._actioning_time = actioning_time
        self._actioned_time = actioned_time
        self._state = state
        self._deleted = deleted

    @property
    def created(self):
        """datetime: UTC DateTime when the object was created."""
        return self._created

    @created.setter
    def created(self, created):
        self._created = created

    @property
    def changed_by(self):
        """str: ID of the user who last updated the entity."""
        return self._changed_by

    @changed_by.setter
    def changed_by(self, changed_by):
        self._changed_by = changed_by

    @property
    def updated(self):
        """datetime: UTC DateTime when the object was last updated."""
        return self._updated

    @updated.setter
    def updated(self, updated):
        self._updated = updated

    @property
    def type(self):
        """str: Concrete amendment class name (API ``@type`` discriminator)."""
        return self._type

    @type.setter
    def type(self, type):
        """Set `type`; raises ValueError for values outside the allowed set."""
        allowed_values = ["InvoiceOutstandingChargesAmendment", "IssueInvoiceAmendment", "PricingComponentValueAmendment", "InvoiceRecalculationAmendment", "CancellationAmendment", "InvoiceNextExecutionAttemptAmendment", "FixedTermExpiryAmendment", "EndTrialAmendment", "ProductRatePlanMigrationAmendment", "AmendmentDiscardAmendment", "UpdateComponentValueAmendment", "ServiceEndAmendment", "ResumeSubscriptionAmendment", "CreateSubscriptionChargeAmendment", "TimerAmendment"]
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"
                .format(type, allowed_values)
            )
        self._type = type

    @property
    def id(self):
        """str: Server-assigned identifier of this amendment."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def organization_id(self):
        """str: ID of the owning organization."""
        return self._organization_id

    @organization_id.setter
    def organization_id(self, organization_id):
        self._organization_id = organization_id

    @property
    def subscription_id(self):
        """str: ID of the subscription this amendment applies to."""
        return self._subscription_id

    @subscription_id.setter
    def subscription_id(self, subscription_id):
        self._subscription_id = subscription_id

    @property
    def amendment_type(self):
        """str: Type of amendment."""
        return self._amendment_type

    @amendment_type.setter
    def amendment_type(self, amendment_type):
        """Set `amendment_type`; raises ValueError for unknown values."""
        allowed_values = ["InvoiceNextExecutionAttempt", "Cancellation", "PricingComponentValue", "AmendmentDiscard", "Compound", "FixedTermExpiry", "InvoiceRecalculation", "EndTrial", "InvoiceOutstandingCharges", "IssueInvoice", "ProductRatePlanMigration", "UpdateComponentValue", "ServiceEnd", "ResumeSubscription", "CreateSubscriptionCharge", "Timer"]
        if amendment_type not in allowed_values:
            raise ValueError(
                "Invalid value for `amendment_type` ({0}), must be one of {1}"
                .format(amendment_type, allowed_values)
            )
        self._amendment_type = amendment_type

    @property
    def actioning_time(self):
        """datetime: When the amendment will run."""
        return self._actioning_time

    @actioning_time.setter
    def actioning_time(self, actioning_time):
        self._actioning_time = actioning_time

    @property
    def actioned_time(self):
        """datetime: The time the amendment completed."""
        return self._actioned_time

    @actioned_time.setter
    def actioned_time(self, actioned_time):
        self._actioned_time = actioned_time

    @property
    def state(self):
        """str: Amendment lifecycle state.

        Pending (to be actioned in the future), Succeeded (actioning
        completed), Failed (actioning attempted but had no effect) or
        Discarded (cancelled before being actioned). Default: Pending.
        """
        return self._state

    @state.setter
    def state(self, state):
        """Set `state`; raises ValueError for values outside the allowed set."""
        allowed_values = ["Pending", "Succeeded", "Failed", "Discarded"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )
        self._state = state

    @property
    def deleted(self):
        """bool: Whether the amendment is deleted."""
        return self._deleted

    @deleted.setter
    def deleted(self, deleted):
        self._deleted = deleted

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dicts.
        """
        result = {}
        # Fix/modernization: iterate dict keys directly instead of going
        # through six.iteritems -- equivalent on Python 3 and removes the
        # block's dependency on six.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with identical fields."""
        if not isinstance(other, ResumeSubscriptionAmendment):
            # Fix: the generated comparison accessed other.__dict__ directly,
            # which raised AttributeError for unrelated types such as int.
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the objects are not equal."""
        return not self == other
| 34.088578 | 477 | 0.601409 |
from pprint import pformat
from six import iteritems
import re
class ResumeSubscriptionAmendment(object):
    """Swagger-generated model for a resume-subscription amendment.

    Plain attribute holders exposed through pass-through properties; the
    ``type``, ``amendment_type`` and ``state`` setters validate against
    closed vocabularies and raise ``ValueError`` on anything else.
    ``to_dict``/``to_str`` provide serialisation helpers.
    """
    def __init__(self, created=None, changed_by=None, updated=None, type=None, id=None, organization_id=None, subscription_id=None, amendment_type=None, actioning_time=None, actioned_time=None, state=None, deleted=False):
        # Attribute name -> declared swagger type (drives to_dict()).
        self.swagger_types = {
            'created': 'datetime',
            'changed_by': 'str',
            'updated': 'datetime',
            'type': 'str',
            'id': 'str',
            'organization_id': 'str',
            'subscription_id': 'str',
            'amendment_type': 'str',
            'actioning_time': 'datetime',
            'actioned_time': 'datetime',
            'state': 'str',
            'deleted': 'bool'
        }
        # Python attribute name -> wire (JSON) field name.
        self.attribute_map = {
            'created': 'created',
            'changed_by': 'changedBy',
            'updated': 'updated',
            'type': '@type',
            'id': 'id',
            'organization_id': 'organizationID',
            'subscription_id': 'subscriptionID',
            'amendment_type': 'amendmentType',
            'actioning_time': 'actioningTime',
            'actioned_time': 'actionedTime',
            'state': 'state',
            'deleted': 'deleted'
        }
        self._created = created
        self._changed_by = changed_by
        self._updated = updated
        self._type = type
        self._id = id
        self._organization_id = organization_id
        self._subscription_id = subscription_id
        self._amendment_type = amendment_type
        self._actioning_time = actioning_time
        self._actioned_time = actioned_time
        self._state = state
        self._deleted = deleted
    # Simple pass-through accessors for each field follow; only the
    # setters for `type`, `amendment_type` and `state` add validation.
    @property
    def created(self):
        return self._created
    @created.setter
    def created(self, created):
        self._created = created
    @property
    def changed_by(self):
        return self._changed_by
    @changed_by.setter
    def changed_by(self, changed_by):
        self._changed_by = changed_by
    @property
    def updated(self):
        return self._updated
    @updated.setter
    def updated(self, updated):
        self._updated = updated
    @property
    def type(self):
        return self._type
    @type.setter
    def type(self, type):
        # Closed vocabulary of amendment resource type discriminators.
        allowed_values = ["InvoiceOutstandingChargesAmendment", "IssueInvoiceAmendment", "PricingComponentValueAmendment", "InvoiceRecalculationAmendment", "CancellationAmendment", "InvoiceNextExecutionAttemptAmendment", "FixedTermExpiryAmendment", "EndTrialAmendment", "ProductRatePlanMigrationAmendment", "AmendmentDiscardAmendment", "UpdateComponentValueAmendment", "ServiceEndAmendment", "ResumeSubscriptionAmendment", "CreateSubscriptionChargeAmendment", "TimerAmendment"]
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"
                .format(type, allowed_values)
            )
        self._type = type
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, id):
        self._id = id
    @property
    def organization_id(self):
        return self._organization_id
    @organization_id.setter
    def organization_id(self, organization_id):
        self._organization_id = organization_id
    @property
    def subscription_id(self):
        return self._subscription_id
    @subscription_id.setter
    def subscription_id(self, subscription_id):
        self._subscription_id = subscription_id
    @property
    def amendment_type(self):
        return self._amendment_type
    @amendment_type.setter
    def amendment_type(self, amendment_type):
        # Closed vocabulary of amendment kinds.
        allowed_values = ["InvoiceNextExecutionAttempt", "Cancellation", "PricingComponentValue", "AmendmentDiscard", "Compound", "FixedTermExpiry", "InvoiceRecalculation", "EndTrial", "InvoiceOutstandingCharges", "IssueInvoice", "ProductRatePlanMigration", "UpdateComponentValue", "ServiceEnd", "ResumeSubscription", "CreateSubscriptionCharge", "Timer"]
        if amendment_type not in allowed_values:
            raise ValueError(
                "Invalid value for `amendment_type` ({0}), must be one of {1}"
                .format(amendment_type, allowed_values)
            )
        self._amendment_type = amendment_type
    @property
    def actioning_time(self):
        return self._actioning_time
    @actioning_time.setter
    def actioning_time(self, actioning_time):
        self._actioning_time = actioning_time
    @property
    def actioned_time(self):
        return self._actioned_time
    @actioned_time.setter
    def actioned_time(self, actioned_time):
        self._actioned_time = actioned_time
    @property
    def state(self):
        return self._state
    @state.setter
    def state(self, state):
        # Pending / Succeeded / Failed / Discarded; anything else raises.
        allowed_values = ["Pending", "Succeeded", "Failed", "Discarded"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )
        self._state = state
    @property
    def deleted(self):
        return self._deleted
    @deleted.setter
    def deleted(self, deleted):
        self._deleted = deleted
    def to_dict(self):
        """Recursively serialise the model to a plain dict.

        Values exposing ``to_dict`` are serialised in place, including
        inside list entries and dict values.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pformat(self.to_dict())
    def __repr__(self):
        """Delegate ``repr()`` (and ``print``/``pprint``) to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        # NOTE(review): assumes `other` has a __dict__; comparing against
        # e.g. an int raises AttributeError instead of returning False.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| true | true |
f72a91979056173940df72257c9632371d082496 | 7,763 | py | Python | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import six
from zun.db import api as dbapi
from zun.objects import base
LOG = logging.getLogger(__name__)
@base.ZunObjectRegistry.register
class ContainerAction(base.ZunPersistentObject, base.ZunObject):
    """Versioned object recording a high-level action on a container.

    Rows are written through the ``dbapi.action_*`` calls; each instance
    mirrors one ``container_actions`` DB row.
    """
    # Version 1.0: Initial version
    # Version 1.1: Add uuid column.
    # Version 1.2: Remove uuid column.
    VERSION = '1.2'
    fields = {
        'id': fields.IntegerField(),
        'action': fields.StringField(nullable=True),
        'container_uuid': fields.UUIDField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'message': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(context, action, db_action):
        """Copy DB row attributes onto `action`; absent ones become None."""
        for field in action.fields:
            setattr(action, field, getattr(db_action, field, None))
        action.obj_reset_changes()
        return action
    @staticmethod
    def _from_db_object_list(context, cls, db_objects):
        """Converts a list of database entities to a list of formal objects."""
        return [ContainerAction._from_db_object(context, cls(context), obj)
                for obj in db_objects]
    @staticmethod
    def pack_action_start(context, container_uuid, action_name):
        """Build the DB values dict recording the start of an action."""
        values = {'request_id': context.request_id,
                  'container_uuid': container_uuid,
                  'user_id': context.user_id,
                  'project_id': context.project_id,
                  'action': action_name,
                  'start_time': context.timestamp}
        return values
    @staticmethod
    def pack_action_finish(context, container_uuid, action_name,
                           exc_val=None, exc_tb=None):
        """Build the DB values dict recording the end of an action.

        NOTE(review): only the presence of ``exc_tb`` is consulted here --
        the stored message is the literal 'Error' and ``exc_val`` is
        accepted but ignored.
        """
        values = {'request_id': context.request_id,
                  'container_uuid': container_uuid,
                  'action': action_name,
                  'finish_time': timeutils.utcnow()}
        if exc_tb is not None:
            values['message'] = 'Error'
        return values
    @base.remotable_classmethod
    def get_by_request_id(cls, context, container_uuid, request_id):
        """Return the action for (container, request id), or None."""
        db_action = dbapi.action_get_by_request_id(context, container_uuid,
                                                   request_id)
        if db_action:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def action_start(cls, context, container_uuid, action_name,
                     want_result=True):
        """Persist the start of an action; optionally return the object."""
        values = cls.pack_action_start(context, container_uuid, action_name)
        db_action = dbapi.action_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def action_finish(cls, context, container_uuid, action_name, exc_val=None,
                      exc_tb=None, want_result=True):
        """Persist the end of an action; optionally return the object."""
        values = cls.pack_action_finish(context, container_uuid, action_name,
                                        exc_val=exc_val, exc_tb=exc_tb)
        db_action = dbapi.action_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def get_by_container_uuid(cls, context, container_uuid):
        """Return all actions recorded for the given container."""
        db_actions = dbapi.actions_get(context, container_uuid)
        return ContainerAction._from_db_object_list(context, cls, db_actions)
@base.ZunObjectRegistry.register
class ContainerActionEvent(base.ZunPersistentObject, base.ZunObject):
    """Versioned object for a single event within a container action."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(),
        'event': fields.StringField(nullable=True),
        'action_id': fields.IntegerField(nullable=True),
        'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'result': fields.StringField(nullable=True),
        'traceback': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(context, event, db_event):
        """Copy DB row attributes onto `event` (row must supply every field)."""
        for field in event.fields:
            setattr(event, field, db_event[field])
        event.obj_reset_changes()
        return event
    @staticmethod
    def _from_db_object_list(context, cls, db_objects):
        """Converts a list of database entities to a list of formal objects."""
        return [ContainerActionEvent._from_db_object(context, cls(context),
                                                     obj)
                for obj in db_objects]
    @staticmethod
    def pack_action_event_start(context, container_uuid, event_name):
        """Build the DB values dict recording the start of an event."""
        values = {'event': event_name,
                  'container_uuid': container_uuid,
                  'request_id': context.request_id,
                  'start_time': timeutils.utcnow()}
        return values
    @staticmethod
    def pack_action_event_finish(context, container_uuid, event_name,
                                 exc_val=None, exc_tb=None):
        """Build the DB values dict recording the end of an event.

        NOTE(review): 'message' is packed here even though it is not among
        this object's declared fields -- presumably consumed by the DB
        layer only; confirm against dbapi.action_event_finish.
        """
        values = {'event': event_name,
                  'container_uuid': container_uuid,
                  'request_id': context.request_id,
                  'finish_time': timeutils.utcnow()}
        if exc_tb is None:
            values['result'] = 'Success'
        else:
            values['result'] = 'Error'
            values['message'] = exc_val
            values['traceback'] = exc_tb
        return values
    @base.remotable_classmethod
    def event_start(cls, context, container_uuid, event_name,
                    want_result=True):
        """Persist the start of an event; optionally return the object."""
        values = cls.pack_action_event_start(context, container_uuid,
                                             event_name)
        db_event = dbapi.action_event_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_event)
    @base.remotable_classmethod
    def event_finish(cls, context, container_uuid, event_name, exc_val=None,
                     exc_tb=None, want_result=None):
        """Persist the end of an event, normalising exception info to text."""
        if exc_val:
            exc_val = six.text_type(exc_val)
        # Accept either a preformatted traceback string or a raw tb object.
        if exc_tb and not isinstance(exc_tb, six.string_types):
            exc_tb = ''.join(traceback.format_tb(exc_tb))
        values = cls.pack_action_event_finish(context, container_uuid,
                                              event_name, exc_val=exc_val,
                                              exc_tb=exc_tb)
        db_event = dbapi.action_event_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_event)
    @base.remotable_classmethod
    def get_by_action(cls, context, action_id):
        """Return all events recorded for the given action id."""
        db_events = dbapi.action_events_get(context, action_id)
        return ContainerActionEvent._from_db_object_list(context, cls,
                                                         db_events)
| 40.643979 | 79 | 0.637382 |
import traceback
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import six
from zun.db import api as dbapi
from zun.objects import base
LOG = logging.getLogger(__name__)
@base.ZunObjectRegistry.register
class ContainerAction(base.ZunPersistentObject, base.ZunObject):
    """Versioned object recording a high-level action on a container."""
    VERSION = '1.2'
    fields = {
        'id': fields.IntegerField(),
        'action': fields.StringField(nullable=True),
        'container_uuid': fields.UUIDField(nullable=True),
        'request_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'message': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(context, action, db_action):
        """Copy DB row attributes onto `action`; absent ones become None."""
        for field in action.fields:
            setattr(action, field, getattr(db_action, field, None))
        action.obj_reset_changes()
        return action
    @staticmethod
    def _from_db_object_list(context, cls, db_objects):
        """Convert a list of DB rows into a list of objects."""
        return [ContainerAction._from_db_object(context, cls(context), obj)
                for obj in db_objects]
    @staticmethod
    def pack_action_start(context, container_uuid, action_name):
        """Build the DB values dict recording the start of an action."""
        values = {'request_id': context.request_id,
                  'container_uuid': container_uuid,
                  'user_id': context.user_id,
                  'project_id': context.project_id,
                  'action': action_name,
                  'start_time': context.timestamp}
        return values
    @staticmethod
    def pack_action_finish(context, container_uuid, action_name,
                           exc_val=None, exc_tb=None):
        """Build the DB values dict recording the end of an action.

        Only the presence of ``exc_tb`` is consulted; ``exc_val`` is
        accepted but unused and the message is the literal 'Error'.
        """
        values = {'request_id': context.request_id,
                  'container_uuid': container_uuid,
                  'action': action_name,
                  'finish_time': timeutils.utcnow()}
        if exc_tb is not None:
            values['message'] = 'Error'
        return values
    @base.remotable_classmethod
    def get_by_request_id(cls, context, container_uuid, request_id):
        """Return the action for (container, request id), or None."""
        db_action = dbapi.action_get_by_request_id(context, container_uuid,
                                                   request_id)
        if db_action:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def action_start(cls, context, container_uuid, action_name,
                     want_result=True):
        """Persist the start of an action; optionally return the object."""
        values = cls.pack_action_start(context, container_uuid, action_name)
        db_action = dbapi.action_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def action_finish(cls, context, container_uuid, action_name, exc_val=None,
                      exc_tb=None, want_result=True):
        """Persist the end of an action; optionally return the object."""
        values = cls.pack_action_finish(context, container_uuid, action_name,
                                        exc_val=exc_val, exc_tb=exc_tb)
        db_action = dbapi.action_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_action)
    @base.remotable_classmethod
    def get_by_container_uuid(cls, context, container_uuid):
        """Return all actions recorded for the given container."""
        db_actions = dbapi.actions_get(context, container_uuid)
        return ContainerAction._from_db_object_list(context, cls, db_actions)
@base.ZunObjectRegistry.register
class ContainerActionEvent(base.ZunPersistentObject, base.ZunObject):
    """Versioned object for a single event within a container action."""
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(),
        'event': fields.StringField(nullable=True),
        'action_id': fields.IntegerField(nullable=True),
        'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'result': fields.StringField(nullable=True),
        'traceback': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(context, event, db_event):
        """Copy DB row attributes onto `event` (row must supply every field)."""
        for field in event.fields:
            setattr(event, field, db_event[field])
        event.obj_reset_changes()
        return event
    @staticmethod
    def _from_db_object_list(context, cls, db_objects):
        """Convert a list of DB rows into a list of objects."""
        return [ContainerActionEvent._from_db_object(context, cls(context),
                                                     obj)
                for obj in db_objects]
    @staticmethod
    def pack_action_event_start(context, container_uuid, event_name):
        """Build the DB values dict recording the start of an event."""
        values = {'event': event_name,
                  'container_uuid': container_uuid,
                  'request_id': context.request_id,
                  'start_time': timeutils.utcnow()}
        return values
    @staticmethod
    def pack_action_event_finish(context, container_uuid, event_name,
                                 exc_val=None, exc_tb=None):
        """Build the DB values dict recording the end of an event."""
        values = {'event': event_name,
                  'container_uuid': container_uuid,
                  'request_id': context.request_id,
                  'finish_time': timeutils.utcnow()}
        if exc_tb is None:
            values['result'] = 'Success'
        else:
            values['result'] = 'Error'
            values['message'] = exc_val
            values['traceback'] = exc_tb
        return values
    @base.remotable_classmethod
    def event_start(cls, context, container_uuid, event_name,
                    want_result=True):
        """Persist the start of an event; optionally return the object."""
        values = cls.pack_action_event_start(context, container_uuid,
                                             event_name)
        db_event = dbapi.action_event_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_event)
    @base.remotable_classmethod
    def event_finish(cls, context, container_uuid, event_name, exc_val=None,
                     exc_tb=None, want_result=None):
        """Persist the end of an event, normalising exception info to text."""
        if exc_val:
            exc_val = six.text_type(exc_val)
        # Accept either a preformatted traceback string or a raw tb object.
        if exc_tb and not isinstance(exc_tb, six.string_types):
            exc_tb = ''.join(traceback.format_tb(exc_tb))
        values = cls.pack_action_event_finish(context, container_uuid,
                                              event_name, exc_val=exc_val,
                                              exc_tb=exc_tb)
        db_event = dbapi.action_event_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(context), db_event)
    @base.remotable_classmethod
    def get_by_action(cls, context, action_id):
        """Return all events recorded for the given action id."""
        db_events = dbapi.action_events_get(context, action_id)
        return ContainerActionEvent._from_db_object_list(context, cls,
                                                         db_events)
| true | true |
f72a92773a71cfadb3c6851d87b7e65eff09358d | 6,096 | py | Python | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | 5 | 2021-07-04T10:28:49.000Z | 2022-02-22T16:48:04.000Z | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | null | null | null | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# pylint: disable=too-many-ancestors
# standard library
import os
import sys
import re
import argparse
from shutil import copy2
# from subprocess import Popen, PIPE
# import time
# from lxml import etree
# local library
import inkex
from inkex.command import inkscape
from inkex.elements import _selected as selection
MIN_PYTHON_VERSION = (3, 6)  # Mainly for f-strings

# Fail fast on unsupported interpreters.  Compare against the named
# constant (the old code repeated the literal ``(3, 6)``, so bumping
# MIN_PYTHON_VERSION alone would not have changed the check).
# NOTE(review): ``inkex.Effect.msg`` is an instance method being called
# unbound with the text as its first argument -- confirm this actually
# emits the message with the installed inkex API.
if sys.version_info[:2] < MIN_PYTHON_VERSION:
    inkex.Effect.msg(f"Python {MIN_PYTHON_VERSION[0]}.{MIN_PYTHON_VERSION[1]} or later required.")
    sys.exit(1)
class BaseExtension(inkex.Effect):
    """Custom class that makes creation of extensions easier.

    Users of this class need not worry about boilerplates, such as how to
    call inkscape via shell, and the management of tempfiles. Useful functions
    are also provided."""
    def __init__(self, custom_effect, args_adder=None):
        """Init base class.

        Instead of overriding ``effect`` (the usual inkex pattern), users
        pass their effect as *custom_effect*: a callable taking this
        extension instance and returning a list[str] of Inkscape verbs to
        execute (see :meth:`effect`).  *args_adder*, if given, receives the
        arg parser so extra CLI options can be registered; it is also kept
        on the instance so :meth:`call` can replay it for child extensions.
        """
        inkex.Effect.__init__(self)
        self.custom_effect = custom_effect
        self._msg = self.msg  # The old msg function provided by inkex (only accepts strings)
        def msg(*args, sep=' '):
            """Improved msg method, similar to Python's print"""
            self._msg(sep.join([str(arg) for arg in args]))
        self.msg = msg
        if args_adder is not None:
            args_adder(self.arg_parser)
        self.args_adder = args_adder
    def z_sort(self, alist):
        """Return new list sorted in document order (depth-first traversal)."""
        return list(self.z_iter(alist))
    def z_iter(self, alist):
        """Yield the elements of *alist* in document order (depth-first).

        Walks the whole document tree and stops early once every requested
        element has been seen.
        """
        id_list = list(alist)
        count = len(id_list)
        for element in self.document.getroot().iter():
            if element in alist:
                id_list.remove(element)
                yield element
                count -= 1
                if not count:
                    return
    @staticmethod
    def show(obj):
        """Returns a str representation of object.

        ElementLists and plain lists are rendered with each child as
        ``TypeName(id)`` (id included when the child has ``get_id``).
        """
        def rep(obj):
            if hasattr(obj, 'get_id'):
                return f"{type(obj).__name__}({obj.get_id()})"
            return f"{type(obj).__name__}"
        if type(obj).__name__ == 'ElementList':
            return ('ElementList(' +
                    ', '.join([rep(child) for child in obj.values()]) +
                    ')')
        if isinstance(obj, list):
            return '[' + ', '.join(rep(child) for child in obj) + ']'
        return rep(obj)
    def find(self, obj: any, xpath='/*') -> list:
        """Returns a list of objects which satisfies XPath.

        The xpath is preprocessed before evaluation: bare SVG tag names
        get the ``svg:`` namespace prefix, the shorthands ``l`` (layer
        group) and ``p`` (path) are expanded, and Python-style ``[a:b]``
        slices become XPath position() predicates.

        Args:
            obj (any): Parent object to recurse into. Examples include root, selected, or a group.
            xpath (str, optional): Defaults to '/*'.
        Returns:
            list: matching elements, with Defs/NamedView/Metadata filtered out.
        """
        BASIC_TAGS = ('circle', 'ellipse', 'line', 'polygon', 'polyline', 'rect', 'path', 'image', 'g')
        SPECIAL_TAGS = {
            'l': "svg:g[@inkscape:groupmode='layer']",
            'p': 'svg:path'
        }
        xpath = re.sub(r'((?<=/)(' + '|'.join(BASIC_TAGS) + r')\b)', r'svg:\1', xpath)
        for k, v in SPECIAL_TAGS.items():
            xpath = re.sub('(?<=/)' + k + r'\b', v, xpath)
        xpath = re.sub(r'(?<=\[)(\d+):(\d+)(?=\])', r'position()>=\1 and position()<\2', xpath)
        if type(obj).__name__ != 'ElementList':
            obj = [obj]
        output = []
        for child in obj:
            matches = child.xpath(xpath, namespaces={
                'svg': 'http://www.w3.org/2000/svg',
                'inkscape': 'http://www.inkscape.org/namespaces/inkscape'})
            for match in matches:
                if type(match).__name__ not in ('Defs', 'NamedView', 'Metadata'):
                    output.append(match)
        return output
    def effect(self):
        """Main entry point to process current document. Not to be called externally.

        Runs the user's custom effect; if it returns a list of Inkscape
        verbs, they are executed on a temp copy of the document via the
        Inkscape CLI and the result is loaded back as the new document.
        """
        actions_list = self.custom_effect(self)
        if actions_list is None or actions_list == []:
            self.msg("No actions received. Perhaps you are calling inkex object methods?")
        elif isinstance(actions_list, list):
            tempfile = self.options.input_file + "-BaseExtension.svg"
            # prepare
            copy2(self.options.input_file, tempfile)
            actions_list.append("FileSave")
            actions_list.append("FileQuit")
            actions = ";".join(actions_list)
            inkscape(tempfile, "--with-gui", actions=actions)
            # finish up
            # replace current document with content of temp copy file
            self.document = inkex.load_svg(tempfile)
            # update self.svg
            self.svg = self.document.getroot()
            # Clean up tempfile; best-effort, failure to delete is harmless.
            try:
                os.remove(tempfile)
            except Exception:  # pylint: disable=broad-except
                pass
    def call(self, child, ext_options):
        """Used to call an extension from another extension.

        Temporarily swaps in the *child* extension's default options
        (overridden by *ext_options*), runs its custom effect against this
        instance, then restores the original options.
        """
        old_options = self.options
        parser = argparse.ArgumentParser()
        child.args_adder(parser)
        self.options = parser.parse_args([])
        for k, v in ext_options.items():
            setattr(self.options, k, v)
        output = child.custom_effect(self)
        self.options = old_options
        return output
| 32.425532 | 103 | 0.58563 |
import os
import sys
import re
import argparse
from shutil import copy2
import inkex
from inkex.command import inkscape
from inkex.elements import _selected as selection
MIN_PYTHON_VERSION = (3, 6)
# Fail fast on unsupported interpreters; compare against the constant
# instead of repeating the literal (3, 6) so a future bump takes effect.
if sys.version_info[:2] < MIN_PYTHON_VERSION:
    inkex.Effect.msg(f"Python {MIN_PYTHON_VERSION[0]}.{MIN_PYTHON_VERSION[1]} or later required.")
    sys.exit(1)
class BaseExtension(inkex.Effect):
    """Helper base class for Inkscape extensions.

    Wraps the boilerplate of running Inkscape verbs on a temp copy of the
    document; the user supplies the effect as a callable instead of
    subclassing ``effect`` directly.
    """
    def __init__(self, custom_effect, args_adder=None):
        """Store *custom_effect* (callable returning a list of Inkscape
        verbs) and optionally register extra CLI args via *args_adder*."""
        inkex.Effect.__init__(self)
        self.custom_effect = custom_effect
        # Keep inkex's original msg (strings only) and expose a
        # print-like wrapper in its place.
        self._msg = self.msg
        def msg(*args, sep=' '):
            self._msg(sep.join([str(arg) for arg in args]))
        self.msg = msg
        if args_adder is not None:
            args_adder(self.arg_parser)
        self.args_adder = args_adder
    def z_sort(self, alist):
        """Return a new list sorted in document order (depth-first)."""
        return list(self.z_iter(alist))
    def z_iter(self, alist):
        """Yield the elements of *alist* in document order, stopping early
        once all of them have been seen."""
        id_list = list(alist)
        count = len(id_list)
        for element in self.document.getroot().iter():
            if element in alist:
                id_list.remove(element)
                yield element
                count -= 1
                if not count:
                    return
    @staticmethod
    def show(obj):
        """Return a short str representation of *obj* (lists/ElementLists
        are rendered child by child as ``TypeName(id)``)."""
        def rep(obj):
            if hasattr(obj, 'get_id'):
                return f"{type(obj).__name__}({obj.get_id()})"
            return f"{type(obj).__name__}"
        if type(obj).__name__ == 'ElementList':
            return ('ElementList(' +
                    ', '.join([rep(child) for child in obj.values()]) +
                    ')')
        if isinstance(obj, list):
            return '[' + ', '.join(rep(child) for child in obj) + ']'
        return rep(obj)
    def find(self, obj: any, xpath='/*') -> list:
        """Return elements under *obj* matching a preprocessed XPath:
        bare SVG tags get the svg: prefix, 'l'/'p' shorthands expand, and
        ``[a:b]`` slices become position() predicates."""
        BASIC_TAGS = ('circle', 'ellipse', 'line', 'polygon', 'polyline', 'rect', 'path', 'image', 'g')
        SPECIAL_TAGS = {
            'l': "svg:g[@inkscape:groupmode='layer']",
            'p': 'svg:path'
        }
        xpath = re.sub(r'((?<=/)(' + '|'.join(BASIC_TAGS) + r')\b)', r'svg:\1', xpath)
        for k, v in SPECIAL_TAGS.items():
            xpath = re.sub('(?<=/)' + k + r'\b', v, xpath)
        xpath = re.sub(r'(?<=\[)(\d+):(\d+)(?=\])', r'position()>=\1 and position()<\2', xpath)
        if type(obj).__name__ != 'ElementList':
            obj = [obj]
        output = []
        for child in obj:
            matches = child.xpath(xpath, namespaces={
                'svg': 'http://www.w3.org/2000/svg',
                'inkscape': 'http://www.inkscape.org/namespaces/inkscape'})
            for match in matches:
                if type(match).__name__ not in ('Defs', 'NamedView', 'Metadata'):
                    output.append(match)
        return output
    def effect(self):
        """Run the custom effect; execute returned Inkscape verbs on a temp
        copy of the document, then reload it as the current document."""
        actions_list = self.custom_effect(self)
        if actions_list is None or actions_list == []:
            self.msg("No actions received. Perhaps you are calling inkex object methods?")
        elif isinstance(actions_list, list):
            tempfile = self.options.input_file + "-BaseExtension.svg"
            copy2(self.options.input_file, tempfile)
            actions_list.append("FileSave")
            actions_list.append("FileQuit")
            actions = ";".join(actions_list)
            inkscape(tempfile, "--with-gui", actions=actions)
            self.document = inkex.load_svg(tempfile)
            self.svg = self.document.getroot()
            # Best-effort tempfile cleanup.
            try:
                os.remove(tempfile)
            except Exception:
                pass
    def call(self, child, ext_options):
        """Invoke another extension's effect with its own (overridden)
        options, restoring this extension's options afterwards."""
        old_options = self.options
        parser = argparse.ArgumentParser()
        child.args_adder(parser)
        self.options = parser.parse_args([])
        for k, v in ext_options.items():
            setattr(self.options, k, v)
        output = child.custom_effect(self)
        self.options = old_options
        return output
| true | true |
f72a938f9b1d2ca33b06ff8d32d27e738b47788b | 1,775 | py | Python | whales/architectures/new_gsc3_for_512.py | CKhan1/READ-PSB-AI-right-whale-photo-id-Kaggle | b6723724148029f68187bbd7ac598ea90a7542f3 | [
"MIT"
] | 2 | 2020-08-19T11:03:42.000Z | 2022-02-18T02:49:28.000Z | whales/architectures/new_gsc3_for_512.py | X10Khan/whales | 313fd487dec6080bb3a518d312cd9f1e29958f16 | [
"MIT"
] | null | null | null | whales/architectures/new_gsc3_for_512.py | X10Khan/whales | 313fd487dec6080bb3a518d312cd9f1e29958f16 | [
"MIT"
] | 4 | 2018-10-23T15:47:22.000Z | 2021-02-03T03:35:13.000Z | import copy
from TheanoLib.init import Normal
from TheanoLib.modules import Sequential, Flatten, Dropout, Dense, identity, Softmax, FanOut, Parallel, Subtensor, \
SimpleApply, softmax
from architecture import create_conv_colum
import theano.tensor as T
def create(image_size=(448, 448), n_outs=[447], dropout=False,
           fc_l2_reg=None, conv_l2_reg=None, **kwargs):
    # Python 2 / Theano model builder: a shared convolutional column
    # followed by one dense layer whose output is split into one softmax
    # head per entry of n_outs.  Returns (arch, net, reg_params) where
    # `arch` is a deep copy taken before parameters are allocated.
    # NOTE(review): n_outs=[447] is a mutable default argument; harmless
    # as long as callers never mutate it, but worth confirming.
    print '... building the model'
    print 'image_size', image_size, kwargs
    classifier = Sequential(name='classifier')
    net = Sequential(name='sequential')
    # Per-layer conv specs consumed by create_conv_colum; the meaning of
    # each tuple element is defined there.
    convs = [(32, 1, 0), (64, 1, 0), (64, 1, 0), (128, 0, 0), (128, 1, 0), (256, 0, 0), (256, 1, 0),
             (256, 0, 0), (256, 1, 0), (256, 0, 0), (256, 1, 0)]
    features, size1 = create_conv_colum(image_size, 'MAIN.', convs)
    net.add(features)
    classifier.add(Flatten())
    if dropout:
        classifier.add(Dropout(p_of_zero=dropout))
    def f(input):
        # Apply a softmax to each contiguous slice of the dense output,
        # one slice per head in n_outs, then re-concatenate.
        outs = []
        s = 0
        for n_out in n_outs:
            outs.append(softmax(input[:, s: s + n_out]))
            s += n_out
        return T.concatenate(outs, axis=1)
    classifier.add(Dense(
        n_input= convs[-1][0] * size1[0] * size1[1],
        n_output=sum(n_outs),
        nonlinearity=identity,
        W_init=Normal(0.001),
        name='dense'
    ))
    classifier.add(SimpleApply(f))
    net.add(classifier)
    # Snapshot the architecture before parameters are allocated.
    arch = copy.deepcopy(net)
    print 'Calling allocate_params()'
    net.allocate_params()
    print 'Calling initialize_params()'
    net.initialize_params()
    # Pair every regularisable parameter with its L2 coefficient
    # (fc_l2_reg for the classifier, conv_l2_reg for the conv column).
    reg_params = (zip(classifier.get_reg_params(), len(classifier.get_reg_params()) * [fc_l2_reg]) +
                  zip(features.get_reg_params(), len(features.get_reg_params()) * [conv_l2_reg]))
    # NOTE(review): the trailing "| ... | import copy" below is dataset
    # metadata fused into this line by the dump and is not valid Python.
    return arch, net, reg_params | 29.583333 | 116 | 0.618592 | import copy
from TheanoLib.init import Normal
from TheanoLib.modules import Sequential, Flatten, Dropout, Dense, identity, Softmax, FanOut, Parallel, Subtensor, \
SimpleApply, softmax
from architecture import create_conv_colum
import theano.tensor as T
def create(image_size=(448, 448), n_outs=[447], dropout=False,
fc_l2_reg=None, conv_l2_reg=None, **kwargs):
print '... building the model'
print 'image_size', image_size, kwargs
classifier = Sequential(name='classifier')
net = Sequential(name='sequential')
convs = [(32, 1, 0), (64, 1, 0), (64, 1, 0), (128, 0, 0), (128, 1, 0), (256, 0, 0), (256, 1, 0),
(256, 0, 0), (256, 1, 0), (256, 0, 0), (256, 1, 0)]
features, size1 = create_conv_colum(image_size, 'MAIN.', convs)
net.add(features)
classifier.add(Flatten())
if dropout:
classifier.add(Dropout(p_of_zero=dropout))
def f(input):
outs = []
s = 0
for n_out in n_outs:
outs.append(softmax(input[:, s: s + n_out]))
s += n_out
return T.concatenate(outs, axis=1)
classifier.add(Dense(
n_input= convs[-1][0] * size1[0] * size1[1],
n_output=sum(n_outs),
nonlinearity=identity,
W_init=Normal(0.001),
name='dense'
))
classifier.add(SimpleApply(f))
net.add(classifier)
lling allocate_params()'
net.allocate_params()
print 'Calling initialize_params()'
net.initialize_params()
reg_params = (zip(classifier.get_reg_params(), len(classifier.get_reg_params()) * [fc_l2_reg]) +
zip(features.get_reg_params(), len(features.get_reg_params()) * [conv_l2_reg]))
return arch, net, reg_params | false | true |
f72a93a6ef5bdbef78fc92eeacc5548f6c09045a | 181 | py | Python | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | 1 | 2021-12-22T23:56:58.000Z | 2021-12-22T23:56:58.000Z | from typing import Final
__version__: Final = "1.0.0"
HTTP_HEADERS: Final = {"User-Agent": f"delphi_epidata/{__version__}"}
BASE_URL: Final = "https://delphi.cmu.edu/epidata/"
| 18.1 | 69 | 0.712707 | from typing import Final
__version__: Final = "1.0.0"
HTTP_HEADERS: Final = {"User-Agent": f"delphi_epidata/{__version__}"}
BASE_URL: Final = "https://delphi.cmu.edu/epidata/"
| true | true |
f72a948c70be197e27db61e983500bbbb2328e4d | 1,015 | py | Python | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 395 | 2015-01-12T19:20:13.000Z | 2022-03-30T05:40:40.000Z | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 2,388 | 2015-01-01T20:09:19.000Z | 2022-03-29T16:49:14.000Z | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 177 | 2015-01-04T14:47:00.000Z | 2022-03-25T09:01:51.000Z | # -*- coding: utf-8 -*-
import re
import cherrypy
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.constants import AccessType, TokenScope
from girder.models.file import File as FileModel
from girder.models.folder import Folder as FolderModel
from girder.models.item import Item as ItemModel
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
    Description('Get the README for a folder, if it exists.')
    .modelParam('id', model=FolderModel, level=AccessType.READ)
    .errorResponse()
    .errorResponse('Read access was denied on the folder.', 403)
)
def _getFolderReadme(folder):
    """Stream the folder's README file download, or answer 204 with an empty body."""
    # Any item named README, with or without an extension (README, README.md, ...).
    readme_item = ItemModel().findOne({
        'folderId': folder['_id'],
        'name': {'$regex': re.compile(r'^README(\..+)?$')},
    })
    if readme_item is not None:
        item_files = list(ItemModel().childFiles(item=readme_item, limit=1))
        if item_files:
            return FileModel().download(item_files[0])
    # No README item (or an item with no files): empty "no content" response.
    cherrypy.response.status = 204
    return ''
| 32.741935 | 64 | 0.693596 |
import re
import cherrypy
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.constants import AccessType, TokenScope
from girder.models.file import File as FileModel
from girder.models.folder import Folder as FolderModel
from girder.models.item import Item as ItemModel
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get the README for a folder, if it exists.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the folder.', 403)
)
def _getFolderReadme(folder):
query = {
'folderId': folder['_id'],
'name': {'$regex': re.compile(r'^README(\..+)?$')},
}
item = ItemModel().findOne(query)
if item:
files = list(ItemModel().childFiles(item=item, limit=1))
if len(files) >= 1:
return FileModel().download(files[0])
cherrypy.response.status = 204
return ''
| true | true |
f72a950bc8465538bf3a53bd013a276fafc97895 | 28,988 | py | Python | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
import itertools
import datetime
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeSentimentAction,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
AnalyzeActionsType,
)
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestAnalyze(TextAnalyticsTest):
def _interval(self):
return 5 if self.is_live else 0
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_no_single_input(self, client):
        """Passing a bare string instead of a list of documents must raise TypeError."""
        with self.assertRaises(TypeError):
            response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_dict_key_phrase_task(self, client):
        """Key-phrase extraction over dict-shaped documents yields one action result covering all docs."""
        docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
                {"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
        response = client.begin_analyze_actions(
            docs,
            actions=[ExtractKeyPhrasesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        # One action submitted -> exactly one action result, of the matching type.
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
        assert len(action_result.document_results) == len(docs)
        # Both documents (English and Spanish) should surface the same key phrases.
        for doc in action_result.document_results:
            self.assertIn("Paul Allen", doc.key_phrases)
            self.assertIn("Bill Gates", doc.key_phrases)
            self.assertIn("Microsoft", doc.key_phrases)
            self.assertIsNotNone(doc.id)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_dict_sentiment_task(self, client):
        """Sentiment analysis returns per-document sentiment labels and per-sentence splits."""
        docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
                {"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
                {"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
        response = client.begin_analyze_actions(
            docs,
            actions=[AnalyzeSentimentAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        assert len(action_result.document_results) == len(docs)
        # Document-level sentiment: factual / negative review / positive review.
        self.assertEqual(action_result.document_results[0].sentiment, "neutral")
        self.assertEqual(action_result.document_results[1].sentiment, "negative")
        self.assertEqual(action_result.document_results[2].sentiment, "positive")
        for doc in action_result.document_results:
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)  # populated because show_stats=True
            self.validateConfidenceScores(doc.confidence_scores)
            self.assertIsNotNone(doc.sentences)
        # Sentence segmentation must match the input punctuation.
        self.assertEqual(len(action_result.document_results[0].sentences), 1)
        self.assertEqual(action_result.document_results[0].sentences[0].text, "Microsoft was founded by Bill Gates and Paul Allen.")
        self.assertEqual(len(action_result.document_results[1].sentences), 2)
        self.assertEqual(action_result.document_results[1].sentences[0].text, "I did not like the hotel we stayed at.")
        self.assertEqual(action_result.document_results[1].sentences[1].text, "It was too expensive.")
        self.assertEqual(len(action_result.document_results[2].sentences), 2)
        self.assertEqual(action_result.document_results[2].sentences[0].text, "The restaurant had really good food.")
        self.assertEqual(action_result.document_results[2].sentences[1].text, "I recommend you try it.")
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_sentiment_analysis_task_with_opinion_mining(self, client):
        """With show_opinion_mining=True, each sentence carries targets and assessments.

        Doc 0 exercises a positive target ('design') with two assessments;
        doc 1 exercises two negative targets ('food', 'service') sharing one
        negated assessment ('good' ... 'not').
        """
        documents = [
            "It has a sleek premium aluminum design that makes it beautiful to look at.",
            "The food and service is not good"
        ]
        response = client.begin_analyze_actions(
            documents,
            actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        assert len(action_result.document_results) == len(documents)
        for idx, doc in enumerate(action_result.document_results):
            for sentence in doc.sentences:
                if idx == 0:
                    # First document: single target 'design' at offset 32.
                    for mined_opinion in sentence.mined_opinions:
                        target = mined_opinion.target
                        self.assertEqual('design', target.text)
                        self.assertEqual('positive', target.sentiment)
                        self.assertEqual(0.0, target.confidence_scores.neutral)
                        self.validateConfidenceScores(target.confidence_scores)
                        self.assertEqual(32, target.offset)
                        sleek_opinion = mined_opinion.assessments[0]
                        self.assertEqual('sleek', sleek_opinion.text)
                        self.assertEqual('positive', sleek_opinion.sentiment)
                        self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral)
                        self.validateConfidenceScores(sleek_opinion.confidence_scores)
                        self.assertEqual(9, sleek_opinion.offset)
                        self.assertFalse(sleek_opinion.is_negated)
                        premium_opinion = mined_opinion.assessments[1]
                        self.assertEqual('premium', premium_opinion.text)
                        self.assertEqual('positive', premium_opinion.sentiment)
                        self.assertEqual(0.0, premium_opinion.confidence_scores.neutral)
                        self.validateConfidenceScores(premium_opinion.confidence_scores)
                        self.assertEqual(15, premium_opinion.offset)
                        self.assertFalse(premium_opinion.is_negated)
                else:
                    # Second document: targets [0]='food', [1]='service'.
                    food_target = sentence.mined_opinions[0].target
                    service_target = sentence.mined_opinions[1].target
                    self.validateConfidenceScores(food_target.confidence_scores)
                    self.assertEqual(4, food_target.offset)
                    self.assertEqual('service', service_target.text)
                    self.assertEqual('negative', service_target.sentiment)
                    self.assertEqual(0.0, service_target.confidence_scores.neutral)
                    self.validateConfidenceScores(service_target.confidence_scores)
                    self.assertEqual(13, service_target.offset)
                    food_opinion = sentence.mined_opinions[0].assessments[0]
                    service_opinion = sentence.mined_opinions[1].assessments[0]
                    # Both targets should point at the same shared assessment ('good').
                    self.assertOpinionsEqual(food_opinion, service_opinion)
                    self.assertEqual('good', food_opinion.text)
                    self.assertEqual('negative', food_opinion.sentiment)
                    self.assertEqual(0.0, food_opinion.confidence_scores.neutral)
                    self.validateConfidenceScores(food_opinion.confidence_scores)
                    self.assertEqual(28, food_opinion.offset)
                    self.assertTrue(food_opinion.is_negated)
                    # NOTE(review): this rebind of service_target is redundant, and the
                    # food_target text/sentiment assertions land after the opinion checks
                    # instead of next to the other food_target assertions above — looks
                    # like a merge artifact; verify intended grouping.
                    service_target = sentence.mined_opinions[1].target
                    self.assertEqual('food', food_target.text)
                    self.assertEqual('negative', food_target.sentiment)
                    self.assertEqual(0.0, food_target.confidence_scores.neutral)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_text_document_input_entities_task(self, client):
        """Entity recognition accepts TextDocumentInput objects (en/es/de) and finds 4 entities per doc."""
        docs = [
            TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975", language="en"),
            TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.", language="es"),
            TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"),
        ]
        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizeEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
        assert len(action_result.document_results) == len(docs)
        for doc in action_result.document_results:
            # Microsoft, Bill Gates, Paul Allen, and the date.
            self.assertEqual(len(doc.entities), 4)
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_string_pii_entities_task(self, client):
        """PII recognition on plain strings detects SSN / ABA-number style entities."""
        docs = ["My SSN is 859-98-0987.",
                "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
                "Is 998.214.865-68 your Brazilian CPF number?"
        ]
        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizePiiEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
        assert len(action_result.document_results) == len(docs)
        self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987")
        self.assertEqual(action_result.document_results[0].entities[0].category, "USSocialSecurityNumber")
        self.assertEqual(action_result.document_results[1].entities[0].text, "111000025")
        # self.assertEqual(results[1].entities[0].category, "ABA Routing Number") # Service is currently returning PhoneNumber here
        # commenting out brazil cpf, currently service is not returning it
        # self.assertEqual(action_result.document_results[2].entities[0].text, "998.214.865-68")
        # self.assertEqual(action_result.document_results[2].entities[0].category, "Brazil CPF Number")
        for doc in action_result.document_results:
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_request_on_empty_document(self, client):
        """A batch whose only document is the empty string is rejected with HttpResponseError."""
        docs = [u""]
        with self.assertRaises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[ExtractKeyPhrasesAction()],
                polling_interval=self._interval(),
            )
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer(client_kwargs={
        "text_analytics_account_key": "",
    })
    def test_empty_credential_class(self, client):
        """An empty account key must surface as ClientAuthenticationError."""
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze_actions(
                ["This is written in English."],
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer(client_kwargs={
        "text_analytics_account_key": "xxxxxxxxxxxx",
    })
    def test_bad_credentials(self, client):
        """A syntactically present but invalid account key raises ClientAuthenticationError."""
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze_actions(
                ["This is written in English."],
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_out_of_order_ids_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
action_results = [r for r in action_results if not r.is_error]
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
in_order = ["56", "0", "19", "1"]
for action_result in action_results:
for idx, resp in enumerate(action_result.document_results):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_show_stats_and_model_version_multiple_tasks(self, client):
def callback(resp):
if resp.raw_response:
a = "b"
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="latest"),
RecognizePiiEntitiesAction(model_version="latest"),
RecognizeLinkedEntitiesAction(model_version="latest"),
AnalyzeSentimentAction(model_version="latest")
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=callback,
)
response = poller.result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
for action_result in action_results:
assert action_result.statistics
for doc in action_result.document_results:
assert doc.statistics
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_poller_metadata(self, client):
        """The analyze-actions poller exposes timing, action counts, and an operation id."""
        docs = [{"id": "56", "text": ":)"}]
        poller = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(model_version="latest")
            ],
            show_stats=True,
            polling_interval=self._interval(),
        )
        response = poller.result()
        assert isinstance(poller.created_on, datetime.datetime)
        # NOTE(review): private-attribute access; value is discarded, presumably just
        # smoke-testing that display_name is reachable — consider a public accessor.
        poller._polling_method.display_name
        assert isinstance(poller.expires_on, datetime.datetime)
        assert poller.actions_failed_count == 0
        assert poller.actions_in_progress_count == 0
        assert poller.actions_succeeded_count == 1
        assert isinstance(poller.last_modified_on, datetime.datetime)
        assert poller.total_actions_count == 1
        assert poller.id
### TODO: Commenting out language tests. Right now analyze only supports language 'en', so no point to these tests yet
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer()
# def test_whole_batch_language_hint(self, client):
# def callback(resp):
# language_str = "\"language\": \"fr\""
# if resp.http_request.body:
# language = resp.http_request.body.count(language_str)
# self.assertEqual(language, 3)
# docs = [
# u"This was the best day of my life.",
# u"I did not like the hotel we stayed at. It was too expensive.",
# u"The restaurant was not as good as I hoped."
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# language="fr",
# polling_interval=self._interval(),
# raw_response_hook=callback
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# self.assertFalse(doc.is_error)
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer(client_kwargs={
# "default_language": "en"
# })
# def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
# def callback(resp):
# pass
# # if resp.http_request.body:
# # language_str = "\"language\": \"es\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 2)
# # language_str = "\"language\": \"en\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 1)
# docs = [
# TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
# TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
# TextDocumentInput(id="3", text="猫は幸せ"),
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# polling_interval=self._interval(),
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# assert not doc.is_error
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_invalid_language_hint_method(self, client):
        """An invalid language hint produces per-document errors rather than raising."""
        response = list(client.begin_analyze_actions(
            ["This should fail because we're passing in an invalid language hint"],
            language="notalanguage",
            actions=[
                RecognizeEntitiesAction(),
                ExtractKeyPhrasesAction(),
                RecognizePiiEntitiesAction(),
                RecognizeLinkedEntitiesAction(),
                AnalyzeSentimentAction()
            ],
            polling_interval=self._interval(),
        ).result())
        for action_result in response:
            for doc in action_result.document_results:
                assert doc.is_error
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_model_version_error_multiple_tasks(self, client):
        """One bad model_version among otherwise-valid actions fails the whole operation."""
        docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
        with pytest.raises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(model_version="latest"),
                    ExtractKeyPhrasesAction(model_version="bad"),
                    RecognizePiiEntitiesAction(model_version="bad"),
                    RecognizeLinkedEntitiesAction(model_version="bad"),
                    AnalyzeSentimentAction(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_model_version_error_all_tasks(self, client):  # TODO: verify behavior of service
        """When every action carries a bad model_version, the operation raises HttpResponseError."""
        docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
        with self.assertRaises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(model_version="bad"),
                    ExtractKeyPhrasesAction(model_version="bad"),
                    RecognizePiiEntitiesAction(model_version="bad"),
                    RecognizeLinkedEntitiesAction(model_version="bad"),
                    AnalyzeSentimentAction(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_missing_input_records_error(self, client):
        """An empty document list is rejected client-side with ValueError before any request."""
        docs = []
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert "Input documents can not be empty or None" in str(excinfo.value)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_passing_none_docs(self, client):
        """None for documents is rejected client-side with the same ValueError as an empty list."""
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze_actions(None, None)
        assert "Input documents can not be empty or None" in str(excinfo.value)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_pass_cls(self, client):
        """A custom `cls` deserialization callback replaces the poller's result value."""
        def callback(pipeline_response, deserialized, _):
            return "cls result"
        res = client.begin_analyze_actions(
            documents=["Test passing cls to endpoint"],
            actions=[
                RecognizeEntitiesAction(),
            ],
            cls=callback,
            polling_interval=self._interval(),
        ).result()
        assert res == "cls result"
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_multiple_pages_of_results_returned_successfully(self, client):
single_doc = "hello world"
docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))] # max number of documents is 25
result = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
show_stats=True,
polling_interval=self._interval(),
).result()
recognize_entities_results = []
extract_key_phrases_results = []
recognize_pii_entities_results = []
recognize_linked_entities_results = []
analyze_sentiment_results = []
action_results = list(result)
# do 2 pages of 5 task results
for idx, action_result in enumerate(action_results):
if idx % 5 == 0:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
recognize_entities_results.append(action_result)
elif idx % 5 == 1:
assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
extract_key_phrases_results.append(action_result)
elif idx % 5 == 2:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
recognize_pii_entities_results.append(action_result)
elif idx % 5 == 3:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
recognize_linked_entities_results.append(action_result)
else:
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
analyze_sentiment_results.append(action_result)
if idx < 5: # first page of task results
assert len(action_result.document_results) == 20
else:
assert len(action_result.document_results) == 5
assert all([action_result for action_result in recognize_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in extract_key_phrases_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_pii_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_linked_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in analyze_sentiment_results if len(action_result.document_results) == len(docs)])
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_too_many_documents(self, client):
        """26 documents (one over the 25-doc limit) yields a 400 HttpResponseError."""
        docs = list(itertools.repeat("input document", 26)) # Maximum number of documents per request is 25
        with pytest.raises(HttpResponseError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert excinfo.value.status_code == 400
| 44.054711 | 146 | 0.630537 |
import os
import pytest
import platform
import functools
import itertools
import datetime
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeSentimentAction,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
AnalyzeActionsType,
)
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestAnalyze(TextAnalyticsTest):
def _interval(self):
return 5 if self.is_live else 0
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_no_single_input(self, client):
with self.assertRaises(TypeError):
response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_key_phrase_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert len(action_result.document_results) == len(docs)
for doc in action_result.document_results:
self.assertIn("Paul Allen", doc.key_phrases)
self.assertIn("Bill Gates", doc.key_phrases)
self.assertIn("Microsoft", doc.key_phrases)
self.assertIsNotNone(doc.id)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_sentiment_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = client.begin_analyze_actions(
docs,
actions=[AnalyzeSentimentAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert len(action_result.document_results) == len(docs)
self.assertEqual(action_result.document_results[0].sentiment, "neutral")
self.assertEqual(action_result.document_results[1].sentiment, "negative")
self.assertEqual(action_result.document_results[2].sentiment, "positive")
for doc in action_result.document_results:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
self.validateConfidenceScores(doc.confidence_scores)
self.assertIsNotNone(doc.sentences)
self.assertEqual(len(action_result.document_results[0].sentences), 1)
self.assertEqual(action_result.document_results[0].sentences[0].text, "Microsoft was founded by Bill Gates and Paul Allen.")
self.assertEqual(len(action_result.document_results[1].sentences), 2)
self.assertEqual(action_result.document_results[1].sentences[0].text, "I did not like the hotel we stayed at.")
self.assertEqual(action_result.document_results[1].sentences[1].text, "It was too expensive.")
self.assertEqual(len(action_result.document_results[2].sentences), 2)
self.assertEqual(action_result.document_results[2].sentences[0].text, "The restaurant had really good food.")
self.assertEqual(action_result.document_results[2].sentences[1].text, "I recommend you try it.")
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_sentiment_analysis_task_with_opinion_mining(self, client):
documents = [
"It has a sleek premium aluminum design that makes it beautiful to look at.",
"The food and service is not good"
]
response = client.begin_analyze_actions(
documents,
actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert len(action_result.document_results) == len(documents)
for idx, doc in enumerate(action_result.document_results):
for sentence in doc.sentences:
if idx == 0:
for mined_opinion in sentence.mined_opinions:
target = mined_opinion.target
self.assertEqual('design', target.text)
self.assertEqual('positive', target.sentiment)
self.assertEqual(0.0, target.confidence_scores.neutral)
self.validateConfidenceScores(target.confidence_scores)
self.assertEqual(32, target.offset)
sleek_opinion = mined_opinion.assessments[0]
self.assertEqual('sleek', sleek_opinion.text)
self.assertEqual('positive', sleek_opinion.sentiment)
self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral)
self.validateConfidenceScores(sleek_opinion.confidence_scores)
self.assertEqual(9, sleek_opinion.offset)
self.assertFalse(sleek_opinion.is_negated)
premium_opinion = mined_opinion.assessments[1]
self.assertEqual('premium', premium_opinion.text)
self.assertEqual('positive', premium_opinion.sentiment)
self.assertEqual(0.0, premium_opinion.confidence_scores.neutral)
self.validateConfidenceScores(premium_opinion.confidence_scores)
self.assertEqual(15, premium_opinion.offset)
self.assertFalse(premium_opinion.is_negated)
else:
food_target = sentence.mined_opinions[0].target
service_target = sentence.mined_opinions[1].target
self.validateConfidenceScores(food_target.confidence_scores)
self.assertEqual(4, food_target.offset)
self.assertEqual('service', service_target.text)
self.assertEqual('negative', service_target.sentiment)
self.assertEqual(0.0, service_target.confidence_scores.neutral)
self.validateConfidenceScores(service_target.confidence_scores)
self.assertEqual(13, service_target.offset)
food_opinion = sentence.mined_opinions[0].assessments[0]
service_opinion = sentence.mined_opinions[1].assessments[0]
self.assertOpinionsEqual(food_opinion, service_opinion)
self.assertEqual('good', food_opinion.text)
self.assertEqual('negative', food_opinion.sentiment)
self.assertEqual(0.0, food_opinion.confidence_scores.neutral)
self.validateConfidenceScores(food_opinion.confidence_scores)
self.assertEqual(28, food_opinion.offset)
self.assertTrue(food_opinion.is_negated)
service_target = sentence.mined_opinions[1].target
self.assertEqual('food', food_target.text)
self.assertEqual('negative', food_target.sentiment)
self.assertEqual(0.0, food_target.confidence_scores.neutral)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_text_document_input_entities_task(self, client):
        """Run a single entity-recognition action over TextDocumentInput docs.

        Three documents (en/es/de) are submitted; every document is expected
        to come back with exactly four recognized entities, each with its
        text/category/offset/confidence populated.
        """
        docs = [
            TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975", language="en"),
            TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.", language="es"),
            TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"),
        ]
        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizeEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()

        # One action submitted -> exactly one action result expected.
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]

        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
        assert len(action_result.document_results) == len(docs)

        for doc in action_result.document_results:
            self.assertEqual(len(doc.entities), 4)
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_string_pii_entities_task(self, client):
        """Run a single PII-recognition action over plain-string documents."""
        docs = ["My SSN is 859-98-0987.",
                "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
                "Is 998.214.865-68 your Brazilian CPF number?"
        ]

        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizePiiEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()

        # One action submitted -> exactly one action result expected.
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]

        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
        assert len(action_result.document_results) == len(docs)

        # Spot-check the first entity detected in the first two documents.
        self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987")
        self.assertEqual(action_result.document_results[0].entities[0].category, "USSocialSecurityNumber")
        self.assertEqual(action_result.document_results[1].entities[0].text, "111000025")
        # self.assertEqual(results[1].entities[0].category, "ABA Routing Number")  # Service is currently returning PhoneNumber here

        # commenting out brazil cpf, currently service is not returning it
        # self.assertEqual(action_result.document_results[2].entities[0].text, "998.214.865-68")
        # self.assertEqual(action_result.document_results[2].entities[0].category, "Brazil CPF Number")

        for doc in action_result.document_results:
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_bad_request_on_empty_document(self, client):
docs = [u""]
with self.assertRaises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "",
})
def test_empty_credential_class(self, client):
with self.assertRaises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "xxxxxxxxxxxx",
})
def test_bad_credentials(self, client):
with self.assertRaises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_out_of_order_ids_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
action_results = [r for r in action_results if not r.is_error]
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
in_order = ["56", "0", "19", "1"]
for action_result in action_results:
for idx, resp in enumerate(action_result.document_results):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_show_stats_and_model_version_multiple_tasks(self, client):
def callback(resp):
if resp.raw_response:
a = "b"
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="latest"),
RecognizePiiEntitiesAction(model_version="latest"),
RecognizeLinkedEntitiesAction(model_version="latest"),
AnalyzeSentimentAction(model_version="latest")
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=callback,
)
response = poller.result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
for action_result in action_results:
assert action_result.statistics
for doc in action_result.document_results:
assert doc.statistics
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_poller_metadata(self, client):
        """The long-running-operation poller must expose job metadata after completion."""
        docs = [{"id": "56", "text": ":)"}]

        poller = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(model_version="latest")
            ],
            show_stats=True,
            polling_interval=self._interval(),
        )

        response = poller.result()

        assert isinstance(poller.created_on, datetime.datetime)
        # NOTE(review): bare attribute access with no assertion — presumably it
        # only verifies the property can be read without raising; confirm intent.
        poller._polling_method.display_name
        assert isinstance(poller.expires_on, datetime.datetime)
        assert poller.actions_failed_count == 0
        assert poller.actions_in_progress_count == 0
        assert poller.actions_succeeded_count == 1
        assert isinstance(poller.last_modified_on, datetime.datetime)
        assert poller.total_actions_count == 1
        assert poller.id
### TODO: Commenting out language tests. Right now analyze only supports language 'en', so no point to these tests yet
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer()
# def test_whole_batch_language_hint(self, client):
# def callback(resp):
# language_str = "\"language\": \"fr\""
# if resp.http_request.body:
# language = resp.http_request.body.count(language_str)
# self.assertEqual(language, 3)
# docs = [
# u"This was the best day of my life.",
# u"I did not like the hotel we stayed at. It was too expensive.",
# u"The restaurant was not as good as I hoped."
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# language="fr",
# polling_interval=self._interval(),
# raw_response_hook=callback
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# self.assertFalse(doc.is_error)
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer(client_kwargs={
# "default_language": "en"
# })
# def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
# def callback(resp):
# pass
# # if resp.http_request.body:
# # language_str = "\"language\": \"es\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 2)
# # language_str = "\"language\": \"en\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 1)
# docs = [
# TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
# TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
# TextDocumentInput(id="3", text="猫は幸せ"),
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# polling_interval=self._interval(),
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# assert not doc.is_error
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_invalid_language_hint_method(self, client):
        """An unknown language hint makes every document result an error."""
        response = list(client.begin_analyze_actions(
            ["This should fail because we're passing in an invalid language hint"],
            language="notalanguage",
            actions=[
                RecognizeEntitiesAction(),
                ExtractKeyPhrasesAction(),
                RecognizePiiEntitiesAction(),
                RecognizeLinkedEntitiesAction(),
                AnalyzeSentimentAction()
            ],
            polling_interval=self._interval(),
        ).result())

        for action_result in response:
            for doc in action_result.document_results:
                assert doc.is_error
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_model_version_error_multiple_tasks(self, client):
        """A bad model_version on some actions must fail the whole job.

        NOTE(review): the first action deliberately uses "latest" while the
        rest use "bad" — presumably to show one valid action does not rescue
        the batch; confirm against service behavior.
        """
        docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]

        with pytest.raises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(model_version="latest"),
                    ExtractKeyPhrasesAction(model_version="bad"),
                    RecognizePiiEntitiesAction(model_version="bad"),
                    RecognizeLinkedEntitiesAction(model_version="bad"),
                    AnalyzeSentimentAction(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_bad_model_version_error_all_tasks(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
with self.assertRaises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="bad"),
ExtractKeyPhrasesAction(model_version="bad"),
RecognizePiiEntitiesAction(model_version="bad"),
RecognizeLinkedEntitiesAction(model_version="bad"),
AnalyzeSentimentAction(model_version="bad")
],
polling_interval=self._interval(),
).result()
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_missing_input_records_error(self, client):
        """An empty document list is rejected client-side with ValueError."""
        docs = []
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(None, None)
assert "Input documents can not be empty or None" in str(excinfo.value)
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_pass_cls(self, client):
        """A custom `cls` deserialization callback must shape the returned result."""
        def callback(pipeline_response, deserialized, _):
            # Ignore the service payload and return a sentinel value.
            return "cls result"
        res = client.begin_analyze_actions(
            documents=["Test passing cls to endpoint"],
            actions=[
                RecognizeEntitiesAction(),
            ],
            cls=callback,
            polling_interval=self._interval(),
        ).result()
        assert res == "cls result"
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_multiple_pages_of_results_returned_successfully(self, client):
single_doc = "hello world"
docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))]
result = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
show_stats=True,
polling_interval=self._interval(),
).result()
recognize_entities_results = []
extract_key_phrases_results = []
recognize_pii_entities_results = []
recognize_linked_entities_results = []
analyze_sentiment_results = []
action_results = list(result)
for idx, action_result in enumerate(action_results):
if idx % 5 == 0:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
recognize_entities_results.append(action_result)
elif idx % 5 == 1:
assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
extract_key_phrases_results.append(action_result)
elif idx % 5 == 2:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
recognize_pii_entities_results.append(action_result)
elif idx % 5 == 3:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
recognize_linked_entities_results.append(action_result)
else:
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
analyze_sentiment_results.append(action_result)
if idx < 5:
assert len(action_result.document_results) == 20
else:
assert len(action_result.document_results) == 5
assert all([action_result for action_result in recognize_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in extract_key_phrases_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_pii_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_linked_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in analyze_sentiment_results if len(action_result.document_results) == len(docs)])
    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_too_many_documents(self, client):
        """More than 25 documents per request is rejected by the service (400)."""
        docs = list(itertools.repeat("input document", 26))  # one over the 25-doc limit

        with pytest.raises(HttpResponseError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert excinfo.value.status_code == 400
| true | true |
f72a95a36cb73e6369f0ae2694d3a4c317a14ec5 | 1,549 | py | Python | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBackcall(PythonPackage):
    """Specifications for callback functions passed in to an API"""

    # Upstream project page and the sdist download location on PyPI.
    homepage = "https://github.com/takluyver/backcall"
    url = "https://pypi.io/packages/source/b/backcall/backcall-0.1.0.tar.gz"

    # Known release with its MD5 checksum for download verification.
    version('0.1.0', '87ce0c7839808e6a3427d57df6a792e7')
| 44.257143 | 78 | 0.68173 | true | true | |
f72a95f93d1a01fdc3492157c0b5fc9e8d191481 | 524 | py | Python | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 13 | 2015-04-27T14:26:19.000Z | 2021-11-21T16:11:17.000Z | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 15 | 2015-04-25T22:29:50.000Z | 2016-09-01T16:59:21.000Z | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 9 | 2015-06-19T19:48:40.000Z | 2021-04-16T10:27:29.000Z | """Adding lessons taught to user
Revision ID: 1c697a5bd34f
Revises: 23aebf11a765
Create Date: 2014-01-04 13:13:39.599020
"""
# revision identifiers, used by Alembic.
revision = '1c697a5bd34f'
down_revision = '23aebf11a765'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision; Alembic autogenerate produced no schema changes."""
    # Adjust by hand if this migration should actually alter the schema.
    pass
def downgrade():
    """Revert this revision; Alembic autogenerate produced no schema changes."""
    # Adjust by hand if the matching upgrade ever alters the schema.
    pass
| 19.407407 | 63 | 0.694656 |
revision = '1c697a5bd34f'
down_revision = '23aebf11a765'
from alembic import op
import sqlalchemy as sa
def upgrade():
| true | true |
f72a96fc280c4b6ed142ea022c0e6662161e6044 | 378 | py | Python | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | import yaml
from tests.base.io_test import BaseIOTest
from tests.base.value_error import BaseValueErrorTest
from queenbee.operator.function import Function
ASSET_FOLDER = 'tests/assets/functions'
class TestIO(BaseIOTest):
klass = Function
asset_folder = ASSET_FOLDER
class TestValueError(BaseValueErrorTest):
klass = Function
asset_folder = ASSET_FOLDER
| 18.9 | 53 | 0.793651 | import yaml
from tests.base.io_test import BaseIOTest
from tests.base.value_error import BaseValueErrorTest
from queenbee.operator.function import Function
ASSET_FOLDER = 'tests/assets/functions'
class TestIO(BaseIOTest):
klass = Function
asset_folder = ASSET_FOLDER
class TestValueError(BaseValueErrorTest):
klass = Function
asset_folder = ASSET_FOLDER
| true | true |
f72a99520193f77a04dcbe1808375927c8ee383b | 289 | py | Python | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | # 计算结构张量的特征值。
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
print(structure_tensor_eigenvalues(A_elems)[0])
| 20.642857 | 57 | 0.778547 |
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
print(structure_tensor_eigenvalues(A_elems)[0])
| true | true |
f72a99912cc462e12e16df173b954984f4d5d9a7 | 6,184 | py | Python | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | null | null | null | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | 11 | 2020-09-05T05:23:03.000Z | 2022-03-11T23:26:18.000Z | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | 4 | 2017-12-22T05:53:37.000Z | 2020-04-25T03:13:47.000Z | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from django.shortcuts import get_object_or_404
from nomadgram.images.models import Comment
from nomadgram.images.models import Image
from nomadgram.images.models import Like
from nomadgram.images.serializers import CommentSerializer
from nomadgram.images.serializers import CountImageSerializer
from nomadgram.images.serializers import ImageSerializer
from nomadgram.images.serializers import InputImageSerializer
from nomadgram.notifications.models import Notification
from nomadgram.users.models import User
from nomadgram.users.serializer import ListUserSerializer
class Images(APIView):
    """Feed listing and image upload for the authenticated user."""

    def get(self, request):
        """Return the latest feed images from the user and the users they follow.

        NOTE(review): the feed is hard-capped at 3 images ([:3]) — presumably a
        placeholder until pagination is added; confirm.
        """
        user = request.user
        following_users = user.following.all()
        feed_images = Image.objects.filter(Q(creator__in=following_users) | Q(creator=user))[:3]
        # Prefetch related rows so serialization does not issue N+1 queries.
        query = feed_images.select_related('creator').prefetch_related('comments__creator', 'tags', 'likes')
        serializer = ImageSerializer(query, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def post(self, request):
        """Create a new image owned by the requesting user; 400 on invalid input."""
        user = request.user
        serializer = InputImageSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(creator=user)
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
    """Retrieve, update, or delete a single image."""

    def find_own_image(self, image_id, user):
        """Return the image only if it exists AND is owned by *user*, else None."""
        try:
            image = Image.objects.get(id=image_id, creator=user)
            return image
        except Image.DoesNotExist:
            return None

    def get(self, request, image_id):
        """Anyone may read an image; 404 when it does not exist."""
        image = get_object_or_404(Image, id=image_id)
        serializer = ImageSerializer(image)
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def put(self, request, image_id):
        """Partially update an image; only the owner may modify it (401 otherwise)."""
        user = request.user
        image = self.find_own_image(image_id, user)
        if image:
            serializer = InputImageSerializer(image, data=request.data, partial=True)
            if serializer.is_valid():
                serializer.save(creator=user)
                return Response(data=serializer.data, status=status.HTTP_200_OK)
            else:
                # BUG FIX: was `serializer.erros` — an AttributeError at runtime
                # whenever validation failed.
                return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(status=status.HTTP_401_UNAUTHORIZED)

    def delete(self, request, image_id):
        """Delete an image; only the owner may delete it (400 otherwise)."""
        user = request.user
        image = self.find_own_image(image_id, user)
        if image:
            image.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
class LikeImage(APIView):
    """List likers of an image, or add the requesting user's like."""

    def get(self, request, image_id):
        """Return the list of users who liked this image."""
        likes = Like.objects.filter(image_id=image_id)
        likes_creator_ids = likes.values('creator_id')
        like_users = User.objects.filter(id__in=likes_creator_ids)
        serializer = ListUserSerializer(like_users, many=True)
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def post(self, request, image_id):
        """Add a like; 304 if the user already liked this image."""
        user = request.user
        image = get_object_or_404(Image, id=image_id)
        try:
            Like.objects.get(creator=user, image=image)
            return Response(status=status.HTTP_304_NOT_MODIFIED)
        except Like.DoesNotExist:
            Like.objects.create(creator=user, image=image)  # alternative: image.likes.create(creator=user)
            # Notify the image owner. NOTE(review): `notificaiton_type` matches the
            # (misspelled) field name on the Notification model — do not "fix" here.
            Notification.objects.create(creator=user, to=image.creator, image=image,
                                        notificaiton_type=Notification.NotificationType.LIKE)
            return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
    """Remove the requesting user's like from an image."""

    def delete(self, request, image_id):
        image = get_object_or_404(Image, id=image_id)
        try:
            existing_like = Like.objects.get(creator=request.user, image=image)
        except Like.DoesNotExist:
            # Nothing to remove: the user never liked this image.
            return Response(status=status.HTTP_304_NOT_MODIFIED)
        existing_like.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentOnImage(APIView):
    """Create a comment on an image and notify the image's owner."""

    def post(self, request, image_id):
        user = request.user
        image = get_object_or_404(Image, id=image_id)
        serializer = CommentSerializer(data=request.POST)
        if serializer.is_valid():
            comment = serializer.save(creator=user, image=image)  # NOTE: serializer.save() returns the model instance
            # NOTE(review): `notificaiton_type` matches the (misspelled) field name
            # on the Notification model — do not "fix" here.
            Notification.objects.create(creator=user, to=image.creator, image=image, comment=comment,
                                        notificaiton_type=Notification.NotificationType.COMMENT)
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
        return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentView(APIView):
    """Allow a comment's author to delete their own comment."""

    def delete(self, request, comment_id):
        # 404 unless the comment exists and belongs to the requester.
        target = get_object_or_404(Comment, id=comment_id, creator=request.user)
        target.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ModerateComments(APIView):
    """Let an image's owner delete any comment left on that image."""

    def delete(self, request, image_id, comment_id):
        # BUG FIX: the lookup used `image__creatorgs`, a non-existent field that
        # raises FieldError at runtime; the owner check is `image__creator`.
        comment = get_object_or_404(Comment, id=comment_id, image_id=image_id, image__creator=request.user)
        comment.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
    """Search images by a comma-separated list of tag names."""

    def get(self, request):
        tags = request.query_params.get('tags', None)  # NOTE: query string is read via query_params
        if tags:
            tags = tags.split(',')
            # distinct() because an image matching several tags would otherwise repeat.
            images = Image.objects.filter(tags__name__in=tags).distinct()
            serializer = CountImageSerializer(images, many=True)
            return Response(data=serializer.data, status=status.HTTP_200_OK)
        else:
            # No tags supplied: nothing to search for.
            return Response(status=status.HTTP_204_NO_CONTENT)
| 37.02994 | 109 | 0.687581 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from django.shortcuts import get_object_or_404
from nomadgram.images.models import Comment
from nomadgram.images.models import Image
from nomadgram.images.models import Like
from nomadgram.images.serializers import CommentSerializer
from nomadgram.images.serializers import CountImageSerializer
from nomadgram.images.serializers import ImageSerializer
from nomadgram.images.serializers import InputImageSerializer
from nomadgram.notifications.models import Notification
from nomadgram.users.models import User
from nomadgram.users.serializer import ListUserSerializer
class Images(APIView):
def get(self, request):
user = request.user
following_users = user.following.all()
feed_images = Image.objects.filter(Q(creator__in=following_users) | Q(creator=user))[:3]
query = feed_images.select_related('creator').prefetch_related('comments__creator', 'tags', 'likes')
serializer = ImageSerializer(query, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request):
user = request.user
serializer = InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = Image.objects.get(id=image_id, creator=user)
return image
except Image.DoesNotExist:
return None
def get(self, request, image_id):
image = get_object_or_404(Image, id=image_id)
serializer = ImageSerializer(image)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
serializer = InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.erros, status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_401_UNAUTHORIZED)
def delete(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class LikeImage(APIView):
def get(self, request, image_id):
likes = Like.objects.filter(image_id=image_id)
likes_creator_ids = likes.values('creator_id')
like_users = User.objects.filter(id__in=likes_creator_ids)
serializer = ListUserSerializer(like_users, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
Like.objects.get(creator=user, image=image)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except Like.DoesNotExist:
Like.objects.create(creator=user, image=image)
Notification.objects.create(creator=user, to=image.creator, image=image,
notificaiton_type=Notification.NotificationType.LIKE)
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
preexisting_like = Like.objects.get(creator=user, image=image)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
def post(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
serializer = CommentSerializer(data=request.POST)
if serializer.is_valid():
comment = serializer.save(creator=user, image=image)
Notification.objects.create(creator=user, to=image.creator, image=image, comment=comment,
notificaiton_type=Notification.NotificationType.COMMENT)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentView(APIView):
def delete(self, request, comment_id):
user = request.user
comment = get_object_or_404(Comment, id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ModerateComments(APIView):
def delete(self, request, image_id, comment_id):
comment = get_object_or_404(Comment, id=comment_id, image_id=image_id, image__creatorgs=request.user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request):
tags = request.query_params.get('tags', None)
if tags:
tags = tags.split(',')
images = Image.objects.filter(tags__name__in=tags).distinct()
serializer = CountImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_204_NO_CONTENT)
| true | true |
f72a99a11e52fd71703f1220515453c9acbbe085 | 552 | py | Python | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 30 | 2015-01-18T16:34:03.000Z | 2021-05-23T20:05:54.000Z | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | null | null | null | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 11 | 2015-02-21T10:45:41.000Z | 2021-01-24T21:08:20.000Z | import os
# Directory containing this settings module; anchors the SQLite file path.
DIRNAME = os.path.dirname(__file__)
DEBUG = True
# Active backend: file-based SQLite database stored next to this module.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(DIRNAME, 'mptt.db')
# Alternative backend: MySQL (uncomment and adjust credentials to use).
#DATABASE_ENGINE = 'mysql'
#DATABASE_NAME = 'mptt_test'
#DATABASE_USER = 'root'
#DATABASE_PASSWORD = ''
#DATABASE_HOST = 'localhost'
#DATABASE_PORT = '3306'
# Alternative backend: PostgreSQL via psycopg2 (uncomment to use).
#DATABASE_ENGINE = 'postgresql_psycopg2'
#DATABASE_NAME = 'mptt_test'
#DATABASE_USER = 'postgres'
#DATABASE_PASSWORD = ''
#DATABASE_HOST = 'localhost'
#DATABASE_PORT = '5432'
# Apps required for running the mptt test suite.
INSTALLED_APPS = (
    'mptt',
    'mptt.tests',
)
| 19.714286 | 49 | 0.684783 | import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(DIRNAME, 'mptt.db')
INSTALLED_APPS = (
'mptt',
'mptt.tests',
)
| true | true |
f72a9a36d3435f37d58e7109c367b25a53d50743 | 541 | py | Python | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 78 | 2020-03-30T19:05:23.000Z | 2022-03-30T06:55:47.000Z | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 180 | 2020-04-01T22:25:59.000Z | 2022-03-29T14:23:16.000Z | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 40 | 2020-03-31T19:52:31.000Z | 2022-02-06T05:52:44.000Z | import logging
import json
import azure.functions as func
import azure.durable_functions as df
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
    """HTTP starter: launch the orchestration named in the route.

    The request's JSON body becomes the orchestration input and the
    'functionName' route parameter selects the orchestrator to start;
    the caller receives the standard Durable Functions status-check
    response (built on the following line from instance_id).

    :param req: incoming HTTP request with a JSON body.
    :param starter: Durable Functions client binding payload.
    """
    client = df.DurableOrchestrationClient(starter)
    payload: str = json.loads(req.get_body().decode())  # Load JSON post request data
    instance_id = await client.start_new(req.route_params["functionName"], client_input=payload)
    logging.info(f"Started orchestration with ID = '{instance_id}'.")
return client.create_check_status_response(req, instance_id) | 38.642857 | 96 | 0.770795 | import logging
import json
import azure.functions as func
import azure.durable_functions as df
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
client = df.DurableOrchestrationClient(starter)
payload: str = json.loads(req.get_body().decode())
instance_id = await client.start_new(req.route_params["functionName"], client_input=payload)
logging.info(f"Started orchestration with ID = '{instance_id}'.")
return client.create_check_status_response(req, instance_id) | true | true |
f72a9a3bdb59d938db776e22dab6ecf91d768216 | 9,420 | py | Python | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | tfroehlich82/Ryven | cb57c91d13949712844a4410a9302c4a90d28dcd | [
"MIT"
] | 2,872 | 2020-07-01T09:06:34.000Z | 2022-03-31T05:52:32.000Z | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | dhf327/Ryven | a11e361528d982a9dd3c489dd536f8b05ffd56e1 | [
"MIT"
] | 59 | 2020-06-28T12:50:50.000Z | 2022-03-27T19:07:54.000Z | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | dhf327/Ryven | a11e361528d982a9dd3c489dd536f8b05ffd56e1 | [
"MIT"
] | 339 | 2020-07-05T04:36:20.000Z | 2022-03-24T07:25:18.000Z |
from NENV import *
import ctypes.test.test_pickling
class NodeBase(Node):
pass
class Array_Node(NodeBase):
"""
"""
title = 'ARRAY'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='len'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.ARRAY(self.input(0), self.input(1)))
class Cfunctype_Node(NodeBase):
"""
CFUNCTYPE(restype, *argtypes,
use_errno=False, use_last_error=False) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
title = 'CFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.CFUNCTYPE(self.input(0)))
class Dllcanunloadnow_Node(NodeBase):
"""
"""
title = 'DllCanUnloadNow'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllCanUnloadNow())
class Dllgetclassobject_Node(NodeBase):
"""
"""
title = 'DllGetClassObject'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='rclsid'),
NodeInputBP(label='riid'),
NodeInputBP(label='ppv'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllGetClassObject(self.input(0), self.input(1), self.input(2)))
class Pyfunctype_Node(NodeBase):
"""
"""
title = 'PYFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.PYFUNCTYPE(self.input(0)))
class Setpointertype_Node(NodeBase):
"""
"""
title = 'SetPointerType'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='pointer'),
NodeInputBP(label='cls'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.SetPointerType(self.input(0), self.input(1)))
class Winfunctype_Node(NodeBase):
"""
"""
title = 'WINFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WINFUNCTYPE(self.input(0)))
class Winerror_Node(NodeBase):
"""
"""
title = 'WinError'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='code', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='descr', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WinError(self.input(0), self.input(1)))
class _Calcsize_Node(NodeBase):
"""
Return size in bytes of the struct described by the format string."""
title = '_calcsize'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='format'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._calcsize(self.input(0)))
class _Check_Size_Node(NodeBase):
"""
"""
title = '_check_size'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='typecode', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._check_size(self.input(0), self.input(1)))
class _Reset_Cache_Node(NodeBase):
"""
"""
title = '_reset_cache'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._reset_cache())
class C_Buffer_Node(NodeBase):
"""
"""
title = 'c_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.c_buffer(self.input(0), self.input(1)))
class Cast_Node(NodeBase):
"""
"""
title = 'cast'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='obj'),
NodeInputBP(label='typ'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.cast(self.input(0), self.input(1)))
class Create_String_Buffer_Node(NodeBase):
"""
create_string_buffer(aBytes) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aBytes, anInteger) -> character array
"""
title = 'create_string_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_string_buffer(self.input(0), self.input(1)))
class Create_Unicode_Buffer_Node(NodeBase):
"""
create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
title = 'create_unicode_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_unicode_buffer(self.input(0), self.input(1)))
class String_At_Node(NodeBase):
"""
string_at(addr[, size]) -> string
Return the string at addr."""
title = 'string_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.string_at(self.input(0), self.input(1)))
class Wstring_At_Node(NodeBase):
"""
wstring_at(addr[, size]) -> string
Return the string at addr."""
title = 'wstring_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.wstring_at(self.input(0), self.input(1)))
# Register every generated node class with the Ryven environment so the
# nodes appear in the editor's node list.
export_nodes(
    Array_Node,
    Cfunctype_Node,
    Dllcanunloadnow_Node,
    Dllgetclassobject_Node,
    Pyfunctype_Node,
    Setpointertype_Node,
    Winfunctype_Node,
    Winerror_Node,
    _Calcsize_Node,
    _Check_Size_Node,
    _Reset_Cache_Node,
    C_Buffer_Node,
    Cast_Node,
    Create_String_Buffer_Node,
    Create_Unicode_Buffer_Node,
    String_At_Node,
    Wstring_At_Node,
)
| 25.254692 | 120 | 0.619639 |
from NENV import *
import ctypes.test.test_pickling
class NodeBase(Node):
pass
class Array_Node(NodeBase):
title = 'ARRAY'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='len'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.ARRAY(self.input(0), self.input(1)))
class Cfunctype_Node(NodeBase):
title = 'CFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.CFUNCTYPE(self.input(0)))
class Dllcanunloadnow_Node(NodeBase):
title = 'DllCanUnloadNow'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllCanUnloadNow())
class Dllgetclassobject_Node(NodeBase):
title = 'DllGetClassObject'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='rclsid'),
NodeInputBP(label='riid'),
NodeInputBP(label='ppv'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllGetClassObject(self.input(0), self.input(1), self.input(2)))
class Pyfunctype_Node(NodeBase):
title = 'PYFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.PYFUNCTYPE(self.input(0)))
class Setpointertype_Node(NodeBase):
title = 'SetPointerType'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='pointer'),
NodeInputBP(label='cls'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.SetPointerType(self.input(0), self.input(1)))
class Winfunctype_Node(NodeBase):
title = 'WINFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WINFUNCTYPE(self.input(0)))
class Winerror_Node(NodeBase):
title = 'WinError'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='code', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='descr', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WinError(self.input(0), self.input(1)))
class _Calcsize_Node(NodeBase):
title = '_calcsize'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='format'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._calcsize(self.input(0)))
class _Check_Size_Node(NodeBase):
title = '_check_size'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='typecode', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._check_size(self.input(0), self.input(1)))
class _Reset_Cache_Node(NodeBase):
title = '_reset_cache'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._reset_cache())
class C_Buffer_Node(NodeBase):
title = 'c_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.c_buffer(self.input(0), self.input(1)))
class Cast_Node(NodeBase):
title = 'cast'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='obj'),
NodeInputBP(label='typ'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.cast(self.input(0), self.input(1)))
class Create_String_Buffer_Node(NodeBase):
title = 'create_string_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_string_buffer(self.input(0), self.input(1)))
class Create_Unicode_Buffer_Node(NodeBase):
title = 'create_unicode_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_unicode_buffer(self.input(0), self.input(1)))
class String_At_Node(NodeBase):
title = 'string_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.string_at(self.input(0), self.input(1)))
class Wstring_At_Node(NodeBase):
title = 'wstring_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.wstring_at(self.input(0), self.input(1)))
export_nodes(
Array_Node,
Cfunctype_Node,
Dllcanunloadnow_Node,
Dllgetclassobject_Node,
Pyfunctype_Node,
Setpointertype_Node,
Winfunctype_Node,
Winerror_Node,
_Calcsize_Node,
_Check_Size_Node,
_Reset_Cache_Node,
C_Buffer_Node,
Cast_Node,
Create_String_Buffer_Node,
Create_Unicode_Buffer_Node,
String_At_Node,
Wstring_At_Node,
)
| true | true |
f72a9b400fc3d0e9b4c84e2cd50ded8e71059a28 | 6,080 | py | Python | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import re
import uuid
from xml.etree.cElementTree import XML, tostring
from django.conf import settings
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.touchforms_api import CaseSessionDataHelper
from corehq.apps.formplayer_api.smsforms import sms as tfsms
from corehq.apps.formplayer_api.smsforms.api import (
InvalidSessionIdException,
TouchformsError,
XFormsConfig,
get_raw_instance,
)
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.apps.users.models import CouchUser
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.util import utcnow
from .models import XFORMS_SESSION_SMS, SQLXFormsSession
COMMCONNECT_DEVICE_ID = "commconnect"
def start_session(session, domain, contact, app, module, form, case_id=None, yield_responses=False,
                  case_for_case_submission=False):
    """
    Start a touchforms session and save the record in the database.

    Returns a tuple of (session, responses): the (re-fetched) session
    object and either the raw xforms responses (yield_responses=True) or
    their text prompts only (yield_responses=False).

    :param session: SQLXFormsSession row to bind to the new touchforms session.
    :param contact: must expose .raw_username, .get_id and .get_language_code
        (or be a commcare case).
    :param module: currently unused by this function's body.
    :param case_for_case_submission: True when one case is filling out a
        survey that updates a related case (e.g. parent updating child).
    :raises TouchformsError: when touchforms answers the first request
        with an http-error status.
    """
    # NOTE: this call assumes that "contact" will expose three
    # properties: .raw_username, .get_id, and .get_language_code
    session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(COMMCONNECT_DEVICE_ID)

    # since the API user is a superuser, force touchforms to query only
    # the contact's cases by specifying it as an additional filter
    if is_commcarecase(contact) and form.requires_case():
        session_data["additional_filters"] = {
            "case_id": case_id,
            # "footprint" widens the query to parent cases when needed
            "footprint": "true" if form.uses_parent_case() else "false",
        }
    elif isinstance(contact, CouchUser):
        session_data["additional_filters"] = {
            "user_id": contact.get_id,
            "footprint": "true"
        }

    # Restore-as identity: cases restore by case id, users by username.
    kwargs = {}
    if is_commcarecase(contact):
        kwargs['restore_as_case_id'] = contact.case_id
    else:
        kwargs['restore_as'] = contact.raw_username

    if app and form:
        session_data.update(get_cloudcare_session_data(domain, form, contact))

    language = contact.get_language_code()
    config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),
                          language=language,
                          session_data=session_data,
                          domain=domain,
                          **kwargs)

    session_start_info = tfsms.start_session(config)
    # Persist the touchforms session id before inspecting the responses.
    session.session_id = session_start_info.session_id
    session.save()

    responses = session_start_info.first_responses
    if len(responses) > 0 and responses[0].status == 'http-error':
        # Touchforms is unreachable: close the session as failed.
        session.mark_completed(False)
        session.save()
        raise TouchformsError('Cannot connect to touchforms.')

    # Prevent future update conflicts by getting the session again from the db
    # since the session could have been updated separately in the first_responses call
    session = SQLXFormsSession.objects.get(pk=session.pk)
    if yield_responses:
        return (session, responses)
    else:
        return (session, _responses_to_text(responses))
def get_responses(domain, session_id, text):
    """Feed an inbound answer into an open xform session.

    Forwards ``text`` to touchforms as the next answer for ``session_id``
    and returns the resulting responses, materialised as a list (which
    may be empty).
    """
    response_stream = tfsms.next_responses(session_id, text, domain)
    return list(response_stream)
def _responses_to_text(responses):
    """Extract the non-empty text prompts from a sequence of xforms responses."""
    prompts = []
    for response in responses:
        prompt = response.text_prompt
        if prompt:
            prompts.append(prompt)
    return prompts
def submit_unfinished_form(session):
    """Submit whatever the session's form instance contains so far.

    Used by SMS/IVR surveys when a session must close early: the raw xml
    instance is fetched from touchforms, stamped with the current time,
    optionally stripped of its case actions, and submitted.

    If session.include_case_updates_in_partial_submissions is False the
    case block is emptied so no case create / update / close actions are
    performed, but the form is still submitted. Nothing happens when the
    session id is no longer known to touchforms.
    """
    try:
        raw = get_raw_instance(session.session_id, session.domain)
    except InvalidSessionIdException:
        # Session already gone on the touchforms side; nothing to submit.
        return
    document = XML(raw['output'])

    # Tags may or may not carry an xmlns prefix, so match with an optional
    # {namespace} group rather than comparing tag names directly.
    is_case_tag = re.compile(r"^(\{.*\}){0,1}case$").match
    is_meta_tag = re.compile(r"^(\{.*\}){0,1}meta$").match
    is_time_end_tag = re.compile(r"^(\{.*\}){0,1}timeEnd$").match

    timestamp = json_format_datetime(utcnow())
    for element in document:
        if is_case_tag(element.tag) is not None:
            element.set("date_modified", timestamp)
            if not session.include_case_updates_in_partial_submissions:
                # Drop every case action (create / update / close);
                # iterate over a snapshot because we mutate the element.
                for action in list(element):
                    element.remove(action)
        elif is_meta_tag(element.tag) is not None:
            # Record when the session was cut short.
            for meta_child in element:
                if is_time_end_tag(meta_child.tag):
                    meta_child.text = timestamp

    # Submit the cleaned xml as a partial submission.
    result = submit_form_locally(tostring(document), session.domain,
                                 app_id=session.app_id, partial_submission=True)
    session.submission_id = result.xform.form_id
| 40.533333 | 157 | 0.702138 | import re
import uuid
from xml.etree.cElementTree import XML, tostring
from django.conf import settings
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.touchforms_api import CaseSessionDataHelper
from corehq.apps.formplayer_api.smsforms import sms as tfsms
from corehq.apps.formplayer_api.smsforms.api import (
InvalidSessionIdException,
TouchformsError,
XFormsConfig,
get_raw_instance,
)
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.apps.users.models import CouchUser
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.util import utcnow
from .models import XFORMS_SESSION_SMS, SQLXFormsSession
COMMCONNECT_DEVICE_ID = "commconnect"
def start_session(session, domain, contact, app, module, form, case_id=None, yield_responses=False,
case_for_case_submission=False):
session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(COMMCONNECT_DEVICE_ID)
if is_commcarecase(contact) and form.requires_case():
session_data["additional_filters"] = {
"case_id": case_id,
"footprint": "true" if form.uses_parent_case() else "false",
}
elif isinstance(contact, CouchUser):
session_data["additional_filters"] = {
"user_id": contact.get_id,
"footprint": "true"
}
kwargs = {}
if is_commcarecase(contact):
kwargs['restore_as_case_id'] = contact.case_id
else:
kwargs['restore_as'] = contact.raw_username
if app and form:
session_data.update(get_cloudcare_session_data(domain, form, contact))
language = contact.get_language_code()
config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),
language=language,
session_data=session_data,
domain=domain,
**kwargs)
session_start_info = tfsms.start_session(config)
session.session_id = session_start_info.session_id
session.save()
responses = session_start_info.first_responses
if len(responses) > 0 and responses[0].status == 'http-error':
session.mark_completed(False)
session.save()
raise TouchformsError('Cannot connect to touchforms.')
# Prevent future update conflicts by getting the session again from the db
# since the session could have been updated separately in the first_responses call
session = SQLXFormsSession.objects.get(pk=session.pk)
if yield_responses:
return (session, responses)
else:
return (session, _responses_to_text(responses))
def get_responses(domain, session_id, text):
return list(tfsms.next_responses(session_id, text, domain))
def _responses_to_text(responses):
return [r.text_prompt for r in responses if r.text_prompt]
def submit_unfinished_form(session):
# Get and clean the raw xml
try:
response = get_raw_instance(session.session_id, session.domain)
xml = response['output']
except InvalidSessionIdException:
return
root = XML(xml)
case_tag_regex = re.compile(r"^(\{.*\}){0,1}case$") # Use regex in order to search regardless of namespace
meta_tag_regex = re.compile(r"^(\{.*\}){0,1}meta$")
timeEnd_tag_regex = re.compile(r"^(\{.*\}){0,1}timeEnd$")
current_timstamp = json_format_datetime(utcnow())
for child in root:
if case_tag_regex.match(child.tag) is not None:
# Found the case tag
case_element = child
case_element.set("date_modified", current_timstamp)
if not session.include_case_updates_in_partial_submissions:
# Remove case actions (create, update, close)
child_elements = [case_action for case_action in case_element]
for case_action in child_elements:
case_element.remove(case_action)
elif meta_tag_regex.match(child.tag) is not None:
# Found the meta tag, now set the value for timeEnd
for meta_child in child:
if timeEnd_tag_regex.match(meta_child.tag):
meta_child.text = current_timstamp
cleaned_xml = tostring(root)
# Submit the xml
result = submit_form_locally(cleaned_xml, session.domain, app_id=session.app_id, partial_submission=True)
session.submission_id = result.xform.form_id
| true | true |
f72a9b93aae1aefa7bf9852e1961a1a1a0e15237 | 667 | py | Python | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 5 | 2017-03-13T15:58:40.000Z | 2021-02-03T12:52:58.000Z | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 13 | 2015-06-14T22:01:37.000Z | 2021-01-05T13:57:00.000Z | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 9 | 2017-01-15T15:06:55.000Z | 2022-01-18T20:42:48.000Z | # -*- coding: utf-8 -*-
"""
@file
@brief Data from INSEE
**Source**
* ``irsocsd2014_G10.xlsx``: ?
* ``fm-fecondite-age-mere.csv``: `INSEE Bilan Démographique 2016 <https://www.insee.fr/fr/statistiques/1892259?sommaire=1912926>`_
* ``pop-totale-france.xlsx``: `INED Population totale
<https://www.ined.fr/fr/tout-savoir-population/chiffres/france/evolution-population/population-totale/>`_
* ``TF00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
* ``TH00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
""" # pragma: no cover
| 44.466667 | 130 | 0.724138 | true | true | |
f72a9be347a7824928143bcd46fdc086c947e678 | 18,930 | py | Python | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | 1 | 2020-10-19T15:13:09.000Z | 2020-10-19T15:13:09.000Z | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | null | null | null | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Functions and objects to deal with meteoroids orbits
"""
__author__ = "Hadrien A.R. Devillepoix, Trent Jansen-Sturgeon "
__copyright__ = "Copyright 2016-2017, Desert Fireball Network"
__license__ = "MIT"
__version__ = "1.0"
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import HCRS, ITRS, GCRS
from astropy.utils.iers import IERS_A, IERS_A_URL, IERS
from astropy.utils.data import download_file
from trajectory_utilities import ECEF2LLH, \
EarthPosition, HCRS2HCI, HCI2ECI_pos, \
OrbitalElements2PosVel, ECI2ECEF_pos
# Best-effort refresh of the IERS-A Earth-orientation table used by astropy
# time/coordinate transforms; on failure astropy falls back to its bundled
# (less up-to-date) table.
try:
    iers_a_file = download_file(IERS_A_URL, cache=True)
    iers_a = IERS_A.open(iers_a_file)
    IERS.iers_table = iers_a
except Exception:
    # A bare 'except:' here would also swallow KeyboardInterrupt/SystemExit;
    # catch Exception so those still propagate.
    print('IERS_A_URL is temporarily unavailable')
AU = 1*u.au.to(u.m)
SMA_JUPITER = 5.20336301 * u.au
def tisserand_wrt_jupiter(a, e, i):
    """Tisserand parameter of an orbit with respect to Jupiter.

    :param a: semi-major axis (same kind of quantity as SMA_JUPITER,
        e.g. an astropy length Quantity)
    :param e: eccentricity (dimensionless)
    :param i: inclination (angle accepted by np.cos)
    :returns: the (dimensionless) Tisserand parameter T_J
    """
    axis_ratio = a / SMA_JUPITER
    return SMA_JUPITER / a + 2 * np.cos(i) * np.sqrt(axis_ratio * (1 - e**2))
# Conversion vector: element-wise multiplier for a (6,1) orbital-element
# column -- row 0 converts AU to metres, row 1 leaves eccentricity
# unchanged, rows 2-5 convert the four angles from degrees to radians.
AU_Deg2m_Rad = np.vstack((AU, 1, np.pi / 180 * np.ones((4, 1))))

# Per-planet (6,1) orbital-element columns. From the values themselves the
# layout appears to be [a (AU), e, i (deg), argument of perihelion (deg),
# longitude of ascending node (deg), mean longitude (deg)] -- TODO confirm
# against the consumer (OrbitalElements2PosVel) and the original source.
Planets = {'Mercury': np.vstack((0.387099, 0.205636, 7.004979, 29.127030, 48.330766, 252.250324)),
           'Venus': np.vstack((0.723336, 0.006777, 3.394676, 54.922625, 76.679843, 181.979100)),
           'Earth': np.vstack((1.000003, 0.016711, -0.000015, 102.937682, 0.000000, 100.464572)),
           'Mars': np.vstack((1.523710, 0.093394, 1.849691, -73.503169, 49.559539, -4.553432)),
           'Jupiter': np.vstack((5.202887, 0.048386, 1.304397, -85.745429, 100.473909, 34.396441)),
           'Saturn': np.vstack((9.536676,0.053862,2.485992,-21.063546,113.662424,49.954244)),
           'Uranus': np.vstack((19.189165,0.047257,0.772638,96.937351,74.016925,313.238105)),
           'Neptune': np.vstack((30.069923,0.008590,1.770043,-86.819463,131.784226,-55.120030))}
class OrbitObject(object):
    """
    Solar system object osculating orbit.

    Stores the classical orbital elements (converted to au/deg), derived
    quantities (perihelion/aphelion distances, longitude of perihelion,
    ecliptic latitude of the corrected radiant) and the Tisserand
    parameter with respect to Jupiter.
    """

    def __init__(self,
                 orbit_type,
                 a, e, i, omega, Omega, theta,
                 ra_corr=np.nan*u.rad, dec_corr=np.nan*u.rad,
                 v_g=np.nan*u.m/u.second):
        # Classical elements, normalised to au / deg via astropy units.
        self.semi_major_axis = a.to(u.au)
        self.eccentricity = e
        self.inclination = i.to(u.deg)
        self.argument_periapsis = omega.to(u.deg)
        self.longitude_ascending_node = Omega.to(u.deg)
        # Longitude of perihelion = Omega + omega, wrapped to [0, 360) deg.
        self.longitude_perihelion = (self.longitude_ascending_node + self.argument_periapsis) % (360 * u.deg)
        self.true_anomaly = theta.to(u.deg)
        self.orbit_type = orbit_type

        # Perihelion / aphelion distances from a and e.
        self.perihelion = (1 - self.eccentricity) * self.semi_major_axis
        self.aphelion = (1 + self.eccentricity) * self.semi_major_axis

        # Corrected radiant, RA wrapped to [0, 360) deg.
        self.corr_radiant_ra = (ra_corr.to(u.deg)) % (360 * u.deg)
        self.corr_radiant_dec = dec_corr.to(u.deg)

        # Ecliptic latitude of the radiant: transform the (unit-distance)
        # radiant direction from HCRS to the heliocentric ecliptic frame
        # and take the latitude of the resulting vector.
        radiant = HCRS(ra=self.corr_radiant_ra, dec=self.corr_radiant_dec, distance=1.0*u.au)
        ecpliptic_radiant = HCRS2HCI(np.vstack(radiant.cartesian.xyz.value))
        self.ecliptic_latitude = np.rad2deg(np.arcsin(ecpliptic_radiant[2] / norm(ecpliptic_radiant)))*u.deg

        # Geocentric velocity.
        self.velocity_g = v_g.to(u.m / u.second)

        self.T_j = self.tisserand_criterion_wrt_jupiter()

    def tisserand_criterion_wrt_jupiter(self):
        '''
        Calculate the Tisserrand criterion with respect to Jupiter
        (delegates to the module-level tisserand_wrt_jupiter).
        '''
        return tisserand_wrt_jupiter(self.semi_major_axis, self.eccentricity, self.inclination)

    def __str__(self):
        # Human-readable dump of the main elements and radiant data.
        return str("Semi-major axis:              " + str(self.semi_major_axis) + "\n" +
                   "Eccentricity:                 " + str(self.eccentricity) + "\n" +
                   "Inclination:                  " + str(self.inclination) + "\n" +
                   "Argument of Periapsis:        " + str(self.argument_periapsis) + "\n" +
                   "Longitude of Ascending Node:  " + str(self.longitude_ascending_node) + "\n" +
                   "True Anomaly:                 " + str(self.true_anomaly) + "\n\n" +
                   "Ra_corrected:  " + str(self.corr_radiant_ra) + "\n" +
                   "Dec_corrected: " + str(self.corr_radiant_dec) + "\n" +
                   "Vel_g:         " + str(self.velocity_g))
'''
Functions deliberately kept outside of the native StateVector class to allow
multithreaded / multiprocess calls.
'''
def random_compute_orbit_ceplecha(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit with the Ceplecha method. Returns the mutated *sv*.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='Ceplecha')
    return sv
def random_compute_orbit_integration_EOE(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit by numerical integration of the orbital elements
    ('integrate_EOE' method). Returns the mutated *sv*.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_EOE')
    return sv
def random_compute_orbit_integration_posvel(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit by numerical integration of position/velocity
    ('integrate_posvel' method). Returns the mutated *sv*.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_posvel')
    return sv
def PlotOrbitalElements(COE, t_jd, t_soi, Sol):
    '''
    Plot the six classical orbital elements as a function of time.

    Arguments:
        COE: 6xN history of classical orbital elements
             [a (m), e, i (rad), omega (rad), Omega (rad), theta (rad)].
        t_jd: N epochs (JD) corresponding to the columns of COE.
        t_soi: sphere-of-influence crossing epochs; only t_soi[0] is marked.
        Sol: published solution (OrbitObject-like) to overlay, or 'NoSol'.
    '''
    Colour = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    i = 2  # single colour index used for every panel
    rad2deg = 180 / np.pi
    # One (subplot id, y-scale factor, y-axis label) entry per orbital element,
    # in the same row order as COE. Replaces six copy-pasted subplot stanzas.
    panels = [(321, 1 / AU, "Semi-major Axis (AU)"),
              (322, 1, "Eccentricity"),
              (323, rad2deg, "Inclination (deg)"),
              (324, rad2deg, "Argument of Periapsis (deg)"),
              (325, rad2deg, "Longitude of the Ascending Node (deg)"),
              (326, rad2deg, "True Anomaly (deg)")]
    plt.figure()
    for element, (panel, scale, ylabel) in zip(COE, panels):
        plt.subplot(panel)
        plt.plot(t_jd, element * scale, Colour[i])
        plt.axvline(x=t_soi[0], color='b'); plt.grid()
        plt.xlabel("Time (JD)"); plt.ylabel(ylabel)
    if Sol != 'NoSol':
        # Overlay the published element values as horizontal green lines
        # (the published values are already in AU / degrees).
        sol_values = [Sol.semi_major_axis.value, Sol.eccentricity,
                      Sol.inclination.value, Sol.argument_periapsis.value,
                      Sol.longitude_ascending_node.value, Sol.true_anomaly.value]
        for (panel, scale, ylabel), value in zip(panels, sol_values):
            plt.subplot(panel)
            plt.axhline(value, color='g')
    plt.show()
def PlotOrbit3D(OrbObjList, t0=2457535.0, Sol='NoSol'):
    '''
    3D plot of determined orbits (red) against the planetary orbits (blue).

    Arguments:
        OrbObjList: iterable of OrbitObject instances to draw.
        t0: reference epoch (JD); only used by the commented-out Earth overlay.
        Sol: published OrbitObject to overlay in green, or 'NoSol'.
    '''
    from mpl_toolkits.mplot3d import Axes3D
    ''' 3D Orbit Plot'''
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for OrbObj in OrbObjList:
        COE = np.vstack((OrbObj.semi_major_axis.value,
                         OrbObj.eccentricity,
                         OrbObj.inclination.value,
                         OrbObj.argument_periapsis.value,
                         OrbObj.longitude_ascending_node.value,
                         OrbObj.true_anomaly.value)) * AU_Deg2m_Rad
        # Sweep the anomaly through a full revolution to trace the whole orbit.
        COE = COE + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
        [Pos_HCI, Vel_HCI] = OrbitalElements2PosVel(COE, 'Sun', 'Classical')
        ax.plot(Pos_HCI[0]/AU, Pos_HCI[1]/AU, Pos_HCI[2]/AU, color='r', label='Determined Orbit')
    ''' Plot the planets'''
    for Planet in Planets:
        COE = Planets[Planet] * AU_Deg2m_Rad
        COEs = COE + np.vstack((np.zeros((5, 200)), np.linspace(0, 2 * np.pi, 200)))
        [pos, vel] = OrbitalElements2PosVel(COEs, 'Sun', 'Classical')
        ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='b')
    # t_yr = t0 + np.linspace(0, 365.25, 100)
    # pos_earth = EarthPosition(t_yr)
    # ax.plot(pos_earth[0]/AU, pos_earth[1]/AU, pos_earth[2]/AU,
    #     color='b', linewidth=2.0, label='Earth')
    ''' Plot the solution (if given) '''
    if Sol != 'NoSol':
        Sol_oe = np.vstack((Sol.semi_major_axis.value,
                            Sol.eccentricity,
                            Sol.inclination.value,
                            Sol.argument_periapsis.value,
                            Sol.longitude_ascending_node.value,
                            Sol.true_anomaly.value)) * AU_Deg2m_Rad
        Sol_oe = Sol_oe + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
        [pos, vel] = OrbitalElements2PosVel(Sol_oe, 'Sun', 'Classical')
        ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='g', label='Published Orbit')
    plt.legend()
    # Fix the view to +/- 5 AU around the Sun.
    ax.set_xlim([-5, 5])
    ax.set_ylim([-5, 5])
    ax.set_zlim([-5, 5])
    plt.show()
def PlotPerts(Pert):
    '''
    Plot the recorded perturbation accelerations on a log scale against
    time relative to the final epoch.

    Arguments:
        Pert: sequence of records [t (days), Earth, Moon, Sun, J2, drag]
              accelerations in m/s^2; J2 / drag entries may be NaN where the
              perturbation was not evaluated.
    '''
    PPert = np.vstack(Pert).T; t = PPert[0]
    plt.figure(figsize=(16,9))
    t_rel = t - np.max(t) # Days
    plt.plot(t_rel, PPert[1], '-b', linewidth=3.0, label='Earth')
    plt.plot(t_rel, PPert[2], '--k', linewidth=3.0, label='Moon')
    plt.plot(t_rel, PPert[3], '-.r', linewidth=3.0, label='Sun')
    # Drop NaN samples before plotting J2 / drag.
    PertJ2 = PPert[4][~np.isnan(PPert[4])]
    plt.plot(t_rel[~np.isnan(PPert[4])], PertJ2, ':g', linewidth=3.0, label='J2')
    PertDrag = PPert[5][~np.isnan(PPert[5])]
    plt.plot(t_rel[~np.isnan(PPert[5])], PertDrag, '-.c', linewidth=3.0, label='Drag')
    plt.yscale('log'); plt.grid(True); plt.legend(loc='best')
    plt.xlabel('Relative Time [days]'); plt.ylabel('Perturbation Acceleration [m/s^2]')
    plt.show()
def PlotIntStep(t):
    '''
    Plot the absolute integrator timestep (seconds) against relative time.

    Arguments:
        t: array of integration epochs in days (e.g. JD); successive
           differences are converted from days to seconds.
    '''
    # np.diff replaces the original manual Python loop.
    dt = np.diff(np.asarray(t)) * 24*60*60  # days -> seconds
    plt.figure(figsize=(16,9))
    t_rel = t - np.max(t)  # Days relative to the final epoch
    plt.plot(t_rel[1:], np.abs(dt))
    plt.yscale('log'); plt.grid(True)#; plt.legend()
    plt.xlabel('Relative Time [days]'); plt.ylabel('Timestep [sec]')
    plt.show()
def ThirdBodyPerturbation(Pos, rho, mu):
    '''
    Third-body perturbation acceleration, including the indirect term.

    Arguments:
        Pos: (3,1) position of the perturbed body relative to the primary [m].
        rho: (3,1) position of the third body relative to the primary [m].
        mu: standard gravitational parameter of the third body [m3/s2].
    Returns:
        (3,1) perturbing acceleration [m/s2].
    '''
    # Battin's scalar formula for the vector difference: avoids the loss of
    # precision of subtracting two nearly-equal vectors directly.
    q = np.dot(Pos.T, (Pos - 2 * rho) / (np.dot(rho.T, rho)))
    f = (3 * q + 3 * q**2 + q**3) / (1 + (1 + q)**1.5)
    # Third body perturbation acceleration (with indirect term).
    # Renamed from 'u' to avoid shadowing the module-wide astropy.units alias.
    accel = -mu * (Pos + f * rho) / ((norm(Pos - rho))**3)
    return accel
def NRLMSISE_00(pos, time, pos_type='eci'):
    ''' Courtesy of Ellie Sansom '''
    """
    Evaluate the NRLMSISE-00 empirical atmosphere model at a position/time.

    Inputs:
        pos: position vector, interpreted according to pos_type:
             'eci', 'ecef', or 'llh' ([lat (rad), long (rad), height (m)]).
        time: epoch (JD, UTC).
    Outputs: (temperature [K], atm pressure [Pa], atm density [kg/m3],
              speed of sound [m/s], dynamic viscosity [Pa s])
    Raises:
        ValueError: if pos_type is not one of 'eci', 'ecef', 'llh'.
    """
    from nrlmsise_00_header import nrlmsise_input, nrlmsise_output, nrlmsise_flags
    from nrlmsise_00 import gtd7
    time = Time(time, format='jd', scale='utc')
    # Convert the given position to geodetic lat/long/height
    if pos_type == 'eci':
        Pos_LLH = ECEF2LLH(ECI2ECEF_pos(pos, time))
    elif pos_type == 'ecef':
        Pos_LLH = ECEF2LLH(pos)
    elif pos_type == 'llh':
        Pos_LLH = pos
    else:
        # Raise instead of exit(): library code must not kill the host process.
        raise ValueError("NRLMSISE_00 error: Invalid pos_type '{0}'".format(pos_type))
    g_lat = np.rad2deg(Pos_LLH[0][0])
    g_long = np.rad2deg(Pos_LLH[1][0])
    alt = Pos_LLH[2][0]
    # Break up time into year, day of year, and seconds of the day
    yDay = time.yday.split(':'); yr = float(yDay[0]); doy = float(yDay[1])
    sec = float(yDay[2]) * 60*60 + float(yDay[3]) * 60 + float(yDay[4])
    # Assign our variables into the nrmsise inputs
    Input = nrlmsise_input(yr, doy, sec, alt/1000, g_lat, g_long)
    Output = nrlmsise_output(); Flags = nrlmsise_flags()
    # Switches: enable all model contributions
    for i in range(1, 24):
        Flags.switches[i] = 1
    # GTD7 atmospheric model subroutine
    gtd7(Input, Flags, Output)
    # Temperature at alt [deg K]
    T = Output.t[1]
    # Molecular number densities [m-3]
    He = Output.d[0] # He
    O = Output.d[1] # O
    N2 = Output.d[2] # N2
    O2 = Output.d[3] # O2
    Ar = Output.d[4] # Ar
    H = Output.d[6] # H
    N = Output.d[7] # N
    # ano_O = Output.d[8] # Anomalous oxygen
    sum_mass = He + O + N2 + O2 + Ar + H + N
    # Molar mass
    He_mass = 4.0026 # g/mol
    O_mass = 15.9994 # g/mol
    N2_mass = 28.013 # g/mol
    O2_mass = 31.998 # g/mol
    Ar_mass = 39.948 # g/mol
    H_mass = 1.0079 # g/mol
    N_mass = 14.0067 # g/mol
    # Number-density-weighted molecular weight of air [kg/mol]
    mol_mass_air = (He_mass * He + O_mass * O + N2_mass * N2 + O2_mass * O2
                    + Ar_mass * Ar + H_mass * H + N_mass * N) / (1000 * sum_mass)
    # Total mass density [kg*m-3]
    po = Output.d[5] * 1000
    Ru = 8.3144621 # Universal gas constant [J/(K*mol)]
    R = Ru / mol_mass_air # Individual gas constant [J/(kg*K)] #287.058
    # Ideal gas law
    atm_pres = po * T * R
    # Speed of sound in atm
    sos = 331.3 * np.sqrt(1 + T / 273.15)
    # Dynamic viscosity via Sutherland's law (http://en.wikipedia.org/wiki/Viscosity)
    C = 120 #Sutherland's constant for air [deg K]
    mu_ref = 18.27e-6 # Reference viscosity [[mu_Pa s] * e-6]
    T_ref = 291.15 # Reference temperature [deg K]
    dyn_vis = mu_ref * (T_ref + C) / (T + C) * (T / T_ref)**1.5
    return T, atm_pres, po, sos, dyn_vis
# def compute_infinity_radiant(stateVec):
# ''' This method computing the apparent radiant, it doesn't consider the zenith attraction '''
# Pos_geo = stateVec.position
# Vel_geo = stateVec.vel_xyz
# t0 = stateVec.epoch
# # Compute radiant (apparent ORIGIN of meteoroid)
# Vel_eci = ECEF2ECI(Pos_geo, Vel_geo, t0)[1]
# ra_eci = np.arctan2(-Vel_eci[1], -Vel_eci[0])
# dec_eci = np.arcsin(-Vel_eci[2] / norm(Vel_eci))
# # ^-- redundant information. Already have it in metadata
# return ra_eci, dec_eci
def compute_cartesian_velocities_from_radiant(stateVec):
    '''
    Turn the apparent ECEF radiant (ra/dec in degrees) and entry speed into a
    cartesian (3,1) velocity vector. The radiant is the direction the body
    appears to come FROM, hence the negation.
    '''
    ra = np.deg2rad(stateVec.ra_ecef_inf)
    dec = np.deg2rad(stateVec.dec_ecef_inf)
    # Unit vector pointing towards the radiant.
    radiant_unit = np.vstack((np.cos(ra) * np.cos(dec),
                              np.sin(ra) * np.cos(dec),
                              np.sin(dec)))
    return -stateVec.velocity_inf * radiant_unit
def SimilarityCriterion(COE1, COE2, method='SH'):
    '''
    Orbit similarity (D) criterion between two orbits.

    Southworth & Hawkins similarity criterion (1963), method='SH'; or
    Drummond's similarity criterion (1981), method='D'; or
    Jopek's similarity criterion (1993), method='H'.

    COE1/COE2 are either 6-element arrays [a (m), e, i, w, W, ... (rad)] or
    OrbitObject-like instances with astropy-unit elements.
    Returns the dimensionless similarity value (smaller = more alike).
    Raises ValueError for an unknown method (previously fell through to an
    UnboundLocalError on 'Similarity').
    '''
    if type(COE1) == np.ndarray:
        a1 = COE1[0]/AU; a2 = COE2[0]/AU # [AU]
        e1 = COE1[1]; e2 = COE2[1] # []
        i1 = COE1[2]; i2 = COE2[2] # [rad]
        w1 = COE1[3]; w2 = COE2[3] # [rad]
        W1 = COE1[4]; W2 = COE2[4] # [rad]
    else:
        a1 = COE1.semi_major_axis.value; a2 = COE2.semi_major_axis.value # [AU]
        e1 = COE1.eccentricity; e2 = COE2.eccentricity # []
        i1 = COE1.inclination.to(u.rad).value; i2 = COE2.inclination.to(u.rad).value # [rad]
        w1 = COE1.argument_periapsis.to(u.rad).value; w2 = COE2.argument_periapsis.to(u.rad).value # [rad]
        W1 = COE1.longitude_ascending_node.to(u.rad).value; W2 = COE2.longitude_ascending_node.to(u.rad).value # [rad]
    q1 = a1 * (1 - e1) # [AU]
    q2 = a2 * (1 - e2) # [AU]
    # Angle between the orbital planes (I21)
    var = (2 * np.sin((i2 - i1) / 2))**2 + np.sin(i1) * np.sin(i2) * (2 * np.sin((W2 - W1) / 2))**2
    I21 = 2 * np.arcsin(np.sqrt(var) / 2)
    if method == 'SH':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + (q2 - q1)**2 + var + (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
        Similarity = np.sqrt(Similarity2)
    elif method == 'D':
        # Angle between the orbital lines of apsides (theta21)
        l1 = W1 + np.arctan(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))
        l2 = W2 + np.arctan(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))
        theta21 = np.arccos(np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l2 - l1))
        Similarity2 = ((e2 - e1) / (e2 + e1))**2 + ((q2 - q1) / (q2 + q1))**2 + \
            (I21 / np.pi)**2 + ((e2 + e1) / 2)**2 * (theta21 / np.pi)**2
        Similarity = np.sqrt(Similarity2)
    elif method == 'H':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + ((q2 - q1) / (q2 + q1))**2 + var + \
            (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
        Similarity = np.sqrt(Similarity2)
    else:
        raise ValueError("SimilarityCriterion: unknown method '{0}'".format(method))
    return Similarity
def generate_ephemeris(pos_hci, t_jd):
    '''
    Build an ephemeris dictionary for a body at heliocentric-inertial
    positions pos_hci (3xN, metres) at epochs t_jd (JD, UTC).

    Returns a dict with ISO datetimes, MJDs, Sun/Earth distances [km],
    ECI positions [m], phase and elongation angles [deg], and geocentric
    ra/dec [deg].
    '''
    # Save the datetime
    ephem_dict = {'datetime': Time(t_jd, format='jd', scale='utc').isot}
    ephem_dict['MJD'] = Time(t_jd, format='jd', scale='utc').mjd
    # distance to sun
    ephem_dict['distance_to_sun'] = norm(pos_hci, axis=0) / 1000 #km
    # Convert to eci coordinates
    pos_eci = HCI2ECI_pos(pos_hci, t_jd)
    ephem_dict['pos_eci_x'] = pos_eci[0]
    ephem_dict['pos_eci_y'] = pos_eci[1]
    ephem_dict['pos_eci_z'] = pos_eci[2]
    # NOTE(review): HCI2HCRS is not among the trajectory_utilities names
    # imported at the top of this file -- confirm it is imported, otherwise
    # this raises NameError at runtime.
    pos_hcrs = HCI2HCRS(pos_hci)
    # Calculate phase angle: angle between the heliocentric and geocentric
    # position vectors of the body.
    ephem_dict['phase_angle'] = np.rad2deg(np.arccos(np.sum(pos_hcrs * pos_eci, axis=0)
                                                     / (norm(pos_hcrs, axis=0) * norm(pos_eci, axis=0))))
    # Calculate elongation angle: angle between the Sun and the body as seen
    # from the geocentre.
    pos_sun = pos_eci - pos_hcrs
    ephem_dict['elongation_angle'] = np.rad2deg(np.arccos(np.sum(pos_sun * pos_eci, axis=0)
                                                          / (norm(pos_sun, axis=0) * norm(pos_eci, axis=0))))
    # Calculate ephemeris (geocentric right ascension / declination)
    dist = norm(pos_eci, axis=0) #m
    ephem_dict['ra'] = np.rad2deg(np.arctan2(pos_eci[1], pos_eci[0]))%360 #deg
    ephem_dict['dec'] = np.rad2deg(np.arcsin(pos_eci[2] / dist)) #deg
    ephem_dict['distance_to_earth'] = norm(pos_eci, axis=0) / 1000 #km
    return ephem_dict
| 37.485149 | 118 | 0.592974 |
__author__ = "Hadrien A.R. Devillepoix, Trent Jansen-Sturgeon "
__copyright__ = "Copyright 2016-2017, Desert Fireball Network"
__license__ = "MIT"
__version__ = "1.0"
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import HCRS, ITRS, GCRS
from astropy.utils.iers import IERS_A, IERS_A_URL, IERS
from astropy.utils.data import download_file
from trajectory_utilities import ECEF2LLH, \
    EarthPosition, HCRS2HCI, HCI2HCRS, HCI2ECI_pos, \
    OrbitalElements2PosVel, ECI2ECEF_pos
# Try to refresh the IERS-A Earth-orientation table; a network failure must
# not prevent import, so fall back to the builtin table on any error.
try:
    iers_a_file = download_file(IERS_A_URL, cache=True)
    iers_a = IERS_A.open(iers_a_file)
    IERS.iers_table = iers_a
except Exception:
    # Bare 'except:' replaced: it also swallowed KeyboardInterrupt/SystemExit.
    print('IERS_A_URL is temporarily unavailable')
# Astronomical unit in metres (plain float) and Jupiter's semi-major axis
# (astropy Quantity, AU).
AU = 1*u.au.to(u.m)
SMA_JUPITER = 5.20336301 * u.au
def tisserand_wrt_jupiter(a, e, i):
    '''
    Tisserand parameter of an orbit with respect to Jupiter.

    Arguments:
        a: semi-major axis (astropy Quantity, length).
        e: eccentricity (dimensionless).
        i: inclination (astropy Quantity, angle).
    Returns the dimensionless Tisserand parameter T_j.
    '''
    T_j = (SMA_JUPITER / a +
           2 * np.cos(i) *
           np.sqrt(a / SMA_JUPITER * (1 - e**2)))
    return T_j
# 6x1 column of scale factors converting orbital elements
# [AU, -, deg, deg, deg, deg] to [m, -, rad, rad, rad, rad].
AU_Deg2m_Rad = np.vstack((AU, 1, np.pi / 180 * np.ones((4, 1))))
# Osculating orbital elements of the major planets, one 6x1 column per planet:
# [a (AU), e, i (deg), omega (deg), Omega (deg), longitude (deg)].
# Scaled to SI metres/radians via AU_Deg2m_Rad before use (see PlotOrbit3D).
Planets = {'Mercury': np.vstack((0.387099, 0.205636, 7.004979, 29.127030, 48.330766, 252.250324)),
           'Venus': np.vstack((0.723336, 0.006777, 3.394676, 54.922625, 76.679843, 181.979100)),
           'Earth': np.vstack((1.000003, 0.016711, -0.000015, 102.937682, 0.000000, 100.464572)),
           'Mars': np.vstack((1.523710, 0.093394, 1.849691, -73.503169, 49.559539, -4.553432)),
           'Jupiter': np.vstack((5.202887, 0.048386, 1.304397, -85.745429, 100.473909, 34.396441)),
           'Saturn': np.vstack((9.536676,0.053862,2.485992,-21.063546,113.662424,49.954244)),
           'Uranus': np.vstack((19.189165,0.047257,0.772638,96.937351,74.016925,313.238105)),
           'Neptune': np.vstack((30.069923,0.008590,1.770043,-86.819463,131.784226,-55.120030))}
class OrbitObject(object):
    """
    Solar system object osculating orbit.

    Stores the six classical orbital elements (as astropy Quantities), the
    corrected radiant and geocentric velocity, and derives perihelion,
    aphelion, ecliptic latitude and the Tisserand parameter w.r.t. Jupiter.
    """
    def __init__(self,
                 orbit_type,
                 a, e, i, omega, Omega, theta,
                 ra_corr=np.nan*u.rad, dec_corr=np.nan*u.rad,
                 v_g=np.nan*u.m/u.second):
        # Classical elements, normalised to AU / degrees.
        self.semi_major_axis = a.to(u.au)
        self.eccentricity = e
        self.inclination = i.to(u.deg)
        self.argument_periapsis = omega.to(u.deg)
        self.longitude_ascending_node = Omega.to(u.deg)
        # Longitude of perihelion = Omega + omega, wrapped to [0, 360) deg.
        self.longitude_perihelion = (self.longitude_ascending_node + self.argument_periapsis) % (360 * u.deg)
        self.true_anomaly = theta.to(u.deg)
        self.orbit_type = orbit_type
        # Periapsis / apoapsis distances derived from a and e.
        self.perihelion = (1 - self.eccentricity) * self.semi_major_axis
        self.aphelion = (1 + self.eccentricity) * self.semi_major_axis
        # Corrected radiant; RA wrapped to [0, 360) deg.
        self.corr_radiant_ra = (ra_corr.to(u.deg)) % (360 * u.deg)
        self.corr_radiant_dec = dec_corr.to(u.deg)
        radiant = HCRS(ra=self.corr_radiant_ra, dec=self.corr_radiant_dec, distance=1.0*u.au)
        # Rotate the radiant into the heliocentric-inertial (ecliptic) frame
        # to obtain its ecliptic latitude.
        ecpliptic_radiant = HCRS2HCI(np.vstack(radiant.cartesian.xyz.value))
        self.ecliptic_latitude = np.rad2deg(np.arcsin(ecpliptic_radiant[2] / norm(ecpliptic_radiant)))*u.deg
        self.velocity_g = v_g.to(u.m / u.second)
        self.T_j = self.tisserand_criterion_wrt_jupiter()
    def tisserand_criterion_wrt_jupiter(self):
        '''
        Calculate the Tisserand criterion with respect to Jupiter.
        '''
        return tisserand_wrt_jupiter(self.semi_major_axis, self.eccentricity, self.inclination)
    def __str__(self):
        # Human-readable multi-line summary of the orbit and radiant.
        return str("Semi-major axis: " + str(self.semi_major_axis) + "\n" +
                   "Eccentricity: " + str(self.eccentricity) + "\n" +
                   "Inclination: " + str(self.inclination) + "\n" +
                   "Argument of Periapsis: " + str(self.argument_periapsis) + "\n" +
                   "Longitude of Ascending Node: " + str(self.longitude_ascending_node) + "\n" +
                   "True Anomaly: " + str(self.true_anomaly) + "\n\n" +
                   "Ra_corrected: " + str(self.corr_radiant_ra) + "\n" +
                   "Dec_corrected: " + str(self.corr_radiant_dec) + "\n" +
                   "Vel_g: " + str(self.velocity_g))
def random_compute_orbit_ceplecha(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit with the Ceplecha method. Returns the mutated *sv*.
    Kept at module level so it can be dispatched to worker threads/processes.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='Ceplecha')
    return sv
def random_compute_orbit_integration_EOE(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit with the 'integrate_EOE' method. Returns *sv*.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_EOE')
    return sv
def random_compute_orbit_integration_posvel(sv):
    '''
    Randomise the velocity vector of state vector *sv* (one Monte Carlo draw)
    and compute its orbit with the 'integrate_posvel' method. Returns *sv*.
    '''
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_posvel')
    return sv
def PlotOrbitalElements(COE, t_jd, t_soi, Sol):
    '''
    Plot the six classical orbital elements as a function of time.

    Arguments:
        COE: 6xN history of classical orbital elements
             [a (m), e, i (rad), omega (rad), Omega (rad), theta (rad)].
        t_jd: N epochs (JD) corresponding to the columns of COE.
        t_soi: sphere-of-influence crossing epochs; only t_soi[0] is marked.
        Sol: published solution (OrbitObject-like) to overlay, or 'NoSol'.
    '''
    Colour = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    i = 2  # single colour index used for every panel
    rad2deg = 180 / np.pi
    # One (subplot id, y-scale factor, y-axis label) entry per orbital element,
    # in the same row order as COE. Replaces six copy-pasted subplot stanzas.
    panels = [(321, 1 / AU, "Semi-major Axis (AU)"),
              (322, 1, "Eccentricity"),
              (323, rad2deg, "Inclination (deg)"),
              (324, rad2deg, "Argument of Periapsis (deg)"),
              (325, rad2deg, "Longitude of the Ascending Node (deg)"),
              (326, rad2deg, "True Anomaly (deg)")]
    plt.figure()
    for element, (panel, scale, ylabel) in zip(COE, panels):
        plt.subplot(panel)
        plt.plot(t_jd, element * scale, Colour[i])
        plt.axvline(x=t_soi[0], color='b'); plt.grid()
        plt.xlabel("Time (JD)"); plt.ylabel(ylabel)
    if Sol != 'NoSol':
        # Overlay the published element values as horizontal green lines
        # (the published values are already in AU / degrees).
        sol_values = [Sol.semi_major_axis.value, Sol.eccentricity,
                      Sol.inclination.value, Sol.argument_periapsis.value,
                      Sol.longitude_ascending_node.value, Sol.true_anomaly.value]
        for (panel, scale, ylabel), value in zip(panels, sol_values):
            plt.subplot(panel)
            plt.axhline(value, color='g')
    plt.show()
def PlotOrbit3D(OrbObjList, t0=2457535.0, Sol='NoSol'):
    '''
    3D plot of determined orbits (red) against the planetary orbits (blue).

    Arguments:
        OrbObjList: iterable of OrbitObject instances to draw.
        t0: reference epoch (JD); currently unused in the plot body.
        Sol: published OrbitObject to overlay in green, or 'NoSol'.
    '''
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for OrbObj in OrbObjList:
        COE = np.vstack((OrbObj.semi_major_axis.value,
                         OrbObj.eccentricity,
                         OrbObj.inclination.value,
                         OrbObj.argument_periapsis.value,
                         OrbObj.longitude_ascending_node.value,
                         OrbObj.true_anomaly.value)) * AU_Deg2m_Rad
        # Sweep the anomaly through a full revolution to trace the whole orbit.
        COE = COE + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
        [Pos_HCI, Vel_HCI] = OrbitalElements2PosVel(COE, 'Sun', 'Classical')
        ax.plot(Pos_HCI[0]/AU, Pos_HCI[1]/AU, Pos_HCI[2]/AU, color='r', label='Determined Orbit')
    # Plot the planets.
    for Planet in Planets:
        COE = Planets[Planet] * AU_Deg2m_Rad
        COEs = COE + np.vstack((np.zeros((5, 200)), np.linspace(0, 2 * np.pi, 200)))
        [pos, vel] = OrbitalElements2PosVel(COEs, 'Sun', 'Classical')
        ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='b')
    # Plot the published solution (if given).
    if Sol != 'NoSol':
        Sol_oe = np.vstack((Sol.semi_major_axis.value,
                            Sol.eccentricity,
                            Sol.inclination.value,
                            Sol.argument_periapsis.value,
                            Sol.longitude_ascending_node.value,
                            Sol.true_anomaly.value)) * AU_Deg2m_Rad
        Sol_oe = Sol_oe + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
        [pos, vel] = OrbitalElements2PosVel(Sol_oe, 'Sun', 'Classical')
        ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='g', label='Published Orbit')
    plt.legend()
    # Fix the view to +/- 5 AU around the Sun.
    ax.set_xlim([-5, 5])
    ax.set_ylim([-5, 5])
    ax.set_zlim([-5, 5])
    plt.show()
def PlotPerts(Pert):
    '''
    Plot the recorded perturbation accelerations on a log scale against
    time relative to the final epoch.

    Arguments:
        Pert: sequence of records [t (days), Earth, Moon, Sun, J2, drag]
              accelerations in m/s^2; J2 / drag entries may be NaN.
    '''
    PPert = np.vstack(Pert).T; t = PPert[0]
    plt.figure(figsize=(16,9))
    t_rel = t - np.max(t)  # days relative to the final epoch
    plt.plot(t_rel, PPert[1], '-b', linewidth=3.0, label='Earth')
    plt.plot(t_rel, PPert[2], '--k', linewidth=3.0, label='Moon')
    plt.plot(t_rel, PPert[3], '-.r', linewidth=3.0, label='Sun')
    # Drop NaN samples before plotting J2 / drag.
    PertJ2 = PPert[4][~np.isnan(PPert[4])]
    plt.plot(t_rel[~np.isnan(PPert[4])], PertJ2, ':g', linewidth=3.0, label='J2')
    PertDrag = PPert[5][~np.isnan(PPert[5])]
    plt.plot(t_rel[~np.isnan(PPert[5])], PertDrag, '-.c', linewidth=3.0, label='Drag')
    plt.yscale('log'); plt.grid(True); plt.legend(loc='best')
    plt.xlabel('Relative Time [days]'); plt.ylabel('Perturbation Acceleration [m/s^2]')
    plt.show()
def PlotIntStep(t):
    '''
    Plot the absolute integrator timestep (seconds) against relative time.

    Arguments:
        t: array of integration epochs in days (e.g. JD); successive
           differences are converted from days to seconds.
    '''
    # np.diff replaces the original manual Python loop.
    dt = np.diff(np.asarray(t)) * 24*60*60  # days -> seconds
    plt.figure(figsize=(16,9))
    t_rel = t - np.max(t)  # days relative to the final epoch
    plt.plot(t_rel[1:], np.abs(dt))
    plt.yscale('log'); plt.grid(True)
    plt.xlabel('Relative Time [days]'); plt.ylabel('Timestep [sec]')
    plt.show()
def ThirdBodyPerturbation(Pos, rho, mu):
    '''
    Third-body perturbation acceleration, including the indirect term.

    Arguments:
        Pos: (3,1) position of the perturbed body relative to the primary [m].
        rho: (3,1) position of the third body relative to the primary [m].
        mu: standard gravitational parameter of the third body [m3/s2].
    Returns:
        (3,1) perturbing acceleration [m/s2].
    '''
    # Battin's scalar formula for the vector difference: avoids the loss of
    # precision of subtracting two nearly-equal vectors directly.
    q = np.dot(Pos.T, (Pos - 2 * rho) / (np.dot(rho.T, rho)))
    f = (3 * q + 3 * q**2 + q**3) / (1 + (1 + q)**1.5)
    # Renamed from 'u' to avoid shadowing the module-wide astropy.units alias.
    accel = -mu * (Pos + f * rho) / ((norm(Pos - rho))**3)
    return accel
def NRLMSISE_00(pos, time, pos_type='eci'):
    """
    Evaluate the NRLMSISE-00 empirical atmosphere model at a position/time.

    Inputs:
        pos: position vector, interpreted according to pos_type:
             'eci', 'ecef', or 'llh' ([lat (rad), long (rad), height (m)]).
        time: epoch (JD, UTC).
    Outputs: (temperature [K], atm pressure [Pa], atm density [kg/m3],
              speed of sound [m/s], dynamic viscosity [Pa s])
    Raises:
        ValueError: if pos_type is not one of 'eci', 'ecef', 'llh'.
    """
    from nrlmsise_00_header import nrlmsise_input, nrlmsise_output, nrlmsise_flags
    from nrlmsise_00 import gtd7
    time = Time(time, format='jd', scale='utc')
    # Convert the given position to geodetic lat/long/height
    if pos_type == 'eci':
        Pos_LLH = ECEF2LLH(ECI2ECEF_pos(pos, time))
    elif pos_type == 'ecef':
        Pos_LLH = ECEF2LLH(pos)
    elif pos_type == 'llh':
        Pos_LLH = pos
    else:
        # Raise instead of exit(): library code must not kill the host process.
        raise ValueError("NRLMSISE_00 error: Invalid pos_type '{0}'".format(pos_type))
    g_lat = np.rad2deg(Pos_LLH[0][0])
    g_long = np.rad2deg(Pos_LLH[1][0])
    alt = Pos_LLH[2][0]
    # Break up time into year, day of year, and seconds of the day
    yDay = time.yday.split(':'); yr = float(yDay[0]); doy = float(yDay[1])
    sec = float(yDay[2]) * 60*60 + float(yDay[3]) * 60 + float(yDay[4])
    # Assign our variables into the nrmsise inputs
    Input = nrlmsise_input(yr, doy, sec, alt/1000, g_lat, g_long)
    Output = nrlmsise_output(); Flags = nrlmsise_flags()
    # Switches: enable all model contributions
    for i in range(1, 24):
        Flags.switches[i] = 1
    # GTD7 atmospheric model subroutine
    gtd7(Input, Flags, Output)
    # Temperature at alt [deg K]
    T = Output.t[1]
    # Molecular number densities [m-3]
    He = Output.d[0] # He
    O = Output.d[1] # O
    N2 = Output.d[2] # N2
    O2 = Output.d[3] # O2
    Ar = Output.d[4] # Ar
    H = Output.d[6] # H
    N = Output.d[7] # N
    # ano_O = Output.d[8] # Anomalous oxygen
    sum_mass = He + O + N2 + O2 + Ar + H + N
    # Molar mass
    He_mass = 4.0026 # g/mol
    O_mass = 15.9994 # g/mol
    N2_mass = 28.013 # g/mol
    O2_mass = 31.998 # g/mol
    Ar_mass = 39.948 # g/mol
    H_mass = 1.0079 # g/mol
    N_mass = 14.0067 # g/mol
    # Number-density-weighted molecular weight of air [kg/mol]
    mol_mass_air = (He_mass * He + O_mass * O + N2_mass * N2 + O2_mass * O2
                    + Ar_mass * Ar + H_mass * H + N_mass * N) / (1000 * sum_mass)
    # Total mass density [kg*m-3]
    po = Output.d[5] * 1000
    Ru = 8.3144621 # Universal gas constant [J/(K*mol)]
    R = Ru / mol_mass_air # Individual gas constant [J/(kg*K)]
    # Ideal gas law
    atm_pres = po * T * R
    # Speed of sound in atm
    sos = 331.3 * np.sqrt(1 + T / 273.15)
    # Dynamic viscosity via Sutherland's law
    C = 120 # Sutherland's constant for air [deg K]
    mu_ref = 18.27e-6 # Reference viscosity [Pa s]
    T_ref = 291.15 # Reference temperature [deg K]
    dyn_vis = mu_ref * (T_ref + C) / (T + C) * (T / T_ref)**1.5
    return T, atm_pres, po, sos, dyn_vis
# Pos_geo = stateVec.position
# Vel_geo = stateVec.vel_xyz
# t0 = stateVec.epoch
# # Compute radiant (apparent ORIGIN of meteoroid)
# Vel_eci = ECEF2ECI(Pos_geo, Vel_geo, t0)[1]
# ra_eci = np.arctan2(-Vel_eci[1], -Vel_eci[0])
# dec_eci = np.arcsin(-Vel_eci[2] / norm(Vel_eci))
# # ^-- redundant information. Already have it in metadata
# return ra_eci, dec_eci
def compute_cartesian_velocities_from_radiant(stateVec):
    '''
    Turn the apparent ECEF radiant (ra/dec in degrees) and entry speed into a
    cartesian (3,1) velocity vector. The radiant is the direction the body
    appears to come FROM, hence the negation.
    '''
    ra = np.deg2rad(stateVec.ra_ecef_inf)
    dec = np.deg2rad(stateVec.dec_ecef_inf)
    # Unit vector pointing towards the radiant.
    radiant_unit = np.vstack((np.cos(ra) * np.cos(dec),
                              np.sin(ra) * np.cos(dec),
                              np.sin(dec)))
    return -stateVec.velocity_inf * radiant_unit
def SimilarityCriterion(COE1, COE2, method='SH'):
    '''
    Orbit similarity (D) criterion between two orbits.

    Southworth & Hawkins similarity criterion (1963), method='SH'; or
    Drummond's similarity criterion (1981), method='D'; or
    Jopek's similarity criterion (1993), method='H'.

    COE1/COE2 are either 6-element arrays [a (m), e, i, w, W, ... (rad)] or
    OrbitObject-like instances with astropy-unit elements.
    Returns the dimensionless similarity value (smaller = more alike).
    Raises ValueError for an unknown method (previously fell through to an
    UnboundLocalError on 'Similarity').
    '''
    if type(COE1) == np.ndarray:
        a1 = COE1[0]/AU; a2 = COE2[0]/AU # [AU]
        e1 = COE1[1]; e2 = COE2[1] # []
        i1 = COE1[2]; i2 = COE2[2] # [rad]
        w1 = COE1[3]; w2 = COE2[3] # [rad]
        W1 = COE1[4]; W2 = COE2[4] # [rad]
    else:
        a1 = COE1.semi_major_axis.value; a2 = COE2.semi_major_axis.value # [AU]
        e1 = COE1.eccentricity; e2 = COE2.eccentricity # []
        i1 = COE1.inclination.to(u.rad).value; i2 = COE2.inclination.to(u.rad).value # [rad]
        w1 = COE1.argument_periapsis.to(u.rad).value; w2 = COE2.argument_periapsis.to(u.rad).value # [rad]
        W1 = COE1.longitude_ascending_node.to(u.rad).value; W2 = COE2.longitude_ascending_node.to(u.rad).value # [rad]
    q1 = a1 * (1 - e1) # [AU]
    q2 = a2 * (1 - e2) # [AU]
    # Angle between the orbital planes (I21)
    var = (2 * np.sin((i2 - i1) / 2))**2 + np.sin(i1) * np.sin(i2) * (2 * np.sin((W2 - W1) / 2))**2
    I21 = 2 * np.arcsin(np.sqrt(var) / 2)
    if method == 'SH':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + (q2 - q1)**2 + var + (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
        Similarity = np.sqrt(Similarity2)
    elif method == 'D':
        # Angle between the orbital lines of apsides (theta21)
        l1 = W1 + np.arctan(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))
        l2 = W2 + np.arctan(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))
        theta21 = np.arccos(np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l2 - l1))
        Similarity2 = ((e2 - e1) / (e2 + e1))**2 + ((q2 - q1) / (q2 + q1))**2 + \
            (I21 / np.pi)**2 + ((e2 + e1) / 2)**2 * (theta21 / np.pi)**2
        Similarity = np.sqrt(Similarity2)
    elif method == 'H':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + ((q2 - q1) / (q2 + q1))**2 + var + \
            (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
        Similarity = np.sqrt(Similarity2)
    else:
        raise ValueError("SimilarityCriterion: unknown method '{0}'".format(method))
    return Similarity
def generate_ephemeris(pos_hci, t_jd):
    '''
    Build an ephemeris dictionary for a body at heliocentric-inertial
    positions pos_hci (3xN, metres) at epochs t_jd (JD, UTC).

    Returns a dict with ISO datetimes, MJDs, Sun/Earth distances [km],
    ECI positions [m], phase and elongation angles [deg], and geocentric
    ra/dec [deg].
    '''
    # Save the datetime
    ephem_dict = {'datetime': Time(t_jd, format='jd', scale='utc').isot}
    ephem_dict['MJD'] = Time(t_jd, format='jd', scale='utc').mjd
    # distance to sun
    ephem_dict['distance_to_sun'] = norm(pos_hci, axis=0) / 1000 #km
    # Convert to eci coordinates
    pos_eci = HCI2ECI_pos(pos_hci, t_jd)
    ephem_dict['pos_eci_x'] = pos_eci[0]
    ephem_dict['pos_eci_y'] = pos_eci[1]
    ephem_dict['pos_eci_z'] = pos_eci[2]
    pos_hcrs = HCI2HCRS(pos_hci)
    # Calculate phase angle: angle between the heliocentric and geocentric
    # position vectors of the body.
    ephem_dict['phase_angle'] = np.rad2deg(np.arccos(np.sum(pos_hcrs * pos_eci, axis=0)
                                                     / (norm(pos_hcrs, axis=0) * norm(pos_eci, axis=0))))
    # Calculate elongation angle: angle between the Sun direction and the
    # body direction.
    pos_sun = pos_eci - pos_hcrs
    ephem_dict['elongation_angle'] = np.rad2deg(np.arccos(np.sum(pos_sun * pos_eci, axis=0)
                                                          / (norm(pos_sun, axis=0) * norm(pos_eci, axis=0))))
    # Calculate ephemeris (geocentric right ascension / declination)
    dist = norm(pos_eci, axis=0) #m
    ephem_dict['ra'] = np.rad2deg(np.arctan2(pos_eci[1], pos_eci[0]))%360 #deg
    ephem_dict['dec'] = np.rad2deg(np.arcsin(pos_eci[2] / dist)) #deg
    ephem_dict['distance_to_earth'] = norm(pos_eci, axis=0) / 1000 #km
    return ephem_dict
| true | true |
f72a9c84ade667bf4db98b90f5ec6bc9cc38d9af | 5,205 | py | Python | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | 1 | 2020-09-06T16:03:14.000Z | 2020-09-06T16:03:14.000Z | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | '''
Test the hosts module
'''
# Import python libs
import os
import shutil
# Import Salt libs
import integration
# Temporary hosts file used by these tests; created per-test and removed
# in tearDown.
HFN = os.path.join(integration.TMP, 'hosts')
class HostsModuleTest(integration.ModuleCase):
    '''
    Test the hosts module
    '''
    def __clean_hosts(self):
        '''
        Clean out the hosts file (install a known-good copy)
        '''
        shutil.copyfile(os.path.join(integration.FILES, 'hosts'), HFN)
    def __clear_hosts(self):
        '''
        Delete the tmp hosts file
        '''
        if os.path.isfile(HFN):
            os.remove(HFN)
    def tearDown(self):
        '''
        Make sure the tmp hosts file is gone
        '''
        self.__clear_hosts()
    def test_list_hosts(self):
        '''
        hosts.list_hosts
        '''
        self.__clean_hosts()
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(len(hosts), 6)
        self.assertEqual(hosts['::1'], ['ip6-localhost', 'ip6-loopback'])
        self.assertEqual(hosts['127.0.0.1'], ['localhost', 'myname'])
    def test_list_hosts_nofile(self):
        '''
        hosts.list_hosts
        without a hosts file
        '''
        if os.path.isfile(HFN):
            os.remove(HFN)
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(hosts, {})
    def test_get_ip(self):
        '''
        hosts.get_ip
        '''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['myname']), '127.0.0.1')
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
    def test_get_alias(self):
        '''
        hosts.get_alias
        '''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), ['localhost', 'myname'])
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.2']), [])
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), [])
    def test_has_pair(self):
        '''
        hosts.has_pair
        '''
        self.__clean_hosts()
        self.assertTrue(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'othername']))
    def test_set_host(self):
        '''
        hosts.set_host
        '''
        self.__clean_hosts()
        # unittest assertions used consistently instead of bare 'assert'
        # (bare asserts are stripped under python -O).
        self.assertTrue(self.run_function('hosts.set_host', ['192.168.1.123', 'newip']))
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        self.assertTrue(self.run_function('hosts.set_host', ['127.0.0.1', 'localhost']))
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']), 'should remove second entry')
    def test_add_host(self):
        '''
        hosts.add_host
        '''
        self.__clean_hosts()
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.123', 'newip']))
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        self.assertTrue(self.run_function('hosts.add_host', ['127.0.0.1', 'othernameip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
    def test_rm_host(self):
        '''
        hosts.rm_host
        '''
        self.__clean_hosts()
        self.assertTrue(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
        self.assertTrue(self.run_function('hosts.rm_host', ['127.0.0.1', 'myname']))
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
        self.assertTrue(self.run_function('hosts.rm_host', ['127.0.0.1', 'unknown']))
    def test_add_host_formatting(self):
        '''
        Ensure that hosts.add_host isn't adding duplicates and that
        it's formatting the output correctly
        '''
        # instead of using the "clean" hosts file we're going to
        # use an empty one so we can prove the syntax of the entries
        # being added by the hosts module
        self.__clear_hosts()
        with open(HFN, 'w'):
            pass  # create an empty hosts file
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.1', 'host1.fqdn.com']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.1', 'host1']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.2', 'host2.fqdn.com']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.2', 'host2']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.2', 'oldhost2']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.3', 'host3.fqdn.com']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.2', 'host2-reorder']))
        self.assertTrue(self.run_function('hosts.add_host', ['192.168.1.1', 'host1-reorder']))
        # now read the lines and ensure they're formatted correctly
        # (file handle leak fixed: the original 'open(...).readlines()'
        # never closed the file)
        with open(HFN, 'r') as fp:
            lines = fp.readlines()
        self.assertEqual(lines, [
            "192.168.1.3\t\thost3.fqdn.com\n",
            "192.168.1.2\t\thost2.fqdn.com\thost2\toldhost2\thost2-reorder\n",
            "192.168.1.1\t\thost1.fqdn.com\thost1\thost1-reorder\n",
        ])
| 36.914894 | 116 | 0.599039 |
import os
import shutil
import integration
HFN = os.path.join(integration.TMP, 'hosts')
class HostsModuleTest(integration.ModuleCase):
    '''
    Integration tests for the hosts execution module, run against a
    throw-away hosts file (HFN) that is re-created or removed per test.
    '''
    def __clean_hosts(self):
        # Install the known-good fixture hosts file (6 entries).
        shutil.copyfile(os.path.join(integration.FILES, 'hosts'), HFN)
    def __clear_hosts(self):
        # Remove the temporary hosts file if it exists.
        if os.path.isfile(HFN):
            os.remove(HFN)
    def tearDown(self):
        self.__clear_hosts()
    def test_list_hosts(self):
        '''hosts.list_hosts returns all fixture entries keyed by IP.'''
        self.__clean_hosts()
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(len(hosts), 6)
        self.assertEqual(hosts['::1'], ['ip6-localhost', 'ip6-loopback'])
        self.assertEqual(hosts['127.0.0.1'], ['localhost', 'myname'])
    def test_list_hosts_nofile(self):
        '''A missing hosts file yields an empty mapping, not an error.'''
        if os.path.isfile(HFN):
            os.remove(HFN)
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(hosts, {})
    def test_get_ip(self):
        '''hosts.get_ip resolves an alias; unknown/missing file -> ""'''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['myname']), '127.0.0.1')
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
    def test_get_alias(self):
        '''hosts.get_alias lists aliases; unknown/missing file -> []'''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), ['localhost', 'myname'])
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.2']), [])
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), [])
    def test_has_pair(self):
        '''hosts.has_pair is true only for an existing ip/alias pair.'''
        self.__clean_hosts()
        self.assertTrue(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'othername']))
    def test_set_host(self):
        '''hosts.set_host replaces all aliases of an IP with one value.'''
        self.__clean_hosts()
        assert self.run_function('hosts.set_host', ['192.168.1.123', 'newip'])
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        assert self.run_function('hosts.set_host', ['127.0.0.1', 'localhost'])
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']), 'should remove second entry')
    def test_add_host(self):
        '''hosts.add_host creates or extends an entry without duplicates.'''
        self.__clean_hosts()
        assert self.run_function('hosts.add_host', ['192.168.1.123', 'newip'])
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        assert self.run_function('hosts.add_host', ['127.0.0.1', 'othernameip'])
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
    def test_rm_host(self):
        '''hosts.rm_host removes a pair; removing an unknown pair succeeds.'''
        self.__clean_hosts()
        assert self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
        assert self.run_function('hosts.rm_host', ['127.0.0.1', 'myname'])
        assert not self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
        assert self.run_function('hosts.rm_host', ['127.0.0.1', 'unknown'])
    def test_add_host_formatting(self):
        '''Entries are serialized tab-separated, newest IP first.'''
        # use an empty one so we can prove the syntax of the entries
        # being added by the hosts module
        self.__clear_hosts()
        # Fix: create the empty file with a context manager so the handle
        # is always released (was open()/close()).
        with open(HFN, 'w'):
            pass
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'oldhost2'])
        assert self.run_function('hosts.add_host', ['192.168.1.3', 'host3.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2-reorder'])
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1-reorder'])
        # now read the lines and ensure they're formatted correctly
        # Fix: the original `open(HFN, 'r').readlines()` leaked the handle.
        with open(HFN, 'r') as fp:
            lines = fp.readlines()
        self.assertEqual(lines, [
            "192.168.1.3\t\thost3.fqdn.com\n",
            "192.168.1.2\t\thost2.fqdn.com\thost2\toldhost2\thost2-reorder\n",
            "192.168.1.1\t\thost1.fqdn.com\thost1\thost1-reorder\n",
        ])
| true | true |
f72a9c92bcaf6e4caffd7e1804851dab456f389c | 5,440 | py | Python | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | 6 | 2019-08-18T17:04:36.000Z | 2022-03-26T08:31:22.000Z | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | null | null | null | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | null | null | null | import numpy as np
import ray
ray.shutdown()
ray.init()
# A : Action Space
# S : State Space
@ray.remote
class VI_worker(object):
    """Ray actor that performs Bellman backups for one shard of the state space.

    Each worker owns ``backup_states`` and, given the current value dict,
    returns updated values for its shard plus the largest Bellman residual
    it observed.  Action noise follows a "slip" model: the chosen action is
    executed with probability ``true_action_prob``; the remaining mass is
    spread uniformly over all actions.
    """
    def __init__(self, list_of_actions, tran_dict, reward_dict, beta, backup_states, true_action_prob=0.8,
                 unknown_value=0):
        # tran_dict[s][a][ns] -> transition probability; reward_dict[s][a] -> reward.
        self.backup_states = backup_states
        self.list_of_actions = list_of_actions
        self.tran_dict = tran_dict
        self.reward_dict = reward_dict
        self.beta = beta
        self.unknown_value = unknown_value  # Default Value for any states that do not have transitions defined.
        self.true_action_prob = true_action_prob
        # Uniform per-action share of the residual ("slip") probability mass.
        self.slip_prob = 1 - self.true_action_prob
        self.slip_action_prob = self.slip_prob / len(self.list_of_actions)
    def compute(self, V_t, backup_states=None):
        """
        Run one Bellman backup over this worker's shard.

        :param V_t: Value Vector at t (dict state -> value)
        :param backup_states: optional override of the shard to back up
        :return: (updated values for the shard, max residual |V_t+1 - V_t|)
        """
        backup_states = backup_states or self.backup_states
        V_tplus1 = {s: 0 for s in backup_states}
        max_vals = {s: float("-inf") for s in backup_states}
        max_error = 0
        for s in backup_states:
            for a in self.tran_dict[s]:
                expected_ns_val = 0
                for ns in self.tran_dict[s][a]:
                    try:
                        expected_ns_val += self.tran_dict[s][a][ns] * V_t[ns]
                    except:
                        # Successor missing from V_t: fall back to the default value.
                        expected_ns_val += self.tran_dict[s][a][ns] * self.unknown_value
                expect_s_val = self.reward_dict[s][a] + self.beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                # Slip component: every action's Q-value contributes uniformly.
                V_tplus1[s] += self.slip_action_prob * expect_s_val
            # Greedy component: remaining probability mass goes to the best action.
            V_tplus1[s] += (self.true_action_prob - self.slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        return V_tplus1, max_error
for state_chunk in state_chunks]
# Do VI computation
error = float('inf')
while error > epsilon:
object_list = [workers_list[i].compute.remote(V_t) for i in range(workers_num)]
error_list = []
for i in range(workers_num):
finish_id = ray.wait(object_list, num_returns=1, timeout=None)[0][0]
object_list.remove(finish_id)
V_tplus1, error = ray.get(finish_id)
V_t.update(V_tplus1)
error_list.append(error)
if (verbose):
print("Error:", error)
error = max(error_list)
pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
return V_t, pi
def simple_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                           beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Single-process value iteration with a uniform action-slip model.

    :param S: iterable of states
    :param A: list of actions (its length defines the slip distribution)
    :param reward_dict: reward_dict[s][a] -> immediate reward
    :param tran_dict: tran_dict[s][a][ns] -> transition probability
    :param seed_value: optional initial value dict (defaults to all zeros)
    :param unknown_value: value assumed for successors missing from V
    :param true_action_prob: probability the chosen action executes; the
        rest of the mass is spread uniformly over all actions
    :param beta: discount factor
    :param epsilon: convergence threshold on the max Bellman residual
    :param workers_num: unused; kept for signature parity with
        distributed_value_iteration
    :param verbose: print the residual after each sweep
    :return: (value dict, greedy policy dict)
    """
    slip_prob = 1 - true_action_prob
    slip_action_prob = slip_prob / len(A)
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    error = float("inf")
    while error > epsilon:
        V_tplus1 = {s: 0 for s in S}
        max_vals = {s: float("-inf") for s in S}
        max_error = 0
        for s in S:
            for a in tran_dict[s]:
                expected_ns_val = 0
                for ns in tran_dict[s][a]:
                    # Successors absent from V_t fall back to unknown_value.
                    # (Fix: replaces a bare try/except that would also have
                    # silently swallowed unrelated errors.)
                    expected_ns_val += tran_dict[s][a][ns] * V_t.get(ns, unknown_value)
                expect_s_val = reward_dict[s][a] + beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                # Slip component: all actions contribute uniformly.
                V_tplus1[s] += slip_action_prob * expect_s_val
            # Greedy component gets the remaining probability mass.
            V_tplus1[s] += (true_action_prob - slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        V_t.update(V_tplus1)
        error = max_error
        if (verbose):
            print("Error:", error)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def get_pi_from_value(V, list_of_actions, tran_dict, reward_dict, beta):
    """Extract the greedy policy implied by value function ``V``.

    For every state, pick the action maximizing
    ``reward + beta * E[V(next state)]``; successor states missing from
    ``V`` contribute 0.  ``list_of_actions`` is unused and kept only for
    call-site compatibility.

    :return: dict mapping state -> best action
    """
    v_max = {s: float('-inf') for s in V}
    pi = {}
    for s in V:
        for a in tran_dict[s]:
            expected_val = 0
            for ns in tran_dict[s][a]:
                # Missing successors contribute nothing.  (Fix: replaces a
                # bare except whose handler multiplied by zero anyway.)
                expected_val += tran_dict[s][a][ns] * V.get(ns, 0.0)
            expect_s_val = reward_dict[s][a] + beta * expected_val
            if expect_s_val > v_max[s]:
                v_max[s] = expect_s_val
                pi[s] = a
    return pi
| 34.871795 | 117 | 0.564154 | import numpy as np
import ray
ray.shutdown()
ray.init()
@ray.remote
class VI_worker(object):
    """Ray actor performing Bellman backups for one shard of the state space
    under a uniform action-slip noise model."""
    def __init__(self, list_of_actions, tran_dict, reward_dict, beta, backup_states, true_action_prob=0.8,
                 unknown_value=0):
        # tran_dict[s][a][ns] -> transition probability; reward_dict[s][a] -> reward.
        self.backup_states = backup_states
        self.list_of_actions = list_of_actions
        self.tran_dict = tran_dict
        self.reward_dict = reward_dict
        self.beta = beta
        # Fallback value for successor states missing from the value dict.
        self.unknown_value = unknown_value
        self.true_action_prob = true_action_prob
        # Uniform per-action share of the residual ("slip") probability mass.
        self.slip_prob = 1 - self.true_action_prob
        self.slip_action_prob = self.slip_prob / len(self.list_of_actions)
    def compute(self, V_t, backup_states=None):
        """One Bellman backup over the shard; returns (new values, max residual)."""
        backup_states = backup_states or self.backup_states
        V_tplus1 = {s: 0 for s in backup_states}
        max_vals = {s: float("-inf") for s in backup_states}
        max_error = 0
        for s in backup_states:
            for a in self.tran_dict[s]:
                expected_ns_val = 0
                for ns in self.tran_dict[s][a]:
                    try:
                        expected_ns_val += self.tran_dict[s][a][ns] * V_t[ns]
                    except:
                        # Successor missing from V_t: use the fallback value.
                        expected_ns_val += self.tran_dict[s][a][ns] * self.unknown_value
                expect_s_val = self.reward_dict[s][a] + self.beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                # Slip component: every action's Q-value contributes uniformly.
                V_tplus1[s] += self.slip_action_prob * expect_s_val
            # Greedy component takes the remaining probability mass.
            V_tplus1[s] += (self.true_action_prob - self.slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        return V_tplus1, max_error
def distributed_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                                beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Value iteration parallelized over ``workers_num`` ray VI_worker actors.

    The state space is split into even shards; each round every worker
    backs up its shard against the current global value dict and results
    are merged as they finish.  Stops when the worst per-shard residual
    of a round is <= ``epsilon``.  Returns (value dict, greedy policy).
    """
    state_chunks = [a.tolist() for a in np.array_split(np.array(S), workers_num)]
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    workers_list = [VI_worker.remote(list_of_actions=A,
                                     tran_dict=tran_dict,
                                     reward_dict=reward_dict,
                                     beta=beta,
                                     backup_states=state_chunk,
                                     unknown_value=unknown_value,
                                     true_action_prob=true_action_prob)
                    for state_chunk in state_chunks]
    error = float('inf')
    while error > epsilon:
        object_list = [workers_list[i].compute.remote(V_t) for i in range(workers_num)]
        error_list = []
        for i in range(workers_num):
            # Merge shard results in completion order, not submission order.
            finish_id = ray.wait(object_list, num_returns=1, timeout=None)[0][0]
            object_list.remove(finish_id)
            V_tplus1, error = ray.get(finish_id)
            V_t.update(V_tplus1)
            error_list.append(error)
            if (verbose):
                print("Error:", error)
        # Convergence is judged on the worst residual across all shards.
        error = max(error_list)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def simple_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                           beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Plain single-process value iteration with a uniform action-slip model.

    Sweeps the whole state space until the largest Bellman residual drops
    to ``epsilon`` or below, then derives the greedy policy.  Returns
    (value dict, policy dict).  ``workers_num`` is unused and kept for
    signature parity with distributed_value_iteration.
    """
    residual_prob = 1 - true_action_prob
    per_action_slip = residual_prob / len(A)
    values = {s: 0 for s in S} if seed_value is None else seed_value
    residual = float("inf")
    while residual > epsilon:
        next_values = {s: 0 for s in S}
        best_action_vals = {s: float("-inf") for s in S}
        worst_gap = 0
        for state in S:
            for action in tran_dict[state]:
                future = 0
                for succ in tran_dict[state][action]:
                    try:
                        future += tran_dict[state][action][succ] * values[succ]
                    except:
                        # Successor missing from the value dict: use fallback.
                        future += tran_dict[state][action][succ] * unknown_value
                q_val = reward_dict[state][action] + beta * future
                best_action_vals[state] = max(best_action_vals[state], q_val)
                # Slip component: every action contributes uniformly.
                next_values[state] += per_action_slip * q_val
            # Greedy component takes the remaining probability mass.
            next_values[state] += (true_action_prob - per_action_slip) * best_action_vals[state]
            worst_gap = max(worst_gap, abs(next_values[state] - values[state]))
        values.update(next_values)
        residual = worst_gap
        if verbose:
            print("Error:", residual)
    pi = get_pi_from_value(values, A, tran_dict, reward_dict, beta)
    return values, pi
def get_pi_from_value(V, list_of_actions, tran_dict, reward_dict, beta):
    """Return the greedy policy (state -> best action) implied by values V.

    ``list_of_actions`` is unused; kept for call-site compatibility.
    """
    best_q = {state: float('-inf') for state in V}
    policy = {}
    for state in V:
        for action in tran_dict[state]:
            future = 0
            for succ in tran_dict[state][action]:
                try:
                    future += tran_dict[state][action][succ] * V[succ]
                except:
                    # Successor missing from V: contributes nothing.
                    future += tran_dict[state][action][succ] * 0
            q_val = reward_dict[state][action] + beta * future
            if q_val > best_q[state]:
                best_q[state] = q_val
                policy[state] = action
    return policy
| true | true |
f72a9cb5225fd598744b0a2b231293e1f98ddf01 | 78 | py | Python | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | from .average_meter import AverageMeter
from .progress import TrainingProgress | 39 | 39 | 0.884615 | from .average_meter import AverageMeter
from .progress import TrainingProgress | true | true |
f72a9cc5d314217c9ae136b4edba4248c11cdb62 | 658 | py | Python | scripts/issue_terminal.py | gmatteo/awesome-panel | 7eb6965f4b3a7eca08c07561e631e5beb189ffd3 | [
"Apache-2.0"
] | 179 | 2019-12-04T14:54:53.000Z | 2022-03-30T09:08:38.000Z | scripts/issue_terminal.py | hbueno/awesome-panel | fb27bcaf265cef1278cfa0c78799fbbf6c9a6834 | [
"Apache-2.0"
] | 62 | 2019-12-14T16:51:28.000Z | 2022-03-19T18:47:12.000Z | scripts/issue_terminal.py | hbueno/awesome-panel | fb27bcaf265cef1278cfa0c78799fbbf6c9a6834 | [
"Apache-2.0"
] | 35 | 2019-12-08T13:19:53.000Z | 2022-03-25T10:33:02.000Z | import panel as pn
# Load terminal.js from unpkg so the inline <script> in HTML below can use it.
SCRIPT = """
<script src="https://www.unpkg.com/terminal@0.1.4/lib/terminal.js" type="text/javascript"></script>
"""
# Zero-sized pane: its only purpose is injecting the script tag into the page.
script_panel = pn.pane.HTML(SCRIPT, width=0, height=0, margin=0, sizing_mode="fixed")
# Markup that builds a terminal.js Terminal in the placeholder div, prints a
# greeting, and prompts for a name.
HTML = """
<div id="terminal-1"></div>
<script>
var t1 = new Terminal()
t1.setHeight("100%")
t1.setWidth('100%')
el = document.getElementById("terminal-1")
el.appendChild(t1.html)
t1.print('Hello, world!')
t1.input('Whats your name?', function (input) {
t1.print('Welcome, ' + input)
})
</script>
"""
# Fixed-size pane hosting the terminal markup.
terminal = pn.pane.HTML(HTML, height=200, width=200)
# Entry point for `panel serve`.
pn.Column(terminal).servable()
| 22.689655 | 100 | 0.635258 | import panel as pn
# Load terminal.js from unpkg so the inline <script> in HTML below can use it.
SCRIPT = """
<script src="https://www.unpkg.com/terminal@0.1.4/lib/terminal.js" type="text/javascript"></script>
"""
# Zero-sized pane: its only purpose is injecting the script tag into the page.
script_panel = pn.pane.HTML(SCRIPT, width=0, height=0, margin=0, sizing_mode="fixed")
# Markup that builds a terminal.js Terminal in the placeholder div, prints a
# greeting, and prompts for a name.
HTML = """
<div id="terminal-1"></div>
<script>
var t1 = new Terminal()
t1.setHeight("100%")
t1.setWidth('100%')
el = document.getElementById("terminal-1")
el.appendChild(t1.html)
t1.print('Hello, world!')
t1.input('Whats your name?', function (input) {
t1.print('Welcome, ' + input)
})
</script>
"""
# Fixed-size pane hosting the terminal markup.
terminal = pn.pane.HTML(HTML, height=200, width=200)
# Entry point for `panel serve`.
pn.Column(terminal).servable()
| true | true |
f72a9e641315711aa7910aa3b6ee493d2ef27967 | 822 | py | Python | server/article_topic/urls.py | cuongw/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 1 | 2020-10-21T18:16:27.000Z | 2020-10-21T18:16:27.000Z | server/article_topic/urls.py | 103cuong/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 2 | 2020-01-05T08:00:24.000Z | 2020-01-05T08:00:25.000Z | server/article_topic/urls.py | cuongw/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 1 | 2020-08-18T09:09:42.000Z | 2020-08-18T09:09:42.000Z | """article_topic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from topic_detection import views
# URL routes: the Django admin plus the article topic-detection endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('article/', views.index)
]
| 35.73913 | 77 | 0.715328 | from django.contrib import admin
from django.urls import path
from topic_detection import views
# URL routes: the Django admin plus the article topic-detection endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('article/', views.index)
]
| true | true |
f72a9ee3f1f72fad4a331be0f301eba5e57a8290 | 17,450 | py | Python | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | from render_util import *
from render_single import *
import numpy
import skimage
import skimage.io
def mb(p, time):
    """Mandelbulb iteration (power-20, animated via ``time``) at point p.

    Returns [de, t0] where de = 0.5*log(r)*r/dr is the classic Mandelbulb
    distance-estimate expression and t0 is an orbit-trap-style minimum
    radius.  Built from the tracing DSL's select() so both branches stay
    differentiable/traceable; the escape test replaces Python control flow.
    """
    z = [p[0], p[1], p[2]]
    dr = 1.0
    t0 = 1.0
    cond = True
    power = 20.0
    for i in range(4):
        r = sqrt(z[0] ** 2.0 + z[1] ** 2.0 + z[2] ** 2.0)
        #cond *= r <= 2.0
        #cond = select(r <= 2.0, cond, False)
        # Keep iterating only while the orbit stays inside radius 2.
        cond = r <= 2.0
        theta = atan(z[1] / z[0]) * power
        phi = (asin(z[2] / r) + time * 0.1) * power
        #dr = select(cond, (r ** (power - 1.0)) * dr * power + 1.0, dr)
        #r = select(cond, r ** power, r)
        # Exponent 1.0 leaves r unchanged when cond is false.
        this_power = select(cond, power, 1.0)
        new_dr = (r ** (this_power - 1.0)) * dr * power + 1.0
        dr = select(cond, new_dr, dr)
        r = select(cond, r ** this_power, r)
        cos_phi = cos(phi)
        z[0] = select(cond, r * cos(theta) * cos_phi + p[0], z[0])
        z[1] = select(cond, r * sin(theta) * cos_phi + p[1], z[1])
        z[2] = select(cond, r * sin(phi) + p[2], z[2])
        t0 = select(cond, min_nosmooth(t0, r), t0)
    return [0.5 * log(r) * r / dr, t0]
def f(p, time):
    """Scene distance field: the Mandelbulb, spun about the y axis over time."""
    return mb(rotation_y(p, time * 0.2), time)
def intersect(ro, rd, time, orig_t):
    """Sphere-trace the scene along ray ro + t*rd, starting at t = orig_t.

    Fixed 48-step march (unrolled by the DSL's loop_generator) with an
    over-stepping scheme: an accepted step advances by d + os where
    os = 0.4*d*d/pd; when the overstep overshoots (d <= os) the march
    backtracks by os.  Returns [res_t, res_c1]: res_t is the t of the
    best (lowest relative error) hit, forced to -1.0 on a miss, and
    res_c1 is the orbit-trap value from f() at that hit.
    """
    t = orig_t
    res_t = ConstExpr(0.0)
    res_c1 = ConstExpr(0.0)
    max_error = ConstExpr(1000.0)
    d = ConstExpr(1.0)
    pd = ConstExpr(100.0)
    os = ConstExpr(0.0)
    step = ConstExpr(0.0)
    error = ConstExpr(1000.0)
    cond1 = True
    c = [ConstExpr(0.0), ConstExpr(0.0)]
    for i in loop_generator(48, is_raymarching=True):
        # Tag DSL-traced values with the current unrolled iteration index.
        compiler.DEFAULT_FOR_LOOP_ITER = i
        #cond1 *= (error >= 0.0) * (t <= 20.0)
        # Only keep marching inside the far clip (t <= 20).
        cond1 = (error >= 0.0) * (t <= 20.0)
        c = f(ro + rd * t, time)
        d = select(cond1, c[0], d)
        # Overstep acceptance test: did the new distance clear the overshoot?
        cond2 = d > os
        os = select(cond2, 0.4 * d * d / pd, 0.0)
        step = select(cond2, d + os, -os)
        pd = select(cond2, d, 100.0)
        d = select(cond2, d, 1.0)
        # Relative error of the current estimate; remember the best hit seen.
        error = select(cond1, d / t, error)
        cond3 = cond1 * (error < max_error)
        max_error = select(cond3, error, max_error)
        res_t = select(cond3, t, res_t)
        res_c1 = select(cond3, c[1], res_c1)
        t = select(cond1, t + step, t)
    #compiler.DEFAULT_FOR_LOOP_NAME = None
    #compiler.DEFAULT_FOR_LOOP_ITER = None
    # A ray that marched farther than the camera's distance to the origin
    # is considered a miss.
    ro_len = sqrt(ro[0] ** 2 + ro[1] ** 2 + ro[2] ** 2)
    res_t = select(t > ro_len, -1.0, res_t)
    #res_t = select(t > 2.0, -1.0, res_t)
    #res_t = Var('res_t', select(t <= 1.0, -10.0, res_t))
    return [res_t, res_c1]
def mandelbulb_slim(ray_dir_p, ray_origin, time):
    """Shade one ray of the animated Mandelbulb.

    Raymarches via intersect(), estimates the surface normal with finite
    differences (normal_functor), then applies sun / sky / back lighting,
    an orbit-trap driven sinusoidal palette, a ~0.45 gamma, smoothstep
    contrast and a saturation boost.  Also tags selected intermediates
    with log_intermediates ranks so the tracing compiler records them.
    """
    sundir = numpy.array([0.1, 0.8, 0.6])
    sundir /= numpy.linalg.norm(sundir)
    sun = numpy.array([1.64, 1.27, 0.99])
    skycolor = numpy.array([0.6, 1.5, 1.0])
    ray_origin = numpy.array(ray_origin)
    ray_dir_p = numpy.array(ray_dir_p)
    # Start the march a third of the way from the origin to the camera.
    orig_t = (ray_origin[0] ** 2.0 + ray_origin[1] ** 2.0 + ray_origin[2] ** 2.0) ** 0.5 / 3.0
    res = intersect(ray_origin, ray_dir_p, time, orig_t)
    t_ray = Var(log_prefix + 't_ray', res[0])
    t_ray.log_intermediates_rank = 2
    # t <= 0 encodes a miss; shade those pixels black below.
    cond = t_ray > 0.0
    p = ray_origin + res[0] * ray_dir_p
    n = normal_functor(lambda x: f(x, time)[0], 0.001, 3)(p)
    # change log_intermediates_rank for input arguments
    old_log_intermediates_rank = compiler.log_intermediates_rank
    compiler.log_intermediates_rank = 1
    for list in [ray_dir_p, ray_origin, [time], [res[0]], n]:
        for item in list:
            item.log_intermediates_rank = compiler.log_intermediates_rank
    # Diffuse (sun), ambient (sky) and back-lighting terms.
    dif = max_nosmooth(0.0, n[0] * sundir[0] + n[1] * sundir[1] + n[2] * sundir[2])
    sky = 0.6 + 0.4 * max_nosmooth(0.0, n[1])
    bac = max_nosmooth(0.0, 0.3 + 0.7 * (-n[0] * sundir[0] - n[1] - n[2] * sundir[2]))
    lin_coef_a = 4.5 * dif + 0.8 * bac
    lin_coef_b = 0.6 * sky
    lin0 = sun[0] * lin_coef_a + skycolor[0] * lin_coef_b
    lin1 = sun[1] * lin_coef_a + skycolor[1] * lin_coef_b
    lin2 = sun[2] * lin_coef_a + skycolor[2] * lin_coef_b
    # Palette phase driven by the orbit-trap value.
    tc0_coef = 3.0 + 4.2 * (res[1] ** 0.55)
    col0 = lin0 * 0.9 * 0.2 * (0.5 + 0.5 * sin(tc0_coef))
    col1 = lin1 * 0.8 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 0.5))
    col2 = lin2 * 0.6 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 1.0))
    # Gamma-ish curve on hits; background stays black.
    col0 = select(cond, col0 ** 0.45, 0.0)
    col1 = select(cond, col1 ** 0.45, 0.0)
    col2 = select(cond, col2 ** 0.45, 0.0)
    col = numpy.array([col0, col1, col2])
    # Smoothstep-style contrast, then saturation boost.
    col = col * 0.6 + 0.4 * col * col * (3.0 - 2.0 * col)
    col = col * 1.5 - 0.5 * 0.33 * (col[0] + col[1] + col[2])
    #col = select(res[0] <= -2.0, numpy.array([1.0, 1.0, 1.0]), col)
    compiler.log_intermediates_rank = old_log_intermediates_rank
    for expr in col.tolist() + n.tolist() + [t_ray]:
        expr.log_intermediates_subset_rank = 1
    return output_color(col)
shaders = [mandelbulb_slim]  # shader entry points exported by this module
is_color = True  # shader emits RGB rather than a scalar field
# use a different rotation parameterization so can easily compute direction to world coord origin
fov = 'small_seperable'
# Look-at target for the camera sampler (world-space center of the bulb).
x_center = 0.0
y_center = 0.0
z_center = 0.0
# Per-axis magnitude of the random jitter applied to the look-at target.
offset = np.array([0.4, 0.4, 0.4])
def pos_solver(x0, x1, x2, target=(0.0, 0.0, 0.0), jitter=(0.4, 0.4, 0.4)):
    """
    Given (x0, x1, x2) as the camera position, solve camera angles
    (ang1, ang2, ang3) such that:
      * the image center points at ``target`` plus a uniform random
        offset drawn from [-jitter, jitter] per axis,
      * the horizontal image axis is perpendicular to the world y axis,
      * the vertical image axis points along the world +y direction.

    ``target``/``jitter`` default to the module-level camera setup (look
    at the origin with +/-0.4 jitter; the old version hard-coded those
    globals, and its docstring quoted stale constants).  ang1 (roll about
    the view axis) is free and sampled uniformly.
    """
    random_offset = (np.random.rand(3) * 2.0 - 1.0) * np.asarray(jitter)
    # Vector from the camera to the jittered look-at point.
    a = target[0] - x0 + random_offset[0]
    b = target[1] - x1 + random_offset[1]
    c = target[2] - x2 + random_offset[2]
    norm = (a ** 2 + b ** 2 + c ** 2) ** 0.5
    d = a / norm
    e = b / norm
    f = c / norm
    ang1 = np.random.rand() * 2 * np.pi
    de_norm = (d ** 2 + e ** 2) ** 0.5
    if de_norm > 0:
        # assume cos2 > 0
        ang3 = math.atan2(e / de_norm, d / de_norm)
        cos3 = np.cos(ang3)
        if cos3 != 0:
            ang2 = math.atan2(-f, d / cos3)
        else:
            # cos3 == 0: recover ang2 from the e component instead.
            sin3 = np.sin(ang3)
            ang2 = math.atan2(-f, e / sin3)
    else:
        # Looking straight up or down: ang3 is degenerate, pick it randomly.
        if f > 0:
            ang2 = - np.pi / 2
        else:
            ang2 = np.pi / 2
        ang3 = np.random.rand() * 2 * np.pi
    return ang1, ang2, ang3
def main():
    """CLI driver: dispatch on base_mode to collect program traces, render
    ground-truth datasets, sample camera poses, or build the temporal /
    defocus-blur dataset variants.

    Usage: python render_mandelbulb_slim.py base_mode base_dir
    """
    if len(sys.argv) < 3:
        print('Usage: python render_[shader].py base_mode base_dir')
        # Fix: a bare `raise` with no active exception only produces a
        # confusing RuntimeError; exit with a failure status instead.
        raise SystemExit(1)
    base_mode = sys.argv[1]
    base_dir = sys.argv[2]
    camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_with_bg')
    preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb')
    if not os.path.exists(camera_dir):
        os.makedirs(camera_dir, exist_ok=True)
    if not os.path.exists(preprocess_dir):
        os.makedirs(preprocess_dir, exist_ok=True)
    if base_mode == 'collect_raw':
        # Low-res (80x80) training tiles with full trace logging enabled.
        camera_pos = numpy.load(os.path.join(camera_dir, 'train.npy'))
        render_t = numpy.load(os.path.join(camera_dir, 'train_time.npy'))
        nframes = render_t.shape[0]
        train_start = numpy.load(os.path.join(camera_dir, 'train_start.npy'))
        render_single(os.path.join(preprocess_dir, 'train'), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = (80, 80), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': 'train_small', 'tile_only': True, 'tile_start': train_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
    elif base_mode == 'generate_dataset':
        # 1000-sample ground-truth renders per split; PNGs moved to camera_dir.
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
            nframes = camera_pos.shape[0]
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
                render_size = (320, 320)
                tile_only = True
                render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                # Test splits share one time pool, split 5 / 5 / 20.
                render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                if mode == 'test_close':
                    render_t = render_t_pool[:5]
                elif mode == 'test_far':
                    render_t = render_t_pool[5:10]
                else:
                    render_t = render_t_pool[10:]
            render_t = render_t[:nframes]
            outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
            if mode in ['train', 'validate']:
                target_dir = os.path.join(camera_dir, mode + '_img')
            else:
                target_dir = os.path.join(camera_dir, 'test_img')
            if not os.path.exists(target_dir):
                os.mkdir(target_dir)
            # Move the rendered ground-truth PNGs next to the camera data.
            for file in os.listdir(outdir):
                if file.startswith('%s_ground' % mode) and file.endswith('.png'):
                    os.rename(os.path.join(outdir, file),
                              os.path.join(target_dir, file))
    elif base_mode == 'sample_camera_pos':
        # Sample camera positions outside radius 1.8, per-split boxes below.
        test_render_t = None
        t_range = 31.5
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            x_min = -4
            x_max = 4
            y_min = -4
            y_max = 4
            z_min = -4
            z_max = 4
            if mode == 'train':
                nframes = 800
                x_max = 3.5
                y_max = 3.5
            elif mode == 'validate':
                nframes = 80
                x_max = 3.5
                y_max = 3.5
            elif mode == 'test_close':
                nframes = 5
                x_min = 3.5
            elif mode == 'test_far':
                nframes = 5
                y_min = 3.5
            elif mode == 'test_middle':
                nframes = 20
                x_max = 3.5
                y_max = 3.5
            camera_pos = numpy.empty([nframes, 6])
            for i in range(nframes):
                # Rejection-sample a position outside the bulb's bounding sphere.
                while True:
                    x = numpy.random.rand() * (x_max - x_min) + x_min
                    y = numpy.random.rand() * (y_max - y_min) + y_min
                    z = numpy.random.rand() * (z_max - z_min) + z_min
                    if (x ** 2 + y ** 2 + z ** 2) > 1.8 ** 2:
                        break
                ang1, ang2, ang3 = pos_solver(x, y, z)
                camera_pos[i] = np.array([x, y, z, ang1, ang2, ang3])
            numpy.save(os.path.join(preprocess_dir, '%s.npy' % mode), camera_pos)
            if mode in ['train', 'validate']:
                expand_boundary = 160
                render_t = np.random.rand(nframes) * t_range
                numpy.save(os.path.join(preprocess_dir, mode + '_time.npy'), render_t)
            else:
                expand_boundary = 0
                if test_render_t is None:
                    test_render_t = np.random.rand(30) * t_range
                    # Fix: this previously saved `render_t` (the last train
                    # split's times) instead of the freshly drawn test pool.
                    np.save(os.path.join(preprocess_dir, 'test_time.npy'), test_render_t)
                if mode == 'test_close':
                    render_t = test_render_t[:5]
                elif mode == 'test_far':
                    render_t = test_render_t[5:10]
                else:
                    render_t = test_render_t[10:]
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = (640, 960), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'expand_boundary': expand_boundary})
    elif base_mode == 'generate_temporal_dataset':
        # Temporal variant: render each pose at several sub-frame times.
        camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_temporal_with_bg')
        preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_temporal')
        if not os.path.exists(camera_dir):
            os.makedirs(camera_dir, exist_ok=True)
        if not os.path.exists(preprocess_dir):
            os.makedirs(preprocess_dir, exist_ok=True)
        for mode in ['train', 'test', 'validate']:
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))
                render_size = (320, 320)
                tile_only = True
                render_t_base = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
                camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
                t_schedule = np.arange(8)
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                render_t_base = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                camera_pos = np.concatenate((np.load(os.path.join(camera_dir, 'test_close.npy')),
                                             np.load(os.path.join(camera_dir, 'test_far.npy')),
                                             np.load(os.path.join(camera_dir, 'test_middle.npy'))), axis=0)
                t_schedule = [0, 1, 29]
            nframes = camera_pos.shape[0]
            outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
            for t_val in t_schedule:
                # Offset every frame's time by t_val/30 of a second.
                render_t = render_t_base + t_val / 30
                render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground_%d' % (mode, t_val), 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
                target_dir = os.path.join(camera_dir, '%s_img' % mode)
                if not os.path.exists(target_dir):
                    os.mkdir(target_dir)
                for file in os.listdir(outdir):
                    if file.startswith('%s_ground' % mode) and file.endswith('.png'):
                        os.rename(os.path.join(outdir, file),
                                  os.path.join(target_dir, file))
    elif base_mode == 'generate_blur_additional':
        # Extra pass logging t_ray and level-2 intermediates for the blur model.
        preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_blur')
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
            nframes = camera_pos.shape[0]
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
                render_size = (320, 320)
                tile_only = True
                render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                if mode == 'test_close':
                    render_t = render_t_pool[:5]
                elif mode == 'test_far':
                    render_t = render_t_pool[5:10]
                else:
                    render_t = render_t_pool[10:]
            render_t = render_t[:nframes]
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'log_t_ray': True, 'log_intermediates_level': 2})
    return
if __name__ == '__main__':
main()
| 40.962441 | 541 | 0.544413 | from render_util import *
from render_single import *
import numpy
import skimage
import skimage.io
def mb(p, time):
    """Mandelbulb iteration (power-20, animated via ``time``) at point p.

    Returns [de, t0]: de = 0.5*log(r)*r/dr is the Mandelbulb
    distance-estimate expression; t0 is an orbit-trap-style minimum
    radius.  Uses the tracing DSL's select() instead of Python branches.
    """
    z = [p[0], p[1], p[2]]
    dr = 1.0
    t0 = 1.0
    cond = True
    power = 20.0
    for i in range(4):
        r = sqrt(z[0] ** 2.0 + z[1] ** 2.0 + z[2] ** 2.0)
        # Keep iterating only while the orbit stays inside radius 2.
        cond = r <= 2.0
        theta = atan(z[1] / z[0]) * power
        phi = (asin(z[2] / r) + time * 0.1) * power
        # Exponent 1.0 leaves r unchanged when cond is false.
        this_power = select(cond, power, 1.0)
        new_dr = (r ** (this_power - 1.0)) * dr * power + 1.0
        dr = select(cond, new_dr, dr)
        r = select(cond, r ** this_power, r)
        cos_phi = cos(phi)
        z[0] = select(cond, r * cos(theta) * cos_phi + p[0], z[0])
        z[1] = select(cond, r * sin(theta) * cos_phi + p[1], z[1])
        z[2] = select(cond, r * sin(phi) + p[2], z[2])
        t0 = select(cond, min_nosmooth(t0, r), t0)
    return [0.5 * log(r) * r / dr, t0]
def f(p, time):
    """Scene distance field: the Mandelbulb, spun about the y axis over time."""
    return mb(rotation_y(p, time * 0.2), time)
def intersect(ro, rd, time, orig_t):
    """Sphere-trace the scene along ray ro + t*rd, starting at t = orig_t.

    Fixed 48-step march (unrolled by loop_generator) with an over-stepping
    scheme: accepted steps advance by d + os with os = 0.4*d*d/pd; when
    the overstep overshoots (d <= os) the march backtracks by os.
    Returns [res_t, res_c1]: res_t is the t of the best (lowest relative
    error) hit, forced to -1.0 on a miss; res_c1 is the orbit-trap value.
    """
    t = orig_t
    res_t = ConstExpr(0.0)
    res_c1 = ConstExpr(0.0)
    max_error = ConstExpr(1000.0)
    d = ConstExpr(1.0)
    pd = ConstExpr(100.0)
    os = ConstExpr(0.0)
    step = ConstExpr(0.0)
    error = ConstExpr(1000.0)
    cond1 = True
    c = [ConstExpr(0.0), ConstExpr(0.0)]
    for i in loop_generator(48, is_raymarching=True):
        # Tag DSL-traced values with the current unrolled iteration index.
        compiler.DEFAULT_FOR_LOOP_ITER = i
        # Only keep marching inside the far clip (t <= 20).
        cond1 = (error >= 0.0) * (t <= 20.0)
        c = f(ro + rd * t, time)
        d = select(cond1, c[0], d)
        # Overstep acceptance test: did the new distance clear the overshoot?
        cond2 = d > os
        os = select(cond2, 0.4 * d * d / pd, 0.0)
        step = select(cond2, d + os, -os)
        pd = select(cond2, d, 100.0)
        d = select(cond2, d, 1.0)
        # Relative error of the current estimate; remember the best hit seen.
        error = select(cond1, d / t, error)
        cond3 = cond1 * (error < max_error)
        max_error = select(cond3, error, max_error)
        res_t = select(cond3, t, res_t)
        res_c1 = select(cond3, c[1], res_c1)
        t = select(cond1, t + step, t)
    # A ray that marched farther than the camera's distance to the origin
    # is considered a miss.
    ro_len = sqrt(ro[0] ** 2 + ro[1] ** 2 + ro[2] ** 2)
    res_t = select(t > ro_len, -1.0, res_t)
    return [res_t, res_c1]
def mandelbulb_slim(ray_dir_p, ray_origin, time):
sundir = numpy.array([0.1, 0.8, 0.6])
sundir /= numpy.linalg.norm(sundir)
sun = numpy.array([1.64, 1.27, 0.99])
skycolor = numpy.array([0.6, 1.5, 1.0])
ray_origin = numpy.array(ray_origin)
ray_dir_p = numpy.array(ray_dir_p)
orig_t = (ray_origin[0] ** 2.0 + ray_origin[1] ** 2.0 + ray_origin[2] ** 2.0) ** 0.5 / 3.0
res = intersect(ray_origin, ray_dir_p, time, orig_t)
t_ray = Var(log_prefix + 't_ray', res[0])
t_ray.log_intermediates_rank = 2
cond = t_ray > 0.0
p = ray_origin + res[0] * ray_dir_p
n = normal_functor(lambda x: f(x, time)[0], 0.001, 3)(p)
old_log_intermediates_rank = compiler.log_intermediates_rank
compiler.log_intermediates_rank = 1
for list in [ray_dir_p, ray_origin, [time], [res[0]], n]:
for item in list:
item.log_intermediates_rank = compiler.log_intermediates_rank
dif = max_nosmooth(0.0, n[0] * sundir[0] + n[1] * sundir[1] + n[2] * sundir[2])
sky = 0.6 + 0.4 * max_nosmooth(0.0, n[1])
bac = max_nosmooth(0.0, 0.3 + 0.7 * (-n[0] * sundir[0] - n[1] - n[2] * sundir[2]))
lin_coef_a = 4.5 * dif + 0.8 * bac
lin_coef_b = 0.6 * sky
lin0 = sun[0] * lin_coef_a + skycolor[0] * lin_coef_b
lin1 = sun[1] * lin_coef_a + skycolor[1] * lin_coef_b
lin2 = sun[2] * lin_coef_a + skycolor[2] * lin_coef_b
tc0_coef = 3.0 + 4.2 * (res[1] ** 0.55)
col0 = lin0 * 0.9 * 0.2 * (0.5 + 0.5 * sin(tc0_coef))
col1 = lin1 * 0.8 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 0.5))
col2 = lin2 * 0.6 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 1.0))
col0 = select(cond, col0 ** 0.45, 0.0)
col1 = select(cond, col1 ** 0.45, 0.0)
col2 = select(cond, col2 ** 0.45, 0.0)
col = numpy.array([col0, col1, col2])
col = col * 0.6 + 0.4 * col * col * (3.0 - 2.0 * col)
col = col * 1.5 - 0.5 * 0.33 * (col[0] + col[1] + col[2])
compiler.log_intermediates_rank = old_log_intermediates_rank
for expr in col.tolist() + n.tolist() + [t_ray]:
expr.log_intermediates_subset_rank = 1
return output_color(col)
shaders = [mandelbulb_slim]
is_color = True
fov = 'small_seperable'
x_center = 0.0
y_center = 0.0
z_center = 0.0
offset = np.array([0.4, 0.4, 0.4])
def pos_solver(x0, x1, x2):
random_offset = (np.random.rand(3) * 2.0 - 1.0) * offset
a = x_center - x0 + random_offset[0]
b = y_center - x1 + random_offset[1]
c = z_center - x2 + random_offset[2]
norm = (a ** 2 + b ** 2 + c ** 2) ** 0.5
d = a / norm
e = b / norm
f = c / norm
ang1 = np.random.rand() * 2 * np.pi
de_norm = (d ** 2 + e ** 2) ** 0.5
if de_norm > 0:
ang3 = math.atan2(e / de_norm, d / de_norm)
cos3 = np.cos(ang3)
if cos3 != 0:
ang2 = math.atan2(-f, d / cos3)
else:
sin3 = np.sin(ang3)
ang2 = math.atan2(-f, e / sin3)
else:
if f > 0:
ang2 = - np.pi / 2
else:
ang2 = np.pi / 2
ang3 = np.random.rand() * 2 * np.pi
return ang1, ang2, ang3
def main():
if len(sys.argv) < 3:
print('Usage: python render_[shader].py base_mode base_dir')
raise
base_mode = sys.argv[1]
base_dir = sys.argv[2]
camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_with_bg')
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb')
if not os.path.exists(camera_dir):
os.makedirs(camera_dir, exist_ok=True)
if not os.path.exists(preprocess_dir):
os.makedirs(preprocess_dir, exist_ok=True)
if base_mode == 'collect_raw':
camera_pos = numpy.load(os.path.join(camera_dir, 'train.npy'))
render_t = numpy.load(os.path.join(camera_dir, 'train_time.npy'))
nframes = render_t.shape[0]
train_start = numpy.load(os.path.join(camera_dir, 'train_start.npy'))
render_single(os.path.join(preprocess_dir, 'train'), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = (80, 80), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': 'train_small', 'tile_only': True, 'tile_start': train_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
elif base_mode == 'generate_dataset':
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
nframes = camera_pos.shape[0]
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
render_size = (320, 320)
tile_only = True
render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
if mode == 'test_close':
render_t = render_t_pool[:5]
elif mode == 'test_far':
render_t = render_t_pool[5:10]
else:
render_t = render_t_pool[10:]
render_t = render_t[:nframes]
outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
if mode in ['train', 'validate']:
target_dir = os.path.join(camera_dir, mode + '_img')
else:
target_dir = os.path.join(camera_dir, 'test_img')
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for file in os.listdir(outdir):
if file.startswith('%s_ground' % mode) and file.endswith('.png'):
os.rename(os.path.join(outdir, file),
os.path.join(target_dir, file))
elif base_mode == 'sample_camera_pos':
test_render_t = None
t_range = 31.5
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
x_min = -4
x_max = 4
y_min = -4
y_max = 4
z_min = -4
z_max = 4
if mode == 'train':
nframes = 800
x_max = 3.5
y_max = 3.5
elif mode == 'validate':
nframes = 80
x_max = 3.5
y_max = 3.5
elif mode == 'test_close':
nframes = 5
x_min = 3.5
elif mode == 'test_far':
nframes = 5
y_min = 3.5
elif mode == 'test_middle':
nframes = 20
x_max = 3.5
y_max = 3.5
camera_pos = numpy.empty([nframes, 6])
for i in range(nframes):
while True:
x = numpy.random.rand() * (x_max - x_min) + x_min
y = numpy.random.rand() * (y_max - y_min) + y_min
z = numpy.random.rand() * (z_max - z_min) + z_min
if (x ** 2 + y ** 2 + z ** 2) > 1.8 ** 2:
break
ang1, ang2, ang3 = pos_solver(x, y, z)
camera_pos[i] = np.array([x, y, z, ang1, ang2, ang3])
numpy.save(os.path.join(preprocess_dir, '%s.npy' % mode), camera_pos)
if mode in ['train', 'validate']:
expand_boundary = 160
render_t = np.random.rand(nframes) * t_range
numpy.save(os.path.join(preprocess_dir, mode + '_time.npy'), render_t)
else:
expand_boundary = 0
if test_render_t is None:
test_render_t = np.random.rand(30) * t_range
np.save(os.path.join(preprocess_dir, 'test_time.npy'), render_t)
if mode == 'test_close':
render_t = test_render_t[:5]
elif mode == 'test_far':
render_t = test_render_t[5:10]
else:
render_t = test_render_t[10:]
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = (640, 960), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'expand_boundary': expand_boundary})
elif base_mode == 'generate_temporal_dataset':
camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_temporal_with_bg')
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_temporal')
if not os.path.exists(camera_dir):
os.makedirs(camera_dir, exist_ok=True)
if not os.path.exists(preprocess_dir):
os.makedirs(preprocess_dir, exist_ok=True)
for mode in ['train', 'test', 'validate']:
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))
render_size = (320, 320)
tile_only = True
render_t_base = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
t_schedule = np.arange(8)
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_base = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
camera_pos = np.concatenate((np.load(os.path.join(camera_dir, 'test_close.npy')),
np.load(os.path.join(camera_dir, 'test_far.npy')),
np.load(os.path.join(camera_dir, 'test_middle.npy'))), axis=0)
t_schedule = [0, 1, 29]
nframes = camera_pos.shape[0]
outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
for t_val in t_schedule:
render_t = render_t_base + t_val / 30
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground_%d' % (mode, t_val), 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
target_dir = os.path.join(camera_dir, '%s_img' % mode)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for file in os.listdir(outdir):
if file.startswith('%s_ground' % mode) and file.endswith('.png'):
os.rename(os.path.join(outdir, file),
os.path.join(target_dir, file))
elif base_mode == 'generate_blur_additional':
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_blur')
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
nframes = camera_pos.shape[0]
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
render_size = (320, 320)
tile_only = True
render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
if mode == 'test_close':
render_t = render_t_pool[:5]
elif mode == 'test_far':
render_t = render_t_pool[5:10]
else:
render_t = render_t_pool[10:]
render_t = render_t[:nframes]
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'log_t_ray': True, 'log_intermediates_level': 2})
return
if __name__ == '__main__':
main()
| true | true |
f72a9f3994030f9517b36005ab3842f621032778 | 4,599 | py | Python | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkalidns.request.v20150109.DescribeSubDomainRecordsRequest import DescribeSubDomainRecordsRequest
from aliyunsdkalidns.request.v20150109.DescribeDomainRecordsRequest import DescribeDomainRecordsRequest
import requests
from urllib.request import urlopen
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--access-key-id')
parser.add_argument('--access-key-secret')
parser.add_argument('--domain-name')
parser.add_argument('--host')
args = parser.parse_args()
print(args)
accessKeyId = args.access_key_id
accessSecret = args.access_key_secret
domain = args.domain_name
ipv4_flag = 1
name_ipv4 = args.host
ipv6_flag = 0 # 是否开启ipv6 ddns解析,1为开启,0为关闭
name_ipv6 = "ipv6.test" # 要进行ipv6 ddns解析的子域名
client = AcsClient(accessKeyId, accessSecret, 'cn-hangzhou')
def update(RecordId, RR, Type, Value): # 修改域名解析记录
from aliyunsdkalidns.request.v20150109.UpdateDomainRecordRequest import UpdateDomainRecordRequest
request = UpdateDomainRecordRequest()
request.set_accept_format('json')
request.set_RecordId(RecordId)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
def add(DomainName, RR, Type, Value): # 添加新的域名解析记录
from aliyunsdkalidns.request.v20150109.AddDomainRecordRequest import AddDomainRecordRequest
request = AddDomainRecordRequest()
request.set_accept_format('json')
request.set_DomainName(DomainName)
request.set_RR(RR) # https://blog.zeruns.tech
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
if ipv4_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv4 + '.' + domain)
response = client.do_action_with_exception(request) # 获取域名解析记录列表
domain_list = json.loads(response) # 将返回的JSON数据转化为Python能识别的
ip = urlopen('https://api-ipv4.ip.sb/ip').read() # 使用 IP.SB 的接口获取ipv4地址
ipv4 = str(ip, encoding='utf-8')
print("当前 IPv4 地址:%s" % ipv4)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv4, "A", ipv4)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv4.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv4, "A", ipv4)
print("修改域名解析成功")
else: # https://blog.zeruns.tech
print("IPv4地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain) # https://blog.zeruns.tech
request.set_RR(name_ipv4)
response = client.do_action_with_exception(request)
add(domain, name_ipv4, "A", ipv4)
print("修改域名解析成功")
if ipv6_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv6 + '.' + domain)
response = client.do_action_with_exception(request) # 获取域名解析记录列表
domain_list = json.loads(response) # 将返回的JSON数据转化为Python能识别的
ip = urlopen('https://api-ipv6.ip.sb/ip').read() # 使用IP.SB的接口获取ipv6地址
ipv6 = str(ip, encoding='utf-8')
print("获取到IPv6地址:%s" % ipv6)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv6, "AAAA", ipv6)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv6.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
else: # https://blog.zeruns.tech
print("IPv6地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv6) # https://blog.zeruns.tech
response = client.do_action_with_exception(request)
add(domain, name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
| 38.974576 | 113 | 0.719069 | from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkalidns.request.v20150109.DescribeSubDomainRecordsRequest import DescribeSubDomainRecordsRequest
from aliyunsdkalidns.request.v20150109.DescribeDomainRecordsRequest import DescribeDomainRecordsRequest
import requests
from urllib.request import urlopen
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--access-key-id')
parser.add_argument('--access-key-secret')
parser.add_argument('--domain-name')
parser.add_argument('--host')
args = parser.parse_args()
print(args)
accessKeyId = args.access_key_id
accessSecret = args.access_key_secret
domain = args.domain_name
ipv4_flag = 1
name_ipv4 = args.host
ipv6_flag = 0
name_ipv6 = "ipv6.test"
client = AcsClient(accessKeyId, accessSecret, 'cn-hangzhou')
def update(RecordId, RR, Type, Value):
from aliyunsdkalidns.request.v20150109.UpdateDomainRecordRequest import UpdateDomainRecordRequest
request = UpdateDomainRecordRequest()
request.set_accept_format('json')
request.set_RecordId(RecordId)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
def add(DomainName, RR, Type, Value):
from aliyunsdkalidns.request.v20150109.AddDomainRecordRequest import AddDomainRecordRequest
request = AddDomainRecordRequest()
request.set_accept_format('json')
request.set_DomainName(DomainName)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
if ipv4_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv4 + '.' + domain)
response = client.do_action_with_exception(request)
domain_list = json.loads(response)
ip = urlopen('https://api-ipv4.ip.sb/ip').read()
ipv4 = str(ip, encoding='utf-8')
print("当前 IPv4 地址:%s" % ipv4)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv4, "A", ipv4)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv4.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv4, "A", ipv4)
print("修改域名解析成功")
else:
print("IPv4地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv4)
response = client.do_action_with_exception(request)
add(domain, name_ipv4, "A", ipv4)
print("修改域名解析成功")
if ipv6_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv6 + '.' + domain)
response = client.do_action_with_exception(request)
domain_list = json.loads(response)
ip = urlopen('https://api-ipv6.ip.sb/ip').read()
ipv6 = str(ip, encoding='utf-8')
print("获取到IPv6地址:%s" % ipv6)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv6, "AAAA", ipv6)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv6.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
else:
print("IPv6地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv6)
response = client.do_action_with_exception(request)
add(domain, name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
| true | true |
f72a9f4ec04b375aa26c8218249e75c1e2a2db4d | 3,486 | py | Python | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Halanaerobium kushneri.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HalanaerobiumKushneri(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Halanaerobium kushneri graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Halanaerobium kushneri graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HalanaerobiumKushneri",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.2 | 223 | 0.679002 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def HalanaerobiumKushneri(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="HalanaerobiumKushneri",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f72aa007710dbb5ce8eb5bd9d8566a13f57787d4 | 36,004 | py | Python | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-08-30T06:46:16.000Z | 2017-08-30T06:46:16.000Z | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-07-23T12:13:04.000Z | 2018-07-23T12:13:04.000Z | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
# makemessages scans the current working directory and writes in the
# locale subdirectory. There aren't any options to control this. As a
# consequence tests can't run in parallel. Since i18n tests run in less
# than 4 seconds, serializing them with SerializeMixin is acceptable.
lockfile = __file__
test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
stdout=out, **options)
output = out.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
q = '"'
if use_quotes:
expected_value = '"%s"' % expected_value
q = "'"
needle = '%s %s' % (keyword, expected_value)
expected_value = re.escape(expected_value)
return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertMsgId(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)
def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)
def assertMsgStr(self, msgstr, haystack, use_quotes=True):
return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
parts = ['#: ']
path = os.path.join(cwd_prefix, *comment_parts)
parts.append(path)
if isinstance(line_number, six.string_types):
line_number = self._get_token_line_number(path, line_number)
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
if assert_presence:
return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
else:
return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def _get_token_line_number(self, path, token):
with open(path) as f:
for line, content in enumerate(f, 1):
if token in force_text(content):
return line
self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
"""
self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
verifies that the django.po file has a gettext-style location comment of the form
`#: dirA/dirB/foo.py:42`
(or `#: .\dirA\dirB\foo.py:42` on Windows)
None can be passed for the line_number argument to skip checking of
the :42 suffix part.
A string token can also be passed as line_number, in which case it
will be searched in the template, and its line number will be used.
A msgid is a suitable candidate.
"""
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
"""Check the opposite of assertLocationComment()"""
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
def assertRecentlyModified(self, path):
"""
Assert that file was recently modified (modification time was less than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertLess(delta, 10, "%s was recently modified" % path)
def assertNotRecentlyModified(self, path):
"""
Assert that file was not recently modified (modification time was more than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertGreater(delta, 10, "%s wasn't recently modified" % path)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertNotIn('This comment should not be extracted', po_contents)
# Comments in templates
self.assertIn('#. Translators: This comment should be extracted', po_contents)
self.assertIn(
"#. Translators: Django comment block for translators\n#. "
"string's meaning unveiled",
po_contents
)
self.assertIn('#. Translators: One-line translator comment #1', po_contents)
self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #2', po_contents)
self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #3', po_contents)
self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #4', po_contents)
self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
self.assertIn(
'#. Translators: One-line translator comment #5 -- with '
'non ASCII characters: áéíóúö',
po_contents
)
self.assertIn(
'#. Translators: Two-line translator comment #5 -- with '
'non ASCII characters: áéíóúö\n#. continued here.',
po_contents
)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("Get my line number", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
self.assertTrue(MakeMessagesCommand.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
msg = (
'Translation blocks must not include other block tags: blocktrans '
'(file %s, line 3)' % os.path.join('templates', 'template_with_error.tpl')
)
with self.assertRaisesMessage(SyntaxError, msg):
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
    def test_template_message_context_extractor(self):
        """
        Ensure that message contexts are correctly extracted for the
        {% trans %} and {% blocktrans %} template tags.
        Refs #14806.
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # {% trans %}
            # Each context= argument must show up as a standalone msgctxt line
            # next to its msgid in the generated catalog.
            self.assertIn('msgctxt "Special trans context #1"', po_contents)
            self.assertMsgId("Translatable literal #7a", po_contents)
            self.assertIn('msgctxt "Special trans context #2"', po_contents)
            self.assertMsgId("Translatable literal #7b", po_contents)
            self.assertIn('msgctxt "Special trans context #3"', po_contents)
            self.assertMsgId("Translatable literal #7c", po_contents)
            # {% trans %} with a filter
            for minor_part in 'abcdefgh':  # Iterate from #7.1a to #7.1h template markers
                self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
                self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)
            # {% blocktrans %}
            self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
            self.assertMsgId("Translatable literal #8a", po_contents)
            # Plural blocktrans: the context applies to both singular and plural forms.
            self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
            self.assertMsgId("Translatable literal #8b-singular", po_contents)
            self.assertIn("Translatable literal #8b-plural", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
            self.assertMsgId("Translatable literal #8c-singular", po_contents)
            self.assertIn("Translatable literal #8c-plural", po_contents)
            # blocktrans with a %(...)s placeholder keeps its context too.
            self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
            self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #1' \(file templates[/\\]comments.thtml, line 4\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #3' \(file templates[/\\]comments.thtml, line 6\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #4' \(file templates[/\\]comments.thtml, line 8\) "
"was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
def test_makemessages_find_files(self):
"""
Test that find_files only discover files having the proper extensions.
"""
cmd = MakeMessagesCommand()
cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
cmd.symlinks = False
cmd.domain = 'django'
cmd.extensions = ['html', 'txt', 'py']
cmd.verbosity = 0
cmd.locale_paths = []
cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
cmd.extensions = ['js']
cmd.domain = 'djangojs'
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.js'}), set())
    @mock.patch('django.core.management.commands.makemessages.popen_wrapper')
    def test_makemessages_gettext_version(self, mocked_popen_wrapper):
        """gettext_version parses `xgettext --version` output into a tuple."""
        # "Normal" output:
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.18.1\n"
            "Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
            "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
            "This is free software: you are free to change and redistribute it.\n"
            "There is NO WARRANTY, to the extent permitted by law.\n"
            "Written by Ulrich Drepper.\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 18, 1))
        # Version number with only 2 parts (#23788)
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.17\n", '', 0)
        # A fresh command instance is needed each time: gettext_version is a
        # cached property, so re-reading it on the old instance wouldn't
        # re-invoke the (mocked) popen_wrapper.
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 17))
        # Bad version output
        mocked_popen_wrapper.return_value = (
            "any other return value\n", '', 0)
        cmd = MakeMessagesCommand()
        with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
            cmd.gettext_version
def test_po_file_encoding_when_updating(self):
"""Update of PO file doesn't corrupt it with non-UTF-8 encoding on Python3+Windows (#23271)"""
BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
os.chdir(self.test_dir)
shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
po_contents = force_text(fp.read())
self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
    """Extraction tests for the 'djangojs' domain (JavaScript sources)."""
    # JavaScript catalogs live under djangojs.po, not django.po.
    PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
    def test_javascript_literals(self):
        """All gettext()/gettext_noop() literals in the JS fixtures are extracted."""
        os.chdir(self.test_dir)
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('gettext_noop should, too.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
        self.assertMsgId(r'He said, \"hello\".', po_contents)
        self.assertMsgId("okkkk", po_contents)
        self.assertMsgId("TEXT", po_contents)
        self.assertMsgId("It's at http://example.com", po_contents)
        self.assertMsgId("String", po_contents)
        # Comment-like text inside a string literal is part of the msgid.
        self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
        self.assertMsgId("foo", po_contents)
        self.assertMsgId("bar", po_contents)
        self.assertMsgId("baz", po_contents)
        self.assertMsgId("quz", po_contents)
        self.assertMsgId("foobar", po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """
        Regression test for #23583.
        """
        # Collected static files (STATIC_ROOT/MEDIA_ROOT) must not be scanned.
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
        self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
    @override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
    def test_default_root_settings(self):
        """
        Regression test for #23717.
        """
        # Unset/empty root settings must not break extraction.
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """Tests for makemessages --ignore patterns (directories, subdirectories, files)."""
    def test_ignore_directory(self):
        """A directory glob skips the whole directory and logs it at verbosity 2."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            os.path.join('ignore_dir', '*'),
        ])
        self.assertIn("ignoring directory ignore_dir", out)
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertNotMsgId('This should be ignored.', po_contents)
    def test_ignore_subdirectory(self):
        """Patterns can target nested subdirectories under templates/."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'templates/*/ignore.html',
            'templates/subdir/*',
        ])
        self.assertIn("ignoring directory subdir", out)
        self.assertNotMsgId('This subdir should be ignored too.', po_contents)
    def test_ignore_file_patterns(self):
        """A filename glob skips matching individual files."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'xxx_*',
        ])
        self.assertIn("ignoring file xxx_ignored.html", out)
        self.assertNotMsgId('This should be ignored too.', po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        # STATIC_ROOT and MEDIA_ROOT are ignored implicitly, without a pattern.
        out, _ = self._run_makemessages()
        self.assertIn("ignoring directory static", out)
        self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
    """Tests that makemessages --symlinks follows symlinked template directories."""
    def setUp(self):
        super(SymlinkExtractorTests, self).setUp()
        # Symlink target created lazily by test_symlink(); removed in tearDown().
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.remove(self.symlinked_dir)
        except OSError:
            # The link may never have been created (test skipped or failed early).
            pass
        os.chdir(self._cwd)
    def test_symlink(self):
        # On Python < 3.2 os.symlink() exists only on Unix
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                # Leftover from a previous run: must be a link, not a real dir.
                self.assertTrue(os.path.islink(self.symlinked_dir))
            else:
                # On Python >= 3.2) os.symlink() exists always but then can
                # fail at runtime when user hasn't the needed permissions on
                # Windows versions that support symbolink links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case
                try:
                    os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
                except (OSError, NotImplementedError):
                    raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
            self.assertTrue(os.path.exists(self.PO_FILE))
            with open(self.PO_FILE, 'r') as fp:
                po_contents = force_text(fp.read())
                self.assertMsgId('This literal should be included.', po_contents)
                # Location comments must point at the symlinked path.
                self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """Tests around the Plural-Forms header copied into generated catalogs."""
    # Spanish catalog used by the plural-forms override test.
    PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
    def tearDown(self):
        super(CopyPluralFormsExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/es')
        except OSError:
            # Only some tests create the 'es' locale directory.
            pass
        os.chdir(self._cwd)
    def test_copy_plural_forms(self):
        """The Plural-Forms header from Django's own catalog is copied over."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)
    def test_override_plural_forms(self):
        """Ticket #20311."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_ES))
        with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            # Exactly one Plural-Forms header must remain after merging.
            found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
            self.assertEqual(1, len(found))
    def test_trans_and_plural_blocktrans_collision(self):
        """
        Ensures a correct workaround for the gettext bug when handling a literal
        found inside a {% trans %} tag and also in another file inside a
        {% blocktrans %} with a plural (#17375).
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # msgcat's conflict marker must not leak into the merged catalog.
            self.assertNotIn("#-#-#-#-# django.pot (PACKAGE VERSION) #-#-#-#-#\\n", po_contents)
            self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
            self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
    """Tests for the --no-wrap switch controlling long msgid line wrapping."""

    def _extract(self, no_wrap):
        # Run extraction with the given wrapping mode and return the catalog text.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=no_wrap)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as po_file:
            return force_text(po_file.read())

    def test_no_wrap_enabled(self):
        po_contents = self._extract(no_wrap=True)
        # With --no-wrap the long literal stays on a single msgid line.
        self.assertMsgId(
            'This literal should also be included wrapped or not wrapped '
            'depending on the use of the --no-wrap option.',
            po_contents
        )

    def test_no_wrap_disabled(self):
        po_contents = self._extract(no_wrap=False)
        # Without --no-wrap, xgettext folds the msgid across several quoted lines.
        self.assertMsgId(
            '""\n"This literal should also be included wrapped or not '
            'wrapped depending on the "\n"use of the --no-wrap option."',
            po_contents,
            use_quotes=False
        )
class LocationCommentsTests(ExtractorTests):
    """Tests for '#: path:line' location comments and the --no-location switch."""

    def _extract(self, no_location):
        # Run extraction with the given location-comment mode.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=no_location)
        self.assertTrue(os.path.exists(self.PO_FILE))

    def test_no_location_enabled(self):
        """Behavior is correct if --no-location switch is specified. See #16903."""
        self._extract(no_location=True)
        self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')

    def test_no_location_disabled(self):
        """Behavior is correct if --no-location switch isn't specified."""
        self._extract(no_location=False)
        # #16903 -- Standard comment with source file relative path should be present
        self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
        # #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
        self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
    """The --keep-pot flag controls whether the intermediate .pot file survives."""

    POT_FILE = 'locale/django.pot'

    def tearDown(self):
        super(KeepPotFileExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.unlink(self.POT_FILE)
        except OSError:
            # The .pot file only exists when a test ran with keep_pot=True.
            pass
        os.chdir(self._cwd)

    def _extract(self, **options):
        # Run makemessages with any extra keep_pot option forwarded.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, **options)

    def test_keep_pot_disabled_by_default(self):
        self._extract()
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_explicitly_disabled(self):
        self._extract(keep_pot=False)
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_enabled(self):
        self._extract(keep_pot=True)
        self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
    """Passing several locales to makemessages creates one catalog per locale."""

    PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
    PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
    LOCALES = ['pt', 'de', 'ch']

    def tearDown(self):
        super(MultipleLocaleExtractionTests, self).tearDown()
        os.chdir(self.test_dir)
        # Remove every locale directory this test class may have created.
        for locale in self.LOCALES:
            try:
                self._rmrf('locale/%s' % locale)
            except OSError:
                pass
        os.chdir(self._cwd)

    def test_multiple_locales(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
        for po_file in (self.PO_FILE_PT, self.PO_FILE_DE):
            self.assertTrue(os.path.exists(po_file))
class ExcludedLocaleExtractionTests(ExtractorTests):
    """Tests for makemessages --exclude (locales that must NOT be updated)."""
    LOCALES = ['en', 'fr', 'it']
    # %s placeholder is filled with a locale code per assertion.
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
    test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
    def _set_times_for_all_po_files(self):
        """
        Set access and modification times to the Unix epoch time for all the .po files.
        """
        # With epoch mtimes, assertRecentlyModified/assertNotRecentlyModified
        # can distinguish files the command touched from those it skipped.
        for locale in self.LOCALES:
            os.utime(self.PO_FILE % locale, (0, 0))
    def setUp(self):
        super(ExcludedLocaleExtractionTests, self).setUp()
        os.chdir(self.test_dir)  # ExtractorTests.tearDown() takes care of restoring.
        shutil.copytree('canned_locale', 'locale')
        self._set_times_for_all_po_files()
        self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
    def test_command_help(self):
        with captured_stdout(), captured_stderr():
            # `call_command` bypasses the parser; by calling
            # `execute_from_command_line` with the help subcommand we
            # ensure that there are no issues with the parser itself.
            execute_from_command_line(['django-admin', 'help', 'makemessages'])
    def test_one_locale_excluded(self):
        management.call_command('makemessages', exclude=['it'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded(self):
        management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_one_locale_excluded_with_locale(self):
        # --exclude wins over an explicit --locale mention of the same locale.
        management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
                                stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
    def setUp(self):
        super(CustomLayoutExtractionTests, self).setUp()
        # Extraction runs against the sample project layout, not 'commands'.
        self.test_dir = os.path.join(this_directory, 'project_dir')
    def test_no_locale_raises(self):
        """Without any locale directory or LOCALE_PATHS, makemessages errors out."""
        os.chdir(self.test_dir)
        with six.assertRaisesRegex(self, management.CommandError,
                "Unable to find a locale path to store translations for file"):
            # NOTE(review): locale is passed as a bare string here (not a list,
            # unlike the other tests); the command fails before using it, so the
            # error path is what's under test -- confirm if normalizing.
            management.call_command('makemessages', locale=LOCALE, verbosity=0)
    @override_settings(
        LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
    )
    def test_project_locale_paths(self):
        """
        Test that:
        * translations for an app containing a locale folder are stored in that folder
        * translations outside of that app are in LOCALE_PATHS[0]
        """
        os.chdir(self.test_dir)
        # Clean up both generated locale trees; ignore_errors=True in case a
        # failure prevented their creation.
        self.addCleanup(shutil.rmtree,
            os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
        self.addCleanup(shutil.rmtree,
            os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        project_de_locale = os.path.join(
            self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
        app_de_locale = os.path.join(
            self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
        self.assertTrue(os.path.exists(project_de_locale))
        self.assertTrue(os.path.exists(app_de_locale))
        # Project-level catalog: strings from outside the app with a locale dir.
        with open(project_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has no locale directory', po_contents)
            self.assertMsgId('This is a project-level string', po_contents)
        # App-level catalog: only strings from the app owning the locale dir.
        with open(app_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has a locale directory', po_contents)
| 45.517067 | 113 | 0.644067 |
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
# consequence tests can't run in parallel. Since i18n tests run in less
lockfile = __file__
test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
stdout=out, **options)
output = out.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
q = '"'
if use_quotes:
expected_value = '"%s"' % expected_value
q = "'"
needle = '%s %s' % (keyword, expected_value)
expected_value = re.escape(expected_value)
return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertMsgId(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)
def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)
def assertMsgStr(self, msgstr, haystack, use_quotes=True):
return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
parts = ['#: ']
path = os.path.join(cwd_prefix, *comment_parts)
parts.append(path)
if isinstance(line_number, six.string_types):
line_number = self._get_token_line_number(path, line_number)
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
if assert_presence:
return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
else:
return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def _get_token_line_number(self, path, token):
with open(path) as f:
for line, content in enumerate(f, 1):
if token in force_text(content):
return line
self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
def assertRecentlyModified(self, path):
delta = time.time() - os.stat(path).st_mtime
self.assertLess(delta, 10, "%s was recently modified" % path)
def assertNotRecentlyModified(self, path):
delta = time.time() - os.stat(path).st_mtime
self.assertGreater(delta, 10, "%s wasn't recently modified" % path)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertNotIn('This comment should not be extracted', po_contents)
# Comments in templates
self.assertIn('#. Translators: This comment should be extracted', po_contents)
self.assertIn(
"#. Translators: Django comment block for translators\n#. "
"string's meaning unveiled",
po_contents
)
self.assertIn('#. Translators: One-line translator comment #1', po_contents)
self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #2', po_contents)
self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #3', po_contents)
self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #4', po_contents)
self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
self.assertIn(
'#. Translators: One-line translator comment #5 -- with '
'non ASCII characters: áéíóúö',
po_contents
)
self.assertIn(
'#. Translators: Two-line translator comment #5 -- with '
'non ASCII characters: áéíóúö\n#. continued here.',
po_contents
)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("Get my line number", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
def test_force_en_us_locale(self):
self.assertTrue(MakeMessagesCommand.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
msg = (
'Translation blocks must not include other block tags: blocktrans '
'(file %s, line 3)' % os.path.join('templates', 'template_with_error.tpl')
)
with self.assertRaisesMessage(SyntaxError, msg):
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
def test_template_message_context_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Special trans context #1"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special trans context #2"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special trans context #3"', po_contents)
self.assertMsgId("Translatable literal
# {% trans %} with a filter
for minor_part in 'abcdefgh': # Iterate from #7.1a to #7.1h template markers
self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
self.assertMsgId("Translatable literal
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #1' \(file templates[/\\]comments.thtml, line 4\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #4' \(file templates[/\\]comments.thtml, line 8\) "
"was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
    def test_makemessages_find_files(self):
        """find_files only discovers files whose extension is in cmd.extensions."""
        cmd = MakeMessagesCommand()
        cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
        cmd.symlinks = False
        cmd.domain = 'django'
        cmd.extensions = ['html', 'txt', 'py']
        cmd.verbosity = 0
        cmd.locale_paths = []
        cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
        found_files = cmd.find_files(self.test_dir)
        found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
        # Only the configured extensions may be returned for the 'django' domain...
        self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
        # ...and only '.js' for the 'djangojs' domain.
        cmd.extensions = ['js']
        cmd.domain = 'djangojs'
        found_files = cmd.find_files(self.test_dir)
        found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
        self.assertEqual(found_exts.difference({'.js'}), set())
    @mock.patch('django.core.management.commands.makemessages.popen_wrapper')
    def test_makemessages_gettext_version(self, mocked_popen_wrapper):
        """gettext_version parses 3- and 2-part version numbers from the
        mocked ``xgettext --version`` output and raises CommandError when the
        output is unrecognizable."""
        # "Normal" output:
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.18.1\n"
            "Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
            "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
            "This is free software: you are free to change and redistribute it.\n"
            "There is NO WARRANTY, to the extent permitted by law.\n"
            "Written by Ulrich Drepper.\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 18, 1))
        # Version number with only 2 parts (#23788)
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.17\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 17))
        # Bad version output
        mocked_popen_wrapper.return_value = (
            "any other return value\n", '', 0)
        cmd = MakeMessagesCommand()
        with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
            cmd.gettext_version
    def test_po_file_encoding_when_updating(self):
        """Updating an existing pt_BR catalog keeps its non-ASCII (UTF-8) content intact."""
        BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
        os.chdir(self.test_dir)
        # Start from a pristine fixture copy so the test is repeatable.
        shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
        management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
        self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
        with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
    """makemessages extraction tests for the 'djangojs' (JavaScript) domain."""
    PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
    def test_javascript_literals(self):
        """Translatable literals in JS fixtures are extracted; tricky cases
        (escaped quotes, URLs, comment-like strings) are handled."""
        os.chdir(self.test_dir)
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('gettext_noop should, too.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
        self.assertMsgId(r'He said, \"hello\".', po_contents)
        self.assertMsgId("okkkk", po_contents)
        self.assertMsgId("TEXT", po_contents)
        self.assertMsgId("It's at http://example.com", po_contents)
        self.assertMsgId("String", po_contents)
        self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
        self.assertMsgId("foo", po_contents)
        self.assertMsgId("bar", po_contents)
        self.assertMsgId("baz", po_contents)
        self.assertMsgId("quz", po_contents)
        self.assertMsgId("foobar", po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """STATIC_ROOT/MEDIA_ROOT contents must not be scanned for literals."""
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
        self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
    @override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
    def test_default_root_settings(self):
        """Extraction works when STATIC_ROOT/MEDIA_ROOT are unset/empty."""
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """Tests for makemessages --ignore pattern handling (dirs, subdirs, files)."""
    def test_ignore_directory(self):
        out, po_contents = self._run_makemessages(ignore_patterns=[
            os.path.join('ignore_dir', '*'),
        ])
        self.assertIn("ignoring directory ignore_dir", out)
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertNotMsgId('This should be ignored.', po_contents)
    def test_ignore_subdirectory(self):
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'templates/*/ignore.html',
            'templates/subdir/*',
        ])
        self.assertIn("ignoring directory subdir", out)
        self.assertNotMsgId('This subdir should be ignored too.', po_contents)
    def test_ignore_file_patterns(self):
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'xxx_*',
        ])
        self.assertIn("ignoring file xxx_ignored.html", out)
        self.assertNotMsgId('This should be ignored too.', po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        # STATIC_ROOT and MEDIA_ROOT are implicitly ignored (no pattern needed).
        out, _ = self._run_makemessages()
        self.assertIn("ignoring directory static", out)
        self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
    """Extraction follows symlinked template directories when --symlinks is given."""
    def setUp(self):
        super(SymlinkExtractorTests, self).setUp()
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.remove(self.symlinked_dir)
        except OSError:
            # Symlink may never have been created (e.g. test skipped).
            pass
        os.chdir(self._cwd)
    def test_symlink(self):
        # On Python < 3.2 os.symlink() exists only on Unix
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                self.assertTrue(os.path.islink(self.symlinked_dir))
            else:
                # On Python >= 3.2) os.symlink() exists always but then can
                # fail at runtime when user hasn't the needed permissions on
                # Windows versions that support symbolink links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case
                try:
                    os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
                except (OSError, NotImplementedError):
                    raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
            self.assertTrue(os.path.exists(self.PO_FILE))
            with open(self.PO_FILE, 'r') as fp:
                po_contents = force_text(fp.read())
                self.assertMsgId('This literal should be included.', po_contents)
                # Location comment must reference the symlinked path.
                self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """Plural-Forms header handling: copying from conf, overrides and collisions."""
    PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
    def tearDown(self):
        super(CopyPluralFormsExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/es')
        except OSError:
            pass
        os.chdir(self._cwd)
    def test_copy_plural_forms(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)
    def test_override_plural_forms(self):
        # The resulting .po must contain exactly one Plural-Forms header.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_ES))
        with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
            self.assertEqual(1, len(found))
    def test_trans_and_plural_blocktrans_collision(self):
        # msgcat collision markers must not leak into the merged catalog.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertNotIn("#-#-#-#-# django.pot (PACKAGE VERSION) #-#-#-#-#\\n", po_contents)
            self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
            self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
    """--no-wrap controls whether long msgids are line-wrapped in the .po output."""
    def test_no_wrap_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # Whole literal on a single msgid line.
            self.assertMsgId(
                'This literal should also be included wrapped or not wrapped '
                'depending on the use of the --no-wrap option.',
                po_contents
            )
    def test_no_wrap_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # Literal is split over multiple quoted lines by gettext wrapping;
            # use_quotes=False because the expectation embeds its own quoting.
            self.assertMsgId(
                '""\n"This literal should also be included wrapped or not '
                'wrapped depending on the "\n"use of the --no-wrap option."',
                po_contents,
                use_quotes=False
            )
class LocationCommentsTests(ExtractorTests):
    """--no-location controls the '#: file:line' comments in the catalog."""
    def test_no_location_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')
    def test_no_location_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # #16903 -- Standard comment with source file relative path should be present
        self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
        # #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
        self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
    """--keep-pot decides whether the intermediate django.pot file survives."""
    POT_FILE = 'locale/django.pot'
    def tearDown(self):
        super(KeepPotFileExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.unlink(self.POT_FILE)
        except OSError:
            # The .pot may already have been removed by the command itself.
            pass
        os.chdir(self._cwd)
    def test_keep_pot_disabled_by_default(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertFalse(os.path.exists(self.POT_FILE))
    def test_keep_pot_explicitly_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=False)
        self.assertFalse(os.path.exists(self.POT_FILE))
    def test_keep_pot_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=True)
        self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
    """Passing several locales to makemessages creates a catalog for each."""
    PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
    PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
    LOCALES = ['pt', 'de', 'ch']
    def tearDown(self):
        super(MultipleLocaleExtractionTests, self).tearDown()
        os.chdir(self.test_dir)
        for locale in self.LOCALES:
            try:
                self._rmrf('locale/%s' % locale)
            except OSError:
                pass
        os.chdir(self._cwd)
    def test_multiple_locales(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_PT))
        self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
    """--exclude removes locales from processing, also when combined with --locale.

    Modification times are zeroed in setUp so that "was processed" can be
    detected via assert(Not)RecentlyModified on each catalog.
    """
    LOCALES = ['en', 'fr', 'it']
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
    test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
    def _set_times_for_all_po_files(self):
        # Reset atime/mtime to the epoch for every canned catalog.
        for locale in self.LOCALES:
            os.utime(self.PO_FILE % locale, (0, 0))
    def setUp(self):
        super(ExcludedLocaleExtractionTests, self).setUp()
        os.chdir(self.test_dir)  # ExtractorTests.tearDown() takes care of restoring.
        shutil.copytree('canned_locale', 'locale')
        self._set_times_for_all_po_files()
        self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
    def test_command_help(self):
        with captured_stdout(), captured_stderr():
            # `call_command` bypasses the parser; by calling
            # `execute_from_command_line` with the help subcommand we
            # ensure that there are no issues with the parser itself.
            execute_from_command_line(['django-admin', 'help', 'makemessages'])
    def test_one_locale_excluded(self):
        management.call_command('makemessages', exclude=['it'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded(self):
        management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_one_locale_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
                                stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
    """Extraction in a project layout without a default app 'locale' directory."""
    def setUp(self):
        super(CustomLayoutExtractionTests, self).setUp()
        self.test_dir = os.path.join(this_directory, 'project_dir')
    def test_no_locale_raises(self):
        # Without LOCALE_PATHS and without an app locale dir there is nowhere
        # to store the catalog -> CommandError.
        os.chdir(self.test_dir)
        with six.assertRaisesRegex(self, management.CommandError,
                "Unable to find a locale path to store translations for file"):
            management.call_command('makemessages', locale=LOCALE, verbosity=0)
    @override_settings(
        LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
    )
    def test_project_locale_paths(self):
        """Strings from apps without a locale dir go to LOCALE_PATHS; strings
        from apps with their own locale dir stay there."""
        os.chdir(self.test_dir)
        self.addCleanup(shutil.rmtree,
            os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
        self.addCleanup(shutil.rmtree,
            os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        project_de_locale = os.path.join(
            self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
        app_de_locale = os.path.join(
            self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
        self.assertTrue(os.path.exists(project_de_locale))
        self.assertTrue(os.path.exists(app_de_locale))
        with open(project_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has no locale directory', po_contents)
            self.assertMsgId('This is a project-level string', po_contents)
        with open(app_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has a locale directory', po_contents)
f72aa05957ea518052b423c49d9e13775118a954 | 8,920 | py | Python | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..heads import CausalLMHead, ClassificationHead, MultiLabelClassificationHead
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import (
BertEncoderAdaptersMixin,
BertOutputAdaptersMixin,
BertSelfOutputAdaptersMixin,
ModelWithFlexibleHeadsAdaptersMixin,
)
class GPT2AttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
    """Holds the attention-side adapters of one GPT-2 decoder block.

    NOTE(review): the original docstring said "DistilBert" — apparently a
    copy-paste leftover; this module is only created by
    GPT2DecoderBlockAdaptersMixin in this file.
    """
    def __init__(self, parent):
        super().__init__()
        # keep a reference to the parent module without registering as a submodule
        # (a plain attribute assignment on an nn.Module would register `parent`
        # as a child and create a cycle in the module tree)
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed to the adapter machinery here.
        return None
class GPT2OutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
    """Holds the output-side (feed-forward) adapters of one GPT-2 decoder block.

    NOTE(review): the original docstring said "DistilBert" — apparently a
    copy-paste leftover; this module is only created by
    GPT2DecoderBlockAdaptersMixin in this file.
    """
    def __init__(self, parent):
        super().__init__()
        # keep a reference to the parent module without registering as a submodule
        # (a plain attribute assignment on an nn.Module would register `parent`
        # as a child and create a cycle in the module tree)
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed to the adapter machinery here.
        return None
class GPT2DecoderBlockAdaptersMixin(BertEncoderAdaptersMixin):
    """Equips a GPT-2 decoder block with attention- and output-side adapters.

    Every adapter operation is forwarded to both sub-modules (attention first,
    then output) so the two containers always stay in sync.
    """
    def _init_adapter_modules(self):
        self.attention_adapters = GPT2AttentionAdaptersModule(self)
        self.output_adapters = GPT2OutputAdaptersModule(self)
        self.attention_adapters._init_adapter_modules()
        self.output_adapters._init_adapter_modules()
    def _adapter_modules(self):
        # Both per-block adapter containers in the order operations are applied.
        return (self.attention_adapters, self.output_adapters)
    def add_fusion_layer(self, adapter_names):
        for container in self._adapter_modules():
            container.add_fusion_layer(adapter_names)
    def add_adapter(self, adapter_name: str, layer_idx: int):
        for container in self._adapter_modules():
            container.add_adapter(adapter_name, layer_idx)
    def delete_adapter(self, adapter_name):
        for container in self._adapter_modules():
            container.delete_adapter(adapter_name)
    def delete_fusion_layer(self, adapter_names):
        for container in self._adapter_modules():
            container.delete_fusion_layer(adapter_names)
    def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
        for container in self._adapter_modules():
            container.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
class GPT2ModelAdapterMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
    """Adds adapter management to the GPT-2 base model.

    Decoder blocks live in ``self.base_model.h``; most operations simply
    iterate over them and delegate to each block's adapter mixin.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _init_adapter_modules(self):
        """Instantiates adapter (and fusion) modules declared in the config."""
        super()._init_adapter_modules()
        # add adapters specified in config; invertible adapter will only be added if required
        for adapter_name in self.config.adapters.adapters:
            self._add_adapter(adapter_name)
        # fusion
        if hasattr(self.config, "fusion_models"):
            for fusion_adapter_names in self.config.fusion_models:
                self.add_fusion_layer(fusion_adapter_names)
    def _add_adapter(self, adapter_name: str):
        """Adds one adapter to every decoder block not listed in its ``leave_out``."""
        adapter_config = self.config.adapters.get(adapter_name)
        leave_out = adapter_config.get("leave_out", [])
        for i, layer in enumerate(self.base_model.h):
            if i not in leave_out:
                layer.add_adapter(adapter_name, i)
        self.add_invertible_adapter(adapter_name)
    def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
        """Freezes all model weights and unfreezes only the given adapters for training."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, True, False)
        self.enable_invertible_adapters(adapter_setup.flatten())
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)
    def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
        """Freezes the model and unfreezes the fusion layers of the given setup
        (the underlying adapters too when ``unfreeze_adapters`` is True)."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, unfreeze_adapters, True)
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)
    def enable_adapters(
        self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_attention: bool
    ):
        # Propagate the unfreeze flags to every decoder block.
        for layer in self.base_model.h:
            layer.enable_adapters(adapter_setup, unfreeze_adapters, unfreeze_attention)
    def adjust_attention_mask_for_parallel(self, hidden_states, attention_mask):
        """Repeats the attention mask along the batch dim when the hidden states
        were replicated (batch sizes differ), so both tensors line up again."""
        if attention_mask is not None and hidden_states.shape[0] != attention_mask.shape[0]:
            repeats = [1] * len(attention_mask.shape)
            repeats[0] = hidden_states.shape[0] // attention_mask.shape[0]
            attention_mask = attention_mask.repeat(*repeats)
        return attention_mask
    def _add_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.add_fusion_layer(adapter_names)
    def _delete_adapter(self, adapter_name: str):
        for layer in self.base_model.h:
            layer.delete_adapter(adapter_name)
        self.delete_invertible_adapter(adapter_name)
    def _delete_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.delete_fusion_layer(adapter_names)
    def get_fusion_regularization_loss(self):
        """Returns an L2 penalty (scaled by 0.01) pulling each fusion value
        matrix towards the identity, summed over all blocks."""
        reg_loss = 0.0
        target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
        for _, v in self.base_model.h._modules.items():
            for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
            for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
        return reg_loss
    def get_adapter(self, name):
        """Returns {layer_idx: {"attention"/"output": adapter module}} for ``name``.

        NOTE(review): iterates ``self.h`` while every other method here uses
        ``self.base_model.h`` — verify this mixin is only applied to the base
        model itself, otherwise this is inconsistent.
        """
        return_adapters = {}
        for idx, layer in enumerate(self.h):
            adapters = {
                "attention": layer.attention_adapters.adapters,
                "output": layer.output_adapters.adapters,
            }
            for key, adapt in adapters.items():
                if hasattr(adapt, name):
                    if idx not in return_adapters:
                        return_adapters[idx] = {}
                    return_adapters[idx][key] = getattr(adapt, name)
        return return_adapters
class GPT2ModelHeadsMixin(ModelWithFlexibleHeadsAdaptersMixin):
    """Adds flexible prediction heads to a GPT-2 model."""
    head_types = {
        "classification": ClassificationHead,
        "multilabel_classification": MultiLabelClassificationHead,
        "causal_lm": CausalLMHead,
    }
    def add_classification_head(
        self,
        head_name,
        num_labels=2,
        layers=2,
        activation_function="tanh",
        overwrite_ok=False,
        multilabel=False,
        id2label=None,
    ):
        """
        Adds a sequence classification head on top of the model.
        Args:
            head_name (str): The name of the head.
            num_labels (int, optional): Number of classification labels. Defaults to 2.
            layers (int, optional): Number of layers. Defaults to 2.
            activation_function (str, optional): Activation function. Defaults to 'tanh'.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
            multilabel (bool, optional): Enable multilabel classification setup. Defaults to False.
            id2label (dict, optional): Mapping of label ids to label strings. Defaults to None.
        """
        # Both head classes share the same constructor signature, so only the
        # class itself depends on the multilabel flag.
        head_cls = MultiLabelClassificationHead if multilabel else ClassificationHead
        head = head_cls(self, head_name, num_labels, layers, activation_function, id2label)
        self.add_prediction_head(head, overwrite_ok)
    def add_causal_lm_head(self, head_name, overwrite_ok=False):
        """
        Adds a causal language modeling head on top of the model.
        Args:
            head_name (str): The name of the head.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
        """
        self.add_prediction_head(CausalLMHead(self, head_name), overwrite_ok=overwrite_ok)
| 40.545455 | 116 | 0.69361 | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..heads import CausalLMHead, ClassificationHead, MultiLabelClassificationHead
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import (
BertEncoderAdaptersMixin,
BertOutputAdaptersMixin,
BertSelfOutputAdaptersMixin,
ModelWithFlexibleHeadsAdaptersMixin,
)
class GPT2AttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
def __init__(self, parent):
super().__init__()
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return None
class GPT2OutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
def __init__(self, parent):
super().__init__()
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return None
class GPT2DecoderBlockAdaptersMixin(BertEncoderAdaptersMixin):
def _init_adapter_modules(self):
self.attention_adapters = GPT2AttentionAdaptersModule(self)
self.output_adapters = GPT2OutputAdaptersModule(self)
self.attention_adapters._init_adapter_modules()
self.output_adapters._init_adapter_modules()
def add_fusion_layer(self, adapter_names):
self.attention_adapters.add_fusion_layer(adapter_names)
self.output_adapters.add_fusion_layer(adapter_names)
def add_adapter(self, adapter_name: str, layer_idx: int):
self.attention_adapters.add_adapter(adapter_name, layer_idx)
self.output_adapters.add_adapter(adapter_name, layer_idx)
def delete_adapter(self, adapter_name):
self.attention_adapters.delete_adapter(adapter_name)
self.output_adapters.delete_adapter(adapter_name)
def delete_fusion_layer(self, adapter_names):
self.attention_adapters.delete_fusion_layer(adapter_names)
self.output_adapters.delete_fusion_layer(adapter_names)
def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
class GPT2ModelAdapterMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _init_adapter_modules(self):
super()._init_adapter_modules()
for adapter_name in self.config.adapters.adapters:
self._add_adapter(adapter_name)
if hasattr(self.config, "fusion_models"):
for fusion_adapter_names in self.config.fusion_models:
self.add_fusion_layer(fusion_adapter_names)
def _add_adapter(self, adapter_name: str):
adapter_config = self.config.adapters.get(adapter_name)
leave_out = adapter_config.get("leave_out", [])
for i, layer in enumerate(self.base_model.h):
if i not in leave_out:
layer.add_adapter(adapter_name, i)
self.add_invertible_adapter(adapter_name)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.enable_adapters(adapter_setup, True, False)
self.enable_invertible_adapters(adapter_setup.flatten())
self.set_active_adapters(adapter_setup)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.enable_adapters(adapter_setup, unfreeze_adapters, True)
self.set_active_adapters(adapter_setup)
def enable_adapters(
self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_attention: bool
):
for layer in self.base_model.h:
layer.enable_adapters(adapter_setup, unfreeze_adapters, unfreeze_attention)
def adjust_attention_mask_for_parallel(self, hidden_states, attention_mask):
if attention_mask is not None and hidden_states.shape[0] != attention_mask.shape[0]:
repeats = [1] * len(attention_mask.shape)
repeats[0] = hidden_states.shape[0] // attention_mask.shape[0]
attention_mask = attention_mask.repeat(*repeats)
return attention_mask
def _add_fusion_layer(self, adapter_names):
for layer in self.base_model.h:
layer.add_fusion_layer(adapter_names)
def _delete_adapter(self, adapter_name: str):
for layer in self.base_model.h:
layer.delete_adapter(adapter_name)
self.delete_invertible_adapter(adapter_name)
def _delete_fusion_layer(self, adapter_names):
for layer in self.base_model.h:
layer.delete_fusion_layer(adapter_names)
def get_fusion_regularization_loss(self):
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for _, v in self.base_model.h._modules.items():
for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
def get_adapter(self, name):
return_adapters = {}
for idx, layer in enumerate(self.h):
adapters = {
"attention": layer.attention_adapters.adapters,
"output": layer.output_adapters.adapters,
}
for key, adapt in adapters.items():
if hasattr(adapt, name):
if idx not in return_adapters:
return_adapters[idx] = {}
return_adapters[idx][key] = getattr(adapt, name)
return return_adapters
class GPT2ModelHeadsMixin(ModelWithFlexibleHeadsAdaptersMixin):
head_types = {
"classification": ClassificationHead,
"multilabel_classification": MultiLabelClassificationHead,
"causal_lm": CausalLMHead,
}
def add_classification_head(
self,
head_name,
num_labels=2,
layers=2,
activation_function="tanh",
overwrite_ok=False,
multilabel=False,
id2label=None,
):
if multilabel:
head = MultiLabelClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
else:
head = ClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
self.add_prediction_head(head, overwrite_ok)
def add_causal_lm_head(self, head_name, overwrite_ok=False):
head = CausalLMHead(self, head_name)
self.add_prediction_head(head, overwrite_ok=overwrite_ok)
| true | true |
f72aa0e8cc0326318ce5bd49e2a78712241bce3f | 1,562 | py | Python | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | 7 | 2021-02-15T16:43:20.000Z | 2021-03-23T17:10:47.000Z | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | 8 | 2021-02-16T13:38:40.000Z | 2021-02-16T13:51:35.000Z | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | null | null | null | import unittest
from blindreviewparser.parser.blind_review_parser import *
class TestElasticService(unittest.TestCase):
    """Integration tests for ElasticService (expects Elasticsearch on localhost:9200)."""

    def setUp(self) -> None:
        self.es_endpoint = 'http://localhost:9200'
        self.elastic_service = ElasticService(self.es_endpoint)
        # Fixture review document shared by all tests.
        self.sample = Review(
            company='occidere',
            title='"테스트 리뷰"',
            url='/kr/company/occidere/review/af9-0df3j',
            score=5.0,
            auth='현직원 · i*********", · IT 엔지니어 - 2021.02.17'
        )

    def tearDown(self) -> None:
        # Remove the fixture document so each test starts from a clean index.
        self.__delete_sample()

    def _doc_url(self) -> str:
        """Elasticsearch document URL for the sample review."""
        return f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}'

    def test_exist_any(self):
        self.__index_sample()
        self.assertTrue(self.elastic_service.exist_any([self.sample]))

    def test_bulk_upsert(self):
        self.__delete_sample()
        self.elastic_service.bulk_upsert([self.sample])
        resp = requests.get(self._doc_url())
        self.assertEqual(resp.status_code, 200)

    def __index_sample(self) -> None:
        requests.post(
            url=self._doc_url(),
            headers={'Content-Type': 'application/json'},
            data=self.sample.to_json_str().encode('utf-8')
        )

    def __delete_sample(self) -> None:
        requests.delete(self._doc_url())
from blindreviewparser.parser.blind_review_parser import *
class TestElasticService(unittest.TestCase):
def setUp(self) -> None:
self.es_endpoint = 'http://localhost:9200'
self.elastic_service = ElasticService(self.es_endpoint)
self.sample = Review(
company='occidere',
title='"테스트 리뷰"',
url='/kr/company/occidere/review/af9-0df3j',
score=5.0,
auth='현직원 · i*********", · IT 엔지니어 - 2021.02.17'
)
def tearDown(self) -> None:
self.__delete_sample()
def test_exist_any(self):
# BUILD
self.__index_sample()
# OPERATE
exist = self.elastic_service.exist_any([self.sample])
# CHECK
self.assertTrue(exist)
def test_bulk_upsert(self):
# BUILD
self.__delete_sample()
# OPERATE
self.elastic_service.bulk_upsert([self.sample])
# CHECK
resp = requests.get(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
self.assertEqual(resp.status_code, 200)
def __index_sample(self) -> None:
requests.post(
url=f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}',
headers={'Content-Type': 'application/json'},
data=self.sample.to_json_str().encode('utf-8')
)
def __delete_sample(self) -> None:
requests.delete(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
| true | true |
f72aa166383d481c6103f4761816a7c123b14e5f | 2,262 | py | Python | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
class SystemAppTest(unittest.TestCase):
  def testDoubleEnableModification(self):
    """Ensures that system app modification logic isn't repeated.
    If EnableSystemAppModification uses are nested, inner calls should
    not need to perform any of the expensive modification logic.
    """
    # pylint: disable=no-self-use,protected-access
    # Stand-in device: no real adb connection; SDK pinned to Lollipop.
    mock_device = mock.Mock(spec=device_utils.DeviceUtils)
    mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
    type(mock_device).build_version_sdk = mock.PropertyMock(
        return_value=version_codes.LOLLIPOP)
    # Back GetProp/SetProp with a plain dict so property state persists
    # across the nested context-manager uses below.
    system_props = {}
    def dict_setprop(prop_name, value):
      system_props[prop_name] = value
    def dict_getprop(prop_name):
      return system_props.get(prop_name, '')
    mock_device.SetProp.side_effect = dict_setprop
    mock_device.GetProp.side_effect = dict_getprop
    # Outer use: expected to root the device and set the marker prop to '1'.
    with system_app.EnableSystemAppModification(mock_device):
      mock_device.EnableRoot.assert_called_once()
      mock_device.GetProp.assert_called_once_with(
          system_app._ENABLE_MODIFICATION_PROP)
      mock_device.SetProp.assert_called_once_with(
          system_app._ENABLE_MODIFICATION_PROP, '1')
      mock_device.reset_mock()
      # Inner, nested use: must see the marker prop already set and skip
      # the expensive re-enable work (no root, no SetProp).
      with system_app.EnableSystemAppModification(mock_device):
        mock_device.EnableRoot.assert_not_called()
        mock_device.GetProp.assert_called_once_with(
            system_app._ENABLE_MODIFICATION_PROP)
        mock_device.SetProp.assert_not_called()
        mock_device.reset_mock()
    # On final exit the marker prop is cleared back to '0'.
    mock_device.SetProp.assert_called_once_with(
        system_app._ENABLE_MODIFICATION_PROP, '0')
if __name__ == '__main__':
unittest.main()
| 32.314286 | 72 | 0.751105 |
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
class SystemAppTest(unittest.TestCase):
def testDoubleEnableModification(self):
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
type(mock_device).build_version_sdk = mock.PropertyMock(
return_value=version_codes.LOLLIPOP)
system_props = {}
def dict_setprop(prop_name, value):
system_props[prop_name] = value
def dict_getprop(prop_name):
return system_props.get(prop_name, '')
mock_device.SetProp.side_effect = dict_setprop
mock_device.GetProp.side_effect = dict_getprop
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_called_once()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '1')
mock_device.reset_mock()
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_not_called()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_not_called()
mock_device.reset_mock()
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '0')
if __name__ == '__main__':
unittest.main()
| true | true |
f72aa18c0f5b6396366947977df0c5e4b9da7877 | 264 | py | Python | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | 1 | 2020-05-08T15:28:04.000Z | 2020-05-08T15:28:04.000Z | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | null | null | null | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import freqerica.hamiltonian.exact
class BasicTestSuite(unittest.TestCase):
    """Basic test cases."""

    def test_absolute_truth_and_meaning(self):
        # Smoke test: verifies the test harness itself runs.
        self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 16.5 | 46 | 0.689394 |
import unittest
import freqerica.hamiltonian.exact
class BasicTestSuite(unittest.TestCase):
def test_absolute_truth_and_meaning(self):
assert True
if __name__ == '__main__':
unittest.main()
| true | true |
f72aa18da6b39da5e2c7c96f9b56b0327cc9cf0a | 1,301 | py | Python | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
class LoginForm(AuthenticationForm):
    """Authentication form styled with placeholders instead of visible labels."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hide the labels; the placeholder text takes their place.
        self.fields['username'].label = ""
        self.fields['password'].label = ""
        # Note: plain string literals — the original used pointless f-prefixes.
        self.fields['username'].widget.attrs['placeholder'] = 'Логин'
        self.fields['password'].widget.attrs['placeholder'] = 'Пароль'
        # Apply the shared CSS class to every field's widget.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'input-line full-width'
class RegisterForm(UserCreationForm):
    """Registration form styled with placeholders instead of visible labels."""

    class Meta:
        model = User
        fields = ('username', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hide the labels; the placeholder text takes their place.
        self.fields['username'].label = ""
        self.fields['password1'].label = ""
        self.fields['password2'].label = ""
        # Note: plain string literals — the original used pointless f-prefixes.
        # The trailing space in 'Повтор пароля ' is preserved intentionally.
        self.fields['username'].widget.attrs['placeholder'] = 'Логин'
        self.fields['password1'].widget.attrs['placeholder'] = 'Пароль'
        self.fields['password2'].widget.attrs['placeholder'] = 'Повтор пароля '
        # Apply the shared CSS class and suppress Django's default help text.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'input-line full-width'
            field.help_text = ''
from django.contrib.auth.models import User
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = ""
self.fields['password'].label = ""
self.fields['username'].widget.attrs['placeholder'] = f'Логин'
self.fields['password'].widget.attrs['placeholder'] = f'Пароль'
for name, item in self.fields.items():
item.widget.attrs['class'] = f'input-line full-width'
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'password1', 'password2')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = ""
self.fields['password1'].label = ""
self.fields['password2'].label = ""
self.fields['username'].widget.attrs['placeholder'] = f'Логин'
self.fields['password1'].widget.attrs['placeholder'] = f'Пароль'
self.fields['password2'].widget.attrs['placeholder'] = f'Повтор пароля '
for name, item in self.fields.items():
item.widget.attrs['class'] = f'input-line full-width'
item.help_text = '' | true | true |
f72aa1cad8297f5101397a1f8105d26c2f65379d | 415 | py | Python | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | null | null | null | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | 6 | 2020-06-05T20:37:41.000Z | 2021-09-22T18:27:23.000Z | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | null | null | null | """
ASGI config for online_quiz project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application;
# setdefault keeps any value already exported in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'online_quiz.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| 24.411765 | 79 | 0.759036 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'online_quiz.settings')
application = get_asgi_application()
| true | true |
f72aa1e7883558de22bc21371e82becfb2c32478 | 1,010 | py | Python | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 52 | 2019-12-27T03:52:00.000Z | 2022-03-26T18:16:30.000Z | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 114 | 2019-12-03T04:13:38.000Z | 2020-10-03T18:02:03.000Z | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 45 | 2019-12-22T08:17:08.000Z | 2022-03-13T09:57:09.000Z |
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
# route to display configured Prometheus metrics
# note that you will need to set up custom metric observers for your app
@app.route('/metrics')
def prometheus_metrics():
    """Expose all registered Prometheus metrics in the text exposition format."""
    content_type = 'text/plain; version=0.0.4; charset=utf-8'
    return Response(generate_latest(), mimetype=content_type)
# Prometheus Counter tracking requests per (HTTP method, endpoint) pair.
# usage:
#   @app.route('/example')
#   @prometheus.track_requests
#   def example():
#       pass
# Note: typo "specififed" in the exported help string fixed to "specified".
route_counter = Counter('requests_for_routes', 'Number of requests for specified routes', ['method', 'endpoint'])


def track_requests(route):
    """Decorator that increments ``route_counter`` on every call to *route*.

    Labels each increment with the request's HTTP method and path, then
    delegates to the wrapped view unchanged.
    """
    @wraps(route)
    def wrapper(*args, **kwargs):
        route_counter.labels(method=request.method, endpoint=str(request.path)).inc()
        return route(*args, **kwargs)
    return wrapper
| 31.5625 | 114 | 0.708911 |
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
@app.route('/metrics')
def prometheus_metrics():
MIMETYPE = 'text/plain; version=0.0.4; charset=utf-8'
return Response(generate_latest(), mimetype=MIMETYPE)
route_counter = Counter('requests_for_routes', 'Number of requests for specififed routes', ['method', 'endpoint'])
def track_requests(route):
@wraps(route)
def wrapper(*args, **kwargs):
route_labels = {
"method": request.method,
"endpoint": str(request.path)
}
route_counter.labels(**route_labels).inc()
return route(*args, **kwargs)
return wrapper
| true | true |
f72aa29a2c3502ea40faf7f916df3b0ead8ebf1a | 1,577 | py | Python | xl2code/parsers/direct_parser.py | youlanhai/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 47 | 2017-06-23T07:47:50.000Z | 2022-03-07T22:36:19.000Z | xl2code/parsers/direct_parser.py | twjitm/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 1 | 2019-03-12T06:12:50.000Z | 2019-04-03T00:50:01.000Z | xl2code/parsers/direct_parser.py | twjitm/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 23 | 2017-05-12T07:46:07.000Z | 2022-01-22T03:19:50.000Z | # -*- coding: utf-8 -*-
import traceback
import xlsconfig
import util
from tps import tp0, convention
from base_parser import ConverterInfo, BaseParser
# 利用Excel表头描述,进行导表,不需要转换器
class DirectParser(BaseParser):
    """Parser driven directly by the Excel sheet's own header description.

    The sheet embeds a field-name row and a type row, so no external
    converter module is required: converters are built from those rows.
    """

    def __init__(self, filename, module, sheet_index=0):
        super(DirectParser, self).__init__(filename, module, sheet_index)
        # Row indices (from project config) of the field-name and type rows.
        self.field_row_index = xlsconfig.SHEET_ROW_INDEX["field"]
        self.type_row_index = xlsconfig.SHEET_ROW_INDEX["type"]

    def parse_header(self, rows):
        """Build one ConverterInfo per column from the header/field/type rows.

        Columns with a duplicated field name or an unknown type are logged
        and left with a ``None`` converter. The first column is the key.
        """
        header_row = [self.extract_cell_value(cell) for cell in rows[self.header_row_index]]
        field_row = [self.extract_cell_value(cell) for cell in rows[self.field_row_index]]
        type_row = [self.extract_cell_value(cell) for cell in rows[self.type_row_index]]
        for col, field in enumerate(field_row):
            if field == "":
                break  # an empty field name terminates the header
            self.converters[col] = None
            if field in self.field_2_col:
                util.log_error("列名'%s'重复,列:%s", field, util.int_to_base26(col))
                continue
            self.field_2_col[field] = col
            header = header_row[col] or field
            # Renamed from `type` to avoid shadowing the builtin.
            col_type = type_row[col] or "String"
            try:
                method = convention.type2function(col_type)
            except Exception:
                # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
                # are no longer swallowed.
                util.log_error("无效的类型'%s',列:%s", col_type, util.int_to_base26(col))
                continue
            self.converters[col] = ConverterInfo((header, field, method, True))
            self.sheet_types[field] = (col, field, header, col_type)
        self.key_name = self.converters[0].field

    def parse_arguments(self, rows):
        """Parse the sheet's argument row, then record the multi-key flag."""
        super(DirectParser, self).parse_arguments(rows)
        self.is_multi_key = self.arguments.get("multiKey", False)
| 30.326923 | 87 | 0.733037 |
import traceback
import xlsconfig
import util
from tps import tp0, convention
from base_parser import ConverterInfo, BaseParser
class DirectParser(BaseParser):
def __init__(self, filename, module, sheet_index=0):
super(DirectParser, self).__init__(filename, module, sheet_index)
self.field_row_index = xlsconfig.SHEET_ROW_INDEX["field"]
self.type_row_index = xlsconfig.SHEET_ROW_INDEX["type"]
def parse_header(self, rows):
header_row = [self.extract_cell_value(cell) for cell in rows[self.header_row_index]]
field_row = [self.extract_cell_value(cell) for cell in rows[self.field_row_index]]
type_row = [self.extract_cell_value(cell) for cell in rows[self.type_row_index]]
for col, field in enumerate(field_row):
if field == "": break
self.converters[col] = None
if field in self.field_2_col:
util.log_error("列名'%s'重复,列:%s", field, util.int_to_base26(col))
continue
self.field_2_col[field] = col
header = header_row[col] or field
type = type_row[col] or "String"
method = None
try:
method = convention.type2function(type)
except:
util.log_error("无效的类型'%s',列:%s", type, util.int_to_base26(col))
continue
self.converters[col] = ConverterInfo((header, field, method, True))
self.sheet_types[field] = (col, field, header, type)
self.key_name = self.converters[0].field
return
def parse_arguments(self, rows):
super(DirectParser, self).parse_arguments(rows)
self.is_multi_key = self.arguments.get("multiKey", False)
| true | true |
f72aa4256943cc8676fb4c07209c50e85fa11d40 | 4,050 | py | Python | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | 1 | 2020-07-23T00:15:00.000Z | 2020-07-23T00:15:00.000Z | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | from aeropy.geometry.parametric import poly
from aeropy.structural.stable_solution import (structure, mesh_1D, properties,
boundary_conditions)
from aeropy.xfoil_module import output_reader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pickle
# Load the Abaqus reference solution: pickled primary variables and a
# text report of secondary variables. Use a context manager so the file
# handle is closed (the original `pickle.load(open(...))` leaked it).
with open("save.p", "rb") as abaqus_file:
    abaqus_primary = pickle.load(abaqus_file, encoding='latin1')
abaqus_secondary = output_reader('secondary_variables.txt')
# Sort the Abaqus displacement data by x-coordinate so it plots as a line.
abaqus_data = np.array(sorted(zip(abaqus_primary['C_U']['x'],
                                  abaqus_primary['C_U']['y'],
                                  abaqus_primary['U'][:, 0],
                                  abaqus_primary['U'][:, 1],)))
abq_x, abq_y, abq_u1, abq_u2 = abaqus_data.T
# Flip sign convention and shift y by 0.005 (presumably the section
# half-thickness — TODO confirm) to match the analytical beam frame.
abq_y = -abq_y + .005
abq_u2 = -abq_u2
# Convert logarithmic (true) strains into engineering strains.
abaqus_secondary['LE11'] = np.exp(np.array(abaqus_secondary['LE11'])) - 1
abaqus_secondary['LE12'] = np.exp(np.array(abaqus_secondary['LE12'])) - 1
abaqus_secondary['LE22'] = np.exp(np.array(abaqus_secondary['LE22'])) - 1
coefficients = np.array([0, 0, 0, 0])
bp = properties()
bc = boundary_conditions(load=np.array([[0, -1]]))
# Cubic deflection polynomial coefficients for the analytical solution,
# scaled by P/(6EI) for the applied concentrated load.
analytical_solution = bc.concentrated_load[0][1]/(6*bp.young*bp.inertia) * \
    np.array([-1, 3, 0, 0])
mesh = mesh_1D(mesh_n=10)
curve_parent = poly(a=[0, 0, 0, 0])          # undeformed (parent) shape
curve_child = poly(a=analytical_solution)    # deformed (child) shape
beam = structure(curve_parent, curve_child, mesh, bp, bc)
beam.calculate_position()
strain = beam.strain()
stress = beam.stress(loading_condition='plane_stress')
# Plot beam results
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.plot(beam.r_p[0], beam.r_p[1], label='parent')
plt.scatter(beam.r_p[0], beam.r_p[1], label='parent')
plt.plot(beam.r_c[0], beam.r_c[1], label='child')
plt.scatter(beam.r_c[0], beam.r_c[1], label='child')
plt.plot(abq_x, abq_y, label='Abaqus')
plt.title('Position')
plt.grid()
plt.legend()
# Plot beam results
plt.figure()
r1_p, r1_c = beam.calculate_position(diff='x1')
r2_p, r2_c = beam.calculate_position(diff='x2')
# plt.plot(beam.r_p[0], r1_p[0], label='$r_{1,1}^p$')
plt.plot(beam.r_p[0], r1_p[1], label='$r_{2,1}^p$')
# plt.plot(beam.r_p[0], r2_p[0], label='$r_{1,2}^p$')
plt.plot(beam.r_p[0], r2_p[1], label='$r_{2,2}^p$')
# plt.plot(beam.r_p[0], r1_c[0], label='$r_{1,1}^c$')
plt.plot(beam.r_p[0], r1_c[1], label='$r_{2,1}^c$')
# plt.plot(beam.r_p[0], r2_c[0], label='$r_{1,2}^c$')
plt.plot(beam.r_p[0], r2_c[1], label='$r_{2,2}^c$')
plt.title('Position gradients')
plt.grid()
plt.legend()
# Plot beam results
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.scatter(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.scatter(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.plot(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.plot(beam.mesh.x_p, u[1], label=r'$u_2$')
# plt.plot(abq_x, abq_u1, label=r'Abaqus $u_1$')
# plt.plot(abq_x, abq_u2, label=r'Abaqus $u_2$')
plt.title('Displacement diff')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, strain[0][0], label=r'$\epsilon_{11}$')
plt.plot(beam.mesh.x_p, strain[0][1], label=r'$\epsilon_{12}$')
plt.plot(beam.mesh.x_p, strain[1][1], label=r'$\epsilon_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE11'],
label=r'Abaqus $\epsilon_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE12'],
label=r'Abaqus $\epsilon_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE22'],
label=r'Abaqus $\epsilon_{22}$')
plt.title('Strain')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, stress[0][0], label=r'$\sigma_{11}$')
plt.plot(beam.mesh.x_p, stress[0][1], label=r'$\sigma_{12}$')
plt.plot(beam.mesh.x_p, stress[1][1], label=r'$\sigma_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S11'],
label=r'Abaqus $\sigma_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S12'],
label=r'Abaqus $\sigma_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S22'],
label=r'Abaqus $\sigma_{22}$')
plt.legend()
plt.title('Stress')
plt.show()
| 36.486486 | 78 | 0.655062 | from aeropy.geometry.parametric import poly
from aeropy.structural.stable_solution import (structure, mesh_1D, properties,
boundary_conditions)
from aeropy.xfoil_module import output_reader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pickle
abaqus_primary = pickle.load(open("save.p", "rb"), encoding='latin1')
abaqus_secondary = output_reader('secondary_variables.txt')
abaqus_data = np.array(sorted(zip(abaqus_primary['C_U']['x'],
abaqus_primary['C_U']['y'],
abaqus_primary['U'][:, 0],
abaqus_primary['U'][:, 1],)))
abq_x, abq_y, abq_u1, abq_u2 = abaqus_data.T
abq_y = -abq_y + .005
abq_u2 = -abq_u2
abaqus_secondary['LE11'] = np.exp(np.array(abaqus_secondary['LE11'])) - 1
abaqus_secondary['LE12'] = np.exp(np.array(abaqus_secondary['LE12'])) - 1
abaqus_secondary['LE22'] = np.exp(np.array(abaqus_secondary['LE22'])) - 1
coefficients = np.array([0, 0, 0, 0])
bp = properties()
bc = boundary_conditions(load=np.array([[0, -1]]))
analytical_solution = bc.concentrated_load[0][1]/(6*bp.young*bp.inertia) * \
np.array([-1, 3, 0, 0])
mesh = mesh_1D(mesh_n=10)
curve_parent = poly(a=[0, 0, 0, 0])
curve_child = poly(a=analytical_solution)
beam = structure(curve_parent, curve_child, mesh, bp, bc)
beam.calculate_position()
strain = beam.strain()
stress = beam.stress(loading_condition='plane_stress')
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.plot(beam.r_p[0], beam.r_p[1], label='parent')
plt.scatter(beam.r_p[0], beam.r_p[1], label='parent')
plt.plot(beam.r_c[0], beam.r_c[1], label='child')
plt.scatter(beam.r_c[0], beam.r_c[1], label='child')
plt.plot(abq_x, abq_y, label='Abaqus')
plt.title('Position')
plt.grid()
plt.legend()
plt.figure()
r1_p, r1_c = beam.calculate_position(diff='x1')
r2_p, r2_c = beam.calculate_position(diff='x2')
plt.plot(beam.r_p[0], r1_p[1], label='$r_{2,1}^p$')
plt.plot(beam.r_p[0], r2_p[1], label='$r_{2,2}^p$')
plt.plot(beam.r_p[0], r1_c[1], label='$r_{2,1}^c$')
plt.plot(beam.r_p[0], r2_c[1], label='$r_{2,2}^c$')
plt.title('Position gradients')
plt.grid()
plt.legend()
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.scatter(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.scatter(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.plot(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.plot(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.title('Displacement diff')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, strain[0][0], label=r'$\epsilon_{11}$')
plt.plot(beam.mesh.x_p, strain[0][1], label=r'$\epsilon_{12}$')
plt.plot(beam.mesh.x_p, strain[1][1], label=r'$\epsilon_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE11'],
label=r'Abaqus $\epsilon_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE12'],
label=r'Abaqus $\epsilon_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE22'],
label=r'Abaqus $\epsilon_{22}$')
plt.title('Strain')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, stress[0][0], label=r'$\sigma_{11}$')
plt.plot(beam.mesh.x_p, stress[0][1], label=r'$\sigma_{12}$')
plt.plot(beam.mesh.x_p, stress[1][1], label=r'$\sigma_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S11'],
label=r'Abaqus $\sigma_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S12'],
label=r'Abaqus $\sigma_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S22'],
label=r'Abaqus $\sigma_{22}$')
plt.legend()
plt.title('Stress')
plt.show()
| true | true |
f72aa529d44c10ff374c8d1442847033225d002f | 449 | py | Python | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 26 | 2018-04-24T20:16:18.000Z | 2022-03-09T14:03:28.000Z | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 4 | 2020-12-26T14:57:18.000Z | 2022-03-30T02:34:18.000Z | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 16 | 2019-05-20T15:41:56.000Z | 2022-03-23T17:59:53.000Z | import opfython.stream.loader as l
import opfython.stream.parser as p
from opfython.subgraphs import KNNSubgraph
# Defining an input file
input_file = 'data/boat.txt'
# Loading a .txt file to a dataframe
txt = l.load_txt(input_file)
# Parsing a pre-loaded dataframe
X, Y = p.parse_loader(txt)
# Creating a knn-subgraph structure
g = KNNSubgraph(X, Y)
# KNNSubgraph can also be directly created from a file
g = KNNSubgraph(from_file=input_file)
| 23.631579 | 54 | 0.772829 | import opfython.stream.loader as l
import opfython.stream.parser as p
from opfython.subgraphs import KNNSubgraph
input_file = 'data/boat.txt'
txt = l.load_txt(input_file)
X, Y = p.parse_loader(txt)
g = KNNSubgraph(X, Y)
g = KNNSubgraph(from_file=input_file)
| true | true |
f72aa5fc954c18cf553f3deb747a7fd96e64bef0 | 2,664 | py | Python | ishuhui/controllers/admin.py | Nayak-cyber/flask_ishuhui | 34352da462d4999bc7788c87773001312a213b20 | [
"MIT"
] | 192 | 2017-08-27T13:56:37.000Z | 2022-03-09T00:59:14.000Z | ishuhui/controllers/admin.py | Soumi7/flask_ishuhui | a3444b3679c45d5ba94c5c9a66551207eff1a646 | [
"MIT"
] | null | null | null | ishuhui/controllers/admin.py | Soumi7/flask_ishuhui | a3444b3679c45d5ba94c5c9a66551207eff1a646 | [
"MIT"
] | 54 | 2017-08-28T01:04:04.000Z | 2021-07-07T17:27:50.000Z | from flask import Blueprint, render_template, current_app, session
from flask import abort, jsonify
from flask_login import current_user
import ishuhui.tasks.task as task
from ..models.chapter import Chapter
from ..models.comic import Comic
from ..tasks.celery_task import refresh_chapters_task
bp_admin = Blueprint('admin', __name__, url_prefix='/admin')
@bp_admin.before_request
def login():
    # Gate every /admin route: unauthenticated users get a 403 response.
    if not current_user.is_authenticated:
        abort(403)
@bp_admin.route('/mange', methods=['GET'])
def mange():
    """Render the admin management dashboard with counts and task status."""
    current_task_id = session.get('task_id')
    return render_template(
        'mange.html',
        chapter_count=Chapter.query.count(),
        comic_count=Comic.query.count(),
        comics=Comic.query.all(),
        task_id=current_task_id,
        enable_celery=current_app.config['ENABLE_CELERY'],
        running=current_task_id is not None,
    )
@bp_admin.route('/refresh_comics')
def refresh_comics():
    """Run a synchronous comic-list refresh and return its result as JSON."""
    result = task.refresh_comics()
    return jsonify(result)
@bp_admin.route('/refresh_chapters')
def refresh_chapters():
    """Refresh chapter data, asynchronously via Celery when enabled.

    Returns the Celery task id (for polling /tasks/status/<id>), a 400
    response if a refresh task is still running, or the synchronous
    refresh result as JSON when Celery is disabled.
    """
    if current_app.config['ENABLE_CELERY']:
        task_id = session.get('task_id')
        if task_id is not None:
            result = refresh_chapters_task.AsyncResult(task_id)
            # A task that has not finished must not be restarted.
            if result.state not in ('SUCCESS', 'FAILURE'):
                return 'Already running', 400
        # No task yet, or the previous one finished: start a fresh refresh.
        # (The original duplicated this start-task code in both branches.)
        t = refresh_chapters_task.apply_async()
        session['task_id'] = t.id
        return session['task_id']
    return jsonify(task.refresh_chapters())
@bp_admin.route('/tasks/status/<task_id>')
def task_status(task_id):
    """Report the state and progress of a background refresh task as JSON."""
    result = refresh_chapters_task.AsyncResult(task_id)
    if result.state == 'PENDING':
        # No progress info yet (NOTE: Celery also reports unknown ids as PENDING).
        response = {
            'state': result.state,
            'progress': 0,
        }
    elif result.state != 'FAILURE':
        response = {
            'state': result.state,
            'progress': result.info.get('progress', 0),
        }
        if result.state == 'SUCCESS':
            # Task finished: drop the id so a new refresh can be started.
            session.pop('task_id')
        if 'result' in result.info:
            response['result'] = result.info['result']
    else:
        # something went wrong in the background job
        session.pop('task_id')
        response = {
            'state': result.state,
            'progress': 0,
            'status': str(result.info),  # this is the exception raised
        }
    return jsonify(response)
@bp_admin.route('/refresh_comic_images')
def refresh_comic_images():
    """Run a synchronous comic-image refresh and return its result as JSON."""
    result = task.refresh_comic_images()
    return jsonify(result)
| 32.487805 | 77 | 0.614489 | from flask import Blueprint, render_template, current_app, session
from flask import abort, jsonify
from flask_login import current_user
import ishuhui.tasks.task as task
from ..models.chapter import Chapter
from ..models.comic import Comic
from ..tasks.celery_task import refresh_chapters_task
bp_admin = Blueprint('admin', __name__, url_prefix='/admin')
@bp_admin.before_request
def login():
if not current_user.is_authenticated:
abort(403)
@bp_admin.route('/mange', methods=['GET'])
def mange():
return render_template('mange.html', chapter_count=Chapter.query.count(),
comic_count=Comic.query.count(),
comics=Comic.query.all(),
task_id=session.get('task_id'),
enable_celery=current_app.config['ENABLE_CELERY'],
running=session.get('task_id') is not None)
@bp_admin.route('/refresh_comics')
def refresh_comics():
return jsonify(task.refresh_comics())
@bp_admin.route('/refresh_chapters')
def refresh_chapters():
if current_app.config['ENABLE_CELERY']:
if session.get('task_id') is None:
t = refresh_chapters_task.apply_async()
session['task_id'] = t.id
return session['task_id']
else:
result = refresh_chapters_task.AsyncResult(session['task_id'])
if result.state == 'SUCCESS' or result.state == 'FAILURE':
t = refresh_chapters_task.apply_async()
session['task_id'] = t.id
return session['task_id']
return 'Already running', 400
return jsonify(task.refresh_chapters())
@bp_admin.route('/tasks/status/<task_id>')
def task_status(task_id):
result = refresh_chapters_task.AsyncResult(task_id)
if result.state == 'PENDING':
response = {
'state': result.state,
'progress': 0,
}
elif result.state != 'FAILURE':
response = {
'state': result.state,
'progress': result.info.get('progress', 0),
}
if result.state == 'SUCCESS':
session.pop('task_id')
if 'result' in result.info:
response['result'] = result.info['result']
else:
session.pop('task_id')
response = {
'state': result.state,
'progress': 0,
'status': str(result.info),
}
return jsonify(response)
@bp_admin.route('/refresh_comic_images')
def refresh_comic_images():
return jsonify(task.refresh_comic_images())
| true | true |
f72aa61a7732c6e419535219feafdd67e09bdfe5 | 5,711 | py | Python | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import json
import time
import logging
import traceback
from core import Messages, EncryptedConnection, Gatekeeper
from threads import Heartbeat, EventWatcher
class GatekeeperApp(object):
    """Gatekeeper daemon: pulls door configuration from the Keymaster and
    keeps the Heartbeat / EventWatcher worker threads alive."""

    def run(self, config):
        """Run until interrupted.

        Args:
            config: dict loaded from gw_config.json plus the command-line
                flags 'initialSync', 'syncClocks' and 'clearCodes'.
        """
        try:
            logging.info("Starting up Gatekeeper...")
            gatekeeper = Gatekeeper(config)
            connection = gatekeeper.get_connection()

            # Sync our system clocks
            gatekeeper.set_system_clock()

            # Test the connection encryption
            if gatekeeper.test_keymaster_connection():
                logging.info("Keymaster encrypted connection successfull!")

            # Pull the configuration
            gatekeeper.configure_doors()
            if len(gatekeeper.doors) == 0:
                logging.error("No doors to program. Exiting")
                return
            logging.info("Configured %d doors" % len(gatekeeper.doors))

            # Set the time on each door
            if config['syncClocks']:
                gatekeeper.sync_clocks()

            # Clear out all the door codes if requested
            if config['clearCodes']:
                gatekeeper.clear_all_codes()

            # Pull new data if requested
            # (the dead local 'initialSync = True' was removed; only
            # config['initialSync'] is ever read)
            if config['initialSync']:
                gatekeeper.pull_door_codes()

            try:
                # Start with a clean bowl
                sys.stdout.flush()

                heartbeat = None
                event_watcher = None
                hb_conn_err = False
                while True:
                    # Keep our heartbeat alive
                    if not heartbeat or not heartbeat.is_alive():
                        hb_conn_err = False
                        if heartbeat and heartbeat.error:
                            try:
                                # Heartbeat errors can come from a poor connection to the Keymaster
                                # In cases like these we need to keep retrying to send the log up
                                gatekeeper.send_gatekeper_log("Heartbeat: " + str(heartbeat.error))
                            except Exception as e:
                                hb_conn_err = True
                                logging.warning("Unable to report hearbeat error!: %s" % str(e))
                                time.sleep(5)
                        if not hb_conn_err:
                            logging.info("Starting Heartbeat...")
                            poll_delay = config.get('KEYMASTER_POLL_DELAY_SEC', 5)
                            heartbeat = Heartbeat(connection, poll_delay)
                            # daemon threads let the process exit without a join;
                            # .daemon replaces the deprecated setDaemon().
                            heartbeat.daemon = True
                            heartbeat.start()

                    # Keep our event watcher alive
                    if not event_watcher or not event_watcher.is_alive():
                        if event_watcher and event_watcher.error:
                            gatekeeper.send_gatekeper_log("EventWatcher: " + str(event_watcher.error))
                            time.sleep(5)
                        logging.info("Starting Event Watcher...")
                        poll_delay = config.get('EVENT_POLL_DELAY_SEC', 10)
                        event_watcher = EventWatcher(gatekeeper, poll_delay)
                        event_watcher.daemon = True
                        event_watcher.start()

                    if heartbeat.new_data:
                        gatekeeper.pull_door_codes()
                        heartbeat.all_clear()

                    if event_watcher.new_data:
                        event_logs = gatekeeper.pull_event_logs()
                        gatekeeper.push_event_logs(event_logs)
                        event_watcher.all_clear()

                    time.sleep(.1)
            except KeyboardInterrupt:
                logging.info(" Keyboard Interupt!")
                logging.info("Shutting down Heartbeat...")
                if heartbeat and heartbeat.is_alive():
                    heartbeat.stop()

                logging.info("Shutting down Event Watcher...")
                if event_watcher and event_watcher.is_alive():
                    event_watcher.stop()
        except Exception as e:
            traceback.print_exc()
            logging.error("Error: %s" % str(e))
if __name__ == "__main__":
    # Load the on-disk configuration.
    with open('gw_config.json', 'r') as config_file:
        config = json.load(config_file)

    # Fold command-line switches into the configuration.
    for key, flag in (('initialSync', "--sync"),
                      ('syncClocks', "--set-time"),
                      ('clearCodes', "--clear-all")):
        config[key] = flag in sys.argv
    if "--debug" in sys.argv:
        config['DEBUG'] = True

    # Route log output; quiet the chatty 'requests' logger.
    if config.get('DEBUG', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=log_level)
    logging.getLogger("requests").setLevel(logging.WARNING)

    # Hand control to the application.
    GatekeeperApp().run(config)
# Copyright 2019 Office Nomads LLC (http://www.officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 42.93985 | 583 | 0.555244 |
import sys
import json
import time
import logging
import traceback
from core import Messages, EncryptedConnection, Gatekeeper
from threads import Heartbeat, EventWatcher
class GatekeeperApp(object):
    """Gatekeeper daemon: pulls door configuration from the Keymaster and
    keeps the Heartbeat / EventWatcher worker threads alive."""
    def run(self, config):
        """Run until interrupted; config comes from gw_config.json plus
        the command-line flags 'initialSync'/'syncClocks'/'clearCodes'."""
        try:
            logging.info("Starting up Gatekeeper...")
            gatekeeper = Gatekeeper(config)
            connection = gatekeeper.get_connection()
            # Sync the system clock before talking to the doors.
            gatekeeper.set_system_clock()
            # Verify the encrypted Keymaster connection.
            if gatekeeper.test_keymaster_connection():
                logging.info("Keymaster encrypted connection successfull!")
            # Pull the door configuration; nothing to do without doors.
            gatekeeper.configure_doors()
            if len(gatekeeper.doors) == 0:
                logging.error("No doors to program. Exiting")
                return
            logging.info("Configured %d doors" % len(gatekeeper.doors))
            if config['syncClocks']:
                gatekeeper.sync_clocks()
            if config['clearCodes']:
                gatekeeper.clear_all_codes()
            # NOTE(review): this local is never read; config['initialSync']
            # below is what actually controls the sync.
            initialSync = True
            if config['initialSync']:
                gatekeeper.pull_door_codes()
            try:
                sys.stdout.flush()
                heartbeat = None
                event_watcher = None
                hb_conn_err = False
                while True:
                    # Restart the heartbeat thread whenever it has died.
                    if not heartbeat or not heartbeat.is_alive():
                        hb_conn_err = False
                        if heartbeat and heartbeat.error:
                            try:
                                # Report the previous failure; this can itself
                                # fail on a poor Keymaster connection.
                                gatekeeper.send_gatekeper_log("Heartbeat: " + str(heartbeat.error))
                            except Exception as e:
                                hb_conn_err = True
                                logging.warning("Unable to report hearbeat error!: %s" % str(e))
                                time.sleep(5)
                        if not hb_conn_err:
                            logging.info("Starting Heartbeat...")
                            poll_delay = config.get('KEYMASTER_POLL_DELAY_SEC', 5)
                            heartbeat = Heartbeat(connection, poll_delay)
                            # NOTE(review): setDaemon() is deprecated; prefer
                            # heartbeat.daemon = True.
                            heartbeat.setDaemon(True)
                            heartbeat.start()
                    # Restart the event watcher whenever it has died.
                    if not event_watcher or not event_watcher.is_alive():
                        if event_watcher and event_watcher.error:
                            gatekeeper.send_gatekeper_log("EventWatcher: " + str(event_watcher.error))
                            time.sleep(5)
                        logging.info("Starting Event Watcher...")
                        poll_delay = config.get('EVENT_POLL_DELAY_SEC', 10)
                        event_watcher = EventWatcher(gatekeeper, poll_delay)
                        event_watcher.setDaemon(True)
                        event_watcher.start()
                    # Worker threads raise flags; service them here.
                    if heartbeat.new_data:
                        gatekeeper.pull_door_codes()
                        heartbeat.all_clear()
                    if event_watcher.new_data:
                        event_logs = gatekeeper.pull_event_logs()
                        gatekeeper.push_event_logs(event_logs)
                        event_watcher.all_clear()
                    time.sleep(.1)
            except KeyboardInterrupt:
                # Graceful shutdown of both worker threads on Ctrl-C.
                logging.info(" Keyboard Interupt!")
                logging.info("Shutting down Heartbeat...")
                if heartbeat and heartbeat.is_alive():
                    heartbeat.stop()
                logging.info("Shutting down Event Watcher...")
                if event_watcher and event_watcher.is_alive():
                    event_watcher.stop()
        except Exception as e:
            traceback.print_exc()
            logging.error("Error: %s" % str(e))
if __name__ == "__main__":
    # Load the on-disk configuration.
    with open('gw_config.json', 'r') as f:
        config = json.load(f)
    # Fold command-line switches into the configuration.
    config['initialSync'] = "--sync" in sys.argv
    config['syncClocks'] = "--set-time" in sys.argv
    config['clearCodes'] = "--clear-all" in sys.argv
    if "--debug" in sys.argv:
        config['DEBUG'] = True
    # Route log output; quiet the chatty 'requests' logger.
    log_level = logging.DEBUG if config.get('DEBUG', False) else logging.INFO
    logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=log_level)
    logging.getLogger("requests").setLevel(logging.WARNING)
    # Hand control to the application.
    app = GatekeeperApp()
    app.run(config)
| true | true |
f72aa63243daf6a11e454d0ec669bad1d564b255 | 4,178 | py | Python | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | 32 | 2020-10-16T17:48:04.000Z | 2021-06-16T06:14:31.000Z | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | null | null | null | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | 19 | 2020-10-16T17:13:27.000Z | 2021-05-26T02:44:56.000Z | from .dataSource import DataSource
from .trend import TrendAnalyze
from jqdatasdk import *
from jqdatasdk.api import get_fundamentals, get_industry_stocks, get_security_info
from jqdatasdk.utils import query
import talib
from datetime import datetime, timedelta
import json
import logging
from sqlalchemy.orm.query import Query
from .store import MongoDB
analyzePeriod = 5
longArrangeLimit = 5
ema20NegativeThreshold = -0.05
nearMovingAverageThreshold = 0.003
class MarketBreadth:
    """Market-breadth statistics: the share of stocks in a universe that
    close above their 20-day EMA, logged for recent sessions."""

    def __init__(self):
        super().__init__()

    def report_daily_first_level_market_breadth(self):
        """Report breadth for the HS300 index and the 11 first-level JQ
        sectors:

        1. 300 index
        2. HY001 energy
        3. HY002 meterial
        4. HY003 industry
        5. HY005 daily consume
        6. HY006 medical
        7. HY007 financial
        8. HY008 information&technology
        9. HY009 telecommunications
        10. HY010 public utilities
        11. HY011 real estate
        """
        logging.info('report first level market breadth')
        logging.info('HS300 index')
        stocks = DataSource.query_index_stocks('000300.XSHG')
        self.get_market_breadth(stocks)
        codes = {
            'HY001': '能源',
            'HY002': '材料',
            'HY003': '工业',
            'HY004': '可选消费',
            'HY005': '日常消费',
            'HY006': '医疗保健',
            'HY007': '金融',
            'HY008': '信息技术',
            'HY009': '电信服务',
            'HY010': '公共事业',
            'HY011': '房地产'
        }
        for k, v in codes.items():
            self.report_market_breadth(k, v)

    def report_daily_second_level_market_breadth(self):
        """Report breadth for selected second-level JQ industries.

        JQ行业: https://www.joinquant.com/help/api/help?name=plateData#%E8%81%9A%E5%AE%BD%E8%A1%8C%E4%B8%9A
        TODO: 清洁能源板块,光伏,电动车
        """
        codes = {
            'HY477': '啤酒',
            'HY478': '白酒',
            'HY479': '软饮料',
            'HY481': '食品加工和肉类',
            'HY504': '人寿与健康保险',
            'HY523': '半导体设备',
            'HY524': '半导体产品',
            'HY446': '消费电子',
            'HY572': '中药',
            'HY491': '生物科技',
            'HY492': '西药',
            'HY485': '医疗保健设备',
            'HY486': '医疗保健用品',
            'HY487': '保健护理产品经销商',
            'HY488': '保健护理服务',
            'HY435': '航空',
            'HY439': '机场服务',
            'HY449': '家用电器',
            'HY454': '鞋类',
            'HY493': '多元化银行',
            'HY494': '区域性银行',
            'HY496': '多元化金融',
            'HY501': '投资银行业与经纪业',
            'HY505': '多元化保险',
            'HY444': '汽车制造',
            'HY445': '摩托车制造',
            'HY576': '汽车零售',
            'HY426': '建筑机械与重型卡车',
            'HY466': '互联网零售',
            'HY601': '新能源发电业'
        }
        logging.info('report second level market breadth')
        for k, v in codes.items():
            self.report_market_breadth(k, v, enableDetail=False)

    def report_market_breadth(self, code, description, enableDetail=False):
        """Log breadth for one industry code; optionally list member names."""
        logging.info('report {} {}'.format(code, description))
        stocks = DataSource.query_industry_stocks(code)
        for it in stocks:
            if enableDetail:
                logging.info(DataSource.query_security_info(it)
                             ['display_name'])
        self.get_market_breadth(stocks)

    def get_market_breadth(self, stocks=None, period=analyzePeriod):
        """Log, for the last `period` sessions, the fraction of `stocks`
        closing above their 20-day EMA.

        The previous mutable default `stocks=[]` is replaced with the
        None sentinel; an empty universe is now a no-op instead of a
        crash on `res[-period:]` / division by zero.
        """
        if not stocks:
            return
        total = None
        for it in stocks:
            price = DataSource.query_price_data(it)
            above = self.AboveEMA20(price.close)
            total = above if total is None else total.add(above)
        for idx, item in total[-period:].items():
            logging.info("{} : {:.2%}".format(idx, item / len(stocks)))

    def AboveEMA20(self, close):
        """Return a 1.0/0.0 indicator series: close above its 20-day EMA.

        NaN EMA values (the first 19 sessions) compare False and map to
        0, matching the original element-wise loop.
        """
        ema20 = talib.EMA(close, timeperiod=20)
        # Vectorized replacement for the per-element loop.
        return (close > ema20).astype(float)
# https://discourse.julialang.org/t/plotting-while-working-with-vs-code-remote-ssh/34309/7
# https://github.com/microsoft/vscode-remote-release/issues/452
| 30.057554 | 105 | 0.533748 | from .dataSource import DataSource
from .trend import TrendAnalyze
from jqdatasdk import *
from jqdatasdk.api import get_fundamentals, get_industry_stocks, get_security_info
from jqdatasdk.utils import query
import talib
from datetime import datetime, timedelta
import json
import logging
from sqlalchemy.orm.query import Query
from .store import MongoDB
analyzePeriod = 5
longArrangeLimit = 5
ema20NegativeThreshold = -0.05
nearMovingAverageThreshold = 0.003
class MarketBreadth:
    """Market-breadth statistics: the share of stocks in a universe that
    close above their 20-day EMA, logged for recent sessions."""
    def __init__(self):
        super().__init__()
    def report_daily_first_level_market_breadth(self):
        # Breadth for the HS300 index plus the 11 first-level JQ sectors.
        logging.info('report first level market breadth')
        logging.info('HS300 index')
        stocks = DataSource.query_index_stocks('000300.XSHG')
        self.get_market_breadth(stocks)
        codes = {
            'HY001': '能源',
            'HY002': '材料',
            'HY003': '工业',
            'HY004': '可选消费',
            'HY005': '日常消费',
            'HY006': '医疗保健',
            'HY007': '金融',
            'HY008': '信息技术',
            'HY009': '电信服务',
            'HY010': '公共事业',
            'HY011': '房地产'
        }
        for k, v in codes.items():
            self.report_market_breadth(k, v)
    def report_daily_second_level_market_breadth(self):
        # Breadth for selected second-level JQ industries.
        codes = {
            'HY477': '啤酒',
            'HY478': '白酒',
            'HY479': '软饮料',
            'HY481': '食品加工和肉类',
            'HY504': '人寿与健康保险',
            'HY523': '半导体设备',
            'HY524': '半导体产品',
            'HY446': '消费电子',
            'HY572': '中药',
            'HY491': '生物科技',
            'HY492': '西药',
            'HY485': '医疗保健设备',
            'HY486': '医疗保健用品',
            'HY487': '保健护理产品经销商',
            'HY488': '保健护理服务',
            'HY435': '航空',
            'HY439': '机场服务',
            'HY449': '家用电器',
            'HY454': '鞋类',
            'HY493': '多元化银行',
            'HY494': '区域性银行',
            'HY496': '多元化金融',
            'HY501': '投资银行业与经纪业',
            'HY505': '多元化保险',
            'HY444': '汽车制造',
            'HY445': '摩托车制造',
            'HY576': '汽车零售',
            'HY426': '建筑机械与重型卡车',
            'HY466': '互联网零售',
            'HY601': '新能源发电业'
        }
        logging.info('report second level market breadth')
        for k, v in codes.items():
            self.report_market_breadth(k, v, enableDetail=False)
    def report_market_breadth(self, code, description, enableDetail=False):
        # Log breadth for one industry code; optionally list member names.
        logging.info('report {} {}'.format(code, description))
        stocks = DataSource.query_industry_stocks(code)
        for it in stocks:
            if(enableDetail):
                logging.info(DataSource.query_security_info(it)
                    ['display_name'])
        self.get_market_breadth(stocks)
    def get_market_breadth(self, stocks=[], period=analyzePeriod):
        # NOTE(review): mutable default argument; harmless here (never
        # mutated) but the stocks=None sentinel idiom would be safer, and
        # an empty universe crashes on res[-period:] / len(stocks) == 0.
        res = None
        for it in stocks:
            price = DataSource.query_price_data(it)
            aboveEMA20 = self.AboveEMA20(price.close)
            if(res is None):
                res = aboveEMA20
            else:
                res = res.add(aboveEMA20)
        # Log the above-EMA fraction for the most recent `period` sessions.
        for idx, item in res[-period:].items():
            logging.info("{} : {:.2%}".format(idx, item/len(stocks)))
    def AboveEMA20(self, close):
        # 1/0 indicator series: close above its 20-day EMA
        # (NaN EMA for the first 19 sessions compares False -> 0).
        ema20 = talib.EMA(close, timeperiod=20)
        res = close.copy()
        for idx, item in close.items():
            if(item > ema20[idx]):
                res[idx] = 1
            else:
                res[idx] = 0
        return res
| true | true |
f72aa66497de214ed535e744d03638f19d86133b | 2,551 | py | Python | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(thispath),"helper"))
from MiscFxns import *
from StandardModules import *
def CompareEgy(EgyIn):
    """Return True when EgyIn matches the reference SCF energy
    (-224.912529687124) to within 1e-5.

    Uses the absolute difference; the original one-sided comparison also
    accepted any energy far *below* the reference.
    """
    return abs(EgyIn + 224.912529687124) < 0.00001
def CompareGrad(GradIn):
    """Return True when GradIn matches the reference SCF gradient.

    Each of the 27 Cartesian components must agree with the reference to
    within 1e-5 in absolute value; the original one-sided comparison let
    any component far *below* the reference pass.  A gradient of the
    wrong length is rejected instead of raising IndexError.
    """
    CorrectGrad = [
        0.00631057813355, 0.00571458363554, 0.05476152065996,
        0.02287072160272, -0.0002840915734, -0.03359062789176,
        -0.02457654725095, -0.00435313214139, -0.02443656592336,
        -0.02033326759132, -0.04939904659428, -0.00601012407546,
        0.01536321804528, 0.02452313009004, -0.01889869345071,
        0.0056070168479, 0.02707750704665, 0.03157680066598,
        0.01965867456494, 0.03636269982351, -0.03762798149958,
        -0.03166475907529, -0.02714461080685, 0.00193798500615,
        0.00676436472219, -0.01249703947853, 0.03228768650336]
    if len(GradIn) != len(CorrectGrad):
        return False
    return all(abs(ref - got) < 0.00001
               for ref, got in zip(CorrectGrad, GradIn))
def Run(mm):
    """Exercise the PSR_SCF module on a water trimer: energy via Deriv(0)
    and Energy(), gradient via Deriv(1) and Gradient().

    Args:
        mm: the psr module administrator used to load and fetch modules.
    """
    try:
        tester = psr.testing.Tester("Testing SCF")
        tester.print_header()
        LoadDefaultModules(mm)
        mm.change_option("PSR_SCF","BASIS_SET","sto-3g")
        MyMod=mm.get_module("PSR_SCF",0)
        mol=psr.system.MakeSystem("""
        0 1
        O 1.2361419 1.0137761 -0.0612424
        H 0.5104418 0.8944555 0.5514190
        H 1.9926927 1.1973129 0.4956931
        O -0.9957202 0.0160415 1.2422556
        H -1.4542703 -0.5669741 1.8472817
        H -0.9377950 -0.4817912 0.4267562
        O -0.2432343 -1.0198566 -1.1953808
        H 0.4367536 -0.3759433 -0.9973297
        H -0.5031835 -0.8251492 -2.0957959
        """)
        mol = ApplyBasis(mol,"sto-3g","sto-3g")
        wfn=psr.datastore.Wavefunction()
        wfn.system=mol

        NewWfn,Egy=MyMod.Deriv(0,wfn)
        tester.test("Testing Energy via Deriv(0)", True, CompareEgy, Egy[0])
        # Fixed: 'MyModenergy(wfn)' was a NameError; the energy entry point
        # is presumably MyMod.Energy(), matching the Deriv/Gradient casing
        # and the test label below -- confirm against the psr API.
        NewWfn,Egy=MyMod.Energy(wfn)
        tester.test("Testing Energy via Energy()", True, CompareEgy, Egy)
        NewWfn,Grad=MyMod.Deriv(1,wfn)
        tester.test("Testing Gradient via Deriv(1)", True, CompareGrad, Grad)
        NewWfn,Grad=MyMod.Gradient(wfn)
        tester.test("Testing Gradient via Gradient()", True, CompareGrad, Grad)

        tester.print_results()
    except Exception as e:
        psr.output.Output("Caught exception in main handler\n")
        traceback.print_exc()
# Drive the SCF test under a module administrator, then shut psr down.
with psr.ModuleAdministrator() as mm:
    Run(mm)
psr.finalize()
| 33.565789 | 79 | 0.642101 |
import os
import sys
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(thispath),"helper"))
from MiscFxns import *
from StandardModules import *
def CompareEgy(EgyIn):
    # True when EgyIn is within 1e-5 of the reference energy -224.912529687124.
    # NOTE(review): one-sided comparison -- any energy far *below* the
    # reference also passes; abs() is probably intended. TODO confirm.
    return EgyIn+224.912529687124<0.00001
def CompareGrad(GradIn):
    # Reference SCF gradient: 27 Cartesian components (9 atoms).
    CorrectGrad=[
    0.00631057813355, 0.00571458363554, 0.05476152065996,
    0.02287072160272, -0.0002840915734, -0.03359062789176,
    -0.02457654725095, -0.00435313214139, -0.02443656592336,
    -0.02033326759132, -0.04939904659428, -0.00601012407546,
    0.01536321804528, 0.02452313009004, -0.01889869345071,
    0.0056070168479, 0.02707750704665, 0.03157680066598,
    0.01965867456494, 0.03636269982351, -0.03762798149958,
    -0.03166475907529, -0.02714461080685, 0.00193798500615,
    0.00676436472219, -0.01249703947853, 0.03228768650336]
    AllGood=True
    # NOTE(review): one-sided comparison -- components far *below* the
    # reference still pass; abs() is probably intended. TODO confirm.
    for i in range(0,27):
        AllGood=AllGood and CorrectGrad[i]-GradIn[i]<0.00001
    return AllGood
def Run(mm):
    # Exercise the PSR_SCF module on a water trimer: energies via
    # Deriv(0)/Energy(), gradients via Deriv(1)/Gradient().
    try:
        tester = psr.testing.Tester("Testing SCF")
        tester.print_header()
        LoadDefaultModules(mm)
        mm.change_option("PSR_SCF","BASIS_SET","sto-3g")
        MyMod=mm.get_module("PSR_SCF",0)
        mol=psr.system.MakeSystem("""
        0 1
        O 1.2361419 1.0137761 -0.0612424
        H 0.5104418 0.8944555 0.5514190
        H 1.9926927 1.1973129 0.4956931
        O -0.9957202 0.0160415 1.2422556
        H -1.4542703 -0.5669741 1.8472817
        H -0.9377950 -0.4817912 0.4267562
        O -0.2432343 -1.0198566 -1.1953808
        H 0.4367536 -0.3759433 -0.9973297
        H -0.5031835 -0.8251492 -2.0957959
        """)
        mol = ApplyBasis(mol,"sto-3g","sto-3g")
        wfn=psr.datastore.Wavefunction()
        wfn.system=mol
        NewWfn,Egy=MyMod.Deriv(0,wfn)
        tester.test("Testing Energy via Deriv(0)", True, CompareEgy, Egy[0])
        # NOTE(review): 'MyModenergy' is undefined (NameError at runtime);
        # presumably a typo for MyMod.Energy(wfn). TODO confirm and fix.
        NewWfn,Egy=MyModenergy(wfn)
        tester.test("Testing Energy via Energy()", True, CompareEgy, Egy)
        NewWfn,Grad=MyMod.Deriv(1,wfn)
        tester.test("Testing Gradient via Deriv(1)", True, CompareGrad, Grad)
        NewWfn,Grad=MyMod.Gradient(wfn)
        tester.test("Testing Gradient via Gradient()", True, CompareGrad, Grad)
        tester.print_results()
    except Exception as e:
        psr.output.Output("Caught exception in main handler\n")
        traceback.print_exc()
# Drive the SCF test under a module administrator, then shut psr down.
with psr.ModuleAdministrator() as mm:
    Run(mm)
psr.finalize()
| true | true |
f72aa69c7ec0962f39e0a42210cdf6e5308bb185 | 801 | py | Python | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | 54 | 2018-10-28T07:18:31.000Z | 2022-03-08T20:30:40.000Z | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | null | null | null | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | 5 | 2018-10-28T14:43:53.000Z | 2020-04-26T19:52:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Onchere Bironga
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def read(filename):
    """Return the full contents of *filename* decoded as UTF-8 text."""
    # Explicit encoding: the platform default is not UTF-8 everywhere.
    with open(filename, "r", encoding="utf-8") as f:
        return f.read()
def write(filename, contents):
    """Write *contents* to *filename* as UTF-8 text, creating or truncating it."""
    # Explicit encoding: the platform default is not UTF-8 everywhere.
    with open(filename, "w+", encoding="utf-8") as f:
        f.write(contents)
| 30.807692 | 74 | 0.714107 |
def read(filename):
with open(filename, "r") as f:
return f.read()
def write(filename, contents):
with open(filename, "w+") as f:
f.write(contents)
| true | true |
f72aa75a0f0acb3039551b59174f7f22257880d1 | 31,650 | py | Python | credit_default/app/views.py | sandymule/Credit-Card-Default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 1 | 2017-05-20T06:08:05.000Z | 2017-05-20T06:08:05.000Z | credit_default/app/views.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | null | null | null | credit_default/app/views.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 2 | 2017-05-20T06:08:25.000Z | 2019-05-18T19:59:31.000Z | import logging
import json
import pandas as pd
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
from . import app, estimator, target_names
logger = logging.getLogger('app')
class PredictForm(Form):
    """Form collecting the credit-default model's input features.

    Field order matches the feature order assembled for the estimator in
    the view functions below.
    """
    Limit_bal = fields.DecimalField('Limit Balance:', places=2, validators=[Required()])
    # Categorical selectors; the integer codes are coerced to int and
    # passed straight to the model.
    Gender_list = [(1, "Male"), (2, "Female")]
    Gender = fields.SelectField("Gender", choices=Gender_list, coerce=int)
    Education_list = [(1, "Graduate school"), (2, "College"), (3, "High school"), (4, "Less than high school")]
    Education = fields.SelectField("Education", choices=Education_list, coerce=int)
    Marriage_list = [(1, "Married"), (2, "Single"), (3, "Separated, Divorced, or Widowed")]
    Marriage = fields.SelectField("Marriage", choices=Marriage_list, coerce=int)
    Age= fields.DecimalField('Age:', places=2, validators=[Required()])
    # Repayment-history features: percent paid N months ago (per labels).
    Percent_1_monthago = fields.DecimalField('Percent Paid 1 Month Ago:', places=2, validators=[Required()])
    Percent_2_monthago = fields.DecimalField('Percent Paid 2 Months Ago:', places=2, validators=[Required()])
    Percent_3_monthago = fields.DecimalField('Percent Paid 3 Months Ago:', places=2, validators=[Required()])
    Percent_4_monthago = fields.DecimalField('Percent Paid 4 Months Ago:', places=2, validators=[Required()])
    Percent_5_monthago = fields.DecimalField('Percent Paid 5 Months Ago:', places=2, validators=[Required()])
    Percent_6_monthago = fields.DecimalField('Percent Paid 6 Months Ago:', places=2, validators=[Required()])
    submit = fields.SubmitField('Submit')
@app.route('/',methods=('GET','POST'))
def predict():
    # Landing page only; the interactive form lives on /predict and /visualize.
    return render_template('homepage.html')
def _payment_taxonomy_rows():
    """Build the 64 payment-path records for the visualization.

    One record per Over/Under combination of payments 1-6, where payment
    i's threshold is (i-1)%.  Payment 6's "Under" state is labelled
    "Less Than 5%" and each record's name is its final taxonomy element.
    Iteration order (binary counting, "Over" first) reproduces the
    original hand-written 64-entry literal exactly.
    """
    from itertools import product

    rows = []
    for states in product(("Over", "Under"), repeat=6):
        taxonomy = []
        for month, state in enumerate(states, start=1):
            if month == 6 and state == "Under":
                label = "Payment 6 Less Than 5%"
            else:
                label = "Payment {} {} {}%".format(month, state, month - 1)
            taxonomy.append(label)
        rows.append({"name": taxonomy[-1], "taxonomy": taxonomy})
    return rows


@app.route('/visualize',methods=('GET','POST'))
def visualize():
    """Prediction + visualization page.

    On a valid POST, runs the default-risk estimator on the submitted
    features and builds the payment-taxonomy dataset consumed by the
    client-side visualization; otherwise renders the empty form.
    """
    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data

        # Coerce the Decimal form values to floats, in the feature order
        # the estimator expects.
        default_instance = [
            float(submitted_data['Limit_bal']),
            float(submitted_data['Gender']),
            float(submitted_data['Education']),
            float(submitted_data['Marriage']),
            float(submitted_data['Age']),
            float(submitted_data['Percent_1_monthago']),
            float(submitted_data['Percent_2_monthago']),
            float(submitted_data['Percent_3_monthago']),
            float(submitted_data['Percent_4_monthago']),
            float(submitted_data['Percent_5_monthago']),
            float(submitted_data['Percent_6_monthago']),
        ]
        result = estimator.predict(default_instance)[0]  # Target Predicted

        # The 64 taxonomy records were previously spelled out as a ~250
        # line literal; generate the identical data instead.
        df = pd.DataFrame(_payment_taxonomy_rows())
        datastuff = df.to_json(orient="records")
    else:
        print (form.data)
    return render_template('visualize.html',
                           form=form,
                           prediction=result, data=datastuff)
def _payment_taxonomy_rows():
    """Return the 64 taxonomy rows consumed by the front-end chart.

    Each row is ``{"name": <leaf label>, "taxonomy": [six labels]}`` covering
    every Over/Under combination of the six monthly payment-percentage
    thresholds (payment i compared against (i-1)%).  Row order matches the
    original hand-written table exactly: payment 1 varies slowest, payment 6
    fastest, and "Over" precedes "Under"/"Less Than" at every level.
    """
    from itertools import product  # stdlib; local import, used only here

    # Payments 1-5 are labelled "Over"/"Under"; payment 6 uses the
    # original's "Over 5%"/"Less Than 5%" wording.
    options = [
        ["Payment %d Over %d%%" % (i, i - 1), "Payment %d Under %d%%" % (i, i - 1)]
        for i in range(1, 6)
    ]
    options.append(["Payment 6 Over 5%", "Payment 6 Less Than 5%"])
    # The row name is the leaf (payment 6) label of each combination.
    return [{"name": combo[-1], "taxonomy": list(combo)}
            for combo in product(*options)]


@app.route('/predict', methods=('GET', 'POST'))
def index():
    """Prediction page.

    On a valid POST, reads the eleven form features, predicts the target
    with the module-level estimator, and builds the JSON taxonomy records
    for the visualization.  Otherwise (GET or invalid submit) it just
    (re)renders the form with no prediction.

    Renders predict.html with:
        form       -- the PredictForm instance
        prediction -- predicted target value, or None before submission
        data       -- JSON records string (empty list before submission)
    """
    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data
        # Feature order must match the order the estimator was trained on.
        feature_keys = [
            'Limit_bal', 'Gender', 'Education', 'Marriage', 'Age',
            'Percent_1_monthago', 'Percent_2_monthago', 'Percent_3_monthago',
            'Percent_4_monthago', 'Percent_5_monthago', 'Percent_6_monthago',
        ]
        default_instance = [float(submitted_data[key]) for key in feature_keys]
        # NOTE(review): a flat 1-D sample assumes an estimator API that
        # accepts single instances (modern scikit-learn wants a 2-D array)
        # -- confirm against how `estimator` was built.
        result = estimator.predict(default_instance)[0]  # predicted target
        # Replaces the original 64-row hand-written literal with an
        # equivalent generated table (same rows, same order, same columns).
        df = pd.DataFrame(_payment_taxonomy_rows())
        datastuff = df.to_json(orient="records")
    else:
        print(form.data)
    return render_template('predict.html',
                           form=form,
                           prediction=result, data=datastuff)
| 58.938547 | 156 | 0.576461 | import logging
import json
import pandas as pd
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
from . import app, estimator, target_names
logger = logging.getLogger('app')
class PredictForm(Form):
    """Credit-default prediction form.

    Collects the eleven features fed to the estimator: credit limit,
    demographic select fields (integer-coded), age, and the percent paid
    in each of the last six months.  Select-field codes presumably mirror
    the training dataset's encoding -- confirm against the model pipeline.
    """
    # Credit limit balance, two decimal places, required.
    Limit_bal = fields.DecimalField('Limit Balance:', places=2, validators=[Required()])
    # (code, label) choice lists; `coerce=int` converts submitted codes back to int.
    Gender_list = [(1, "Male"), (2, "Female")]
    Gender = fields.SelectField("Gender", choices=Gender_list, coerce=int)
    Education_list = [(1, "Graduate school"), (2, "College"), (3, "High school"), (4, "Less than high school")]
    Education = fields.SelectField("Education", choices=Education_list, coerce=int)
    Marriage_list = [(1, "Married"), (2, "Single"), (3, "Separated, Divorced, or Widowed")]
    Marriage = fields.SelectField("Marriage", choices=Marriage_list, coerce=int)
    Age= fields.DecimalField('Age:', places=2, validators=[Required()])
    # Percent of balance paid N months ago, most recent (1) through oldest (6).
    Percent_1_monthago = fields.DecimalField('Percent Paid 1 Month Ago:', places=2, validators=[Required()])
    Percent_2_monthago = fields.DecimalField('Percent Paid 2 Months Ago:', places=2, validators=[Required()])
    Percent_3_monthago = fields.DecimalField('Percent Paid 3 Months Ago:', places=2, validators=[Required()])
    Percent_4_monthago = fields.DecimalField('Percent Paid 4 Months Ago:', places=2, validators=[Required()])
    Percent_5_monthago = fields.DecimalField('Percent Paid 5 Months Ago:', places=2, validators=[Required()])
    Percent_6_monthago = fields.DecimalField('Percent Paid 6 Months Ago:', places=2, validators=[Required()])
    submit = fields.SubmitField('Submit')
@app.route('/', methods=('GET', 'POST'))
def predict():
    """Serve the static landing page of the application."""
    homepage = render_template('homepage.html')
    return homepage
@app.route('/visualize',methods=('GET','POST'))
def visualize():
datastuff = []
form = PredictForm()
result = None
if form.validate_on_submit():
submitted_data = form.data
Limit_bal = float(submitted_data['Limit_bal'])
Gender = float(submitted_data['Gender'])
Education = float(submitted_data['Education'])
Marriage = float(submitted_data['Marriage'])
Age = float(submitted_data['Age'])
Percent_1_monthago = float(submitted_data['Percent_1_monthago'])
Percent_2_monthago = float(submitted_data['Percent_2_monthago'])
Percent_3_monthago = float(submitted_data['Percent_3_monthago'])
Percent_4_monthago = float(submitted_data['Percent_4_monthago'])
Percent_5_monthago = float(submitted_data['Percent_5_monthago'])
Percent_6_monthago = float(submitted_data['Percent_6_monthago'])
default_instance = [Limit_bal, Gender, Education, Marriage, Age,
Percent_1_monthago, Percent_2_monthago, Percent_3_monthago,
Percent_4_monthago, Percent_5_monthago, Percent_6_monthago]
result = estimator.predict(default_instance)[0]
df = pd.DataFrame([{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
}
])
datastuff = df.to_json(orient="records")
else:
print (form.data)
return render_template('visualize.html',
form=form,
prediction=result, data=datastuff)
def _payment_taxonomy_records():
    """Build the 64 name/taxonomy records for the payment visualization.

    The records are the Cartesian product of two labels per payment month
    ("Over"/"Under" for months 1-5, "Over"/"Less Than" for month 6); each
    record's "name" is the final month's label. Enumeration order matches
    itertools.product (last dimension varies fastest), which reproduces
    the previous hand-written 64-entry literal exactly.
    """
    from itertools import product

    choices = [
        ("Payment %d Over %d%%" % (month, month - 1),
         "Payment %d Under %d%%" % (month, month - 1))
        for month in range(1, 6)
    ]
    # The final month's negative label reads "Less Than" rather than "Under".
    choices.append(("Payment 6 Over 5%", "Payment 6 Less Than 5%"))

    return [
        {"name": combo[-1], "taxonomy": list(combo)}
        for combo in product(*choices)
    ]


# PredictForm field names, in the exact order the estimator expects.
_PREDICT_FEATURES = (
    'Limit_bal', 'Gender', 'Education', 'Marriage', 'Age',
    'Percent_1_monthago', 'Percent_2_monthago', 'Percent_3_monthago',
    'Percent_4_monthago', 'Percent_5_monthago', 'Percent_6_monthago',
)


@app.route('/predict', methods=('GET', 'POST'))
def index():
    """Render the prediction page.

    On a valid POST, convert the submitted form values to a feature
    vector, run the estimator, and pass both the prediction and the
    taxonomy data (as JSON records) to the template. On GET or invalid
    submission, render the empty form.
    """
    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data
        # Convert form fields to floats, preserving the feature order.
        default_instance = [
            float(submitted_data[feature]) for feature in _PREDICT_FEATURES
        ]
        result = estimator.predict(default_instance)[0]
        # Previously a 64-entry hand-written literal; generated instead.
        df = pd.DataFrame(_payment_taxonomy_records())
        datastuff = df.to_json(orient="records")
    else:
        print(form.data)
    return render_template('predict.html',
                           form=form,
                           prediction=result, data=datastuff)
| true | true |
f72aa7ac988a87f3f873350a9bce3b67813e99a1 | 188 | py | Python | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name = 'distributions',
version = '0.2',
description = 'Gaussian distributions',
packages = ['distributions'],
zip_safe = False) | 26.857143 | 45 | 0.648936 | from setuptools import setup
setup(name = 'distributions',
version = '0.2',
description = 'Gaussian distributions',
packages = ['distributions'],
zip_safe = False) | true | true |
f72aa85d1d51fabcb355ee1ffa6f5c5410b545f6 | 27,086 | py | Python | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of function statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
ExpressionTargetTempVariableRef,
ExpressionTargetVariableRef,
StatementAssignmentVariable,
StatementReleaseVariable
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
StatementSpecialUnpackCheck
)
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinRefNodes import ExpressionBuiltinRef
from nuitka.nodes.CallNodes import ExpressionCallNoKeywords
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantNoneRef,
makeConstantRefNode
)
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.CoroutineNodes import (
ExpressionCoroutineObjectBody,
ExpressionMakeCoroutineObject
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionBody,
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef
)
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturn
)
from nuitka.nodes.ParameterSpecs import ParameterSpec
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from nuitka.tree import SyntaxErrors
from .Helpers import (
buildFrameNode,
buildNode,
buildNodeList,
detectFunctionBodyKind,
extractDocFromBody,
getKind,
makeDictCreationOrConstant,
makeStatementsSequenceFromStatement,
mangleName
)
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
def _insertFinalReturnStatement(function_statements_body, return_class,
                                source_ref):
    """Ensure the statements body ends in an explicit "return None".

    An empty body becomes a single return statement; a body that cannot
    be proven aborting gets the return appended. The "return_class"
    argument selects the kind of return node to create.
    """
    none_return = return_class(
        expression = ExpressionConstantNoneRef(source_ref = source_ref),
        source_ref = source_ref
    )

    if function_statements_body is None:
        return makeStatementsSequenceFromStatement(statement = none_return)

    if not function_statements_body.isStatementAborting():
        function_statements_body.setStatements(
            function_statements_body.getStatements() + (none_return,)
        )

    return function_statements_body
def buildFunctionNode(provider, node, source_ref):
    """Build the node tree for a "def" statement.

    Re-formulates the definition into: create a function object, wrap it
    with its decorators (applied innermost first, as the decorator list is
    built reversed), and assign the result to the function's name. A
    generator gets a dedicated generator body object; the visible function
    then merely returns the made generator object.
    """
    # Functions have way too many details, pylint: disable=R0912,R0914
    assert getKind(node) == "FunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    # Classify the body (plain function vs. generator) and collect flags.
    function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
      detectFunctionBodyKind(
        nodes = function_statement_nodes
    )
    outer_body, function_body, code_object = buildFunctionWithParsing(
        provider = provider,
        function_kind = function_kind,
        name = node.name,
        function_doc = function_doc,
        flags = flags,
        node = node,
        source_ref = source_ref
    )
    if function_kind == "Function":
        code_body = function_body
    elif function_kind == "Generator":
        # Generators get a dedicated code body; variable references from
        # the creator function are mirrored into it.
        code_body = ExpressionGeneratorObjectBody(
            provider = function_body,
            name = node.name,
            flags = flags,
            source_ref = source_ref
        )
        for variable in function_body.getVariables():
            code_body.getVariableForReference(variable.getName())
    else:
        assert False, function_kind
    if function_kind == "Generator":
        # The visible function only returns the made generator object
        # wrapping the real code body.
        function_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = StatementReturn(
                    expression = ExpressionMakeGeneratorObject(
                        generator_ref = ExpressionFunctionRef(
                            function_body = code_body,
                            source_ref = source_ref
                        ),
                        code_object = code_object,
                        source_ref = source_ref
                    ),
                    source_ref = source_ref
                )
            )
        )
    # Built reversed, so the application loop below wraps the innermost
    # (closest to "def") decorator first.
    decorators = buildNodeList(
        provider = provider,
        nodes = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider = provider,
        nodes = node.args.defaults,
        source_ref = source_ref
    )
    kw_defaults = buildParameterKwDefaults(
        provider = provider,
        node = node,
        function_body = function_body,
        source_ref = source_ref
    )
    function_statements_body = buildFrameNode(
        provider = code_body,
        nodes = function_statement_nodes,
        code_object = code_object,
        source_ref = source_ref
    )
    if function_kind == "Function":
        # TODO: Generators might have to raise GeneratorExit instead.
        function_statements_body = _insertFinalReturnStatement(
            function_statements_body = function_statements_body,
            return_class = StatementReturn,
            source_ref = source_ref
        )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    code_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = outer_body,
            source_ref = source_ref
        ),
        code_object = code_object,
        defaults = defaults,
        kw_defaults = kw_defaults,
        annotations = annotations,
        source_ref = source_ref
    )
    # Add the "staticmethod" decorator to __new__ methods if not provided.
    # CPython made these optional, but secretly applies them when it does
    # "class __new__". We add them earlier, so our optimization will see it.
    if node.name == "__new__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "staticmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "staticmethod",
                    source_ref = source_ref
                )
            )
    # Similarly, "__init_subclass__" is implicitly a classmethod on
    # Python3.6 or higher; add the decorator if not already present.
    if python_version >= 360 and \
       node.name == "__init_subclass__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "classmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "classmethod",
                    source_ref = source_ref
                )
            )
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called = decorator,
            args = ExpressionMakeTuple(
                elements = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref = source_ref
        ),
        source = decorated_function,
        source_ref = source_ref
    )
    if python_version >= 340:
        # Python3.4 or higher: remember the assignment target, used for
        # qualname setup of the function body.
        function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildAsyncFunctionNode(provider, node, source_ref):
    """Build the node tree for an "async def" statement.

    Re-formulated like a normal function, except the created function is
    a creator whose body returns a made coroutine object wrapping the
    real coroutine body.
    """
    # We are creating a function here that creates coroutine objects, with
    # many details each, pylint: disable=R0914
    assert getKind(node) == "AsyncFunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    # Only the flags are used here; the kind is forced to "Coroutine" below.
    _function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
      detectFunctionBodyKind(
        nodes = function_statement_nodes
    )
    creator_function_body, _, code_object = buildFunctionWithParsing(
        provider = provider,
        function_kind = "Coroutine",
        name = node.name,
        flags = (),
        function_doc = function_doc,
        node = node,
        source_ref = source_ref
    )
    function_body = ExpressionCoroutineObjectBody(
        provider = creator_function_body,
        name = node.name,
        flags = flags,
        source_ref = source_ref
    )
    # Built reversed, so the application loop below wraps the innermost
    # (closest to "async def") decorator first.
    decorators = buildNodeList(
        provider = provider,
        nodes = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider = provider,
        nodes = node.args.defaults,
        source_ref = source_ref
    )
    function_statements_body = buildFrameNode(
        provider = function_body,
        nodes = function_statement_nodes,
        code_object = code_object,
        source_ref = source_ref
    )
    function_statements_body = _insertFinalReturnStatement(
        function_statements_body = function_statements_body,
        return_class = StatementGeneratorReturn,
        source_ref = source_ref
    )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    function_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    kw_defaults = buildParameterKwDefaults(
        provider = provider,
        node = node,
        function_body = creator_function_body,
        source_ref = source_ref
    )
    # The creator function only returns the made coroutine object wrapping
    # the real code body.
    creator_function_body.setBody(
        makeStatementsSequenceFromStatement(
            statement = StatementReturn(
                expression = ExpressionMakeCoroutineObject(
                    coroutine_ref = ExpressionFunctionRef(
                        function_body = function_body,
                        source_ref = source_ref
                    ),
                    code_object = code_object,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            )
        )
    )
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = creator_function_body,
            source_ref = source_ref
        ),
        code_object = code_object,
        defaults = defaults,
        kw_defaults = kw_defaults,
        annotations = annotations,
        source_ref = source_ref
    )
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called = decorator,
            args = ExpressionMakeTuple(
                elements = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref = source_ref
        ),
        source = decorated_function,
        source_ref = source_ref
    )
    # Remember the assignment target, used for qualname setup of the
    # coroutine body.
    function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildParameterKwDefaults(provider, node, function_body, source_ref):
    """Build the default values for keyword-only arguments.

    Hides that keyword-only arguments are a Python3 only feature: on
    Python2 (and when there are no keyword-only parameters) the result
    is simply None, otherwise a dict creation node mapping parameter
    names to their built default value expressions.
    """
    if python_version < 300:
        return None

    kw_only_names = function_body.getParameters().getKwOnlyParameterNames()
    if not kw_only_names:
        return None

    keys = []
    values = []
    for param_name, default_node in zip(kw_only_names, node.args.kw_defaults):
        # Parameters without a default have a None placeholder here.
        if default_node is None:
            continue
        keys.append(
            makeConstantRefNode(
                constant = param_name,
                source_ref = source_ref
            )
        )
        values.append(buildNode(provider, default_node, source_ref))

    return makeDictCreationOrConstant(
        keys = keys,
        values = values,
        source_ref = source_ref
    )
def buildParameterAnnotations(provider, node, source_ref):
    """Build the annotations dictionary for a function definition.

    Hides that annotations are a Python3 only feature: returns None on
    Python2 and when nothing is annotated, otherwise a dict creation
    node keyed by (possibly mangled) parameter names, plus "return" for
    the return annotation where present.
    """
    # Too many branches, because there is too many cases, pylint: disable=R0912
    if python_version < 300:
        return None

    # Starting with Python 3.4, the names of parameters are mangled in
    # annotations as well.
    if python_version < 340:
        def _mangle(variable_name):
            return variable_name
    else:
        def _mangle(variable_name):
            return mangleName(variable_name, provider)

    keys = []
    values = []

    def _addAnnotation(key, value):
        keys.append(
            makeConstantRefNode(
                constant = _mangle(key),
                source_ref = source_ref,
                user_provided = True
            )
        )
        values.append(value)

    def _extractArg(arg):
        arg_kind = getKind(arg)
        if arg_kind == "Name":
            assert arg.annotation is None
        elif arg_kind == "arg":
            if arg.annotation is not None:
                _addAnnotation(
                    key = arg.arg,
                    value = buildNode(provider, arg.annotation, source_ref)
                )
        elif arg_kind == "Tuple":
            for element in arg.elts:
                _extractArg(element)
        else:
            assert False, arg_kind

    for arg in node.args.args:
        _extractArg(arg)
    for arg in node.args.kwonlyargs:
        _extractArg(arg)

    if python_version < 340:
        # Before 3.4, star argument annotations live in dedicated fields.
        if node.args.varargannotation is not None:
            _addAnnotation(
                key = node.args.vararg,
                value = buildNode(
                    provider, node.args.varargannotation, source_ref
                )
            )
        if node.args.kwargannotation is not None:
            _addAnnotation(
                key = node.args.kwarg,
                value = buildNode(
                    provider, node.args.kwargannotation, source_ref
                )
            )
    else:
        if node.args.vararg is not None:
            _extractArg(node.args.vararg)
        if node.args.kwarg is not None:
            _extractArg(node.args.kwarg)

    # Return value annotation (not there for lambdas).
    if getattr(node, "returns", None) is not None:
        _addAnnotation(
            key = "return",
            value = buildNode(provider, node.returns, source_ref)
        )

    if not keys:
        return None
    return makeDictCreationOrConstant(
        keys = keys,
        values = values,
        source_ref = source_ref
    )
def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags,
                             node, source_ref):
    """Create the function body with its parameters parsed.

    Returns (outer_body, function_body, code_object). Normally the outer
    body and the function body are one and the same object. When tuple
    parameters occur (allowed before Python3, "special args" here), an
    outer function is created that unpacks them into temp variables and
    calls an inner function holding the real body.
    """
    # This contains a complex re-formulation for nested parameter functions.
    # pylint: disable=R0914
    kind = getKind(node)
    assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind
    def extractArg(arg):
        # Normalize one argument to its (mangled) name; tuples are handled
        # separately via "special_args" below.
        if arg is None:
            return None
        elif type(arg) is str:
            return mangleName(arg, provider)
        elif getKind(arg) == "Name":
            return mangleName(arg.id, provider)
        elif getKind(arg) == "arg":
            return mangleName(arg.arg, provider)
        elif getKind(arg) == "Tuple":
            # These are to be re-formulated on the outside.
            assert False
        else:
            assert False, getKind(arg)
    special_args = {}
    def extractNormalArgs(args):
        # Like extractArg over a list, but tuple parameters are replaced by
        # placeholder names ".1", ".2", ... recorded in "special_args".
        normal_args = []
        for arg in args:
            if type(arg) is not str and getKind(arg) == "Tuple":
                special_arg_name = ".%d" % (len(special_args) + 1)
                special_args[special_arg_name] = arg.elts
                normal_args.append(special_arg_name)
            else:
                normal_args.append(extractArg(arg))
        return normal_args
    normal_args = extractNormalArgs(node.args.args)
    parameters = ParameterSpec(
        ps_name = name,
        ps_normal_args = normal_args,
        ps_kw_only_args = [
            extractArg(arg)
            for arg in
            node.args.kwonlyargs
        ]
        if python_version >= 300 else
        [],
        ps_list_star_arg = extractArg(node.args.vararg),
        ps_dict_star_arg = extractArg(node.args.kwarg),
        ps_default_count = len(node.args.defaults)
    )
    message = parameters.checkValid()
    if message is not None:
        SyntaxErrors.raiseSyntaxError(
            message,
            source_ref
        )
    code_object = CodeObjectSpec(
        co_name = name,
        co_kind = function_kind,
        co_varnames = parameters.getParameterNames(),
        co_argcount = parameters.getArgumentCount(),
        co_kwonlyargcount = parameters.getKwOnlyParameterCount(),
        co_has_starlist = parameters.getStarListArgumentName() is not None,
        co_has_stardict = parameters.getStarDictArgumentName() is not None
    )
    outer_body = ExpressionFunctionBody(
        provider = provider,
        name = name,
        flags = flags,
        doc = function_doc,
        parameters = parameters,
        source_ref = source_ref
    )
    if special_args:
        # Tuple parameters present: build the inner function and the
        # unpacking statements of the outer one.
        inner_name = name.strip("<>") + "$inner"
        inner_arg_names = []
        iter_vars = []
        values = []
        statements = []
        def unpackFrom(source, arg_names):
            # Unpack "source" via an iterator into fresh temp variables,
            # recursing for nested tuples; returns the variable accesses
            # in parameter order.
            accesses = []
            # NOTE(review): sub_special_index is incremented but never read.
            sub_special_index = 0
            iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars))
            iter_vars.append(iter_var)
            statements.append(
                StatementAssignmentVariable(
                    variable_ref = ExpressionTargetTempVariableRef(
                        variable = iter_var,
                        source_ref = source_ref
                    ),
                    source = ExpressionBuiltinIter1(
                        value = source,
                        source_ref = source_ref
                    ),
                    source_ref = source_ref
                )
            )
            for element_index, arg_name in enumerate(arg_names):
                if getKind(arg_name) == "Name":
                    inner_arg_names.append(arg_name.id)
                    arg_var = outer_body.allocateTempVariable(None, "tmp_" + arg_name.id)
                    statements.append(
                        StatementAssignmentVariable(
                            variable_ref = ExpressionTargetTempVariableRef(
                                variable = arg_var,
                                source_ref = source_ref
                            ),
                            source = ExpressionSpecialUnpack(
                                value = ExpressionTempVariableRef(
                                    variable = iter_var,
                                    source_ref = source_ref
                                ),
                                count = element_index + 1,
                                expected = len(arg_names),
                                source_ref = source_ref
                            ),
                            source_ref = source_ref
                        )
                    )
                    accesses.append(
                        ExpressionTempVariableRef(
                            variable = arg_var,
                            source_ref = source_ref
                        )
                    )
                elif getKind(arg_name) == "Tuple":
                    # Nested tuple parameter: recurse with the unpacked
                    # element as the new source.
                    accesses.extend(
                        unpackFrom(
                            source = ExpressionSpecialUnpack(
                                value = ExpressionTempVariableRef(
                                    variable = iter_var,
                                    source_ref = source_ref
                                ),
                                count = element_index + 1,
                                expected = len(arg_names),
                                source_ref = source_ref
                            ),
                            arg_names = arg_name.elts
                        )
                    )
                    sub_special_index += 1
                else:
                    assert False, arg_name
            # Verify the iterator is exhausted after taking all elements.
            statements.append(
                StatementSpecialUnpackCheck(
                    iterator = ExpressionTempVariableRef(
                        variable = iter_var,
                        source_ref = source_ref
                    ),
                    count = len(arg_names),
                    source_ref = source_ref
                )
            )
            return accesses
        # Build the argument values passed to the inner function; the
        # ".n" placeholders expand to their unpacked temp variables.
        for arg_name in parameters.getParameterNames():
            if arg_name.startswith('.'):
                source = ExpressionVariableRef(
                    variable_name = arg_name,
                    source_ref = source_ref
                )
                values.extend(
                    unpackFrom(source, special_args[arg_name])
                )
            else:
                values.append(
                    ExpressionVariableRef(
                        variable_name = arg_name,
                        source_ref = source_ref
                    )
                )
                inner_arg_names.append(arg_name)
        inner_parameters = ParameterSpec(
            ps_name = inner_name,
            ps_normal_args = inner_arg_names,
            ps_kw_only_args = (),
            ps_list_star_arg = None,
            ps_dict_star_arg = None,
            ps_default_count = None
        )
        function_body = ExpressionFunctionBody(
            provider = outer_body,
            name = inner_name,
            flags = flags,
            doc = function_doc,
            parameters = inner_parameters,
            source_ref = source_ref
        )
        # The outer body calls the inner function with the unpacked values
        # and returns its result.
        statements.append(
            StatementReturn(
                ExpressionFunctionCall(
                    function = ExpressionFunctionCreation(
                        function_ref = ExpressionFunctionRef(
                            function_body = function_body,
                            source_ref = source_ref
                        ),
                        code_object = code_object,
                        defaults = (),
                        kw_defaults = None,
                        annotations = None,
                        source_ref = source_ref
                    ),
                    values = values,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            )
        )
        # Wrap everything in try/finally so all temp variables used for
        # unpacking are released again.
        outer_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = makeTryFinallyStatement(
                    provider,
                    tried = statements,
                    final = [
                        StatementReleaseVariable(
                            variable = variable,
                            source_ref = source_ref
                        )
                        for variable in
                        outer_body.getTempVariables()
                    ] ,
                    source_ref = source_ref,
                    public_exc = False
                )
            )
        )
    else:
        function_body = outer_body
    return outer_body, function_body, code_object
def addFunctionVariableReleases(function):
    """Wrap the function body in try/finally releases of its own locals.

    Variables owned by another function (shared variables) are skipped,
    they are freed by function object attachment instead. When there is
    nothing to release, the body is left untouched.
    """
    assert function.isExpressionFunctionBody() or \
           function.isExpressionClassBody() or \
           function.isExpressionGeneratorObjectBody() or \
           function.isExpressionCoroutineObjectBody()

    # Everything is attached to the function definition source location.
    source_ref = function.getSourceReference()

    release_statements = [
        StatementReleaseVariable(
            variable   = variable,
            source_ref = source_ref
        )
        for variable in function.getLocalVariables()
        # Shared variables are freed by function object attachment.
        if variable.getOwner() is function
    ]

    if not release_statements:
        return

    body = function.getBody()

    if body.isStatementsFrame():
        body = makeStatementsSequenceFromStatement(statement = body)

    function.setBody(
        makeStatementsSequenceFromStatement(
            statement = makeTryFinallyStatement(
                provider   = function,
                tried      = body,
                final      = release_statements,
                source_ref = source_ref
            )
        )
    )
| 32.283671 | 96 | 0.558554 |
from nuitka.nodes.AssignNodes import (
ExpressionTargetTempVariableRef,
ExpressionTargetVariableRef,
StatementAssignmentVariable,
StatementReleaseVariable
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
StatementSpecialUnpackCheck
)
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinRefNodes import ExpressionBuiltinRef
from nuitka.nodes.CallNodes import ExpressionCallNoKeywords
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantNoneRef,
makeConstantRefNode
)
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.CoroutineNodes import (
ExpressionCoroutineObjectBody,
ExpressionMakeCoroutineObject
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionBody,
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef
)
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturn
)
from nuitka.nodes.ParameterSpecs import ParameterSpec
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from nuitka.tree import SyntaxErrors
from .Helpers import (
buildFrameNode,
buildNode,
buildNodeList,
detectFunctionBodyKind,
extractDocFromBody,
getKind,
makeDictCreationOrConstant,
makeStatementsSequenceFromStatement,
mangleName
)
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
def _insertFinalReturnStatement(function_statements_body, return_class,
                                source_ref):
    """Make sure the statements body ends with an explicit "return None".

    For an empty body, the return statement becomes the whole sequence.
    For a body that can still be reached at its end (not aborting), the
    return statement is appended. The (possibly new) body is returned.
    """
    none_return = return_class(
        expression = ExpressionConstantNoneRef(source_ref = source_ref),
        source_ref = source_ref
    )

    if function_statements_body is None:
        return makeStatementsSequenceFromStatement(statement = none_return)

    if not function_statements_body.isStatementAborting():
        # Only needed when control flow can actually fall off the end.
        function_statements_body.setStatements(
            function_statements_body.getStatements() + (none_return,)
        )

    return function_statements_body
def buildFunctionNode(provider, node, source_ref):
    # Reformulate a plain "def" statement into the node tree: function (or
    # generator creation wrapper) body, decorator application, and the final
    # assignment of the result to the function name.
    assert getKind(node) == "FunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    # Classify the body (plain function vs. generator) and collect its flags.
    function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
        detectFunctionBodyKind(
            nodes = function_statement_nodes
        )
    outer_body, function_body, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = function_kind,
        name          = node.name,
        function_doc  = function_doc,
        flags         = flags,
        node          = node,
        source_ref    = source_ref
    )
    if function_kind == "Function":
        code_body = function_body
    elif function_kind == "Generator":
        # Generators get a dedicated object body; the function body itself
        # will merely create and return the generator object.
        code_body = ExpressionGeneratorObjectBody(
            provider   = function_body,
            name       = node.name,
            flags      = flags,
            source_ref = source_ref
        )
        for variable in function_body.getVariables():
            code_body.getVariableForReference(variable.getName())
    else:
        assert False, function_kind
    if function_kind == "Generator":
        function_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = StatementReturn(
                    expression = ExpressionMakeGeneratorObject(
                        generator_ref = ExpressionFunctionRef(
                            function_body = code_body,
                            source_ref    = source_ref
                        ),
                        code_object   = code_object,
                        source_ref    = source_ref
                    ),
                    source_ref = source_ref
                )
            )
        )
    # Decorators apply innermost first, therefore reversed order here.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = function_body,
        source_ref    = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = code_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    if function_kind == "Function":
        # Plain functions implicitly "return None" when falling off the end.
        function_statements_body = _insertFinalReturnStatement(
            function_statements_body = function_statements_body,
            return_class             = StatementReturn,
            source_ref               = source_ref
        )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    code_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = outer_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    # "__new__" of a class is implicitly a staticmethod; add the decorator
    # unless it is already present.
    if node.name == "__new__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "staticmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "staticmethod",
                    source_ref   = source_ref
                )
            )
    # Python3.6 onwards: "__init_subclass__" is implicitly a classmethod.
    if python_version >= 360 and \
       node.name == "__init_subclass__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "classmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "classmethod",
                    source_ref   = source_ref
                )
            )
    # Apply the decorators as nested calls around the function creation.
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    if python_version >= 340:
        # "__qualname__" setup needs the target variable reference later on.
        function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildAsyncFunctionNode(provider, node, source_ref):
    # Reformulate "async def": a creator function whose only job is to make
    # and return the coroutine object carrying the real body.
    assert getKind(node) == "AsyncFunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    _function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
        detectFunctionBodyKind(
            nodes = function_statement_nodes
        )
    creator_function_body, _, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = "Coroutine",
        name          = node.name,
        flags         = (),
        function_doc  = function_doc,
        node          = node,
        source_ref    = source_ref
    )
    function_body = ExpressionCoroutineObjectBody(
        provider   = creator_function_body,
        name       = node.name,
        flags      = flags,
        source_ref = source_ref
    )
    # Decorators apply innermost first, therefore reversed order here.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = function_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    # Coroutines end in a generator style "return None" when falling off.
    function_statements_body = _insertFinalReturnStatement(
        function_statements_body = function_statements_body,
        return_class             = StatementGeneratorReturn,
        source_ref               = source_ref
    )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    function_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = creator_function_body,
        source_ref    = source_ref
    )
    # The creator function only builds and returns the coroutine object.
    creator_function_body.setBody(
        makeStatementsSequenceFromStatement(
            statement = StatementReturn(
                expression = ExpressionMakeCoroutineObject(
                    coroutine_ref = ExpressionFunctionRef(
                        function_body = function_body,
                        source_ref    = source_ref
                    ),
                    code_object   = code_object,
                    source_ref    = source_ref
                ),
                source_ref = source_ref
            )
        )
    )
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = creator_function_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    # Apply the decorators as nested calls around the function creation.
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    # "__qualname__" setup needs the target variable reference later on.
    function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildParameterKwDefaults(provider, node, function_body, source_ref):
    """Build the keyword-only defaults dictionary expression.

    Before Python3 there are no keyword-only arguments, and when there are
    no keyword-only parameter names, "None" is returned instead of an
    (empty) dictionary creation.
    """
    if python_version < 300:
        return None

    kw_only_names = function_body.getParameters().getKwOnlyParameterNames()

    if not kw_only_names:
        return None

    keys = []
    values = []

    # Pair each keyword-only name with its default; names without a
    # default carry "None" in the AST and are simply skipped.
    for kw_only_name, kw_default in zip(kw_only_names, node.args.kw_defaults):
        if kw_default is not None:
            keys.append(
                makeConstantRefNode(
                    constant   = kw_only_name,
                    source_ref = source_ref
                )
            )
            values.append(
                buildNode(provider, kw_default, source_ref)
            )

    return makeDictCreationOrConstant(
        keys       = keys,
        values     = values,
        source_ref = source_ref
    )
def buildParameterAnnotations(provider, node, source_ref):
    # Build the annotations dictionary expression for a function definition,
    # or return None when there are none (and always before Python3).
    if python_version < 300:
        return None
    # Starting with Python3.4, private names used in annotations get mangled.
    if python_version < 340:
        mangle = lambda variable_name: variable_name
    else:
        mangle = lambda variable_name: mangleName(variable_name, provider)
    keys = []
    values = []
    def addAnnotation(key, value):
        # Record one annotation key/value pair for the final dictionary.
        keys.append(
            makeConstantRefNode(
                constant      = mangle(key),
                source_ref    = source_ref,
                user_provided = True
            )
        )
        values.append(value)
    def extractArg(arg):
        # Pull the annotation off a parameter node, recursing into Python2
        # style tuple parameters.
        if getKind(arg) == "Name":
            assert arg.annotation is None
        elif getKind(arg) == "arg":
            if arg.annotation is not None:
                addAnnotation(
                    key   = arg.arg,
                    value = buildNode(provider, arg.annotation, source_ref)
                )
        elif getKind(arg) == "Tuple":
            for arg in arg.elts:
                extractArg(arg)
        else:
            assert False, getKind(arg)
    for arg in node.args.args:
        extractArg(arg)
    for arg in node.args.kwonlyargs:
        extractArg(arg)
    if python_version < 340:
        # Before Python3.4, star argument annotations live on dedicated
        # "varargannotation"/"kwargannotation" fields of the AST.
        if node.args.varargannotation is not None:
            addAnnotation(
                key   = node.args.vararg,
                value = buildNode(
                    provider, node.args.varargannotation, source_ref
                )
            )
        if node.args.kwargannotation is not None:
            addAnnotation(
                key   = node.args.kwarg,
                value = buildNode(
                    provider, node.args.kwargannotation, source_ref
                )
            )
    else:
        if node.args.vararg is not None:
            extractArg(node.args.vararg)
        if node.args.kwarg is not None:
            extractArg(node.args.kwarg)
    # Return value annotation, not present on lambdas.
    if hasattr(node, "returns") and node.returns is not None:
        addAnnotation(
            key   = "return",
            value = buildNode(
                provider, node.returns, source_ref
            )
        )
    if keys:
        return makeDictCreationOrConstant(
            keys       = keys,
            values     = values,
            source_ref = source_ref
        )
    else:
        return None
def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags,
                             node, source_ref):
    # Build the outer function body with its parameter specification and
    # code object. For Python2 tuple parameters ("special args"), an inner
    # function is created that receives the unpacked values; otherwise the
    # inner function is the outer one. Returns (outer, inner, code_object).
    kind = getKind(node)
    assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind
    def extractArg(arg):
        # Normalize one parameter node (or plain string) to its mangled name.
        if arg is None:
            return None
        elif type(arg) is str:
            return mangleName(arg, provider)
        elif getKind(arg) == "Name":
            return mangleName(arg.id, provider)
        elif getKind(arg) == "arg":
            return mangleName(arg.arg, provider)
        elif getKind(arg) == "Tuple":
            # Not to be handled here, see extractNormalArgs below.
            assert False
        else:
            assert False, getKind(arg)
    special_args = {}
    def extractNormalArgs(args):
        # Collect positional parameter names, replacing Python2 tuple
        # parameters with synthetic ".n" names remembered in special_args.
        normal_args = []
        for arg in args:
            if type(arg) is not str and getKind(arg) == "Tuple":
                special_arg_name = ".%d" % (len(special_args) + 1)
                special_args[special_arg_name] = arg.elts
                normal_args.append(special_arg_name)
            else:
                normal_args.append(extractArg(arg))
        return normal_args
    normal_args = extractNormalArgs(node.args.args)
    parameters = ParameterSpec(
        ps_name          = name,
        ps_normal_args   = normal_args,
        ps_kw_only_args  = [
            extractArg(arg)
            for arg in
            node.args.kwonlyargs
            ]
              if python_version >= 300 else
            [],
        ps_list_star_arg = extractArg(node.args.vararg),
        ps_dict_star_arg = extractArg(node.args.kwarg),
        ps_default_count = len(node.args.defaults)
    )
    # Duplicate parameter names and the like are syntax errors.
    message = parameters.checkValid()
    if message is not None:
        SyntaxErrors.raiseSyntaxError(
            message,
            source_ref
        )
    code_object = CodeObjectSpec(
        co_name           = name,
        co_kind           = function_kind,
        co_varnames       = parameters.getParameterNames(),
        co_argcount       = parameters.getArgumentCount(),
        co_kwonlyargcount = parameters.getKwOnlyParameterCount(),
        co_has_starlist   = parameters.getStarListArgumentName() is not None,
        co_has_stardict   = parameters.getStarDictArgumentName() is not None
    )
    outer_body = ExpressionFunctionBody(
        provider   = provider,
        name       = name,
        flags      = flags,
        doc        = function_doc,
        parameters = parameters,
        source_ref = source_ref
    )
    if special_args:
        # Python2 tuple parameters: the outer function unpacks the tuple
        # arguments into temporary variables and forwards everything to an
        # inner function that carries the real body.
        inner_name = name.strip("<>") + "$inner"
        inner_arg_names = []
        iter_vars = []
        values = []
        statements = []
        def unpackFrom(source, arg_names):
            # Emit statements that iterate "source" and unpack it into
            # temporary variables, recursing into nested tuples. Returns the
            # accesses to the unpacked values in parameter order.
            accesses = []
            sub_special_index = 0
            iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars))
            iter_vars.append(iter_var)
            statements.append(
                StatementAssignmentVariable(
                    variable_ref = ExpressionTargetTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    source       = ExpressionBuiltinIter1(
                        value      = source,
                        source_ref = source_ref
                    ),
                    source_ref   = source_ref
                )
            )
            for element_index, arg_name in enumerate(arg_names):
                if getKind(arg_name) == "Name":
                    inner_arg_names.append(arg_name.id)
                    arg_var = outer_body.allocateTempVariable(None, "tmp_" + arg_name.id)
                    statements.append(
                        StatementAssignmentVariable(
                            variable_ref = ExpressionTargetTempVariableRef(
                                variable   = arg_var,
                                source_ref = source_ref
                            ),
                            source       = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            source_ref   = source_ref
                        )
                    )
                    accesses.append(
                        ExpressionTempVariableRef(
                            variable   = arg_var,
                            source_ref = source_ref
                        )
                    )
                elif getKind(arg_name) == "Tuple":
                    # Nested tuple parameter, unpack it recursively.
                    accesses.extend(
                        unpackFrom(
                            source    = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            arg_names = arg_name.elts
                        )
                    )
                    sub_special_index += 1
                else:
                    assert False, arg_name
            # Verify the iterator is exhausted, i.e. the argument had
            # exactly the expected number of elements.
            statements.append(
                StatementSpecialUnpackCheck(
                    iterator   = ExpressionTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    count      = len(arg_names),
                    source_ref = source_ref
                )
            )
            return accesses
        for arg_name in parameters.getParameterNames():
            if arg_name.startswith('.'):
                # Synthetic name for a tuple parameter, unpack its elements.
                source = ExpressionVariableRef(
                    variable_name = arg_name,
                    source_ref    = source_ref
                )
                values.extend(
                    unpackFrom(source, special_args[arg_name])
                )
            else:
                values.append(
                    ExpressionVariableRef(
                        variable_name = arg_name,
                        source_ref    = source_ref
                    )
                )
                inner_arg_names.append(arg_name)
        inner_parameters = ParameterSpec(
            ps_name          = inner_name,
            ps_normal_args   = inner_arg_names,
            ps_kw_only_args  = (),
            ps_list_star_arg = None,
            ps_dict_star_arg = None,
            ps_default_count = None
        )
        function_body = ExpressionFunctionBody(
            provider   = outer_body,
            name       = inner_name,
            flags      = flags,
            doc        = function_doc,
            parameters = inner_parameters,
            source_ref = source_ref
        )
        # The outer body finally calls the inner function with the plain and
        # unpacked values.
        statements.append(
            StatementReturn(
                ExpressionFunctionCall(
                    function   = ExpressionFunctionCreation(
                        function_ref = ExpressionFunctionRef(
                            function_body = function_body,
                            source_ref    = source_ref
                        ),
                        code_object  = code_object,
                        defaults     = (),
                        kw_defaults  = None,
                        annotations  = None,
                        source_ref   = source_ref
                    ),
                    values     = values,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            )
        )
        # Release all temporary variables via try/finally no matter what.
        outer_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = makeTryFinallyStatement(
                    provider,
                    tried      = statements,
                    final      = [
                        StatementReleaseVariable(
                            variable   = variable,
                            source_ref = source_ref
                        )
                        for variable in
                        outer_body.getTempVariables()
                    ] ,
                    source_ref = source_ref,
                    public_exc = False
                )
            )
        )
    else:
        function_body = outer_body
    return outer_body, function_body, code_object
def addFunctionVariableReleases(function):
    # Add release statements for all local variables this function owns, by
    # wrapping its body into a try/finally statement.
    assert function.isExpressionFunctionBody() or \
           function.isExpressionClassBody() or \
           function.isExpressionGeneratorObjectBody() or \
           function.isExpressionCoroutineObjectBody()
    releases = []
    # We attach everything to the function definition source location.
    source_ref = function.getSourceReference()
    for variable in function.getLocalVariables():
        # Shared variables are freed by function object attachment instead.
        if variable.getOwner() is not function:
            continue
        releases.append(
            StatementReleaseVariable(
                variable   = variable,
                source_ref = source_ref
            )
        )
    if releases:
        body = function.getBody()
        if body.isStatementsFrame():
            body = makeStatementsSequenceFromStatement(
                statement = body
            )
        body = makeTryFinallyStatement(
            provider   = function,
            tried      = body,
            final      = releases,
            source_ref = source_ref
        )
        function.setBody(
            makeStatementsSequenceFromStatement(
                statement = body
            )
        )
| true | true |
f72aaada40c2662f3b0cfa6fbf29805cd48bed68 | 3,807 | py | Python | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | 1 | 2021-03-10T23:09:42.000Z | 2021-03-10T23:09:42.000Z | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.11.8
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1Metrics(object):
    """Model for the Argo ``V1alpha1Metrics`` OpenAPI schema.

    Holds the list of prometheus metrics a workflow emits.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> attribute type.
    openapi_types = {
        'prometheus': 'list[V1alpha1Prometheus]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'prometheus': 'prometheus'
    }

    def __init__(self, prometheus=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1Metrics - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )
        self._prometheus = None
        self.discriminator = None
        # Goes through the property setter, so validation applies.
        self.prometheus = prometheus

    @property
    def prometheus(self):
        """Gets the prometheus of this V1alpha1Metrics.  # noqa: E501

        Prometheus is a list of prometheus metrics to be emitted  # noqa: E501

        :return: The prometheus of this V1alpha1Metrics.  # noqa: E501
        :rtype: list[V1alpha1Prometheus]
        """
        return self._prometheus

    @prometheus.setter
    def prometheus(self, prometheus):
        """Sets the prometheus of this V1alpha1Metrics.

        Prometheus is a list of prometheus metrics to be emitted  # noqa: E501

        :param prometheus: The prometheus of this V1alpha1Metrics.  # noqa: E501
        :type: list[V1alpha1Prometheus]
        """
        if self.local_vars_configuration.client_side_validation and prometheus is None:  # noqa: E501
            raise ValueError("Invalid value for `prometheus`, must not be `None`")  # noqa: E501
        self._prometheus = prometheus

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(value):
            # One level of recursion, exactly as for generated models:
            # nested models expose their own to_dict().
            if isinstance(value, list):
                return [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            return value

        return {
            attr: _serialize(getattr(self, attr))
            for attr in self.openapi_types
        }

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1Metrics):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1Metrics):
            return True
        return self.to_dict() != other.to_dict()
| 30.701613 | 134 | 0.599947 |
import pprint
import re
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1Metrics(object):
    """Model for the Argo ``V1alpha1Metrics`` OpenAPI schema.

    Auto generated by OpenAPI Generator; do not edit manually.
    """
    # Attribute name -> attribute type.
    openapi_types = {
        'prometheus': 'list[V1alpha1Prometheus]'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'prometheus': 'prometheus'
    }
    def __init__(self, prometheus=None, local_vars_configuration=None):
        """V1alpha1Metrics - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._prometheus = None
        self.discriminator = None
        # Goes through the property setter, so validation applies.
        self.prometheus = prometheus
    @property
    def prometheus(self):
        """Gets the prometheus of this V1alpha1Metrics.

        :rtype: list[V1alpha1Prometheus]
        """
        return self._prometheus
    @prometheus.setter
    def prometheus(self, prometheus):
        """Sets the prometheus of this V1alpha1Metrics.

        :raises ValueError: if client side validation is enabled and
            prometheus is None.
        """
        if self.local_vars_configuration.client_side_validation and prometheus is None:
            raise ValueError("Invalid value for `prometheus`, must not be `None`")
        self._prometheus = prometheus
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1alpha1Metrics):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1alpha1Metrics):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f72aabfa532e7c390422910aefe539d8af6b71e5 | 4,802 | py | Python | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 11 | 2018-12-11T05:59:50.000Z | 2020-09-30T03:01:02.000Z | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 1 | 2019-02-28T15:51:26.000Z | 2019-02-28T15:51:26.000Z | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 3 | 2018-03-06T07:34:39.000Z | 2018-05-28T03:13:32.000Z | import pandas as pd
import numpy as np
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import EarlyStopping
# Main function of cnn training
def run_neural_network():
    """Train three neural models on the tweet data and dump their predictions.

    Models (each seeded separately for reproducibility):
      1. CNN  (Conv1D + max pooling + dense classifier)
      2. LSTM (with dropout)
      3. CNN feeding an LSTM

    For every model, predictions on the train and test splits are pickled to
    data/xgboost/{train,test}_model<N>.txt for later XGBoost ensembling.
    Reads preprocessed tweets from data/pickles/.
    """
    print(" == Enter into CNN training step ==")
    np.random.seed(0)

    # Load preprocessed tweets. The training set is 2.5M tweets, first half
    # labelled 0 (negative), second half labelled 1 (positive).
    x_train = pd.read_pickle("data/pickles/train_after_preprocess.pkl")
    x_train = np.array(x_train['tweet'])
    x_test = pd.read_pickle("data/pickles/test_after_preprocess.pkl")
    x_test = np.array(x_test['tweet'])
    y = np.array(2500000 // 2 * [0] + 2500000 // 2 * [1])
    print("Data loading finish!")

    # Tokenization: map each tweet to a fixed-length (30) sequence of
    # vocabulary indices, padded/truncated as needed.
    tokenizer = Tokenizer(filters='')
    tokenizer.fit_on_texts(x_train)
    sequence_train = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_train), maxlen=30)
    sequence_test = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_test), maxlen=30)
    # +1 because Keras reserves index 0 for padding.
    input_dim = len(tokenizer.word_index) + 1
    input_length = sequence_train.shape[1]
    print("Tokenization finish!")

    # Shuffle features and labels with the same permutation.
    new_index = np.arange(sequence_train.shape[0])
    np.random.shuffle(new_index)
    sequence_train = sequence_train[new_index]
    y = y[new_index]
    print("Data shuffling finish!")

    early_stopping = EarlyStopping(monitor='val_loss', patience=2)

    def _train_and_dump(index, seed, build_layers):
        """Build, train and persist one model.

        index        -- model number used in messages and output file names
                        (fixes the former copy-paste bug where model 3
                        printed "Build model1!")
        seed         -- numpy seed set before layer construction
        build_layers -- zero-argument callable returning the hidden layers
                        to stack between the shared Embedding layer and the
                        final sigmoid Dense layer; called after seeding so
                        weight initialization stays reproducible
        """
        print("Build model%d!" % index)
        np.random.seed(seed)
        model = Sequential()
        model.add(Embedding(input_dim, 50, input_length=input_length))
        for layer in build_layers():
            model.add(layer)
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
        print("Fit model%d!" % index)
        model.fit(sequence_train, y, validation_split=0.1, epochs=10,
                  batch_size=128, verbose=1, shuffle=True,
                  callbacks=[early_stopping])
        print("Generate prediction!")
        # Context managers close the dump files reliably (the previous code
        # leaked the file handles opened inline in pickle.dump calls).
        with open('data/xgboost/train_model%d.txt' % index, 'wb') as dump_file:
            pickle.dump(model.predict(sequence_train, batch_size=128), dump_file)
        with open('data/xgboost/test_model%d.txt' % index, 'wb') as dump_file:
            pickle.dump(model.predict(sequence_test), dump_file)
        print("Model%d finished!" % index)

    # Model 1: CNN.
    _train_and_dump(1, 1, lambda: [
        Conv1D(padding="same", kernel_size=3, filters=32, activation="relu"),
        MaxPooling1D(pool_size=2),
        Flatten(),
        Dense(250, activation='relu'),
    ])
    # Model 2: LSTM.
    _train_and_dump(2, 2, lambda: [
        LSTM(100, recurrent_dropout=0.2, dropout=0.2),
    ])
    # Model 3: CNN + LSTM.
    _train_and_dump(3, 3, lambda: [
        Conv1D(padding="same", kernel_size=3, filters=32, activation="relu"),
        MaxPooling1D(pool_size=2),
        LSTM(100),
    ])
# Script entry point: run the full CNN training pipeline.
if __name__ == "__main__":
    run_neural_network()
| 37.515625 | 145 | 0.704082 | import pandas as pd
import numpy as np
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import EarlyStopping
def run_neural_network():
    """Train three neural models on the tweet data and dump their predictions.

    Models (each seeded separately for reproducibility):
      1. CNN  (Conv1D + max pooling + dense classifier)
      2. LSTM (with dropout)
      3. CNN feeding an LSTM

    Fixes the corrupted ``p.random.seed(...)`` calls (``p`` is undefined,
    the intended name is ``np``) and deduplicates the per-model code.
    Predictions are pickled to data/xgboost/{train,test}_model<N>.txt.
    """
    print(" == Enter into CNN training step ==")
    np.random.seed(0)

    # Load preprocessed tweets. The training set is 2.5M tweets, first half
    # labelled 0 (negative), second half labelled 1 (positive).
    x_train = pd.read_pickle("data/pickles/train_after_preprocess.pkl")
    x_train = np.array(x_train['tweet'])
    x_test = pd.read_pickle("data/pickles/test_after_preprocess.pkl")
    x_test = np.array(x_test['tweet'])
    y = np.array(2500000 // 2 * [0] + 2500000 // 2 * [1])
    print("Data loading finish!")

    # Tokenization: map each tweet to a fixed-length (30) sequence of
    # vocabulary indices, padded/truncated as needed.
    tokenizer = Tokenizer(filters='')
    tokenizer.fit_on_texts(x_train)
    sequence_train = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_train), maxlen=30)
    sequence_test = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_test), maxlen=30)
    # +1 because Keras reserves index 0 for padding.
    input_dim = len(tokenizer.word_index) + 1
    input_length = sequence_train.shape[1]
    print("Tokenization finish!")

    # Shuffle features and labels with the same permutation.
    new_index = np.arange(sequence_train.shape[0])
    np.random.shuffle(new_index)
    sequence_train = sequence_train[new_index]
    y = y[new_index]
    print("Data shuffling finish!")

    early_stopping = EarlyStopping(monitor='val_loss', patience=2)

    def _train_and_dump(index, seed, build_layers):
        """Build, train and persist one model.

        index        -- model number used in messages and output file names
        seed         -- numpy seed set before layer construction
        build_layers -- zero-argument callable returning the hidden layers
                        to stack between the shared Embedding layer and the
                        final sigmoid Dense layer; called after seeding so
                        weight initialization stays reproducible
        """
        print("Build model%d!" % index)
        np.random.seed(seed)
        model = Sequential()
        model.add(Embedding(input_dim, 50, input_length=input_length))
        for layer in build_layers():
            model.add(layer)
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
        print("Fit model%d!" % index)
        model.fit(sequence_train, y, validation_split=0.1, epochs=10,
                  batch_size=128, verbose=1, shuffle=True,
                  callbacks=[early_stopping])
        print("Generate prediction!")
        # Context managers close the dump files reliably (the previous code
        # leaked the file handles opened inline in pickle.dump calls).
        with open('data/xgboost/train_model%d.txt' % index, 'wb') as dump_file:
            pickle.dump(model.predict(sequence_train, batch_size=128), dump_file)
        with open('data/xgboost/test_model%d.txt' % index, 'wb') as dump_file:
            pickle.dump(model.predict(sequence_test), dump_file)
        print("Model%d finished!" % index)

    # Model 1: CNN.
    _train_and_dump(1, 1, lambda: [
        Conv1D(padding="same", kernel_size=3, filters=32, activation="relu"),
        MaxPooling1D(pool_size=2),
        Flatten(),
        Dense(250, activation='relu'),
    ])
    # Model 2: LSTM.
    _train_and_dump(2, 2, lambda: [
        LSTM(100, recurrent_dropout=0.2, dropout=0.2),
    ])
    # Model 3: CNN + LSTM.
    _train_and_dump(3, 3, lambda: [
        Conv1D(padding="same", kernel_size=3, filters=32, activation="relu"),
        MaxPooling1D(pool_size=2),
        LSTM(100),
    ])
# Script entry point: run the full CNN training pipeline.
if __name__ == "__main__":
    run_neural_network()
| true | true |
f72aacfc0d05c9205783f92c37e379035bd0665e | 5,592 | py | Python | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 2 | 2018-12-22T08:20:13.000Z | 2020-06-24T02:48:52.000Z | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | null | null | null | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 3 | 2018-03-06T05:12:17.000Z | 2021-04-22T10:01:01.000Z | # -*- coding: utf-8 -*-
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
'maclib/scrap',
'library/xmllib',
'library/xml.etree',
]
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# Options for HTML output
# -----------------------
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://docs.python.org/dev'
# Additional static files.
html_static_path = ['tools/sphinxext/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('documenting/index', 'documenting.tex',
'Documenting Python', 'Georg Brandl', 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Using Python', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
    # Tk bindings (Tkinter, tkFont, ...).  Note: inside a character class
    # '|' is a literal, so the old pattern [T|t][k|K] also matched names
    # containing '|'; [Tt][kK] expresses the actual intent.
    r'[Tt][kK]',
    r'Tix',
    r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
| 30.557377 | 82 | 0.669886 |
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
'maclib/scrap',
'library/xmllib',
'library/xml.etree',
]
refcount_file = 'data/refcounts.dat'
add_function_parentheses = True
add_module_names = True
html_last_updated_fmt = '%b %d, %Y'
html_use_smartypants = True
html_sidebars = {
'index': 'indexsidebar.html',
}
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
html_use_opensearch = 'http://docs.python.org/dev'
html_static_path = ['tools/sphinxext/static']
htmlhelp_basename = 'python' + release.replace('.', '')
html_split_index = True
latex_paper_size = 'a4'
latex_font_size = '10pt'
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('documenting/index', 'documenting.tex',
'Documenting Python', 'Georg Brandl', 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Using Python', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
| true | true |
f72aad60b115d3fd47a1a6dd7076ff1a07ca0230 | 140 | py | Python | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | """
The first config example
"""
from types import SimpleNamespace
agency = 'NSA'
snuffler = SimpleNamespace(phase_map={1: 'P', 2: 'S'})
| 14 | 54 | 0.685714 | from types import SimpleNamespace
agency = 'NSA'
snuffler = SimpleNamespace(phase_map={1: 'P', 2: 'S'})
| true | true |
f72aadc6e09185ea3c69fa953e810a4ae3a1ee00 | 1,277 | py | Python | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | """
The intensity measurement scale has changed, and might change again
Therefore, I need this module to translate between numeric intensity scores
and casualty numbers
"""
from typing import Optional
from datetime import date
import pydantic
class CasualtyRange(pydantic.BaseModel):
    """A casualty-count range corresponding to one numeric intensity score."""

    # Inclusive lower bound of the casualty count.
    lower: int
    # Inclusive upper bound; None means open-ended ("lower or more").
    upper: Optional[int]
    # Optional human-readable label (e.g. "Low") — only present on the
    # current scale's entries, see SCALES below.
    text: Optional[str]
    @property
    def zero(self):
        # True only for the sentinel 0-0 range produced for negative
        # intensity values in scaled(); no entry in SCALES has upper == 0.
        return self.upper == 0
# Casualty-range scales keyed by the date each scale came into effect.
# scaled() picks the most recent scale whose effective date is <= the
# requested date, so a new scale can be added here without touching old data.
SCALES = {
    # The old scale (effective "since forever": year 1).
    date(1,1,1):{
        0: CasualtyRange(lower=0,upper=1),
        1: CasualtyRange(lower=2,upper=25),
        2: CasualtyRange(lower=26,upper=99),
        3: CasualtyRange(lower=100,upper=999),
        4: CasualtyRange(lower=1000,upper=None),
    },
    # The current scale (effective 2021-01-01); upper=None is open-ended.
    date(2021,1,1):{
        0: CasualtyRange(lower=1,upper=25,text="Low"),
        1: CasualtyRange(lower=26,upper=99,text="Medium"),
        2: CasualtyRange(lower=100,upper=None,text="High"),
    }
}
def scaled(date: date, intensity_value: int) -> CasualtyRange:
    """Translate a numeric intensity score to a casualty range.

    Uses the scale that was in effect on *date*; a negative score is
    treated as "no casualties" and yields the sentinel 0-0 range.
    """
    if intensity_value < 0:
        return CasualtyRange(lower=0, upper=0)
    # Most recent scale whose effective date is not after the given date.
    effective = max(d for d in SCALES if d <= date)
    return SCALES[effective][intensity_value]
| 30.404762 | 75 | 0.649961 | from typing import Optional
from datetime import date
import pydantic
class CasualtyRange(pydantic.BaseModel):
lower: int
upper: Optional[int]
text: Optional[str]
@property
def zero(self):
return self.upper == 0
SCALES = {
date(1,1,1):{
0: CasualtyRange(lower=0,upper=1),
1: CasualtyRange(lower=2,upper=25),
2: CasualtyRange(lower=26,upper=99),
3: CasualtyRange(lower=100,upper=999),
4: CasualtyRange(lower=1000,upper=None),
},
date(2021,1,1):{
0: CasualtyRange(lower=1,upper=25,text="Low"),
1: CasualtyRange(lower=26,upper=99,text="Medium"),
2: CasualtyRange(lower=100,upper=None,text="High"),
}
}
def scaled(date:date,intensity_value:int)->CasualtyRange:
if intensity_value < 0:
return CasualtyRange(lower=0,upper=0)
valid_scales = {k:v for k,v in SCALES.items() if k <= date}
scale_for_date = SCALES[max((d for d,_ in valid_scales.items()))]
return scale_for_date[intensity_value]
| true | true |
f72aadee17de447d48becb2d1e2d660cbd57c250 | 3,508 | py | Python | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json5_generator
import template_expander
from collections import namedtuple
from core.css import css_properties
class PropertyClassData(
        namedtuple('PropertyClassData', [
            'enum_key', 'enum_value', 'property_id', 'classname',
            'namespace_group', 'filename'
        ])):
    """One generated-property record, as consumed by the Jinja templates."""
class CSSPropertyInstancesWriter(json5_generator.Writer):
    """Emits css_property_instances.h/.cc from the CSS property JSON5 files."""

    def __init__(self, json5_file_paths, output_dir):
        super(CSSPropertyInstancesWriter, self).__init__([], output_dir)
        self._input_files = json5_file_paths
        self._outputs = {
            'css_property_instances.h':
                self.generate_property_instances_header,
            'css_property_instances.cc':
                self.generate_property_instances_implementation,
        }
        # These files are no longer generated.  Remove leftovers from a
        # previous build so nothing accidentally #includes a stale header.
        self._cleanup = set([
            'css_property.cc', 'css_property.h', 'css_unresolved_property.cc',
            'css_unresolved_property.h'
        ])
        self._css_properties = css_properties.CSSProperties(json5_file_paths)

        resolved = (self._css_properties.longhands +
                    self._css_properties.shorthands)
        # Lists of PropertyClassData, ordered by enum value.
        self._property_classes_by_id = sorted(
            (self.get_class(prop) for prop in resolved),
            key=lambda entry: entry.enum_value)
        self._alias_classes_by_id = sorted(
            (self.get_class(alias) for alias in self._css_properties.aliases),
            key=lambda entry: entry.enum_value)

    def get_class(self, property_):
        """Builds the PropertyClassData record for a property.

        Args:
            property_: A single property from CSSProperties.properties()

        Returns:
            The PropertyClassData (including the generated class name) for
            the property.
        """
        if property_['longhands']:
            group = 'Shorthand'
        else:
            group = 'Longhand'
        return PropertyClassData(
            enum_key=property_['enum_key'],
            enum_value=property_['enum_value'],
            property_id=property_['property_id'],
            classname=property_['name'].to_upper_camel_case(),
            namespace_group=group,
            filename=property_['name'].to_snake_case())

    @property
    def css_properties(self):
        return self._css_properties

    def _template_inputs(self):
        # Both templates are rendered from the same context.
        return {
            'input_files': self._input_files,
            'property_classes_by_property_id': self._property_classes_by_id,
            'alias_classes_by_property_id': self._alias_classes_by_id,
        }

    @template_expander.use_jinja(
        'core/css/properties/templates/css_property_instances.h.tmpl')
    def generate_property_instances_header(self):
        return self._template_inputs()

    @template_expander.use_jinja(
        'core/css/properties/templates/css_property_instances.cc.tmpl')
    def generate_property_instances_implementation(self):
        return self._template_inputs()
if __name__ == '__main__':
    # Script entry point: json5_generator.Maker constructs and runs this
    # writer (argument handling lives in the shared json5_generator module).
    json5_generator.Maker(CSSPropertyInstancesWriter).main()
| 37.319149 | 85 | 0.686431 |
import json5_generator
import template_expander
from collections import namedtuple
from core.css import css_properties
class PropertyClassData(
namedtuple(
'PropertyClassData',
'enum_key,enum_value,property_id,classname,namespace_group,filename'
)):
pass
class CSSPropertyInstancesWriter(json5_generator.Writer):
def __init__(self, json5_file_paths, output_dir):
super(CSSPropertyInstancesWriter, self).__init__([], output_dir)
self._input_files = json5_file_paths
self._outputs = {
'css_property_instances.h':
self.generate_property_instances_header,
'css_property_instances.cc':
self.generate_property_instances_implementation
}
self._cleanup = set([
'css_property.cc', 'css_property.h', 'css_unresolved_property.cc',
'css_unresolved_property.h'
])
self._css_properties = css_properties.CSSProperties(json5_file_paths)
properties = self._css_properties.longhands + self._css_properties.shorthands
aliases = self._css_properties.aliases
self._property_classes_by_id = list(map(self.get_class, properties))
self._alias_classes_by_id = list(map(self.get_class, aliases))
self._property_classes_by_id.sort(key=lambda t: t.enum_value)
self._alias_classes_by_id.sort(key=lambda t: t.enum_value)
def get_class(self, property_):
namespace_group = 'Shorthand' if property_['longhands'] else 'Longhand'
return PropertyClassData(
enum_key=property_['enum_key'],
enum_value=property_['enum_value'],
property_id=property_['property_id'],
classname=property_['name'].to_upper_camel_case(),
namespace_group=namespace_group,
filename=property_['name'].to_snake_case())
@property
def css_properties(self):
return self._css_properties
@template_expander.use_jinja(
'core/css/properties/templates/css_property_instances.h.tmpl')
def generate_property_instances_header(self):
return {
'input_files': self._input_files,
'property_classes_by_property_id': self._property_classes_by_id,
'alias_classes_by_property_id': self._alias_classes_by_id,
}
@template_expander.use_jinja(
'core/css/properties/templates/css_property_instances.cc.tmpl')
def generate_property_instances_implementation(self):
return {
'input_files': self._input_files,
'property_classes_by_property_id': self._property_classes_by_id,
'alias_classes_by_property_id': self._alias_classes_by_id,
}
if __name__ == '__main__':
json5_generator.Maker(CSSPropertyInstancesWriter).main()
| true | true |
f72aae9d51b7f0153c0b16c546eb1ceaa7d6f438 | 777 | py | Python | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | 11 | 2021-02-14T15:56:22.000Z | 2022-03-21T08:26:58.000Z | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | null | null | null | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | 6 | 2021-03-16T14:30:12.000Z | 2022-03-10T14:20:24.000Z | # Generated by Django 3.1.5 on 2021-01-08 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.5; runs after the migration that removed
    # ClassifyData.pending_tag.
    dependencies = [
        ('data', '0002_remove_classifydata_pending_tag'),
    ]

    operations = [
        # New ClassifyTag table: one row per tag category
        # (help_text/verbose_name '标签种类' = "tag category").
        migrations.CreateModel(
            name='ClassifyTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(default='', help_text='标签种类', max_length=30, verbose_name='标签种类')),
            ],
        ),
        # New ClassifyData.status flag ('是否完成' = "finished or not"),
        # defaulting to False for existing rows.
        migrations.AddField(
            model_name='classifydata',
            name='status',
            field=models.BooleanField(default=False, help_text='是否完成', verbose_name='是否完成'),
        ),
    ]
| 29.884615 | 114 | 0.593308 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0002_remove_classifydata_pending_tag'),
]
operations = [
migrations.CreateModel(
name='ClassifyTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(default='', help_text='标签种类', max_length=30, verbose_name='标签种类')),
],
),
migrations.AddField(
model_name='classifydata',
name='status',
field=models.BooleanField(default=False, help_text='是否完成', verbose_name='是否完成'),
),
]
| true | true |
f72aaeb6bcf064edb9fbce86a27a4b372ec8bac8 | 9,321 | py | Python | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 39 | 2021-10-12T01:43:27.000Z | 2022-03-28T04:46:35.000Z | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 6 | 2021-10-14T02:11:47.000Z | 2022-03-23T02:41:50.000Z | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 10 | 2021-10-14T09:36:03.000Z | 2022-02-10T11:05:12.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pearson-model-param.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pearson-model-param.proto',
package='com.welab.wefe.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\026PearsonModelParamProto',
serialized_pb=b'\n\x19pearson-model-param.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\x16\n\x05Names\x12\r\n\x05names\x18\x01 \x03(\t\"/\n\x0c\x41nonymousMap\x12\x11\n\tanonymous\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x8a\x02\n\x11PearsonModelParam\x12\r\n\x05party\x18\x01 \x01(\t\x12\x0f\n\x07parties\x18\x02 \x03(\t\x12\r\n\x05shape\x18\x03 \x01(\x05\x12\x0e\n\x06shapes\x18\x04 \x03(\x05\x12\r\n\x05names\x18\x05 \x03(\t\x12G\n\ranonymous_map\x18\t \x03(\x0b\x32\x30.com.welab.wefe.core.mlmodel.buffer.AnonymousMap\x12\x0c\n\x04\x63orr\x18\x06 \x03(\x01\x12\x12\n\nlocal_corr\x18\x07 \x03(\x01\x12<\n\tall_names\x18\x08 \x03(\x0b\x32).com.welab.wefe.core.mlmodel.buffer.NamesB\x18\x42\x16PearsonModelParamProtob\x06proto3'
)
_NAMES = _descriptor.Descriptor(
name='Names',
full_name='com.welab.wefe.core.mlmodel.buffer.Names',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.Names.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=87,
)
_ANONYMOUSMAP = _descriptor.Descriptor(
name='AnonymousMap',
full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anonymous', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.anonymous', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=136,
)
_PEARSONMODELPARAM = _descriptor.Descriptor(
name='PearsonModelParam',
full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='party', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.party', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parties', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.parties', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shape', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shapes', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shapes', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.names', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anonymous_map', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.anonymous_map',
index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.corr', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.local_corr', index=7,
number=7, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.all_names', index=8,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=405,
)
# ---------------------------------------------------------------------------
# protoc-generated wiring (regenerate from pearson-model-param.proto rather
# than editing by hand): link message-typed fields to their descriptors,
# register everything with the default symbol database, and synthesize the
# concrete message classes.
# ---------------------------------------------------------------------------
_PEARSONMODELPARAM.fields_by_name['anonymous_map'].message_type = _ANONYMOUSMAP
_PEARSONMODELPARAM.fields_by_name['all_names'].message_type = _NAMES
DESCRIPTOR.message_types_by_name['Names'] = _NAMES
DESCRIPTOR.message_types_by_name['AnonymousMap'] = _ANONYMOUSMAP
DESCRIPTOR.message_types_by_name['PearsonModelParam'] = _PEARSONMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

Names = _reflection.GeneratedProtocolMessageType('Names', (_message.Message,), {
    'DESCRIPTOR': _NAMES,
    '__module__': 'pearson_model_param_pb2'
    # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.Names)
})
_sym_db.RegisterMessage(Names)

AnonymousMap = _reflection.GeneratedProtocolMessageType('AnonymousMap', (_message.Message,), {
    'DESCRIPTOR': _ANONYMOUSMAP,
    '__module__': 'pearson_model_param_pb2'
    # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.AnonymousMap)
})
_sym_db.RegisterMessage(AnonymousMap)

PearsonModelParam = _reflection.GeneratedProtocolMessageType('PearsonModelParam', (_message.Message,), {
    'DESCRIPTOR': _PEARSONMODELPARAM,
    '__module__': 'pearson_model_param_pb2'
    # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.PearsonModelParam)
})
_sym_db.RegisterMessage(PearsonModelParam)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 45.247573 | 747 | 0.695848 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pearson-model-param.proto',
package='com.welab.wefe.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\026PearsonModelParamProto',
serialized_pb=b'\n\x19pearson-model-param.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\x16\n\x05Names\x12\r\n\x05names\x18\x01 \x03(\t\"/\n\x0c\x41nonymousMap\x12\x11\n\tanonymous\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x8a\x02\n\x11PearsonModelParam\x12\r\n\x05party\x18\x01 \x01(\t\x12\x0f\n\x07parties\x18\x02 \x03(\t\x12\r\n\x05shape\x18\x03 \x01(\x05\x12\x0e\n\x06shapes\x18\x04 \x03(\x05\x12\r\n\x05names\x18\x05 \x03(\t\x12G\n\ranonymous_map\x18\t \x03(\x0b\x32\x30.com.welab.wefe.core.mlmodel.buffer.AnonymousMap\x12\x0c\n\x04\x63orr\x18\x06 \x03(\x01\x12\x12\n\nlocal_corr\x18\x07 \x03(\x01\x12<\n\tall_names\x18\x08 \x03(\x0b\x32).com.welab.wefe.core.mlmodel.buffer.NamesB\x18\x42\x16PearsonModelParamProtob\x06proto3'
)
_NAMES = _descriptor.Descriptor(
name='Names',
full_name='com.welab.wefe.core.mlmodel.buffer.Names',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.Names.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=87,
)
_ANONYMOUSMAP = _descriptor.Descriptor(
name='AnonymousMap',
full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anonymous', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.anonymous', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=136,
)
_PEARSONMODELPARAM = _descriptor.Descriptor(
name='PearsonModelParam',
full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='party', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.party', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parties', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.parties', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shape', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shapes', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shapes', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.names', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anonymous_map', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.anonymous_map',
index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.corr', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.local_corr', index=7,
number=7, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.all_names', index=8,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=405,
)
_PEARSONMODELPARAM.fields_by_name['anonymous_map'].message_type = _ANONYMOUSMAP
_PEARSONMODELPARAM.fields_by_name['all_names'].message_type = _NAMES
DESCRIPTOR.message_types_by_name['Names'] = _NAMES
DESCRIPTOR.message_types_by_name['AnonymousMap'] = _ANONYMOUSMAP
DESCRIPTOR.message_types_by_name['PearsonModelParam'] = _PEARSONMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Names = _reflection.GeneratedProtocolMessageType('Names', (_message.Message,), {
'DESCRIPTOR': _NAMES,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(Names)
AnonymousMap = _reflection.GeneratedProtocolMessageType('AnonymousMap', (_message.Message,), {
'DESCRIPTOR': _ANONYMOUSMAP,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(AnonymousMap)
PearsonModelParam = _reflection.GeneratedProtocolMessageType('PearsonModelParam', (_message.Message,), {
'DESCRIPTOR': _PEARSONMODELPARAM,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(PearsonModelParam)
DESCRIPTOR._options = None
| true | true |
f72aaed00855b0147da7146ad4481dc9c1de0fae | 7,616 | py | Python | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class SIGGRAPHGenerator(nn.Module):
    """Interactive colorization network (Zhang et al., SIGGRAPH 2017).

    Consumes a 1-channel L (lightness) image together with optional user
    ab hints (2 channels) and a hint mask (1 channel), and regresses the
    2 ab chrominance channels, squashed to [-1, 1] by a final tanh.

    The attribute names and the order of layers inside each
    ``nn.Sequential`` must stay exactly as-is so that the published
    pretrained checkpoint (see ``siggraph17``) still loads.
    """

    def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):
        """
        :param norm_layer: normalization module applied after each conv stage
        :param classes: output channels of the (unused in forward) ab
            classification head; kept for checkpoint compatibility
        """
        super(SIGGRAPHGenerator, self).__init__()

        # Conv1 -- 4 input channels = L + ab hints + mask (full resolution).
        # Downsampling between stages happens in forward() via ::2 slicing.
        model1 = [
            nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(64),
        ]
        # Conv2 (1/2 resolution)
        model2 = [
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(128),
        ]
        # Conv3 (1/4 resolution)
        model3 = [
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(256),
        ]
        # Conv4 (1/8 resolution)
        model4 = [
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(512),
        ]
        # Conv5 / Conv6 -- dilated convs grow the receptive field while
        # keeping 1/8 resolution.
        model5 = [
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            norm_layer(512),
        ]
        model6 = [
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),
            nn.ReLU(True),
            norm_layer(512),
        ]
        # Conv7
        model7 = [
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(512),
        ]
        # Conv8 -- first decoder stage; model3short8 carries the skip
        # connection from conv3.  (The original comment mislabelled this
        # stage "Conv7" a second time.)
        model8up = [nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]
        model3short8 = [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True)]
        model8 = [
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(256),
        ]
        # Conv9 -- skip connection from conv2.
        model9up = [nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True)]
        model2short9 = [nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True)]
        model9 = [
            nn.ReLU(True),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(True),
            norm_layer(128),
        ]
        # Conv10 -- skip connection from conv1; back to full resolution.
        model10up = [nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True)]
        model1short10 = [nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True)]
        model10 = [
            nn.ReLU(True),
            nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),
            nn.LeakyReLU(negative_slope=.2),
        ]
        # Classification head over ab bins -- not used by forward() but kept
        # so pretrained state dicts load without missing/unexpected keys.
        model_class = [nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True)]
        # Regression head: 2 ab channels, tanh-bounded to [-1, 1].
        model_out = [
            nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),
            nn.Tanh(),
        ]

        self.model1 = nn.Sequential(*model1)
        self.model2 = nn.Sequential(*model2)
        self.model3 = nn.Sequential(*model3)
        self.model4 = nn.Sequential(*model4)
        self.model5 = nn.Sequential(*model5)
        self.model6 = nn.Sequential(*model6)
        self.model7 = nn.Sequential(*model7)
        self.model8up = nn.Sequential(*model8up)
        self.model8 = nn.Sequential(*model8)
        self.model9up = nn.Sequential(*model9up)
        self.model9 = nn.Sequential(*model9)
        self.model10up = nn.Sequential(*model10up)
        self.model10 = nn.Sequential(*model10)
        self.model3short8 = nn.Sequential(*model3short8)
        self.model2short9 = nn.Sequential(*model2short9)
        self.model1short10 = nn.Sequential(*model1short10)
        self.model_class = nn.Sequential(*model_class)
        self.model_out = nn.Sequential(*model_out)
        # Unused by forward(); kept for checkpoint compatibility.
        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'), ])
        self.softmax = nn.Sequential(*[nn.Softmax(dim=1), ])

    def forward(self, input_A, input_B=None, mask_B=None):
        """Colorize.

        :param input_A: (N, 1, H, W) lightness channel
        :param input_B: (N, 2, H, W) user ab hints; zeros when omitted
        :param mask_B: (N, 1, H, W) hint mask; zeros when omitted
        :return: (N, 2, H, W) regressed ab channels in [-1, 1]
        """
        if input_B is None:
            input_B = torch.cat((input_A * 0, input_A * 0), dim=1)
        if mask_B is None:
            mask_B = input_A * 0

        # Encoder; downsampling happens via strided slicing between stages.
        conv1_2 = self.model1(torch.cat((input_A, input_B, mask_B), dim=1))
        conv2_2 = self.model2(conv1_2[:, :, ::2, ::2])
        conv3_3 = self.model3(conv2_2[:, :, ::2, ::2])
        conv4_3 = self.model4(conv3_3[:, :, ::2, ::2])
        conv5_3 = self.model5(conv4_3)
        conv6_3 = self.model6(conv5_3)
        conv7_3 = self.model7(conv6_3)

        # Decoder with skip connections.  BUGFIX: the original computed the
        # conv9/conv10/out_reg stages twice back to back (copy-paste
        # duplication) -- pure wasted compute in eval mode and, in train
        # mode, a double update of model9's BatchNorm running statistics.
        conv8_up = self.re_pad_sum(self.model8up(conv7_3), self.model3short8(conv3_3))
        conv8_3 = self.model8(conv8_up)
        conv9_up = self.re_pad_sum(self.model9up(conv8_3), self.model2short9(conv2_2))
        conv9_3 = self.model9(conv9_up)
        conv10_up = self.re_pad_sum(self.model10up(conv9_3), self.model1short10(conv1_2))
        conv10_2 = self.model10(conv10_up)
        out_reg = self.model_out(conv10_2)
        return out_reg

    def re_pad_sum(self, x, y):
        """Pad ``x`` to ``y``'s spatial size, then return ``x + y``.

        Reconciles transpose-conv output with the skip tensor when the
        input height/width is not a multiple of the downsampling factor.
        F.pad takes [left, right, top, bottom] for the last two dims.
        """
        diffY = y.size()[2] - x.size()[2]
        diffX = y.size()[3] - x.size()[3]
        x = F.pad(x, [diffX // 2, diffX - diffX // 2,
                      diffY // 2, diffY - diffY // 2])
        return x + y
def siggraph17(pretrained=True):
model = SIGGRAPHGenerator()
if(pretrained):
import torch.utils.model_zoo as model_zoo
model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True))
return model | 43.028249 | 157 | 0.616334 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SIGGRAPHGenerator(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):
super(SIGGRAPHGenerator, self).__init__()
model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[norm_layer(64),]
model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[norm_layer(128),]
model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[norm_layer(256),]
model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[norm_layer(512),]
model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[norm_layer(512),]
model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[norm_layer(512),]
model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[norm_layer(512),]
model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]
model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[norm_layer(256),]
model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model9=[nn.ReLU(True),]
model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model9+=[nn.ReLU(True),]
model9+=[norm_layer(128),]
model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model10=[nn.ReLU(True),]
model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),]
model10+=[nn.LeakyReLU(negative_slope=.2),]
model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
model_out+=[nn.Tanh()]
self.model1 = nn.Sequential(*model1)
self.model2 = nn.Sequential(*model2)
self.model3 = nn.Sequential(*model3)
self.model4 = nn.Sequential(*model4)
self.model5 = nn.Sequential(*model5)
self.model6 = nn.Sequential(*model6)
self.model7 = nn.Sequential(*model7)
self.model8up = nn.Sequential(*model8up)
self.model8 = nn.Sequential(*model8)
self.model9up = nn.Sequential(*model9up)
self.model9 = nn.Sequential(*model9)
self.model10up = nn.Sequential(*model10up)
self.model10 = nn.Sequential(*model10)
self.model3short8 = nn.Sequential(*model3short8)
self.model2short9 = nn.Sequential(*model2short9)
self.model1short10 = nn.Sequential(*model1short10)
self.model_class = nn.Sequential(*model_class)
self.model_out = nn.Sequential(*model_out)
self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),])
self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
def forward(self, input_A, input_B=None, mask_B=None):
if(input_B is None):
input_B = torch.cat((input_A*0, input_A*0), dim=1)
if(mask_B is None):
mask_B = input_A*0
conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
conv2_2 = self.model2(conv1_2[:,:,::2,::2])
conv3_3 = self.model3(conv2_2[:,:,::2,::2])
conv4_3 = self.model4(conv3_3[:,:,::2,::2])
conv5_3 = self.model5(conv4_3)
conv6_3 = self.model6(conv5_3)
conv7_3 = self.model7(conv6_3)
conv8_up = self.re_pad_sum(self.model8up(conv7_3), self.model3short8(conv3_3))
conv8_3 = self.model8(conv8_up)
conv9_up = self.re_pad_sum(self.model9up(conv8_3),self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3),self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
conv9_up = self.re_pad_sum(self.model9up(conv8_3), self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3), self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
return out_reg
def re_pad_sum(self, x, y):
diffY = y.size()[2] - x.size()[2]
diffX = y.size()[3] - x.size()[3]
x = F.pad(x, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
return x + y
def siggraph17(pretrained=True):
model = SIGGRAPHGenerator()
if(pretrained):
import torch.utils.model_zoo as model_zoo
model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True))
return model | true | true |
f72aaf0d1aeed3ebfc53a15203a1be8e842a5f86 | 1,624 | py | Python | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | null | null | null | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | 11 | 2020-11-13T18:40:49.000Z | 2022-03-12T00:20:31.000Z | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | null | null | null | """landmark_recognition URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Landmark Recognition Inference API",
default_version='v1',
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('admin/', admin.site.urls),
path('inference/', include('inference_api.urls')),
] + static(prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 38.666667 | 108 | 0.721675 | from django.conf import settings
# URL configuration for the landmark_recognition project.
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view serving the OpenAPI description of the whole API.
# public=True together with AllowAny exposes the docs without authentication.
schema_view = get_schema_view(
    openapi.Info(
        title="Landmark Recognition Inference API",
        default_version='v1',
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# Routes:
#   /swagger.json, /swagger.yaml -> raw schema document
#   /                            -> Swagger UI
#   /redoc/                      -> ReDoc UI
#   /admin/                      -> Django admin
#   /inference/...               -> inference_api application
# static() appends routes serving MEDIA_ROOT under MEDIA_URL
# (development convenience; a no-op in production setups).
urlpatterns = [
    url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    url(r'^$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    path('admin/', admin.site.urls),
    path('inference/', include('inference_api.urls')),
] + static(prefix=settings.MEDIA_URL,
           document_root=settings.MEDIA_ROOT)
| true | true |
f72aaf58fab32e715e1c98ff0e845edadd9fd68c | 3,567 | py | Python | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | import uvicorn
from neo4j import GraphDatabase
from ariadne.asgi import GraphQL
from neo4j_graphql_py import neo4j_graphql
from ariadne import QueryType, make_executable_schema, MutationType, gql
typeDefs = gql('''
directive @cypher(statement: String!) on FIELD_DEFINITION
directive @relation(name:String!, direction:String!) on FIELD_DEFINITION
type Movie {
_id: ID
movieId: ID!
title: String
tagline: String
year: Int
plot: String
poster: String
imdbRating: Float
genres: [Genre] @relation(name: "IN_GENRE", direction: "OUT")
similar(first: Int = 3, offset: Int = 0, limit: Int = 5): [Movie] @cypher(statement: "WITH {this} AS this MATCH (this)--(:Genre)--(o:Movie) RETURN o LIMIT {limit}")
mostSimilar: Movie @cypher(statement: "WITH {this} AS this RETURN this")
degree: Int @cypher(statement: "WITH {this} AS this RETURN SIZE((this)--())")
actors(first: Int = 3, offset: Int = 0): [Actor] @relation(name: "ACTED_IN", direction:"IN")
avgStars: Float
filmedIn: State @relation(name: "FILMED_IN", direction: "OUT")
scaleRating(scale: Int = 3): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
scaleRatingFloat(scale: Float = 1.5): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
}
type Genre {
_id: ID!
name: String
movies(first: Int = 3, offset: Int = 0): [Movie] @relation(name: "IN_GENRE", direction: "IN")
highestRatedMovie: Movie @cypher(statement: "MATCH (m:Movie)-[:IN_GENRE]->(this) RETURN m ORDER BY m.imdbRating DESC LIMIT 1")
}
type State {
name: String
}
interface Person {
id: ID!
name: String
}
type Actor {
id: ID!
name: String
movies: [Movie] @relation(name: "ACTED_IN", direction: "OUT")
}
type User implements Person {
id: ID!
name: String
}
enum BookGenre {
Mystery,
Science,
Math
}
type Book {
title: String!
genre: BookGenre
}
type Query {
Movie(id: ID, title: String, year: Int, plot: String, poster: String, imdbRating: Float, first: Int, offset: Int): [Movie]
MoviesByYear(year: Int): [Movie]
AllMovies: [Movie]
MovieById(movieId: ID!): Movie
GenresBySubstring(substring: String): [Genre] @cypher(statement: "MATCH (g:Genre) WHERE toLower(g.name) CONTAINS toLower($substring) RETURN g")
Books: [Book]
Actors: [Actor]
}
type Mutation {
CreateGenre(name: String): Genre @cypher(statement: "CREATE (g:Genre) SET g.name = $name RETURN g")
CreateMovie(movieId: ID!, title: String, year: Int, plot: String, poster: String, imdbRating: Float): Movie
CreateBook(title: String!,genre: BookGenre): Book @cypher(statement: "CREATE (b:Book) SET b.title = $title, b.genre = $genre RETURN b")
}
'''
)
query = QueryType()
mutation = MutationType()
# @mutation.field('AddMovieGenre')
@query.field('Actors')
@query.field('Movie')
@query.field('MoviesByYear')
@query.field('AllMovies')
@query.field('MovieById')
@query.field('GenresBySubstring')
@query.field('Books')
@mutation.field('CreateGenre')
@mutation.field('CreateMovie')
@mutation.field('CreateBook')
async def resolve(obj, info, **kwargs):
    """Shared resolver for every registered query and mutation field.

    Delegates to neo4j-graphql-py, which translates the incoming GraphQL
    request into Cypher and executes it with the driver supplied via
    ``info.context``.  NOTE(review): the fourth positional argument
    (``True``) is presumably a debug flag -- confirm against the
    neo4j_graphql_py signature.
    """
    return await neo4j_graphql(obj, info.context, info, True, **kwargs)
# Build the executable schema and serve it with Ariadne on uvicorn.
schema = make_executable_schema(typeDefs, query, mutation)

# Module-level Neo4j driver; created lazily by context() on first request.
driver = None


def context(request):
    """Per-request GraphQL context factory.

    Lazily creates a single shared Neo4j driver on first use and hands it
    to the resolvers together with the current request.
    """
    global driver
    if driver is None:
        # NOTE(review): hard-coded localhost credentials -- acceptable for
        # an example, but real deployments should read these from config.
        driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "123456"))
    return {'driver': driver, 'request': request}


root_value = {}
app = GraphQL(schema=schema, root_value=root_value, context_value=context, debug=True)
uvicorn.run(app)
# BUGFIX: when the server shuts down without ever serving a request,
# ``driver`` is still None and the unconditional ``driver.close()``
# raised AttributeError.  Close only if the driver was actually created.
if driver is not None:
    driver.close()
| 31.566372 | 166 | 0.702832 | import uvicorn
from neo4j import GraphDatabase
from ariadne.asgi import GraphQL
from neo4j_graphql_py import neo4j_graphql
from ariadne import QueryType, make_executable_schema, MutationType, gql
typeDefs = gql('''
directive @cypher(statement: String!) on FIELD_DEFINITION
directive @relation(name:String!, direction:String!) on FIELD_DEFINITION
type Movie {
_id: ID
movieId: ID!
title: String
tagline: String
year: Int
plot: String
poster: String
imdbRating: Float
genres: [Genre] @relation(name: "IN_GENRE", direction: "OUT")
similar(first: Int = 3, offset: Int = 0, limit: Int = 5): [Movie] @cypher(statement: "WITH {this} AS this MATCH (this)--(:Genre)--(o:Movie) RETURN o LIMIT {limit}")
mostSimilar: Movie @cypher(statement: "WITH {this} AS this RETURN this")
degree: Int @cypher(statement: "WITH {this} AS this RETURN SIZE((this)--())")
actors(first: Int = 3, offset: Int = 0): [Actor] @relation(name: "ACTED_IN", direction:"IN")
avgStars: Float
filmedIn: State @relation(name: "FILMED_IN", direction: "OUT")
scaleRating(scale: Int = 3): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
scaleRatingFloat(scale: Float = 1.5): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
}
type Genre {
_id: ID!
name: String
movies(first: Int = 3, offset: Int = 0): [Movie] @relation(name: "IN_GENRE", direction: "IN")
highestRatedMovie: Movie @cypher(statement: "MATCH (m:Movie)-[:IN_GENRE]->(this) RETURN m ORDER BY m.imdbRating DESC LIMIT 1")
}
type State {
name: String
}
interface Person {
id: ID!
name: String
}
type Actor {
id: ID!
name: String
movies: [Movie] @relation(name: "ACTED_IN", direction: "OUT")
}
type User implements Person {
id: ID!
name: String
}
enum BookGenre {
Mystery,
Science,
Math
}
type Book {
title: String!
genre: BookGenre
}
type Query {
Movie(id: ID, title: String, year: Int, plot: String, poster: String, imdbRating: Float, first: Int, offset: Int): [Movie]
MoviesByYear(year: Int): [Movie]
AllMovies: [Movie]
MovieById(movieId: ID!): Movie
GenresBySubstring(substring: String): [Genre] @cypher(statement: "MATCH (g:Genre) WHERE toLower(g.name) CONTAINS toLower($substring) RETURN g")
Books: [Book]
Actors: [Actor]
}
type Mutation {
CreateGenre(name: String): Genre @cypher(statement: "CREATE (g:Genre) SET g.name = $name RETURN g")
CreateMovie(movieId: ID!, title: String, year: Int, plot: String, poster: String, imdbRating: Float): Movie
CreateBook(title: String!,genre: BookGenre): Book @cypher(statement: "CREATE (b:Book) SET b.title = $title, b.genre = $genre RETURN b")
}
'''
)
query = QueryType()
mutation = MutationType()
@query.field('Actors')
@query.field('Movie')
@query.field('MoviesByYear')
@query.field('AllMovies')
@query.field('MovieById')
@query.field('GenresBySubstring')
@query.field('Books')
@mutation.field('CreateGenre')
@mutation.field('CreateMovie')
@mutation.field('CreateBook')
async def resolve(obj, info, **kwargs):
return await neo4j_graphql(obj, info.context, info, True, **kwargs)
schema = make_executable_schema(typeDefs, query, mutation)
driver = None
def context(request):
global driver
if driver is None:
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "123456"))
return {'driver': driver, 'request': request}
root_value = {}
app = GraphQL(schema=schema, root_value=root_value, context_value=context, debug=True)
uvicorn.run(app)
driver.close()
| true | true |
f72ab07a46237929635bd213f93c69e456b0ae1e | 2,868 | py | Python | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | 31 | 2018-07-31T15:40:07.000Z | 2019-02-20T11:07:15.000Z | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | from jumpscale import j
from time import sleep
# Base class for prefab application modules (provides the self.prefab helpers).
app = j.tools.prefab._getBaseAppClass()


class PrefabS3Scality(app):
    """Prefab module that installs and runs the Scality S3 server (Node.js)."""

    # Module name used by the prefab framework to register this app.
    NAME = 's3scality'

    def install(self, start=False, storageLocation="/data/", metaLocation="/meta/"):
        """
        put backing store on /storage/...

        Clones the Scality S3 repository, builds it with npm, moves it under
        $JSAPPSDIR and registers an npm script 'start_location' that launches
        the server with the chosen data/meta directories.

        :param start: when True, launch the S3 server after installation.
        :param storageLocation: directory used as S3DATAPATH (object data).
        :param metaLocation: directory used as S3METADATAPATH (metadata).
        """
        self.prefab.system.package.mdupdate()
        self.prefab.system.package.install('build-essential')
        # node-gyp builds of native npm modules require python2.7
        self.prefab.system.package.install('python2.7')
        self.prefab.core.dir_ensure(storageLocation)
        self.prefab.core.dir_ensure(metaLocation)
        self.prefab.core.dir_ensure('/opt/code/github/scality')
        path = self.prefab.tools.git.pullRepo('https://github.com/scality/S3.git', ssh=False)
        profile = self.prefab.bash.profileDefault
        profile.addPath(self.prefab.core.dir_paths['BINDIR'])
        profile.save()
        self.prefab.runtimes.nodejs.install()
        self.prefab.core.run('cd {} && npm install --python=python2.7'.format(path), profile=True)
        # Reinstall into $JSAPPSDIR from scratch on every run.
        self.prefab.core.dir_remove('$JSAPPSDIR/S3', recursive=True)
        self.prefab.core.dir_ensure('$JSAPPSDIR')
        self.prefab.core.run('mv {} $JSAPPSDIR/'.format(path))
        # Register an npm script that starts the server with the chosen paths.
        cmd = 'S3DATAPATH={data} S3METADATAPATH={meta} npm start'.format(
            data=storageLocation,
            meta=metaLocation,
        )
        content = self.prefab.core.file_read('$JSAPPSDIR/S3/package.json')
        pkg = j.data.serializer.json.loads(content)
        pkg['scripts']['start_location'] = cmd
        content = j.data.serializer.json.dumps(pkg, indent=True)
        self.prefab.core.file_write('$JSAPPSDIR/S3/package.json', content)
        if start:
            self.start()

    def start(self, name=NAME):
        """Start the Scality S3 server via the npm 'start_location' script.

        :param name: app name (unused here; kept for the prefab start API).
        """
        nodePath = '$BASEDIR/node/lib/node_modules'
        # Temporary. Should be removed after updating the building process
        self.prefab.core.dir_ensure('/data/data')
        self.prefab.core.dir_ensure('/data/meta')
        # Temporary. npm install should be added to install() function after updating the building process
        if not self.prefab.core.dir_exists('%s/npm-run-all' % nodePath):
            self.prefab.core.run('npm install npm-run-all')
        # Prepend the app's own node_modules so the runtime can resolve them.
        nodePath = self.prefab.core.replace('$BASEDIR/node/lib/node_modules/s3/node_modules:%s' % nodePath)
        if self.prefab.bash.profileDefault.envGet('NODE_PATH') != nodePath:
            self.prefab.bash.profileDefault.envSet("NODE_PATH", nodePath)
            self.prefab.bash.profileDefault.addPath(self.prefab.core.replace("$BASEDIR/node/bin/"))
            self.prefab.bash.profileDefault.save()
        path = j.sal.fs.joinPaths(j.dirs.JSAPPSDIR, 'S3')
        self.prefab.core.run('cd {} && npm run start_location'.format(path), profile=True)

    def test(self):
        """Not implemented yet; see the comment below for the intended test."""
        # put/get file over S3 interface using a python S3 lib
        raise NotImplementedError
| 43.454545 | 107 | 0.658996 | from jumpscale import j
from time import sleep
# Base class for prefab application modules (provides the self.prefab helpers).
app = j.tools.prefab._getBaseAppClass()
class PrefabS3Scality(app):
    """Prefab module that installs and runs the Scality S3 server (Node.js)."""
    # Module name used by the prefab framework to register this app.
    NAME = 's3scality'
    def install(self, start=False, storageLocation="/data/", metaLocation="/meta/"):
        """Clone, build and register the Scality S3 server under $JSAPPSDIR.

        :param start: when True, launch the S3 server after installation.
        :param storageLocation: directory used as S3DATAPATH (object data).
        :param metaLocation: directory used as S3METADATAPATH (metadata).
        """
        self.prefab.system.package.mdupdate()
        self.prefab.system.package.install('build-essential')
        # node-gyp builds of native npm modules require python2.7
        self.prefab.system.package.install('python2.7')
        self.prefab.core.dir_ensure(storageLocation)
        self.prefab.core.dir_ensure(metaLocation)
        self.prefab.core.dir_ensure('/opt/code/github/scality')
        path = self.prefab.tools.git.pullRepo('https://github.com/scality/S3.git', ssh=False)
        profile = self.prefab.bash.profileDefault
        profile.addPath(self.prefab.core.dir_paths['BINDIR'])
        profile.save()
        self.prefab.runtimes.nodejs.install()
        self.prefab.core.run('cd {} && npm install --python=python2.7'.format(path), profile=True)
        # Reinstall into $JSAPPSDIR from scratch on every run.
        self.prefab.core.dir_remove('$JSAPPSDIR/S3', recursive=True)
        self.prefab.core.dir_ensure('$JSAPPSDIR')
        self.prefab.core.run('mv {} $JSAPPSDIR/'.format(path))
        # Register an npm script that starts the server with the chosen paths.
        cmd = 'S3DATAPATH={data} S3METADATAPATH={meta} npm start'.format(
            data=storageLocation,
            meta=metaLocation,
        )
        content = self.prefab.core.file_read('$JSAPPSDIR/S3/package.json')
        pkg = j.data.serializer.json.loads(content)
        pkg['scripts']['start_location'] = cmd
        content = j.data.serializer.json.dumps(pkg, indent=True)
        self.prefab.core.file_write('$JSAPPSDIR/S3/package.json', content)
        if start:
            self.start()
    def start(self, name=NAME):
        """Start the Scality S3 server via the npm 'start_location' script.

        :param name: app name (unused here; kept for the prefab start API).
        """
        nodePath = '$BASEDIR/node/lib/node_modules'
        # Temporary workaround: ensure default data/meta dirs exist.
        self.prefab.core.dir_ensure('/data/data')
        self.prefab.core.dir_ensure('/data/meta')
        # Temporary workaround: npm-run-all should be installed by install().
        if not self.prefab.core.dir_exists('%s/npm-run-all' % nodePath):
            self.prefab.core.run('npm install npm-run-all')
        # Prepend the app's own node_modules so the runtime can resolve them.
        nodePath = self.prefab.core.replace('$BASEDIR/node/lib/node_modules/s3/node_modules:%s' % nodePath)
        if self.prefab.bash.profileDefault.envGet('NODE_PATH') != nodePath:
            self.prefab.bash.profileDefault.envSet("NODE_PATH", nodePath)
            self.prefab.bash.profileDefault.addPath(self.prefab.core.replace("$BASEDIR/node/bin/"))
            self.prefab.bash.profileDefault.save()
        path = j.sal.fs.joinPaths(j.dirs.JSAPPSDIR, 'S3')
        self.prefab.core.run('cd {} && npm run start_location'.format(path), profile=True)
    def test(self):
        """Not implemented: should put/get a file over the S3 interface."""
        raise NotImplementedError
| true | true |
f72ab0a07e44dd8dfddbb6dd81911777030d7752 | 3,238 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ResourcePrivateLinkClientConfiguration(Configuration):
    """Configuration for ResourcePrivateLinkClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(ResourcePrivateLinkClientConfiguration, self).__init__(**kwargs)
        # Validate required arguments in declaration order.
        for param_name, param_value in (("credential", credential), ("subscription_id", subscription_id)):
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-05-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Instantiate each pipeline policy unless the caller supplied one,
        # preserving the original construction order.
        default_policies = (
            ('user_agent_policy', policies.UserAgentPolicy),
            ('headers_policy', policies.HeadersPolicy),
            ('proxy_policy', policies.ProxyPolicy),
            ('logging_policy', policies.NetworkTraceLoggingPolicy),
            ('http_logging_policy', ARMHttpLoggingPolicy),
            ('retry_policy', policies.RetryPolicy),
            ('custom_hook_policy', policies.CustomHookPolicy),
            ('redirect_policy', policies.RedirectPolicy),
        )
        for attr_name, policy_cls in default_policies:
            setattr(self, attr_name, kwargs.get(attr_name) or policy_cls(**kwargs))
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| 46.927536 | 125 | 0.693638 |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class ResourcePrivateLinkClientConfiguration(Configuration):
    """Configuration for ResourcePrivateLinkClient.

    All constructor parameters are saved as instance attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """
    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(ResourcePrivateLinkClientConfiguration, self).__init__(**kwargs)
        # Both arguments are mandatory; fail fast with a clear message.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        # API version is fixed for this generated client.
        self.api_version = "2020-05-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        """Create the default pipeline policies unless overridden via kwargs."""
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Default to ARM challenge-based auth when a credential is available.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.