code stringlengths 101 5.91M |
|---|
def has_valid_annotation(anno, ann_types, filter_crowd=True):
    """Decide whether a COCO-style annotation list is usable for training.

    Rejects empty annotation lists, (optionally) crowd-only images,
    images whose boxes are all degenerate and, when 'keypoints' is among
    ``ann_types``, images with too few visible keypoints.
    """
    if not anno:
        return False
    if filter_crowd and 'iscrowd' in anno[0]:
        # Drop crowd objects; an image containing only crowds is unusable.
        anno = [obj for obj in anno if obj['iscrowd'] == 0]
        if not anno:
            return False
    if _has_only_empty_bbox(anno):
        return False
    if 'keypoints' not in ann_types:
        return True
    # Keypoint training additionally requires a minimum visible-keypoint count.
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
@pytest.mark.parametrize('predictions, expected_result', [
    (([f'SELECT * FROM {TABLE_NAME}'] * 3), []),
    (([f'SELECT "name" FROM {TABLE_NAME}'] * 3), ['cell_recall_prediction_result', 'tuple_constraint_prediction_result']),
])
def test_evaluate_with_df_no_order(metric_evaluator, predictions, expected_result):
    """Metrics listed in ``expected_result`` must NOT be perfect (1.0) while all
    other metrics must be exactly 1.0 for every row.

    Fix(review): the leading ``.parametrize(...)`` line was a stripped
    decorator (it is a syntax error on its own); restored as
    ``@pytest.mark.parametrize``.
    """
    PREDICTION_DATAFRAME['prediction'] = predictions
    df = metric_evaluator.evaluate_with_df(PREDICTION_DATAFRAME, 'prediction', task='SP')
    metrics = metric_evaluator.metrics
    for metric in metrics:
        metric = f'{metric}_prediction_result'
        if (metric == 'tuple_order_prediction_result'):
            # Tuple order is not evaluated for this task: column is all None.
            assert (df[metric].tolist() == ([None] * len(PREDICTION_DATAFRAME)))
            continue
        if (metric not in expected_result):
            assert (df[metric].tolist() == ([1.0] * len(PREDICTION_DATAFRAME)))
        else:
            assert (df[metric].tolist() != ([1.0] * len(PREDICTION_DATAFRAME)))
class OnlineRUSBoostClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
    """Online RUSBoost ensemble classifier for imbalanced binary streams.

    Trains ``n_estimators`` incremental copies of ``base_estimator`` with an
    online-boosting weight ``lam`` that is rescaled per sample into ``lam_rus``
    by one of three random-undersampling strategies (selected via
    ``algorithm`` in {1, 2, 3}) so the minority class is rebalanced on the
    fly.  When ``drift_detection`` is enabled each member is paired with an
    ADWIN detector, and on a detected drift the member with the highest
    estimated error is reset.

    NOTE(review): labels are assumed binary and encoded as 0/1 — the positive
    (minority) class is tested with ``y[j] == 1``; confirm against callers.
    """
    def __init__(self, base_estimator=KNNADWINClassifier(), n_estimators=10, sampling_rate=3, algorithm=1, drift_detection=True, random_state=None):
        # NOTE(review): the default base_estimator instance is created once at
        # import time and shared by every construction (mutable default);
        # __configure() calls reset() on it before deep-copying, which limits
        # the impact — confirm this is intended.
        super().__init__()
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.random_state = random_state
        self.sampling_rate = sampling_rate
        self.algorithm = algorithm
        self.drift_detection = drift_detection
        # Everything below is lazily initialised by __configure() on the
        # first partial_fit() call.
        self.ensemble = None
        self.actual_n_estimators = None
        self.classes = None
        self._random_state = None
        self.adwin_ensemble = None
        self.lam_sc = None   # per-member accumulated correctly-classified weight
        self.lam_pos = None  # per-member accumulated positive-class weight
        self.lam_neg = None  # per-member accumulated negative-class weight
        self.lam_sw = None   # per-member accumulated misclassified weight
        self.epsilon = None  # per-member weighted error estimate
    def __configure(self):
        """(Re)build the ensemble, drift detectors and boosting statistics."""
        if hasattr(self.base_estimator, 'reset'):
            self.base_estimator.reset()
        self.actual_n_estimators = self.n_estimators
        self.adwin_ensemble = []
        for i in range(self.actual_n_estimators):
            self.adwin_ensemble.append(ADWIN())
        self.ensemble = [cp.deepcopy(self.base_estimator) for _ in range(self.actual_n_estimators)]
        self._random_state = check_random_state(self.random_state)
        self.lam_sc = np.zeros(self.actual_n_estimators)
        self.lam_pos = np.zeros(self.actual_n_estimators)
        self.lam_neg = np.zeros(self.actual_n_estimators)
        self.lam_sw = np.zeros(self.actual_n_estimators)
        self.epsilon = np.zeros(self.actual_n_estimators)
        # Running counts of positive/negative samples seen so far.
        self.n_pos = 0
        self.n_neg = 0
    def reset(self):
        """Discard all learned state and reinitialise the ensemble."""
        self.__configure()
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incrementally fit the ensemble on the samples in ``X``.

        ``classes`` must be supplied on the first call and must not change on
        later calls.  Returns ``self``.

        NOTE(review): ``sample_weight`` is only forwarded to the base
        estimators, not folded into the boosting weights — confirm intended.
        """
        if (self.ensemble is None):
            self.__configure()
        if (self.classes is None):
            if (classes is None):
                raise ValueError('The first partial_fit call should pass all the classes.')
            else:
                self.classes = classes
        if ((self.classes is not None) and (classes is not None)):
            if (set(self.classes) == set(classes)):
                pass
            else:
                raise ValueError('The classes passed to the partial_fit function differ from those passed earlier.')
        self.__adjust_ensemble_size()
        (r, _) = get_dimensions(X)
        for j in range(r):
            change_detected = False
            # Boosting weight carried across ensemble members for this sample.
            lam = 1
            for i in range(self.actual_n_estimators):
                if (y[j] == 1):
                    self.lam_pos[i] += lam
                    self.n_pos += 1
                else:
                    self.lam_neg[i] += lam
                    self.n_neg += 1
                # lam_rus is lam rescaled by the selected undersampling rule;
                # presumably the three RUSBoost variants from the online
                # RUSBoost literature — TODO confirm against the paper.
                lam_rus = 1
                if (self.algorithm == 1):
                    if (y[j] == 1):
                        if (self.n_neg != 0):
                            lam_rus = (lam * (((self.lam_pos[i] + self.lam_neg[i]) / (self.lam_pos[i] + (self.lam_neg[i] * (self.sampling_rate * (self.n_pos / self.n_neg))))) * (((self.sampling_rate + 1) * self.n_pos) / (self.n_pos + self.n_neg))))
                    elif (self.n_pos != 0):
                        lam_rus = (lam * (((self.lam_pos[i] + self.lam_neg[i]) / (self.lam_pos[i] + (self.lam_neg[i] * (self.n_neg / (self.n_pos * self.sampling_rate))))) * (((self.sampling_rate + 1) * self.n_pos) / (self.n_pos + self.n_neg))))
                elif (self.algorithm == 2):
                    if (y[j] == 1):
                        lam_rus = (((lam * self.n_pos) / (self.n_pos + self.n_neg)) / (self.lam_pos[i] / (self.lam_pos[i] + self.lam_neg[i])))
                    else:
                        lam_rus = ((((lam * self.sampling_rate) * self.n_pos) / (self.n_pos + self.n_neg)) / (self.lam_neg[i] / (self.lam_pos[i] + self.lam_neg[i])))
                elif (self.algorithm == 3):
                    if (y[j] == 1):
                        lam_rus = lam
                    else:
                        # Negatives are undersampled by a fixed factor.
                        lam_rus = (lam / self.sampling_rate)
                # Online boosting: train this member k ~ Poisson(lam_rus) times.
                k = self._random_state.poisson(lam_rus)
                if (k > 0):
                    for b in range(k):
                        self.ensemble[i].partial_fit([X[j]], [y[j]], classes, sample_weight)
                # Update the member's weighted error and rescale lam for the
                # next member (AdaBoost-style reweighting).
                if (self.ensemble[i].predict([X[j]])[0] == y[j]):
                    self.lam_sc[i] += lam
                    self.epsilon[i] = (self.lam_sw[i] / (self.lam_sc[i] + self.lam_sw[i]))
                    if (self.epsilon[i] != 1):
                        lam = (lam / (2 * (1 - self.epsilon[i])))
                else:
                    self.lam_sw[i] += lam
                    self.epsilon[i] = (self.lam_sw[i] / (self.lam_sc[i] + self.lam_sw[i]))
                    if (self.epsilon[i] != 0):
                        lam = (lam / (2 * self.epsilon[i]))
                if self.drift_detection:
                    try:
                        pred = self.ensemble[i].predict(X)
                        error_estimation = self.adwin_ensemble[i].estimation
                        # NOTE(review): loop variable k shadows the Poisson
                        # draw above; harmless here since k is not reused after.
                        for k in range(r):
                            if (pred[k] is not None):
                                self.adwin_ensemble[i].add_element(int((pred[k] == y[k])))
                        if self.adwin_ensemble[i].detected_change():
                            if (self.adwin_ensemble[i].estimation > error_estimation):
                                change_detected = True
                    except ValueError:
                        change_detected = False
                        pass
            if (change_detected and self.drift_detection):
                # Reset the member with the worst ADWIN error estimate.
                max_threshold = 0.0
                i_max = (- 1)
                for i in range(self.actual_n_estimators):
                    if (max_threshold < self.adwin_ensemble[i].estimation):
                        max_threshold = self.adwin_ensemble[i].estimation
                        i_max = i
                if (i_max != (- 1)):
                    self.ensemble[i_max].reset()
                    self.adwin_ensemble[i_max] = ADWIN()
        return self
    def __adjust_ensemble_size(self):
        """Grow the ensemble when more classes than members are known.

        NOTE(review): growing re-zeroes all boosting statistics, discarding
        the accumulated weights of existing members — confirm intended.
        """
        if (len(self.classes) != len(self.ensemble)):
            if (len(self.classes) > len(self.ensemble)):
                for i in range(len(self.ensemble), len(self.classes)):
                    self.ensemble.append(cp.deepcopy(self.base_estimator))
                    self.actual_n_estimators += 1
                    self.adwin_ensemble.append(ADWIN())
                self.lam_sc = np.zeros(self.actual_n_estimators)
                self.lam_pos = np.zeros(self.actual_n_estimators)
                self.lam_neg = np.zeros(self.actual_n_estimators)
                self.lam_sw = np.zeros(self.actual_n_estimators)
                self.epsilon = np.zeros(self.actual_n_estimators)
    def predict(self, X):
        """Predict class labels for ``X`` as the argmax of predict_proba per row."""
        (r, c) = get_dimensions(X)
        proba = self.predict_proba(X)
        predictions = []
        if (proba is None):
            return None
        for i in range(r):
            predictions.append(np.argmax(proba[i]))
        return np.asarray(predictions)
    def predict_proba(self, X):
        """Aggregate the members' probabilities into normalized class votes.

        Each member's contribution is weighted by log((1 - eps) / eps), the
        usual boosting voting weight; rows are normalized to sum to 1 when
        possible.  Returns zeros on failure (unfitted ensemble, class-count
        mismatch, or numeric warnings escalated to errors).
        """
        proba = []
        (r, c) = get_dimensions(X)
        if (self.ensemble is None):
            return np.zeros((r, 1))
        with warnings.catch_warnings():
            # Escalate RuntimeWarnings (e.g. log of 0) so bad members are skipped.
            warnings.filterwarnings('error')
            try:
                for i in range(self.actual_n_estimators):
                    partial_proba = self.ensemble[i].predict_proba(X)
                    if (len(partial_proba[0]) > (max(self.classes) + 1)):
                        raise ValueError('The number of classes in the base learner is larger than in the ensemble.')
                    if (len(proba) < 1):
                        for n in range(r):
                            proba.append([0.0 for _ in partial_proba[n]])
                    for n in range(r):
                        for k in range(len(partial_proba[n])):
                            try:
                                proba[n][k] += (np.log(((1 - self.epsilon[i]) / self.epsilon[i])) * partial_proba[n][k])
                            except IndexError:
                                proba[n].append(partial_proba[n][k])
            except RuntimeWarning:
                continue
            except ValueError:
                return np.zeros((r, 1))
            except TypeError:
                return np.zeros((r, 1))
        sum_proba = []
        for k in range(r):
            sum_proba.append(np.sum(proba[k]))
        aux = []
        for i in range(len(proba)):
            if (sum_proba[i] > 0.0):
                aux.append([(x / sum_proba[i]) for x in proba[i]])
            else:
                aux.append(proba[i])
        return np.asarray(aux)
class Test_logg(TestCase):
    """Tests for the ``eq.Logg`` surface-gravity relation."""
    def test_works_wasp10(self):
        # Literature log(g) for WASP-10 (stellar mass/radius in solar units).
        answer = 4.51
        result = eq.Logg((0.703 * aq.M_s), (0.775 * aq.R_s)).logg
        self.assertAlmostEqual(answer, result, 1)
    @given(M=floats(0.0001, 10000), R=floats(0.0001, 10000))
    def test_can_derive_other_vars_from_one_calculated(self, M, R):
        """Property test: each of M, R, logg is recoverable from the other two.

        Fix(review): the bare ``(M=floats(...), R=floats(...))`` line above the
        ``def`` was a stripped decorator (a syntax error on its own); the use of
        ``assume``/``floats`` marks it as Hypothesis's ``@given`` — restored.
        """
        assume(((M > 0) and (R > 0)))
        inf = float('inf')
        assume(((M < inf) and (R < inf)))
        R *= aq.R_j
        M *= aq.M_j
        logg = eq.Logg(M, R).logg
        self.assertAlmostEqual(eq.Logg(M, R, None).logg, logg, 3)
        self.assertAlmostEqual(eq.Logg(M, None, logg).R, R, 3)
        self.assertAlmostEqual(eq.Logg(None, R, logg).M, M, 3)
class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Stores vocabulary/positional sizes, transformer dimensions, dropout
    probabilities and sequence-summary options; everything else is handled
    by ``PretrainedConfig`` via ``**kwargs``.

    Fix(review): the four accessor methods at the bottom lost their
    ``@property`` decorators (upstream transformers defines them as read-only
    property aliases); restored so ``config.hidden_size`` etc. work again.
    """
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self, vocab_size=246534, n_positions=256, n_ctx=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.use_cache = use_cache
    @property
    def max_position_embeddings(self):
        # Alias used by model-agnostic code paths.
        return self.n_positions
    @property
    def hidden_size(self):
        return self.n_embd
    @property
    def num_attention_heads(self):
        return self.n_head
    @property
    def num_hidden_layers(self):
        return self.n_layer
class FiniteDimensionalAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
    """Category of finite dimensional algebras with a distinguished basis.

    NOTE(review): the bare ``_method`` / ``_method(optional=True)`` /
    ``_attribute`` expressions scattered below look like stripped decorators
    (most likely Sage's ``@abstract_method`` / ``@lazy_attribute``); several
    ``def`` statements also lost their bodies.  Restore from the upstream
    ``sage.categories`` source before running this code.
    """
    class ParentMethods():
        _method
        def radical_basis(self):
            """Return a basis of the Jacobson radical of this algebra.

            In characteristic 0 the radical is the kernel of the trace-form
            matrix built from the structure constants; in characteristic p a
            kernel is iteratively refined using p-th roots.
            """
            F = self.base_ring()
            if (not F.is_field()):
                raise NotImplementedError('the base ring must be a field')
            p = F.characteristic()
            from sage.matrix.constructor import matrix
            from sage.modules.free_module_element import vector
            product_on_basis = self.product_on_basis
            if (p == 0):
                keys = list(self.basis().keys())
                # cache[y] maps (i, j) to the coefficient of j in y * i.
                cache = [{(i, j): c for i in keys for (j, c) in product_on_basis(y, i)} for y in keys]
                mat = [[sum(((x.get((j, i), 0) * c) for ((i, j), c) in y.items())) for x in cache] for y in cache]
                mat = matrix(self.base_ring(), mat)
                rad_basis = mat.kernel().basis()
            else:
                # Positive characteristic: take s-th roots where supported.
                if hasattr(self.base_ring().one(), 'nth_root'):
                    root_fcn = (lambda s, x: x.nth_root(s))
                else:
                    root_fcn = (lambda s, x: (x ** (1 / s)))
                (s, n) = (1, self.dimension())
                B = [b.on_left_matrix() for b in self.basis()]
                I = B[0].parent().one()
                while (s <= n):
                    BB = (B + [I])
                    G = matrix([[(((- 1) ** s) * (b * bb).characteristic_polynomial()[(n - s)]) for bb in BB] for b in B])
                    C = G.left_kernel().basis()
                    if (1 < s < F.order()):
                        C = [vector(F, [root_fcn(s, ci) for ci in c]) for c in C]
                    B = [sum(((ci * b) for (ci, b) in zip(c, B))) for c in C]
                    s = (p * s)
                e = vector(self.one())
                rad_basis = [(b * e) for b in B]
            return tuple([self.from_vector(vec) for vec in rad_basis])
        _method
        def radical(self):
            """Return the Jacobson radical of this algebra as a subobject."""
            category = AssociativeAlgebras(self.base_ring()).WithBasis().FiniteDimensional().Subobjects()
            radical = self.submodule(self.radical_basis(), category=category, already_echelonized=True)
            radical.rename('Radical of {}'.format(self))
            return radical
        _method
        def semisimple_quotient(self):
            """Return the quotient of this algebra by its radical."""
            ring = self.base_ring()
            category = Algebras(ring).WithBasis().FiniteDimensional().Quotients().Semisimple()
            result = self.quotient_module(self.radical(), category=category)
            result.rename('Semisimple quotient of {}'.format(self))
            return result
        _method
        def center_basis(self):
            """Return a basis of the center as the annihilator of all brackets."""
            return self.annihilator_basis(self.algebra_generators(), self.bracket)
        _method
        def center(self):
            """Return the center of this algebra as a commutative subalgebra."""
            category = Algebras(self.base_ring()).FiniteDimensional().Subobjects().Commutative().WithBasis()
            if (self in Algebras.Semisimple):
                category = category.Semisimple()
            center = self.submodule(self.center_basis(), category=category, already_echelonized=True)
            center.rename('Center of {}'.format(self))
            return center
        def principal_ideal(self, a, side='left'):
            """Return the left (or right, per ``side``) principal ideal generated by ``a``."""
            return self.submodule([((a * b) if (side == 'right') else (b * a)) for b in self.basis()])
        _method
        def orthogonal_idempotents_central_mod_radical(self):
            """Lift the semisimple quotient's central orthogonal idempotents."""
            one = self.one()
            idempotents = []
            f = self.zero()
            for g in self.semisimple_quotient().central_orthogonal_idempotents():
                # Conjugating by (1 - f) keeps each new lift orthogonal to
                # the previously constructed ones.
                fi = self.idempotent_lift((((one - f) * g.lift()) * (one - f)))
                idempotents.append(fi)
                f = (f + fi)
            return tuple(idempotents)
        def idempotent_lift(self, x):
            """Lift ``x``, idempotent modulo the radical, to a true idempotent.

            Iterates x -> 1 - (1 - x^2)^2 until the value stabilizes.
            """
            if (not self.is_parent_of(x)):
                x = x.lift()
            p = self.semisimple_quotient().retract(x)
            if ((p * p) != p):
                raise ValueError(('%s does not retract to an idempotent.' % p))
            x_prev = None
            one = self.one()
            while (x != x_prev):
                tmp = x
                x = (one - ((one - (x ** 2)) ** 2))
                x_prev = tmp
            return x
        _method
        def cartan_invariants_matrix(self):
            """Return the Cartan invariants matrix over ZZ.

            Entry (i, j) is the dimension of the (i, j) Peirce summand divided
            by the product of the corresponding simple-module dimensions.
            """
            from sage.matrix.constructor import Matrix
            from sage.rings.integer_ring import ZZ
            A_quo = self.semisimple_quotient()
            idempotents_quo = A_quo.central_orthogonal_idempotents()
            # dim of a matrix-algebra block is a perfect square; sqrt gives
            # the simple module's dimension.
            dim_simples = [A_quo.principal_ideal(e).dimension().sqrt() for e in idempotents_quo]
            idempotents = self.orthogonal_idempotents_central_mod_radical()
            def C(i, j):
                summand = self.peirce_summand(idempotents[i], idempotents[j])
                return (summand.dimension() / (dim_simples[i] * dim_simples[j]))
            m = Matrix(ZZ, len(idempotents), C)
            m.set_immutable()
            return m
        def isotypic_projective_modules(self, side='left'):
            """Return the isotypic projective modules, one per lifted idempotent."""
            return [self.principal_ideal(e, side) for e in self.orthogonal_idempotents_central_mod_radical()]
        _method
        def peirce_summand(self, ei, ej):
            """Return the Peirce summand ``ei * A * ej`` as a submodule."""
            B = self.basis()
            phi = self.module_morphism(on_basis=(lambda k: ((ei * B[k]) * ej)), codomain=self, triangular='lower')
            ideal = phi.matrix(side='right').image()
            return self.submodule([self.from_vector(v) for v in ideal.basis()], already_echelonized=True)
        def peirce_decomposition(self, idempotents=None, check=True):
            """Return the matrix of Peirce summands for the given idempotents.

            Defaults to the lifted central idempotents; with ``check`` on,
            validates that they decompose the identity orthogonally.
            """
            if (idempotents is None):
                idempotents = self.orthogonal_idempotents_central_mod_radical()
            if check:
                if (not self.is_identity_decomposition_into_orthogonal_idempotents(idempotents)):
                    raise ValueError('Not a decomposition of the identity into orthogonal idempotents')
            return [[self.peirce_summand(ei, ej) for ej in idempotents] for ei in idempotents]
        def is_identity_decomposition_into_orthogonal_idempotents(self, l):
            """Check that ``l`` sums to one and is pairwise-orthogonal idempotents."""
            return ((self.sum(l) == self.one()) and all((((e * e) == e) for e in l)) and all(((((e * f) == 0) and ((f * e) == 0)) for (i, e) in enumerate(l) for f in l[:i])))
        _method
        def is_commutative(self):
            """Check commutativity pairwise on basis elements (identity skipped)."""
            B = list(self.basis())
            try:
                B.remove(self.one())
            except ValueError:
                pass
            return all((((b * bp) == (bp * b)) for (i, b) in enumerate(B) for bp in B[(i + 1):]))
    class ElementMethods():
        def to_matrix(self, base_ring=None, action=operator.mul, side='left'):
            """Return the matrix of left (or right) multiplication by this element."""
            basis = self.parent().basis()
            action_left = action
            if (side == 'right'):
                action = (lambda x: action_left(basis[x], self))
            else:
                action = (lambda x: action_left(self, basis[x]))
            endo = self.parent().module_morphism(on_basis=action, codomain=self.parent())
            return endo.matrix(base_ring=base_ring)
        _matrix_ = to_matrix
        on_left_matrix = to_matrix
        def __invert__(self):
            """Return the multiplicative inverse, or raise ValueError.

            Fast path: a scalar multiple of the distinguished one-basis
            element is inverted directly; otherwise solve ``A * inv = e`` for
            the element's multiplication matrix ``A``.
            """
            alg = self.parent()
            R = alg.base_ring()
            ob = None
            try:
                ob = alg.one_basis()
            except (AttributeError, TypeError, ValueError):
                pass
            if (ob is not None):
                mc = self.monomial_coefficients(copy=False)
                if ((len(mc) == 1) and (ob in mc)):
                    try:
                        return alg.term(ob, R((~ mc[ob])))
                    except (ValueError, TypeError):
                        raise ValueError(('cannot invert self (= %s)' % self))
            e = alg.one().to_vector()
            A = self.to_matrix()
            try:
                inv = A.solve_right(e)
                inv.change_ring(R)
                return alg.from_vector(inv)
            except (ValueError, TypeError):
                raise ValueError(('cannot invert self (= %s)' % self))
    class Cellular(CategoryWithAxiom_over_base_ring):
        # Axiom class: cellular algebras (Graham-Lehrer cellular structure).
        class ParentMethods():
            def _test_cellular(self, **options):
                """Tester: multiplication must respect the cellular filtration."""
                tester = self._tester(**options)
                cell_basis = self.cellular_basis()
                B = cell_basis.basis()
                P = self.cell_poset()
                for mu in P:
                    C = self.cell_module_indices(mu)
                    for s in C:
                        t = C[0]
                        vals = []
                        basis_elt = B[(mu, s, t)]
                        for a in B:
                            elt = (a * basis_elt)
                            tester.assertTrue(all(((P.lt(i[0], mu) or (i[2] == t)) for i in elt.support())))
                            vals.append([elt[(mu, u, t)] for u in C])
                        for t in C[1:]:
                            basis_elt = B[(mu, s, t)]
                            for (i, a) in enumerate(B):
                                elt = (a * basis_elt)
                                tester.assertTrue(all(((P.lt(i[0], mu) or (i[2] == t)) for i in elt.support())))
                                tester.assertEqual(vals[i], [elt[(mu, u, t)] for u in C])
            _method
            def cell_poset(self):
                """Abstract: return the cell poset of this cellular algebra. (body missing in this excerpt)"""
            _method
            def cell_module_indices(self, mu):
                """Abstract: return the indexing set of the cell module for ``mu``. (body missing in this excerpt)"""
            _method(optional=True)
            def _to_cellular_element(self, i):
                """Abstract (optional): image of basis element ``i`` in the cellular basis. (body missing in this excerpt)"""
            _method(optional=True)
            def _from_cellular_index(self, x):
                """Abstract (optional): expansion of cellular index ``x`` in this basis. (body missing in this excerpt)"""
            def cellular_involution(self, x):
                """Apply the cellular involution, swapping the s/t cell indices."""
                C = self.cellular_basis()
                if (C is self):
                    M = x.monomial_coefficients(copy=False)
                    return self._from_dict({(i[0], i[2], i[1]): M[i] for i in M}, remove_zeros=False)
                return self(C(x).cellular_involution())
            _method
            def cells(self):
                """Return the family of cells, indexed by the cell poset."""
                from sage.sets.family import Family
                return Family(self.cell_poset(), self.cell_module_indices)
            def cellular_basis(self):
                """Return the cellular basis of this algebra."""
                from sage.algebras.cellular_basis import CellularBasis
                return CellularBasis(self)
            def cell_module(self, mu, **kwds):
                """Return the cell module indexed by ``mu``."""
                from sage.modules.with_basis.cell_module import CellModule
                return CellModule(self.cellular_basis(), mu, **kwds)
            _method
            def simple_module_parameterization(self):
                """Return the cells whose bilinear form is nonzero (they index the simples)."""
                return tuple([mu for mu in self.cell_poset() if self.cell_module(mu).nonzero_bilinear_form()])
        class ElementMethods():
            def cellular_involution(self):
                """Return this element's image under the parent's cellular involution."""
                return self.parent().cellular_involution(self)
        class TensorProducts(TensorProductsCategory):
            _method
            def extra_super_categories(self):
                """Tensor products of cellular algebras are again cellular."""
                return [self.base_category()]
            class ParentMethods():
                _method
                def cell_poset(self):
                    """Return the product of the factors' cell posets."""
                    ret = self._sets[0].cell_poset()
                    for A in self._sets[1:]:
                        ret = ret.product(A.cell_poset())
                    return ret
                def cell_module_indices(self, mu):
                    """Return the cartesian product of the factors' cell module indices."""
                    from sage.categories.cartesian_product import cartesian_product
                    return cartesian_product([self._sets[i].cell_module_indices(x) for (i, x) in enumerate(mu)])
                _attribute
                def cellular_involution(self):
                    """Return the cellular involution morphism, built factorwise."""
                    if (self.cellular_basis() is self):
                        def func(x):
                            M = x.monomial_coefficients(copy=False)
                            return self._from_dict({(i[0], i[2], i[1]): M[i] for i in M}, remove_zeros=False)
                        return self.module_morphism(function=func, codomain=self)
                    def on_basis(i):
                        return self._tensor_of_elements([A.basis()[i[j]].cellular_involution() for (j, A) in enumerate(self._sets)])
                    return self.module_morphism(on_basis, codomain=self)
                _method
                def _to_cellular_element(self, i):
                    """Convert tensor basis element ``i`` into the cellular basis."""
                    C = [A.cellular_basis() for A in self._sets]
                    elts = [C[j](self._sets[j].basis()[ij]) for (j, ij) in enumerate(i)]
                    from sage.categories.tensor import tensor
                    T = tensor(C)
                    temp = T._tensor_of_elements(elts)
                    B = self.cellular_basis()
                    M = temp.monomial_coefficients(copy=False)
                    def convert_index(i):
                        # Regroup ((mu1,s1,t1), (mu2,s2,t2), ...) into (mu, s, t).
                        mu = []
                        s = []
                        t = []
                        for (a, b, c) in i:
                            mu.append(a)
                            s.append(b)
                            t.append(c)
                        C = self.cell_module_indices(mu)
                        return (tuple(mu), C(s), C(t))
                    return B._from_dict({convert_index(i): M[i] for i in M}, remove_zeros=False)
                _method
                def _from_cellular_index(self, x):
                    """Convert cellular index ``x = (mu, s, t)`` to a tensor of factor elements."""
                    elts = [A(A.cellular_basis().basis()[(x[0][i], x[1][i], x[2][i])]) for (i, A) in enumerate(self._sets)]
                    return self._tensor_of_elements(elts)
    class SubcategoryMethods():
        _method
        def Cellular(self):
            """Return the full subcategory of cellular objects of this category."""
            return self._with_axiom('Cellular')
class Optimizer(object):
    """Thin wrapper around a solver from ``S`` adding weight decay, gradient
    clipping and per-iteration learning-rate scheduling.

    NOTE(review): ``S`` appears to be a solver module (name -> solver class
    mapping via ``S.__dict__``) — confirm against the imports.
    """
    def __init__(self, weight_decay=0, max_norm=0, lr_scheduler=None, name='Sgd', **kargs):
        """Create the named solver; extra kwargs go to the solver constructor.

        Raises NotImplementedError when ``name`` is not found in ``S``.
        """
        if (name not in S.__dict__):
            # Fix: message previously lacked a space ("Sgdis not implemented").
            raise NotImplementedError(name + ' is not implemented')
        self._solver = S.__dict__[name](**kargs)
        self._weight_decay = weight_decay
        self._max_norm = max_norm
        self._lr_scheduler = lr_scheduler
        self._iter = 0
        if (lr_scheduler is not None):
            lr = self._lr_scheduler.get_learning_rate(self._iter)
            self._solver.set_learning_rate(lr)
    def set_parameters(self, params, **kargs):
        """Register the parameters to be optimized with the solver."""
        self._solver.set_parameters(params, **kargs)
    def update(self):
        """Apply one optimization step: schedule LR, decay, clip, update."""
        if (self._lr_scheduler is not None):
            lr = self._lr_scheduler.get_learning_rate(self._iter)
            self._solver.set_learning_rate(lr)
        if (self._weight_decay > 0):
            self._solver.weight_decay(self._weight_decay)
        if (self._max_norm > 0):
            self._solver.clip_grad_by_norm(self._max_norm)
        self._solver.update()
        self._iter += 1
    def zero_grad(self):
        """Clear all accumulated gradients."""
        self._solver.zero_grad()
    def get_parameters(self):
        """Return the solver's registered parameters."""
        return self._solver.get_parameters()
    def get_learning_rate(self):
        """Return the solver's current learning rate."""
        return self._solver.learning_rate()
    def save_states(self, path):
        """Persist the solver state to ``path``."""
        self._solver.save_states(path)
    def load_states(self, path):
        """Restore the solver state from ``path``."""
        self._solver.load_states(path)
    def clear_parameters(self):
        """Drop all registered parameters and reset the iteration counter."""
        self._solver.clear_parameters()
        self._iter = 0
def remove_prefix(text, prefix):
    """Return ``text`` with a leading ``prefix`` stripped, if present."""
    return text[len(prefix):] if text.startswith(prefix) else text
def parse_text(text):
    """Extract numeric-value spans (numbers, number words, ordinals, dates)
    from ``text``.

    Returns a list of ``NumericValueSpan`` sorted by begin index, keeping
    only maximal spans (spans fully contained in an already-selected longer
    span are dropped).
    """
    span_dict = collections.defaultdict(list)
    # 1) Regex-matched numbers anywhere in the text.
    for match in _NUMBER_PATTERN.finditer(text):
        span_text = text[match.start():match.end()]
        number = _parse_number(span_text)
        if (number is not None):
            span_dict[match.span()].append(_get_numeric_value_from_float(number))
    # 2) Single tokens: parsed numbers, number words and ordinal words.
    for (begin_index, end_index) in get_all_spans(text, max_ngram_length=1):
        if ((begin_index, end_index) in span_dict):
            continue
        span_text = text[begin_index:end_index]
        number = _parse_number(span_text)
        if (number is not None):
            span_dict[(begin_index, end_index)].append(_get_numeric_value_from_float(number))
        # A word's position in _NUMBER_WORDS / _ORDINAL_WORDS is its value.
        for (number, word) in enumerate(_NUMBER_WORDS):
            if (span_text == word):
                span_dict[(begin_index, end_index)].append(_get_numeric_value_from_float(float(number)))
                break
        for (number, word) in enumerate(_ORDINAL_WORDS):
            if (span_text == word):
                span_dict[(begin_index, end_index)].append(_get_numeric_value_from_float(float(number)))
                break
    # 3) Multi-token date spans up to _MAX_DATE_NGRAM_SIZE.
    for (begin_index, end_index) in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE):
        span_text = text[begin_index:end_index]
        date = _parse_date(span_text)
        if (date is not None):
            span_dict[(begin_index, end_index)].append(date)
    # Process longer spans first so nested shorter spans get discarded.
    spans = sorted(span_dict.items(), key=(lambda span_value: _get_span_length_key(span_value[0])), reverse=True)
    selected_spans = []
    for (span, value) in spans:
        for (selected_span, _) in selected_spans:
            if ((selected_span[0] <= span[0]) and (span[1] <= selected_span[1])):
                break
        else:
            # for/else: no selected span contains this one — keep it.
            selected_spans.append((span, value))
    selected_spans.sort(key=(lambda span_value: span_value[0][0]))
    numeric_value_spans = []
    for (span, values) in selected_spans:
        numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values))
    return numeric_value_spans
def _findEDIcomp(comp, fileLines, dt=float):
(headLine, indHead) = [(st, nr) for (nr, st) in enumerate(fileLines) if re.search(comp, st)][0]
if ('NFREQ' in headLine):
breakup = headLine.split('=')
breakup2 = breakup[1].split()[0]
nrVec = int(breakup2)
else:
nrVec = int(headLine.split('//')[(- 1)])
c = 0
dataList = []
while (c < nrVec):
indHead += 1
dataList.extend(fileLines[indHead].split())
c = len(dataList)
return np.array(dataList, dt) |
def get_bn_to_prune(model, verbose=True, logger=None, spade=False):
    """Collect the batch-norm weight parameter names used for channel pruning.

    Walks the model's named blocks and, for every inverted-residual block,
    records '<bn_name>.weight' for the first BN of each residual op and each
    depthwise op (plus the nested SPADE sub-block when ``spade`` is True).

    Parameters
    ----------
    model : network exposing get_named_block_list() and named_parameters().
    verbose : when True, print/log every collected name.
    logger : optional logger with print_info(); falls back to print().
    spade : traverse SPADEInvertedResidualChannels blocks instead.

    Returns
    -------
    list of parameter-name strings; each is asserted to exist in the model.
    """
    weights = []
    for (name, m) in model.get_named_block_list().items():
        if spade:
            if isinstance(m, incmod.SPADEInvertedResidualChannels):
                for (op, (bn_name, bn)) in zip(m.res_ops, m.get_named_first_res_bn(prefix=name).items()):
                    weights.append('{}.weight'.format(bn_name))
                for (op, (bn_name, bn)) in zip(m.dw_ops, m.get_named_first_dw_bn(prefix=name).items()):
                    weights.append('{}.weight'.format(bn_name))
                # Also collect the BNs of the nested SPADE module.
                for (op, (bn_name, bn)) in zip(m.spade.res_ops, m.spade.get_named_first_res_bn(prefix=(name + '.spade')).items()):
                    weights.append('{}.weight'.format(bn_name))
                for (op, (bn_name, bn)) in zip(m.spade.dw_ops, m.spade.get_named_first_dw_bn(prefix=(name + '.spade')).items()):
                    weights.append('{}.weight'.format(bn_name))
        elif isinstance(m, incmod.InvertedResidualChannels):
            for (op, (bn_name, bn)) in zip(m.res_ops, m.get_named_first_res_bn(prefix=name).items()):
                weights.append('{}.weight'.format(bn_name))
            for (op, (bn_name, bn)) in zip(m.dw_ops, m.get_named_first_dw_bn(prefix=name).items()):
                weights.append('{}.weight'.format(bn_name))
    prune_weights = weights
    if verbose:
        for name in prune_weights:
            if (logger is not None):
                logger.print_info('{}\n'.format(name))
            else:
                print('{}'.format(name))
    # Sanity check: every collected name must be a real model parameter.
    all_params_keys = [key for (key, val) in model.named_parameters()]
    for name_weight in prune_weights:
        assert (name_weight in all_params_keys)
    return prune_weights
def test_noise():
    """Every generated .flac in the save folder must match its expected counterpart."""
    from glob import glob
    pattern = os.path.join(output_folder, 'save', '*.flac')
    for produced in glob(pattern):
        # The reference file lives under 'expected' instead of 'results'.
        reference = produced.replace('results', 'expected')
        assert read_audio(produced).allclose(read_audio(reference))
def method_from_name(method, **kwargs):
    """Factory mapping a method name to its multi-objective method instance.

    'cosmos' and 'hyper' are matched as substrings (so variants such as
    'cosmos_orig' select the same implementation); the remaining names must
    match exactly.  ``kwargs`` are forwarded to the chosen constructor.

    Raises
    ------
    ValueError for an unrecognized method name.
    """
    if method == 'ParetoMTL':
        return ParetoMTLMethod(**kwargs)
    if 'cosmos' in method:
        return COSMOSMethod(**kwargs)
    if method == 'SingleTask':
        return SingleTaskMethod(**kwargs)
    if 'hyper' in method:
        return HypernetMethod(**kwargs)
    if method == 'mgda':
        return MGDAMethod(**kwargs)
    if method == 'uniform':
        return UniformScalingMethod(**kwargs)
    # Fix: typo 'Unkown' -> 'Unknown' in the error message.
    raise ValueError('Unknown method {}'.format(method))
def print_tree(tree):
    """Recursively render a tree as 'src -> dst' lines joined by ASCII branches."""
    header = '{} -> {}'.format(tree.edge.src, tree.edge.dst)
    branches = ''.join('\n |\n +- {}'.format(print_tree(child)) for child in tree.children)
    return header + branches
def train_collate_build(batch):
    """Collate (img, pid, _, _, mask) samples into batched tensors.

    Returns stacked images, an int64 pid tensor, and stacked masks; the two
    middle fields of each sample are ignored.
    """
    images = torch.stack([item[0] for item in batch], dim=0)
    labels = torch.tensor([item[1] for item in batch], dtype=torch.int64)
    masks = torch.stack([item[4] for item in batch], dim=0)
    return (images, labels, masks)
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily pack consecutive (src, tgt) example pairs into longer pairs.

    Pairs are concatenated (space-separated) for as long as BOTH the packed
    source and packed target stay within ``max_tokens`` tokens according to
    ``tok``; when either side would overflow, the current pack is flushed
    and a new one is started from the offending pair.

    Parameters
    ----------
    tok : HuggingFace-style tokenizer callable returning ``input_ids``.
    src_examples, tgt_examples : parallel sequences of strings.
    max_tokens : token budget for each packed example.

    Returns
    -------
    (finished_src, finished_tgt) : parallel lists of packed strings.
    """
    finished_src, finished_tgt = [], []
    paired = list(zip(src_examples, tgt_examples))
    # Fix: the original indexed paired[0] unconditionally and raised
    # IndexError on empty input; return empty packs instead.
    if not paired:
        return (finished_src, finished_tgt)

    def is_too_big(strang):
        # One tokenizer call per candidate; shape[1] is the token count.
        return (tok(strang, return_tensors='pt').input_ids.shape[1] > max_tokens)

    new_src, new_tgt = paired[0]
    for (src, tgt) in tqdm(paired[1:]):
        cand_src = ((new_src + ' ') + src)
        cand_tgt = ((new_tgt + ' ') + tgt)
        if (is_too_big(cand_src) or is_too_big(cand_tgt)):
            # Flush the current pack and start a fresh one.
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            (new_src, new_tgt) = (src, tgt)
        else:
            (new_src, new_tgt) = (cand_src, cand_tgt)
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return (finished_src, finished_tgt)
class Observation():
    """Lightweight container for one environment step (id, reward, state, done)."""

    def __init__(self, id=(- 1), reward=0, state=None, is_episode_over=False):
        """Store the step's identifier, reward, state and terminal flag."""
        self.id = id
        self.reward = reward
        self.state = state
        self.is_episode_over = is_episode_over

    def are_equal(self, other, are_states_equal):
        """Field-by-field comparison; states are compared via ``are_states_equal``."""
        if self.id != other.id or self.reward != other.reward:
            return False
        if not are_states_equal(self.state, other.state):
            return False
        return self.is_episode_over == other.is_episode_over
def get_output_dir(datasets, training=True):
    """Return (creating it if needed) the output directory for the dataset(s).

    ``datasets`` may be a single name or a tuple/list of names; multiple
    names are joined into one 'a:b:c' directory component.  The path layout
    is OUTPUT_DIR/<train|test>/<dataset_name>/<model type>.
    """
    assert isinstance(datasets, tuple(([tuple, list] + list(six.string_types)))), 'datasets argument must be of type tuple, list or string'
    is_string = isinstance(datasets, six.string_types)
    dataset_name = (datasets if is_string else ':'.join(datasets))
    tag = ('train' if training else 'test')
    outdir = osp.join(__C.OUTPUT_DIR, tag, dataset_name, __C.MODEL.TYPE)
    # Fix: exist_ok avoids the check-then-create race of the original
    # osp.exists()/os.makedirs() pair when two processes start together.
    os.makedirs(outdir, exist_ok=True)
    return outdir
class TorchModel():
    """Callable wrapper that embeds batches of variable-length numpy sequences.

    Sequences are packed, run through either the full featurization pipeline
    (LM embedding -> unstacked LSTM layers -> projection) or the wrapped
    model end-to-end, and unpacked back into the input order.
    """
    def __init__(self, model, use_cuda, full_features=False):
        # When full_features is set, cache the individual pipeline stages so
        # __call__ can drive them via featurize(); otherwise the model is
        # invoked directly.
        self.model = model
        self.use_cuda = use_cuda
        self.full_features = full_features
        if full_features:
            self.lm_embed = model.embedding.embed
            self.lstm_stack = unstack_lstm(model.embedding.rnn)
            self.proj = model.embedding.proj
            if use_cuda:
                self.lm_embed.cuda()
                for lstm in self.lstm_stack:
                    lstm.cuda()
                self.proj.cuda()
    def __call__(self, x):
        """Embed a batch ``x`` of numpy integer sequences and return the
        unpacked per-sequence outputs.

        NOTE(review): when full_features is False and use_cuda is True, the
        wrapped model is assumed to already live on the GPU — confirm.
        """
        c = [torch.from_numpy(x_).long() for x_ in x]
        # Pack the variable-length sequences; `order` restores input order.
        (c, order) = pack_sequences(c)
        if self.use_cuda:
            c = c.cuda()
        if self.full_features:
            z = featurize(c, self.lm_embed, self.lstm_stack, self.proj)
        else:
            z = self.model(c)
        z = unpack_sequences(z, order)
        return z
def stl2off(stl_file, off_file=None, delete_file=True):
    """Convert an STL mesh file to OFF format.

    Parameters
    ----------
    stl_file : path to the input .stl file (validated by file_format_check).
    off_file : output path; defaults to the input path with a .off extension.
    delete_file : remove the source .stl file after a successful conversion.

    Returns
    -------
    The path of the written .off file.
    """
    file_format_check(stl_file, '.stl')
    if (off_file is None):
        # Fix: the original str.replace('.stl', '.off') replaced the FIRST
        # occurrence anywhere in the path (e.g. 'runs.stl_v2/m.stl' breaks);
        # swap only the final extension.
        root, _ = os.path.splitext(stl_file)
        off_file = root + '.off'
    mesh = load_mesh(stl_file)
    off_data = export_off(mesh)
    with open(off_file, 'w') as f:
        f.write(off_data)
    if delete_file:
        os.remove(stl_file)
    return off_file
class TestCopulaGANSynthesizer():
    def test___init__(self):
        """Default construction: all CTGAN/Copula hyperparameters take their
        documented default values; the passed enforce_* flags are stored."""
        metadata = SingleTableMetadata()
        enforce_min_max_values = True
        enforce_rounding = True
        instance = CopulaGANSynthesizer(metadata, enforce_min_max_values=enforce_min_max_values, enforce_rounding=enforce_rounding)
        assert (instance.enforce_min_max_values is True)
        assert (instance.enforce_rounding is True)
        assert (instance.embedding_dim == 128)
        assert (instance.generator_dim == (256, 256))
        assert (instance.discriminator_dim == (256, 256))
        assert (instance.generator_lr == 0.0002)
        assert (instance.generator_decay == 1e-06)
        assert (instance.discriminator_lr == 0.0002)
        assert (instance.discriminator_decay == 1e-06)
        assert (instance.batch_size == 500)
        assert (instance.discriminator_steps == 1)
        assert (instance.log_frequency is True)
        assert (instance.verbose is False)
        assert (instance.epochs == 300)
        assert (instance.pac == 10)
        assert (instance.cuda is True)
        assert (instance.numerical_distributions == {})
        # The string distribution names are resolved to copulas classes.
        assert (instance.default_distribution == 'beta')
        assert (instance._numerical_distributions == {})
        assert (instance._default_distribution == BetaUnivariate)
    def test___init__custom(self):
        """Custom construction: every explicitly passed hyperparameter is stored
        verbatim, and distribution names resolve to their copulas classes."""
        metadata = SingleTableMetadata()
        metadata.add_column('field', sdtype='numerical')
        enforce_min_max_values = False
        enforce_rounding = False
        embedding_dim = 64
        generator_dim = (128, 128)
        discriminator_dim = (128, 128)
        generator_lr = 0.0001
        generator_decay = 2e-06
        discriminator_lr = 0.0003
        discriminator_decay = 1e-06
        batch_size = 250
        discriminator_steps = 2
        log_frequency = False
        verbose = True
        epochs = 150
        pac = 5
        cuda = False
        numerical_distributions = {'field': 'gamma'}
        default_distribution = 'uniform'
        instance = CopulaGANSynthesizer(metadata, enforce_min_max_values=enforce_min_max_values, enforce_rounding=enforce_rounding, embedding_dim=embedding_dim, generator_dim=generator_dim, discriminator_dim=discriminator_dim, generator_lr=generator_lr, generator_decay=generator_decay, discriminator_lr=discriminator_lr, discriminator_decay=discriminator_decay, batch_size=batch_size, discriminator_steps=discriminator_steps, log_frequency=log_frequency, verbose=verbose, epochs=epochs, pac=pac, cuda=cuda, numerical_distributions=numerical_distributions, default_distribution=default_distribution)
        assert (instance.enforce_min_max_values is False)
        assert (instance.enforce_rounding is False)
        assert (instance.embedding_dim == embedding_dim)
        assert (instance.generator_dim == generator_dim)
        assert (instance.discriminator_dim == discriminator_dim)
        assert (instance.generator_lr == generator_lr)
        assert (instance.generator_decay == generator_decay)
        assert (instance.discriminator_lr == discriminator_lr)
        assert (instance.discriminator_decay == discriminator_decay)
        assert (instance.batch_size == batch_size)
        assert (instance.discriminator_steps == discriminator_steps)
        assert (instance.log_frequency == log_frequency)
        assert (instance.verbose is True)
        assert (instance.epochs == epochs)
        assert (instance.pac == pac)
        assert (instance.cuda is False)
        # Public attributes keep the string names; private ones hold classes.
        assert (instance.numerical_distributions == {'field': 'gamma'})
        assert (instance._numerical_distributions == {'field': GammaUnivariate})
        assert (instance.default_distribution == 'uniform')
        assert (instance._default_distribution == UniformUnivariate)
    def test___init__incorrect_numerical_distributions(self):
        """A non-dict numerical_distributions value must raise TypeError."""
        metadata = SingleTableMetadata()
        numerical_distributions = 'invalid'
        err_msg = 'numerical_distributions can only be None or a dict instance.'
        with pytest.raises(TypeError, match=err_msg):
            CopulaGANSynthesizer(metadata, numerical_distributions=numerical_distributions)
    def test___init__invalid_column_numerical_distributions(self):
        """numerical_distributions keys must exist in the metadata columns."""
        metadata = SingleTableMetadata()
        numerical_distributions = {'totally_fake_column_name': 'beta'}
        # re.escape because pytest.raises treats `match` as a regex.
        err_msg = re.escape("Invalid column names found in the numerical_distributions dictionary {'totally_fake_column_name'}. The column names you provide must be present in the metadata.")
        with pytest.raises(SynthesizerInputError, match=err_msg):
            CopulaGANSynthesizer(metadata, numerical_distributions=numerical_distributions)
    def test_get_params(self):
        """get_parameters() must report the full default parameter dictionary."""
        metadata = SingleTableMetadata()
        instance = CopulaGANSynthesizer(metadata)
        result = instance.get_parameters()
        assert (result == {'enforce_min_max_values': True, 'enforce_rounding': True, 'locales': None, 'embedding_dim': 128, 'generator_dim': (256, 256), 'discriminator_dim': (256, 256), 'generator_lr': 0.0002, 'generator_decay': 1e-06, 'discriminator_lr': 0.0002, 'discriminator_decay': 1e-06, 'batch_size': 500, 'discriminator_steps': 1, 'log_frequency': True, 'verbose': False, 'epochs': 300, 'pac': 10, 'cuda': True, 'numerical_distributions': {}, 'default_distribution': 'beta'})
# NOTE(review): the bare string below looks like a stripped
# ``@patch('sdv.single_table.copulagan.rdt')`` decorator (it matches the
# ``mock_rdt`` parameter) -- confirm against the upstream test file.
('sdv.single_table.copulagan.rdt')
def test__create_gaussian_normalizer_config(self, mock_rdt):
    """``_create_gaussian_normalizer_config`` should build a GaussianNormalizer
    per numerical column (using the requested distribution, defaulting to
    beta) and leave non-numerical / derived columns untransformed."""
    numerical_distributions = {'age': 'gamma'}
    metadata = SingleTableMetadata()
    metadata.columns = {'name': {'sdtype': 'categorical'}, 'age': {'sdtype': 'numerical'}, 'account': {'sdtype': 'numerical'}}
    instance = CopulaGANSynthesizer(metadata, numerical_distributions=numerical_distributions)
    # 'name#age' is a derived column not present in the metadata.
    processed_data = pd.DataFrame({'name': ['John', 'Doe', 'John Doe', 'John Doe Doe'], 'age': np.arange(4), 'account': np.arange(4), 'name#age': np.arange(4)})
    config = instance._create_gaussian_normalizer_config(processed_data)
    # 'age' gets the requested gamma distribution; 'account' the beta default.
    expected_calls = [call(missing_value_generation='from_column', distribution=GammaUnivariate), call(missing_value_generation='from_column', distribution=BetaUnivariate)]
    expected_config = {'transformers': {'name': None, 'age': mock_rdt.transformers.GaussianNormalizer.return_value, 'account': mock_rdt.transformers.GaussianNormalizer.return_value, 'name#age': None}, 'sdtypes': {'name': 'categorical', 'age': 'numerical', 'account': 'numerical', 'name#age': 'categorical'}}
    assert (config == expected_config)
    assert (mock_rdt.transformers.GaussianNormalizer.call_args_list == expected_calls)
# NOTE(review): the three bare strings below look like stripped ``@patch(...)``
# decorators (they match the mock parameters in reverse order) -- confirm
# against the upstream test file.
('sdv.single_table.copulagan.LOGGER')
('sdv.single_table.copulagan.CTGANSynthesizer._fit')
('sdv.single_table.copulagan.rdt')
def test__fit_logging(self, mock_rdt, mock_ctgansynthesizer__fit, mock_logger):
    """``_fit`` must log when a requested distribution targets a column that
    no longer exists in the preprocessed data."""
    metadata = SingleTableMetadata()
    metadata.add_column('col', sdtype='numerical')
    numerical_distributions = {'col': 'gamma'}
    instance = CopulaGANSynthesizer(metadata, numerical_distributions=numerical_distributions)
    processed_data = pd.DataFrame()  # 'col' is absent after preprocessing
    instance._fit(processed_data)
    mock_logger.info.assert_called_once_with("Requested distribution 'gamma' cannot be applied to column 'col' because it no longer exists after preprocessing.")
# NOTE(review): the two bare strings below look like stripped ``@patch(...)``
# decorators matching the mock parameters -- confirm against upstream.
('sdv.single_table.copulagan.CTGANSynthesizer._fit')
('sdv.single_table.copulagan.rdt')
def test__fit(self, mock_rdt, mock_ctgansynthesizer__fit):
    """``_fit`` should build a HyperTransformer from the gaussian-normalizer
    config, fit_transform the data with it, and hand the result to the
    parent CTGAN ``_fit``."""
    metadata = SingleTableMetadata()
    instance = CopulaGANSynthesizer(metadata)
    instance._create_gaussian_normalizer_config = Mock()
    processed_data = pd.DataFrame()
    instance._fit(processed_data)
    # The hyper transformer must be the (mocked) rdt.HyperTransformer.
    hypertransformer = instance._gaussian_normalizer_hyper_transformer
    assert (hypertransformer == mock_rdt.HyperTransformer.return_value)
    hypertransformer.set_config.assert_called_once_with(instance._create_gaussian_normalizer_config.return_value)
    hypertransformer.fit_transform.assert_called_once_with(processed_data)
    # The transformed data (not the raw input) goes to CTGAN's _fit.
    mock_ctgansynthesizer__fit.assert_called_once_with(hypertransformer.fit_transform.return_value)
def test_get_learned_distributions(self):
    """``get_learned_distributions`` should expose each column's fitted
    univariate parameters (minus the internal 'type' key) under the
    configured distribution name ('beta' by default)."""
    data = pd.DataFrame({'zero': [0, 0, 0], 'one': [1, 1, 1]})
    stm = SingleTableMetadata()
    stm.detect_from_dataframe(data)
    cgs = CopulaGANSynthesizer(stm)
    # Fake fitted GaussianNormalizer transformers, one per column.
    zero_transformer_mock = Mock(spec_set=GaussianNormalizer)
    zero_transformer_mock._univariate.to_dict.return_value = {'a': 1.0, 'b': 1.0, 'loc': 0.0, 'scale': 0.0, 'type': None}
    one_transformer_mock = Mock(spec_set=GaussianNormalizer)
    one_transformer_mock._univariate.to_dict.return_value = {'a': 1.0, 'b': 1.0, 'loc': 1.0, 'scale': 0.0, 'type': None}
    cgs._gaussian_normalizer_hyper_transformer = Mock()
    cgs._gaussian_normalizer_hyper_transformer.field_transformers = {'zero': zero_transformer_mock, 'one': one_transformer_mock}
    cgs._fitted = True  # pretend fit() already ran
    result = cgs.get_learned_distributions()
    # 'type' is stripped from the reported learned parameters.
    assert (result == {'zero': {'distribution': 'beta', 'learned_parameters': {'a': 1.0, 'b': 1.0, 'loc': 0.0, 'scale': 0.0}}, 'one': {'distribution': 'beta', 'learned_parameters': {'a': 1.0, 'b': 1.0, 'loc': 1.0, 'scale': 0.0}}})
def test_get_learned_distributions_raises_an_error(self):
    """Calling ``get_learned_distributions`` before ``fit`` must fail."""
    frame = pd.DataFrame({'zero': [0, 0, 0], 'one': [1, 1, 1]})
    table_metadata = SingleTableMetadata()
    table_metadata.detect_from_dataframe(frame)
    synthesizer = CopulaGANSynthesizer(table_metadata)
    expected_message = re.escape(
        "Distributions have not been learned yet. Please fit your model first using 'fit'."
    )
    with pytest.raises(ValueError, match=expected_message):
        synthesizer.get_learned_distributions()
class IteratorTrainer(Trainer):
    """Trainer that runs a fixed number of iterations, split into cycles.

    After each cycle of ``meta.cycle`` training steps it runs an evaluation
    pass, logs losses, and checkpoints the model.
    """
    def __init__(self, opt, meta, data_loader, model, optimizer):
        super(IteratorTrainer, self).__init__(opt, meta, data_loader, model, optimizer)
        self.iters = meta.cycle          # training steps per cycle
        self.total_iters = meta.iterations  # total training steps overall
    def run(self):
        """Run all cycles: train, evaluate, log, and checkpoint each one."""
        self.set_logger()
        bar = utils.set_progress_bar(self.total_iters)
        for cycle_num in range(int((self.total_iters / self.iters))):
            self.model.train()
            self.cycle(bar, cycle_num)
            # Evaluation runs without gradients.
            with torch.no_grad():
                self.run_evaluation_cycle()
            self.log_losses(self.opt, self.losses)
            self.update_top_score(self.opt)
            self.save_model(self.get_tracked_score())
        self.stop_logger()
    def cycle(self, bar, cycle_num):
        """Run one cycle of ``self.iters`` forward/backward/update steps."""
        nums = self.reset_losses()
        # NOTE(review): stray debug print of the training-loss dict -- likely
        # leftover instrumentation; confirm before removing.
        print(self.losses['train'])
        for i in range(1, (self.iters + 1)):
            (loss, nums, reset) = self.do_forward_pass(nums)
            self.do_backward_pass(loss)
            self.update_parameters()
            # 'epoch' here counts optimizer steps, not dataset epochs.
            self.opt.train.dynamic.epoch += 1
            for loss_name in self.losses['train']:
                # Per-step loss normalized by the dynamic batch size.
                self.logger.add_scalar('train/{}'.format(loss_name), (loss.item() / self.opt.train.dynamic.bs), self.opt.train.dynamic.epoch)
            bar.update(1)
            # Toy mode: cut the cycle short after a few steps.
            if (cfg.toy and (i > 10)):
                break
            # When the loader signals a full pass over the data, rewind it.
            if reset:
                self.data_loader.reset_offsets('train')
def test_L3EthStarBuild():
    """Smoke-test building and starting an L3 Ethernet-star Mininet topology.

    NOTE: ``CLI(net)`` drops into the interactive Mininet shell and blocks
    until the operator exits it, so this is a manual/interactive test.
    """
    topo = L3EthStar()
    # TCLink enables traffic-control (bandwidth/delay) parameters on links.
    net = Mininet(topo=topo, link=TCLink, listenPort=OF_MISC['switch_debug_port'])
    net.start()
    CLI(net)
    net.stop()
class Net(nn.Module):
    """AlexNet-style CNN for 32x32 RGB inputs (e.g. CIFAR-10).

    The backbone downsamples 32x32 inputs to 256 feature maps of size 2x2;
    the classifier is a 3-layer MLP with dropout.
    """

    def __init__(self, num_classes=10):
        super(Net, self).__init__()
        # Convolutional backbone: stride-2 stem plus three 2x max-pools.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(64, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
        )
        # MLP head operating on the flattened 256*2*2 feature vector.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 2 * 2, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        feature_maps = self.features(x)
        flattened = feature_maps.view(feature_maps.size(0), 256 * 2 * 2)
        return self.classifier(flattened)
def register_Ns3CallbackImpl__Void_Unsigned_long_Unsigned_short_Long_double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register the Python-binding methods of the ns-3 callback
    ``CallbackImpl<void, unsigned long, unsigned short, long double, ...>``.

    Auto-generated pybindgen registration: exact type strings must match the
    C++ template instantiation.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned long, unsigned short, long double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('long unsigned int', 'arg0'), param('short unsigned int', 'arg1'), param('long double', 'arg2')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
# NOTE(review): the bare call below looks like a stripped
# ``@register_model('linear_transformer_lm')`` decorator -- confirm upstream.
_model('linear_transformer_lm')
class LinearTransformerLanguageModel(FairseqLanguageModel):
    """Language model wrapping a :class:`LinearTransformerDecoder`."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Register model-specific CLI arguments on *parser*.

        Restored @staticmethod: fairseq invokes this on the class, so without
        it *parser* would be mis-bound to the first positional slot.
        """
        parser.add_argument('--embed-dim', type=int, metavar='N', help='embedding dimension')
        parser.add_argument('--num-attention-heads', type=int, metavar='N', help='num attention heads')
        parser.add_argument('--num-layers', type=int, metavar='N', help='num layers')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability for all fully connected layers in the embeddings, encoder, and pooler')

    @classmethod
    def build_model(cls, args, task):
        """Build a model instance from parsed *args* and the fairseq *task*.

        Restored @classmethod: the signature takes ``cls`` and fairseq calls
        ``Model.build_model(args, task)``.
        """
        base_architecture(args)
        return cls(LinearTransformerDecoder(args, task))
class TransformerDecoderLayer(TransformerDecoderLayerBase):
    """Backward-compatibility shim: accepts a legacy ``args`` namespace and
    converts it to a :class:`TransformerConfig` before delegating to the
    config-based base class."""
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__(TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn)
        # Keep the original namespace around for code that still reads it.
        self.args = args
    def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
        # Convert the namespace on every call so overrides in args are honored.
        return super().build_self_attention(embed_dim, TransformerConfig.from_namespace(args), add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn)
    def build_encoder_attention(self, embed_dim, args):
        return super().build_encoder_attention(embed_dim, TransformerConfig.from_namespace(args))
class RGBArrayAsObservationWrapper(dm_env.Environment):
    """Use ``env.render(rgb_array)`` as the observation rather than the
    observation the wrapped environment provides.

    NOTE(review): the original docstring was truncated mid-sentence
    ("From: ...") -- the upstream attribution link was lost in extraction.
    """
    def __init__(self, env, width=84, height=84):
        self._env = env
        self._width = width
        self._height = height
        # Reset once so render() has a valid state to draw from.
        self._env.reset()
        dummy_obs = self._env.render(mode='rgb_array', width=self._width, height=self._height)
        self.observation_space = spaces.Box(low=0, high=255, shape=dummy_obs.shape, dtype=dummy_obs.dtype)
        self.action_space = self._env.action_space
        # Gym spaces may lack minimum/maximum; default to the [-1, 1] box
        # that dm_env's BoundedArray expects.
        wrapped_action_spec = self.action_space
        if (not hasattr(wrapped_action_spec, 'minimum')):
            wrapped_action_spec.minimum = (- np.ones(wrapped_action_spec.shape))
        if (not hasattr(wrapped_action_spec, 'maximum')):
            wrapped_action_spec.maximum = np.ones(wrapped_action_spec.shape)
        self._action_spec = specs.BoundedArray(wrapped_action_spec.shape, np.float32, wrapped_action_spec.minimum, wrapped_action_spec.maximum, 'action')
        self._obs_spec = {}
        self._obs_spec['pixels'] = specs.BoundedArray(shape=self.observation_space.shape, dtype=np.uint8, minimum=0, maximum=255, name='observation')
    def reset(self, **kwargs):
        # NOTE(review): the first assignment is dead -- obs is immediately
        # overwritten by the env's reset return value (assumed to be a dict
        # containing 'pixels'; confirm against the wrapped env).
        obs = {}
        obs = self._env.reset(**kwargs)
        obs['pixels'] = obs['pixels'].astype(np.uint8)
        obs['goal_achieved'] = False
        return obs
    def step(self, action):
        (observation, reward, done, info) = self._env.step(action)
        obs = {}
        obs['pixels'] = observation['pixels'].astype(np.uint8)
        # Success flag is forwarded from the underlying env's info dict.
        obs['goal_achieved'] = info['is_success']
        return (obs, reward, done, info)
    def observation_spec(self):
        return self._obs_spec
    def action_spec(self):
        return self._action_spec
    def render(self, mode='rgb_array', width=256, height=256):
        # Always renders rgb_array regardless of the requested mode.
        return self._env.render(mode='rgb_array', width=width, height=height)
    def __getattr__(self, name):
        # Delegate everything else to the wrapped environment.
        return getattr(self._env, name)
class PegasusConverter(SpmConverter):
    """Convert a slow (sentencepiece) Pegasus tokenizer to a fast one.

    Pegasus reserves the first ``offset`` ids for pad/eos/mask tokens plus
    ``<unk_i>`` placeholders, then shifts all sentencepiece pieces up by that
    offset.
    """
    def vocab(self, proto):
        # Special tokens occupy the first slots, then <unk_2>..<unk_offset-1>
        # fillers, then the sentencepiece pieces (skipping its first two
        # entries, which the specials replace).
        vocab = [(self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.mask_token_sent, 0.0), (self.original_tokenizer.mask_token, 0.0)]
        vocab += [(f'<unk_{i}>', (- 100.0)) for i in range(2, self.original_tokenizer.offset)]
        vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
        return vocab
    def unk_id(self, proto):
        # The sentencepiece unk id is shifted by the reserved-token offset.
        return (proto.trainer_spec.unk_id + self.original_tokenizer.offset)
    def pre_tokenizer(self, replacement, add_prefix_space):
        # Split on whitespace first, then apply the metaspace transform.
        return pre_tokenizers.Sequence([pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)])
    def post_processor(self):
        # Pegasus appends EOS to single sequences and to each pair member.
        eos = self.original_tokenizer.eos_token
        special_tokens = [(eos, self.original_tokenizer.eos_token_id)]
        return processors.TemplateProcessing(single=['$A', eos], pair=['$A', '$B', eos], special_tokens=special_tokens)
def plot_data(ax, alg, mean_lc, mean_stderr, best_params, exp_attrs, second_time=False):
    """Plot one algorithm's mean learning curve with a stderr band on *ax*.

    When PLOT_RERUN_AND_ORIG is set, the first pass (second_time=False) is
    drawn semi-transparent and the rerun fully opaque.
    """
    alpha = 1.0
    if PLOT_RERUN_AND_ORIG:
        alpha = (1.0 if second_time else 0.5)
    # Label shows the algorithm name and its best step-size.
    lbl = ((alg + '$\\alpha=$ ') + str(best_params['alpha']))
    color = ALG_COLORS[alg]
    ax.plot(np.arange(mean_lc.shape[0]), mean_lc, label=lbl, linewidth=1.0, color=color, alpha=alpha)
    # Shaded band: mean +/- half the standard error.
    ax.fill_between(np.arange(mean_lc.shape[0]), (mean_lc - (mean_stderr / 2)), (mean_lc + (mean_stderr / 2)), color=color, alpha=(0.1 * alpha))
    # Cosmetic axis setup driven by the experiment attributes.
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.set_xlim(exp_attrs.x_lim)
    ax.set_ylim(exp_attrs.y_lim)
    ax.xaxis.set_ticks(exp_attrs.x_axis_ticks)
    ax.set_xticklabels(exp_attrs.x_tick_labels, fontsize=25)
    ax.yaxis.set_ticks(exp_attrs.y_axis_ticks)
    ax.tick_params(axis='y', which='major', labelsize=exp_attrs.size_of_labels)
    ax.spines['left'].set_linewidth(2)
    ax.spines['bottom'].set_linewidth(2)
def p_continue_statement(s):
    """Parse a ``continue`` statement and return its AST node."""
    stmt_pos = s.position()  # remember where the statement started
    s.next()                 # consume the 'continue' token
    return Nodes.ContinueStatNode(stmt_pos)
def test_initialize_from_files_lazy_paths():
    """Lazy loading must store the given paths instead of parsing the files."""
    pos_path = pathlib.Path('datasets/ToyFather/train/pos.pl')
    neg_path = pathlib.Path('datasets/ToyFather/train/neg.pl')
    facts_path = pathlib.Path('datasets/ToyFather/train/facts.pl')
    database = Database.from_files(pos=pos_path, neg=neg_path, facts=facts_path, lazy_load=True)
    assert database.pos == pos_path
    assert database.neg == neg_path
    assert database.facts == facts_path
def parse_constants_2018toXXXX(d: str) -> dict[(str, tuple[(float, str, float)])]:
    """Parse a CODATA 2018+ fixed-width fundamental-constants table.

    Each line has: name (cols 0-59), value (60-84), uncertainty (85-109),
    units (110+).  Values use space digit-grouping and may end in '...';
    exact constants list '(exact)' instead of a numeric uncertainty.

    Returns ``{name: (value, units, uncertainty)}``.  Blank lines (e.g. a
    trailing newline) are skipped -- previously they crashed on ``float('')``.
    """
    constants = {}
    for line in d.split('\n'):
        if not line.strip():
            continue  # skip blank / whitespace-only lines
        name = line[:60].rstrip()
        # Strip the grouping spaces and a possible trailing '...'.
        val = float(line[60:85].replace(' ', '').replace('...', ''))
        # '(exact)' means zero uncertainty.
        uncert = float(line[85:110].replace(' ', '').replace('(exact)', '0'))
        units = line[110:].rstrip()
        constants[name] = (val, units, uncert)
    return constants
# NOTE(review): the bare call below looks like a stripped
# ``@register_model_architecture(...)`` decorator -- confirm upstream.
_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
    """Big WMT en-de architecture, tensor2tensor flavor: pre-norm
    encoder/decoder plus attention and activation dropout defaults,
    on top of the vaswani 'big' configuration."""
    overrides = (
        ('encoder_normalize_before', True),
        ('decoder_normalize_before', True),
        ('attention_dropout', 0.1),
        ('activation_dropout', 0.1),
    )
    for name, default in overrides:
        # Keep any value already present on args, otherwise install the default.
        setattr(args, name, getattr(args, name, default))
    transformer_vaswani_wmt_en_de_big(args)
class PPM(nn.Module):
    """Pyramid Pooling Module (as in PSPNet).

    Pools the input at several grid resolutions (1, 2, 3, 6), projects each
    pooled map to ``channels // 4`` channels with a 1x1 conv + BN + ReLU,
    upsamples back to the input resolution, and concatenates everything
    (input included) along the channel axis.
    """

    def __init__(self, channels=2048):
        super(PPM, self).__init__()
        pool_sizes = (1, 2, 3, 6)
        branch_channels = int(channels / len(pool_sizes))
        branches = [
            nn.Sequential(
                nn.AdaptiveAvgPool2d(size),
                nn.Conv2d(channels, branch_channels, kernel_size=1, bias=False),
                nn.BatchNorm2d(branch_channels),
                nn.ReLU(inplace=True),
            )
            for size in pool_sizes
        ]
        self.features = nn.ModuleList(branches)

    def forward(self, x):
        spatial = x.size()[2:]
        pyramid = [x]
        for branch in self.features:
            # Upsample each pooled branch back to the input's spatial size.
            pyramid.append(F.interpolate(branch(x), spatial, mode='bilinear', align_corners=True))
        return torch.cat(pyramid, 1)
# NOTE(review): the bare '.node' line below looks like a stripped
# '@dace.library.node' class decorator -- confirm against upstream.
.node
class Transpose(dace.sdfg.nodes.LibraryNode):
    """DaCe library node computing a 2-D matrix transpose (_inp -> _out)."""
    # Available expansions; 'pure' is a plain SDFG, the rest call BLAS libraries.
    implementations = {'pure': ExpandTransposePure, 'MKL': ExpandTransposeMKL, 'OpenBLAS': ExpandTransposeOpenBLAS, 'cuBLAS': ExpandTransposeCuBLAS}
    default_implementation = 'pure'
    # Element type of the transposed array (may be inferred later).
    dtype = dace.properties.TypeClassProperty(allow_none=True)
    def __init__(self, name, dtype=None, location=None):
        super().__init__(name, location=location, inputs={'_inp'}, outputs={'_out'})
        self.dtype = dtype
    def validate(self, sdfg, state):
        """Check that the node has exactly one 2-D input and one 2-D output
        whose shapes are transposes of each other."""
        in_edges = state.in_edges(self)
        if (len(in_edges) != 1):
            raise ValueError('Expected exactly one input to transpose operation')
        for (_, _, _, dst_conn, memlet) in state.in_edges(self):
            if (dst_conn == '_inp'):
                # Squeeze away singleton dimensions before checking rank.
                # (dc is presumably copy.deepcopy -- confirm the import.)
                subset = dc(memlet.subset)
                subset.squeeze()
                in_size = subset.size()
        out_edges = state.out_edges(self)
        if (len(out_edges) != 1):
            raise ValueError('Expected exactly one output from transpose operation')
        out_memlet = out_edges[0].data
        if (len(in_size) != 2):
            raise ValueError('Transpose operation only supported on matrices')
        out_subset = dc(out_memlet.subset)
        out_subset.squeeze()
        out_size = out_subset.size()
        if (len(out_size) != 2):
            raise ValueError('Transpose operation only supported on matrices')
        # Output shape must be the reversed input shape.
        if (list(out_size) != [in_size[1], in_size[0]]):
            raise ValueError('Output to transpose operation must agree in the m and n dimensions')
class TestSelu(serial.SerializedTestCase):
    """Caffe2 operator tests for Selu / SeluGradient.

    NOTE(review): the bare ``(X=hu.tensor(), ...)`` and ``(deadline=1000)``
    lines below look like stripped hypothesis ``@given(...)`` /
    ``@settings(...)`` decorators (they match the test parameters), and
    ``fix0`` / ``selu_ref`` were presumably ``@staticmethod`` -- confirm
    against the upstream caffe2 test file.  As written these lines are not
    valid statements.
    """
    (X=hu.tensor(), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    def test_selu_1(self, X, gc, dc, engine):
        """Selu with alpha=1, scale=2: device, gradient and reference checks."""
        alpha = 1.0
        scale = 2.0
        op = core.CreateOperator('Selu', ['X'], ['Y'], alpha=alpha, scale=scale, engine=engine)
        # Nudge values away from 0 where the gradient is non-smooth.
        X = TestSelu.fix0(X)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])
        self.assertReferenceChecks(gc, op, [X], (lambda x: TestSelu.selu_ref(x, alpha=alpha, scale=scale)))
    (X=hu.tensor(), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    (deadline=1000)
    def test_selu_2(self, X, gc, dc, engine):
        """Selu with the canonical SELU constants (alpha~1.6732, scale~1.0507)."""
        alpha = 1.6732
        scale = 1.0507
        op = core.CreateOperator('Selu', ['X'], ['Y'], alpha=alpha, scale=scale, engine=engine)
        X = TestSelu.fix0(X)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=0.01, threshold=0.01)
        self.assertReferenceChecks(gc, op, [X], (lambda x: TestSelu.selu_ref(x, alpha=alpha, scale=scale)))
    (X=hu.tensor(), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    (deadline=1000)
    def test_selu_3(self, X, gc, dc, engine):
        """Selu with arbitrary alpha=1.3, scale=1.1."""
        alpha = 1.3
        scale = 1.1
        op = core.CreateOperator('Selu', ['X'], ['Y'], alpha=alpha, scale=scale, engine=engine)
        X = TestSelu.fix0(X)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])
        self.assertReferenceChecks(gc, op, [X], (lambda x: TestSelu.selu_ref(x, alpha=alpha, scale=scale)))
    (X=hu.tensor(), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    def test_selu_inplace(self, X, gc, dc, engine):
        """In-place Selu plus the gradient op writing its input in place."""
        alpha = 1.3
        scale = 1.1
        op = core.CreateOperator('Selu', ['X'], ['X'], alpha=alpha, scale=scale, engine=engine)
        X = TestSelu.fix0(X)
        self.assertDeviceChecks(dc, op, [X], [0])
        # In-place gradient: dX doubles as the output blob.
        Y = TestSelu.selu_ref(X, alpha=alpha, scale=scale)
        dX = np.ones_like(X)
        op2 = core.CreateOperator('SeluGradient', ['Y', 'dX'], ['dX'], alpha=alpha, scale=scale, engine=engine)
        self.assertDeviceChecks(dc, op2, [Y, dX], [0])
    def fix0(X):
        # Push values away from 0 so gradient checks avoid the kink at x=0.
        X += (0.02 * np.sign(X))
        X[(X == 0.0)] += 0.02
        return X
    def selu_ref(x, scale, alpha):
        # Reference: scale * (x if x > 0 else alpha * (exp(x) - 1)).
        ret = (scale * (((x > 0) * x) + ((x <= 0) * (alpha * (np.exp(x) - 1)))))
        return [ret]
def mk_unix_dist(build_path, dist_path):
    """Assemble a Unix distribution: let every component install itself,
    then copy the Python driver files into the install bin directory."""
    for component in get_components():
        component.mk_unix_dist(build_path, dist_path)
    for filename in os.listdir(build_path):
        # Only the .py / .pyc driver files go to the bin directory.
        if filename.endswith(('.pyc', '.py')):
            shutil.copy(os.path.join(build_path, filename), os.path.join(dist_path, INSTALL_BIN_DIR, filename))
def print_on_call_decorator(func):
    """Decorator for methods: log class and method name on every call, and
    log the traceback of any exception before re-raising it.

    The original contained a bare ``(func)`` expression -- a no-op, almost
    certainly a stripped ``@functools.wraps(func)`` -- so the wrapper lost
    the wrapped function's name/docstring; restored here.
    """
    import functools  # local import: this file has no visible import block

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        _debug(type(self).__name__, func.__name__)
        try:
            return func(self, *args, **kwargs)
        except Exception:
            _debug('An exception occurred:', traceback.format_exc())
            raise
    return wrapper
# NOTE(review): the '.experimental' / '.parametrize' lines below look like
# stripped ``@pytest.mark.*`` decorators -- confirm upstream; as written the
# first is not a valid statement.
.experimental
.parametrize('pad_columns, padding_value, array_size', [(['item_id', 'timestamp'], 0, None)])
.parametrize('dataset, result', [pytest.param('dataframe_special', 'dataframe_two_columns_none'), pytest.param('dataframe_special_pandas', 'dataframe_two_columns_none_pandas')])
def test_padder_two_columns_none(pad_columns, padding_value, array_size, dataset, result, request):
    """Padding two columns with array_size=None must keep all columns and
    produce the expected padded frame for both Spark and pandas inputs."""
    dataframe_special = request.getfixturevalue(dataset)
    dataframe_two_columns_none = request.getfixturevalue(result)
    is_spark = isinstance(dataframe_special, SparkDataFrame)
    padder = Padder(pad_columns=pad_columns, padding_value=padding_value, array_size=array_size)
    padder_interactions = padder.transform(dataframe_special)
    # Column names are read differently for Spark vs pandas frames.
    columns = (padder_interactions.collect()[0].asDict().keys() if is_spark else padder_interactions.columns)
    assert ('user_id' in columns)
    assert ('item_id' in columns)
    assert ('timestamp' in columns)
    # Compare contents; Spark frames are converted to pandas first.
    if (is_spark is True):
        assert padder_interactions.toPandas().equals(dataframe_two_columns_none.toPandas())
    else:
        assert padder_interactions.equals(dataframe_two_columns_none)
def make_distributor_init_64_bits(distributor_init, vcomp140_dll_filename, msvcp140_dll_filename):
    """Write a ``_distributor_init.py`` for 64-bit Windows wheels.

    The generated module preloads vcomp140.dll and msvcp140.dll from the
    wheel's ``.libs`` directory at import time so later extension imports
    do not fail with "DLL not found".  The two filename arguments are
    substituted into the template below.
    """
    with open(distributor_init, 'wt') as f:
        f.write(textwrap.dedent('\n    \'\'\'Helper to preload vcomp140.dll and msvcp140.dll to prevent\n    "not found" errors.\n\n    Once vcomp140.dll and msvcp140.dll are\n    preloaded, the namespace is made available to any subsequent\n    vcomp140.dll and msvcp140.dll. This is\n    created as part of the scripts that build the wheel.\n    \'\'\'\n\n\n    import os\n    import os.path as op\n    from ctypes import WinDLL\n\n\n    if os.name == "nt":\n        libs_path = op.join(op.dirname(__file__), ".libs")\n        vcomp140_dll_filename = op.join(libs_path, "{0}")\n        msvcp140_dll_filename = op.join(libs_path, "{1}")\n        WinDLL(op.abspath(vcomp140_dll_filename))\n        WinDLL(op.abspath(msvcp140_dll_filename))\n    '.format(vcomp140_dll_filename, msvcp140_dll_filename)))
def config_imname(cfg, i):
    """Return the full path of the i-th image listed in the config dict."""
    image_name = cfg['imlist'][i] + cfg['ext']
    return os.path.join(cfg['dir_images'], image_name)
def test_record_int32_parameters():
    """Round-trip: parsing a record type's string form must reproduce it."""
    record = RecordType([NumpyType('int32')], None, {'p': [123]})
    roundtripped = parser.parse(str(record))
    assert str(roundtripped) == str(record)
def process_tweets(id_, data_child_array, source_claim, source_tweets, labels_dict):
    """Collect the reaction tweets of claim *id_*.

    Resolves the claim's source tweet text into ``source_claim['tweet']``,
    then, for each child entry, its user id, tweet id, tweet text and time
    delay.  Children whose tweet cannot be resolved are skipped and counted.

    Returns ``(label, source_claim, user_ids, tweet_ids, tweets, time_delays,
    missing_count)``, or a tuple of seven ``None`` when the source tweet
    itself cannot be resolved.

    Fixes over the original: the two bare ``except:`` clauses are narrowed to
    the lookup/parse errors actually expected, and the
    ``assert cond, print(msg)`` pattern (which made ``None`` the assertion
    message) is replaced by a plain string message.
    """
    user_id_array = []
    tweet_id_array = []
    tweet_array = []
    time_delay_array = []
    missing_count = 0
    label = labels_dict[id_]
    try:
        source_tweet_id = str(source_claim['tweet_id']).strip()
        source_claim['tweet'] = source_tweets[source_tweet_id]
    except (KeyError, TypeError):
        # No resolvable source tweet: the whole claim is unusable.
        return (None, None, None, None, None, None, None)
    for item in data_child_array:
        try:
            user_id = item['user_id']
            tweet_id = str(item['tweet_id']).strip()
            time_delay = float(item['time_delay'].strip())
            tweet = source_tweets[tweet_id]
        except (KeyError, ValueError, TypeError, AttributeError):
            # Missing or malformed child tweet: skip it but keep a tally.
            missing_count += 1
            continue
        user_id_array.append(user_id)
        tweet_id_array.append(tweet_id)
        tweet_array.append(tweet)
        time_delay_array.append(time_delay)
    # All per-child arrays must stay aligned.
    assert len(user_id_array) == len(tweet_id_array) == len(tweet_array) == len(time_delay_array), 'Length of arrays DO NOT match'
    return (label, source_claim, user_id_array, tweet_id_array, tweet_array, time_delay_array, missing_count)
def InferOpDeviceAsBlobDevices(op):
    """Assume every input and output blob of *op* lives on the op's device.

    Returns ``(input_devices, output_devices)`` with one entry per blob;
    falls back to a default DeviceOption when the op has none set.
    """
    device = op.device_option if op.device_option else caffe2_pb2.DeviceOption()
    return ([device for _ in op.input], [device for _ in op.output])
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
    """Progress-bar logger that also reports its state to an RQ job."""
    def __init__(self, job, init_state=None, bars=None, ignored_bars=(), logged_bars='all', min_time_interval=0):
        # Each base is initialised explicitly (no cooperative super()): the
        # RQ side only needs the job handle, the bar side the display options.
        RqWorkerProgressLogger.__init__(self, job)
        ProgressBarLogger.__init__(self, init_state=init_state, bars=bars, ignored_bars=ignored_bars, logged_bars=logged_bars, min_time_interval=min_time_interval)
def get_video_modes(monitor):
    """Return the list of video modes supported by *monitor* (GLFW)."""
    mode_count = ctypes.c_int(0)
    # glfwGetVideoModes writes the number of modes through the pointer and
    # returns a C array of that many mode structs.
    modes = _glfw.glfwGetVideoModes(monitor, ctypes.pointer(mode_count))
    return [modes[index].unwrap() for index in range(mode_count.value)]
class MultiBoxLoss(nn.Module):
    """SSD-style multibox loss over a sequence of frames.

    Combines three terms, each normalised by the number of positives N:
      - localisation loss (smooth L1) on matched (positive) priors,
      - confidence loss (cross-entropy) with online hard negative mining
        at a negative:positive ratio of ``neg_pos``,
      - an 'existence' loss (cross-entropy over 2 classes) per frame.
    """
    def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target, use_gpu=True):
        super(MultiBoxLoss, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh          # IoU threshold for matching
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos              # negatives kept per positive
        self.neg_overlap = neg_overlap
        # Box-encoding variances from the global framework config.
        self.variance = config['frame_work']['variance']
    def forward(self, predictions, targets):
        """Compute (loc_loss, conf_loss, exist_loss) for a batch.

        predictions: (loc, class conf, existence conf, priors);
        targets:     (boxes per frame, class labels, existence flags).
        """
        (loc_datas_p, p_c_p, p_e_p, priors) = predictions
        (loc_datas_t, p_c_t, p_e_t) = targets
        num = loc_datas_p.size(0)            # batch size
        num_frames = loc_datas_p.shape[1]
        num_priors = priors.size(0)
        num_classes = self.num_classes
        # Matched ground-truth tensors, filled in-place by match() per sample.
        loc_ts = torch.zeros(num, num_frames, num_priors, 4)
        p_c_ts = torch.zeros(num, 1, num_priors, dtype=torch.long)
        p_e_ts = torch.zeros(num, num_frames, num_priors)
        if self.use_gpu:
            loc_ts = loc_ts.cuda()
            p_c_ts = p_c_ts.cuda()
            p_e_ts = p_e_ts.cuda()
        for idx in range(num):
            if (len(loc_datas_t[idx]) == 0):
                continue  # no ground-truth boxes in this sample
            truths = loc_datas_t[idx].float()
            labels = p_c_t[idx]
            exists = p_e_t[idx]
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels, exists, loc_ts, p_c_ts, p_e_ts, idx)
        # Targets are fixed; no gradients flow through them.
        with torch.no_grad():
            loc_ts = Variable(loc_ts)
            p_c_ts = Variable(p_c_ts)
            p_e_ts = Variable(p_e_ts)
        pos = (p_c_ts > 0)  # priors matched to a non-background class
        # Localisation loss only on positive priors that also 'exist'.
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_datas_p)
        exist_idx = (p_e_ts.unsqueeze(p_e_ts.dim()).expand_as(loc_datas_p) > 0)
        pos_idx = ((pos_idx * exist_idx) > 0)
        loc_p = loc_datas_p[pos_idx].view((- 1), 4)
        loc_ts = loc_ts[pos_idx].view((- 1), 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_ts, reduction='sum')
        # Hard negative mining: rank priors by their softmax loss
        # (log-sum-exp minus the target logit), excluding positives.
        batch_conf = p_c_p.contiguous().view((- 1), self.num_classes)
        loss_c = (log_sum_exp(batch_conf) - batch_conf.gather(1, p_c_ts.view((- 1), 1)))
        loss_c = loss_c.view(num, (- 1)).unsqueeze(1)
        loss_c[pos] = 0
        (_, loss_idx) = loss_c.sort(2, descending=True)
        (_, idx_rank) = loss_idx.sort(2)
        num_pos = pos.long().sum(2, keepdim=True)
        num_neg = torch.clamp((self.negpos_ratio * num_pos), max=(pos.shape[2] - 1))
        neg = (idx_rank < num_neg.expand_as(idx_rank))
        # Confidence loss over positives + mined negatives.
        pos_idx = pos.unsqueeze(3).expand_as(p_c_p)
        neg_idx = neg.unsqueeze(3).expand_as(p_c_p)
        conf_p = p_c_p[(pos_idx + neg_idx).gt(0)].view((- 1), self.num_classes)
        targets_weighted = p_c_ts[(pos + neg).gt(0)]
        if ((len(targets_weighted) == 0) or (len(conf_p) == 0)):
            # No selected priors: zero loss that still carries a grad_fn.
            loss_c = conf_p.sum()
        else:
            loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
        # Existence loss over the same positive + mined-negative priors.
        pos_idx = pos.unsqueeze(3).expand_as(p_e_p)
        neg_idx = neg.unsqueeze(3).expand_as(p_e_p)
        exist_p = p_e_p[(pos_idx + neg_idx).gt(0)].view((- 1), 2)
        pos_idx = pos.expand_as(p_e_ts)
        neg_idx = neg.expand_as(p_e_ts)
        targets_weighted = p_e_ts[(pos_idx + neg_idx).gt(0)].long()
        if ((len(targets_weighted) == 0) or (len(exist_p) == 0)):
            loss_e = exist_p.sum()
        else:
            loss_e = F.cross_entropy(exist_p, targets_weighted, reduction='sum')
        # Normalise by the number of positives (and frames where applicable).
        N = num_pos.data.sum().float()
        return (((loss_l / N) / num_frames), ((2 * loss_c) / N), ((loss_e / N) / num_frames))
class Dataset(InMemoryDataset):
    """Per-rating graph dataset for a user/item recommender.

    Every (user, item, rating) row becomes a small graph whose nodes are the
    user id + user attributes followed by the item id + item attributes.
    'Inner' edges connect nodes within the user group and within the item
    group; 'outer' edges connect every user node to every item node.
    Processed graphs and summary statistics are cached through the
    ``InMemoryDataset`` machinery.

    Fixes over the original: ``raw_file_names`` / ``processed_file_names``
    are restored as properties (``process`` indexes them as attributes, e.g.
    ``self.raw_file_names[0]``, which fails on a plain method); the skipped-
    rating counter in ``data_2_graphs`` previously incremented an undefined
    name (NameError); the progress step divided by zero for fewer than 1000
    ratings.
    """

    def __init__(self, root, dataset, rating_file, sep, args, transform=None, pre_transform=None):
        self.path = root
        self.dataset = dataset
        self.rating_file = rating_file
        self.sep = sep
        self.store_backup = True  # also dump the train/valid/test split as CSVs
        self.args = args
        super(Dataset, self).__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])
        self.stat_info = torch.load(self.processed_paths[1])
        self.data_num = self.stat_info['data_num']
        self.feature_num = self.stat_info['feature_num']

    @property
    def raw_file_names(self):
        """Paths of the pickled user/item/feature dicts and the rating file."""
        return ['{}{}/user_dict.pkl'.format(self.path, self.dataset), '{}{}/item_dict.pkl'.format(self.path, self.dataset), '{}{}/feature_dict.pkl'.format(self.path, self.dataset), '{}{}/{}'.format(self.path, self.dataset, self.rating_file)]

    @property
    def processed_file_names(self):
        """Relative paths of the cached collated dataset and its statistics."""
        return ['{}/{}.dataset'.format(self.dataset, self.dataset), '{}/{}.statinfo'.format(self.dataset, self.dataset)]

    def download(self):
        # Raw files must already exist locally; nothing to download.
        pass

    def data_2_graphs(self, ratings_df, dataset='train'):
        """Convert each rating row of *ratings_df* into a graph.

        Rows whose user or item is unknown are skipped; the number of skipped
        rows is accumulated on ``self.error_num`` for reporting.
        """
        graphs = []
        skipped = 0
        processed_graphs = 0
        num_graphs = ratings_df.shape[0]
        # Progress step; max(1, ...) guards against num_graphs < 1000, which
        # previously caused a ZeroDivisionError below.
        one_per = max(1, num_graphs // 1000)
        percent = 0.0
        for i in range(len(ratings_df)):
            if (processed_graphs % one_per) == 0:
                print(f'Processing [{dataset}]: {(percent / 10.0)}%, {processed_graphs}/{num_graphs}', end='\r')
                percent += 1
            processed_graphs += 1
            line = ratings_df.iloc[i]
            user_index = self.user_key_type(line[0])
            item_index = self.item_key_type(line[1])
            rating = int(line[2])
            if (item_index not in self.item_dict) or (user_index not in self.user_dict):
                # Unknown user or item: skip the rating but keep a tally
                # (the original incremented an undefined name here).
                skipped += 1
                continue
            user_id = self.user_dict[user_index]['name']
            item_id = self.item_dict[item_index]['title']
            user_attr_list = self.user_dict[user_index]['attribute']
            item_attr_list = self.item_dict[item_index]['attribute']
            user_list = [user_id] + user_attr_list
            item_list = [item_id] + item_attr_list
            graphs.append(self.construct_graphs(user_list, item_list, rating))
        print()
        # Expose the skip count so read_data can report it afterwards.
        self.error_num = getattr(self, 'error_num', 0) + skipped
        return graphs

    def read_data(self):
        """Load the raw dicts and ratings, split them, and build all graphs.

        Returns ``(graphs, stat_info)`` where graphs is the concatenation of
        the train, valid and test graphs (in that order).
        """
        self.user_dict = pickle.load(open(self.userfile, 'rb'))
        self.item_dict = pickle.load(open(self.itemfile, 'rb'))
        # Keys may be ints or strings depending on how the dicts were built;
        # remember the concrete type to coerce the CSV values.
        self.user_key_type = type(list(self.user_dict.keys())[0])
        self.item_key_type = type(list(self.item_dict.keys())[0])
        feature_dict = pickle.load(open(self.featurefile, 'rb'))
        self.error_num = 0
        ratings_df = pd.read_csv(self.ratingfile, sep=self.sep, header=None)
        # 60/20/20 split, stratified on the (user, rating) columns.
        (train_df, test_df) = train_test_split(ratings_df, test_size=0.4, random_state=self.args.random_seed, stratify=ratings_df[[0, 2]])
        (test_df, valid_df) = train_test_split(test_df, test_size=0.5, random_state=self.args.random_seed, stratify=test_df[[0, 2]])
        if self.store_backup:
            backup_path = f'{self.path}{self.dataset}/split_data_backup/'
            if not os.path.exists(backup_path):
                os.mkdir(backup_path)
            train_df.to_csv(f'{backup_path}train_data.csv', index=False)
            valid_df.to_csv(f'{backup_path}valid_data.csv', index=False)
            test_df.to_csv(f'{backup_path}test_data.csv', index=False)
        print('(Only run at the first time training the dataset)')
        train_graphs = self.data_2_graphs(train_df, dataset='train')
        valid_graphs = self.data_2_graphs(valid_df, dataset='valid')
        test_graphs = self.data_2_graphs(test_df, dataset='test')
        graphs = train_graphs + valid_graphs + test_graphs
        stat_info = {
            'data_num': len(graphs),
            'feature_num': len(feature_dict),
            # Boundaries of the train/valid/test ranges inside `graphs`.
            'train_test_split_index': [len(train_graphs), len(train_graphs) + len(valid_graphs)],
        }
        print('error number of data:', self.error_num)
        return (graphs, stat_info)

    def construct_graphs(self, user_list, item_list, rating):
        """Build the inner/outer edge structure for one rating graph."""
        u_n = len(user_list)
        i_n = len(item_list)
        # Inner edges: upper triangle (incl. self-loops) within the user
        # block and within the item block; to_undirected symmetrises them.
        inner_edge_index = [[], []]
        for i in range(u_n):
            for j in range(i, u_n):
                inner_edge_index[0].append(i)
                inner_edge_index[1].append(j)
        for i in range(u_n, (u_n + i_n)):
            for j in range(i, (u_n + i_n)):
                inner_edge_index[0].append(i)
                inner_edge_index[1].append(j)
        # Outer edges: every user node to every item node.
        outer_edge_index = [[], []]
        for i in range(u_n):
            for j in range(i_n):
                outer_edge_index[0].append(i)
                outer_edge_index[1].append(u_n + j)
        inner_edge_index = to_undirected(torch.LongTensor(inner_edge_index))
        outer_edge_index = to_undirected(torch.LongTensor(outer_edge_index))
        return self.construct_graph(user_list + item_list, inner_edge_index, outer_edge_index, rating)

    def construct_graph(self, node_list, edge_index_inner, edge_index_outer, rating):
        """Wrap one rating's tensors in a torch_geometric ``Data`` object.

        The outer edges travel with the graph in ``edge_attr`` (transposed).
        """
        x = torch.LongTensor(node_list).unsqueeze(1)
        rating = torch.FloatTensor([rating])
        return Data(x=x, edge_index=edge_index_inner, edge_attr=torch.transpose(edge_index_outer, 0, 1), y=rating)

    def process(self):
        """Build and cache the collated dataset plus its statistics."""
        self.userfile = self.raw_file_names[0]
        self.itemfile = self.raw_file_names[1]
        self.featurefile = self.raw_file_names[2]
        self.ratingfile = self.raw_file_names[3]
        (graphs, stat_info) = self.read_data()
        if not os.path.exists(f'{self.path}processed/{self.dataset}'):
            os.mkdir(f'{self.path}processed/{self.dataset}')
        (data, slices) = self.collate(graphs)
        torch.save((data, slices), self.processed_paths[0])
        torch.save(stat_info, self.processed_paths[1])

    def feature_N(self):
        """Number of distinct features in the dataset."""
        return self.feature_num

    def data_N(self):
        """Number of graphs (ratings) in the dataset."""
        return self.data_num
def cache_temp_view(df: SparkDataFrame, name: str) -> None:
    """Register *df* as a temporary Spark view called *name* and cache it."""
    session = State().session
    df.createOrReplaceTempView(name)
    # CACHE TABLE works on temp views as well as real tables.
    session.sql(f'cache table {name}')
# NOTE(review): the bare call below looks like a stripped
# ``@add_start_docstrings(...)`` decorator -- confirm against upstream.
_start_docstrings('CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer\n    on top of the pooled output) e.g. for GLUE tasks. ', CAMEMBERT_START_DOCSTRING)
class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification):
    """TF CamemBERT for sequence classification/regression: reuses the
    RoBERTa implementation wholesale, only the config class differs."""
    # Ties this model class to the CamemBERT configuration.
    config_class = CamembertConfig
def test_onnx():
    """Load MobileNetV2 from ONNX with tract, optimize it, and check that the
    Grace Hopper test image is classified as ImageNet class 652.

    Requires ``./mobilenetv2-7.onnx`` on disk.
    """
    model = tract.onnx().model_for_path('./mobilenetv2-7.onnx').into_optimized().into_runnable()
    result = model.run([grace_hopper_1x3x224x244()])
    confidences = result[0].to_numpy()
    assert (numpy.argmax(confidences) == 652)
class BMType(Enum):
    """Benchmark variant identifiers.

    NOTE(review): semantics inferred from the names only -- KML vs VAN
    implementations, sequential (SEQ) vs random (RAND) access, with an
    optional majority (_MAJ) flavor; RA stands alone. Confirm against the
    code that consumes these values.
    """
    KML_SEQ = 0
    KML_RAND = 1
    VAN_SEQ = 2
    VAN_RAND = 3
    RA = 4
    KML_SEQ_MAJ = 5
    KML_RAND_MAJ = 6
    VAN_SEQ_MAJ = 7
    VAN_RAND_MAJ = 8
def relation_matrix_wtk_g0(syms, sign, field, sparse):
    """Compute the relation matrix for a space of modular symbols.

    Builds the S (and, for nonzero sign, I) relations on *syms*, quotients
    by them as 2-term relations, then forms the T-relation matrix over
    *field*.  Returns ``(R, mod)`` where R is the relation matrix and mod
    the 2-term quotient mapping.
    """
    rels = modS_relations(syms)
    if (sign != 0):
        # Sign quotient: add the I (involution) relations.
        rels.update(modI_relations(syms, sign))
    rels = sorted(rels)
    if (syms._apply_S_only_0pm1() and is_RationalField(field)):
        # Fast Cython path when all S-relation coefficients are 0 or +/-1
        # over the rationals.
        from . import relation_matrix_pyx
        mod = relation_matrix_pyx.sparse_2term_quotient_only_pm1(rels, len(syms))
    else:
        mod = sparse_2term_quotient(rels, len(syms), field)
    R = T_relation_matrix_wtk_g0(syms, mod, field, sparse)
    return (R, mod)
def readEduDoc(fedu, doc):
    """Populate *doc* with token and EDU indices read from an EDU file.

    Each non-empty line of *fedu* is one EDU (elementary discourse unit).
    Builds and attaches:
      - ``doc.tokendict``: global token index -> token string
      - ``doc.edudict``:   1-based EDU index -> list of global token indices

    Returns *doc*.  Raises IOError when *fedu* does not exist.

    (Cleanup: the original assigned the line to an unused local ``eduTxt``.)
    """
    if not os.path.isfile(fedu):
        raise IOError("File doesn't exist: {}".format(fedu))
    gidx, eidx = 0, 1  # global token counter, 1-based EDU counter
    tokendict, edudict = {}, {}
    with open(fedu, 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                continue  # skip blank lines
            edudict[eidx] = []
            for tok in TOKENIZER.tokenize(line):
                tokendict[gidx] = tok
                edudict[eidx].append(gidx)
                gidx += 1
            eidx += 1
    doc.tokendict = tokendict
    doc.edudict = edudict
    return doc
def load_triples_from_txt(filename, words_indexes=None, parse_line=parse_line):
    """Load (subject, relation, object) -> value triples from a text file.

    Parameters
    ----------
    filename : str
        Path to the file; every line is decoded by ``parse_line`` into
        ``(sub, obj, rel, val)``.
    words_indexes : dict or None
        Optional pre-existing token -> integer index mapping; new tokens are
        assigned indices after the current maximum. A fresh mapping is built
        when ``None``.
    parse_line : callable
        Line parser returning ``(sub, obj, rel, val)``.

    Returns
    -------
    tuple
        ``(data, words_indexes, indexes_words)`` where ``data`` maps
        ``(sub_idx, rel_idx, obj_idx)`` to ``val`` and ``indexes_words`` is
        the inverse of ``words_indexes``.
    """
    if words_indexes is None:  # bug fix: was `== None`
        words_indexes = {}
        entities = set()
        next_ent = 0
    else:
        entities = set(words_indexes)
        next_ent = max(words_indexes.values()) + 1

    def _index_of(token):
        # Return the index of `token`, assigning the next free one on first sight.
        # Replaces three identical copy-pasted if/else stanzas from the original.
        nonlocal next_ent
        if token not in entities:
            words_indexes[token] = next_ent
            entities.add(token)
            next_ent += 1
        return words_indexes[token]

    data = {}
    with open(filename) as f:
        # Stream the file line-by-line instead of materializing readlines().
        for line in f:
            (sub, obj, rel, val) = parse_line(line)
            # Tuple elements evaluate left-to-right, preserving the original
            # sub -> rel -> obj index-assignment order.
            data[(_index_of(sub), _index_of(rel), _index_of(obj))] = val
    indexes_words = {idx: tok for (tok, idx) in words_indexes.items()}
    return (data, words_indexes, indexes_words)
def permute_map(map_entry: nodes.MapEntry, perm: List[int]):
    """Reorder the parameters and ranges of ``map_entry``'s map in place
    according to the permutation ``perm`` (new position i takes old perm[i])."""
    the_map = map_entry.map
    the_map.params = [the_map.params[i] for i in perm]
    the_map.range = [the_map.range[i] for i in perm]
def adaptive_avg_pool2d(input, output_size):
    """Wrapper around ``F.adaptive_avg_pool2d`` that tolerates empty batches on
    old PyTorch (< 1.9) by emitting a correctly-shaped empty tensor instead of
    letting the op fail on zero elements."""
    needs_empty_workaround = (input.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))
    if not needs_empty_workaround:
        return F.adaptive_avg_pool2d(input, output_size)
    size = [output_size, output_size] if isinstance(output_size, int) else output_size
    full_shape = list(input.shape[:2]) + list(size)
    return NewEmptyTensorOp.apply(input, full_shape)
def get_sources(module, surfix='*.c*'):
    """Collect C/C++/CUDA source files for an extension ``module``.

    ``module`` is a dotted module path; ``surfix`` (sic — glob pattern, name
    kept for caller compatibility) is matched under ``<module>/src`` and its
    ``cuda`` and ``cpu`` subdirectories. Returns the combined list, in that
    directory order.
    """
    src_dir = osp.join(*module.split('.'), 'src')
    matches = []
    for folder in (src_dir, osp.join(src_dir, 'cuda'), osp.join(src_dir, 'cpu')):
        matches.extend(glob(osp.join(folder, surfix)))
    return matches
# NOTE(review): this bare `_model` expression is almost certainly the mangled
# remnant of a `@register_model` decorator (timm convention) — confirm against
# upstream; as written it only evaluates (and discards) a name.
_model
def skresnet18(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Selective-Kernel ResNet-18.

    Uses the lighter SK attention variant (small minimum attention channels,
    higher reduction, split input) and optionally loads pretrained weights.
    """
    default_cfg = default_cfgs['skresnet18']
    # Selective-kernel attention settings for the lightweight SK-ResNet-18 variant.
    sk_kwargs = dict(min_attn_channels=16, attn_reduction=8, split_input=True)
    model = ResNet(SelectiveKernelBasic, [2, 2, 2, 2], num_classes=num_classes, in_chans=in_chans, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration of the text (RoBERTa-style) encoder of a BridgeTower model.

    All constructor arguments are copied verbatim onto the instance; the
    token-id attributes are set after ``super().__init__`` so they are not
    forwarded through ``kwargs``.
    """
    model_type = 'bridgetower_text_model'
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    # NOTE(review): takes `cls` but carries no @classmethod decorator — almost
    # certainly stripped in extraction (upstream transformers declares it
    # @classmethod); confirm before calling via an instance.
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load the text sub-config; unwraps 'text_config' from a full 'bridgetower' config."""
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if (config_dict.get('model_type') == 'bridgetower'):
            # A composite BridgeTower config nests the text settings here.
            config_dict = config_dict['text_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def plot_results(graph, params, results):
    """Plot normalized infected-node curves for each effective strength.

    Saves the figure as ``./plots/<title>/<title>.pdf`` (directories created as
    needed), shows it, then clears the current figure.
    """
    plt.figure(figsize=(6.4, 4.8))
    title = '{}_epidemic:diffusion={},method={},k={}'.format(params['model'], params['diffusion'], params['method'], params['k'])
    n_nodes = len(graph)
    for strength, curve in results.items():
        # Normalize the infected counts by the total number of nodes.
        normalized = [value / n_nodes for value in curve]
        plt.plot(normalized, label='Effective strength: {}'.format(strength))
    plt.xlabel('Steps')
    plt.ylabel('Infected Nodes')
    plt.legend()
    plt.yscale('log')
    plt.ylim(0.001, 1)
    plt.title(title)
    save_dir = os.path.join(os.getcwd(), 'plots', title)
    os.makedirs(save_dir, exist_ok=True)
    plt.savefig(os.path.join(save_dir, title + '.pdf'))
    plt.show()
    plt.clf()
class PlotReconstructionCallback(tfk.callbacks.Callback):
    """Keras callback that logs side-by-side input/reconstruction figures to
    TensorBoard at the end of every epoch."""
    def __init__(self, logdir: str, test_ds: tf.data.Dataset, nex: int=4):
        # logdir: TensorBoard run directory (a 'reconstructions' subdir is used).
        # test_ds: dataset yielding (x, y) batches; only x is used.
        # nex: number of examples plotted per epoch.
        super(PlotReconstructionCallback, self).__init__()
        logdir = os.path.join(logdir, 'reconstructions')
        self.file_writer = tf.summary.create_file_writer(logdir=logdir)
        self.nex = nex
        # Re-batch the inputs so each draw yields exactly `nex` images.
        self.test_ds = test_ds.map((lambda x, y: x)).unbatch().batch(nex)
        self.test_it = iter(self.test_ds)
    def get_next_images(self):
        """Return the next batch of test images, restarting the iterator when exhausted."""
        try:
            next_images = next(self.test_it)
        except StopIteration:
            self.test_it = iter(self.test_ds)
            next_images = next(self.test_it)
        return next_images
    def plot_img_reconstruction(self, image, reconstruction):
        """Build a matplotlib figure showing `image` and `reconstruction` side by side."""
        (fig, ax) = plt.subplots(nrows=1, ncols=2)
        if (image.shape[(- 1)] == 1):
            # Grayscale: drop the trailing channel axis so imshow accepts it.
            image = tf.squeeze(image, axis=(- 1))
            reconstruction = tf.squeeze(reconstruction, axis=(- 1))
        ax[0].imshow(image, vmin=0.0, vmax=1.0, cmap=plt.cm.Greys)
        ax[0].set_title('Image')
        ax[0].axis('off')
        ax[1].imshow(reconstruction, vmin=0.0, vmax=1.0, cmap=plt.cm.Greys)
        ax[1].set_title('Reconstruction')
        ax[1].axis('off')
        return fig
    def on_epoch_end(self, epoch, logs=None):
        """Run the model on `nex` test images and write the figures to TensorBoard."""
        images = self.get_next_images()
        reconstructions = self.model(images)
        imgs = []
        for i in range(self.nex):
            fig = self.plot_img_reconstruction(images[i], reconstructions[i])
            imgs.append(plot_to_image(fig))
        imgs = tf.concat(imgs, axis=0)
        with self.file_writer.as_default():
            tf.summary.image(name='Reconstructions', data=imgs, step=epoch, max_outputs=self.nex)
class NanDetector():
    """Context manager that installs forward/backward hooks on every module of
    ``model`` and logs the first NaN/Inf found in activations or gradients.
    On exit it also dumps per-parameter gradient norms and any NaN/Inf grads.
    """
    def __init__(self, model, forward=True, backward=True):
        # forward / backward select which hook directions are installed.
        self.bhooks = []
        self.fhooks = []
        self.forward = forward
        self.backward = backward
        self.named_parameters = list(model.named_parameters())
        self.reset()
        for (name, mod) in model.named_modules():
            # Stash the module path so hook messages can identify the source.
            mod.__module_name = name
            self.add_hooks(mod)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Dump gradient norms, report NaN/Inf gradients, then detach all hooks.
        norm = {}
        gradients = {}
        for (name, param) in self.named_parameters:
            if (param.grad is not None):
                grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32)
                norm[name] = grad_norm.item()
                if (torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any()):
                    gradients[name] = param.grad.data
        if (len(gradients) > 0):
            logger.info('Detected nan/inf grad norm, dumping norms...')
            logger.info(f'norms: {norm}')
            logger.info(f'gradients: {gradients}')
        self.close()
    def add_hooks(self, module):
        # NOTE(review): register_backward_hook is deprecated in newer PyTorch in
        # favor of register_full_backward_hook — confirm the targeted version.
        if self.forward:
            self.fhooks.append(module.register_forward_hook(self.fhook_fn))
        if self.backward:
            self.bhooks.append(module.register_backward_hook(self.bhook_fn))
    def reset(self):
        # Only the first offending tensor per direction is reported.
        self.has_printed_f = False
        self.has_printed_b = False
    def _detect(self, tensor, name, backward):
        """Return a description string if ``tensor`` contains NaN/Inf, else None."""
        err = None
        if (torch.is_floating_point(tensor) and (tensor.numel() >= 2)):
            with torch.no_grad():
                if torch.isnan(tensor).any():
                    err = 'NaN'
                elif torch.isinf(tensor).any():
                    err = 'Inf'
        if (err is not None):
            err = f"{err} detected in output of {name}, shape: {tensor.shape}, {('backward' if backward else 'forward')}"
        return err
    def _apply(self, module, inp, x, backward):
        """Recursively scan hook output ``x`` (tensor / dict / list / tuple) and log the first NaN/Inf."""
        if torch.is_tensor(x):
            if (isinstance(inp, tuple) and (len(inp) > 0)):
                inp = inp[0]
            err = self._detect(x, module.__module_name, backward)
            if (err is not None):
                if (torch.is_tensor(inp) and (not backward)):
                    # Include the input range to help locate the overflow source.
                    err += f' input max: {inp.max().item()}, input min: {inp.min().item()}'
                has_printed_attr = ('has_printed_b' if backward else 'has_printed_f')
                logger.warning(err)
                setattr(self, has_printed_attr, True)
        elif isinstance(x, dict):
            for v in x.values():
                self._apply(module, inp, v, backward)
        elif (isinstance(x, list) or isinstance(x, tuple)):
            for v in x:
                self._apply(module, inp, v, backward)
    def fhook_fn(self, module, inp, output):
        if (not self.has_printed_f):
            self._apply(module, inp, output, backward=False)
    def bhook_fn(self, module, inp, output):
        if (not self.has_printed_b):
            self._apply(module, inp, output, backward=True)
    def close(self):
        """Remove all registered hooks."""
        for hook in (self.fhooks + self.bhooks):
            hook.remove()
class InceptionV3(nn.Module):
    """InceptionV3 feature extractor for FID-style metrics.

    Groups the network into 4 feature blocks; ``forward`` returns the outputs
    of the blocks selected by ``output_blocks`` (ascending order), stopping as
    soon as the deepest requested block has been computed.
    """
    # Index of the default (final average pool, 2048-d) feature block.
    DEFAULT_BLOCK_INDEX = 3
    # Maps feature dimensionality to the block index that produces it.
    BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}

    def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True, normalize_input=True, requires_grad=False, use_fid_inception=True):
        """
        output_blocks: block indices (0-3) to return. Fix: the default was a
            shared mutable list ([DEFAULT_BLOCK_INDEX]); it is now an
            equivalent immutable tuple.
        resize_input: bilinearly resize inputs to 299x299 first.
        normalize_input: map inputs from [0, 1] to [-1, 1].
        requires_grad: whether the Inception weights receive gradients.
        use_fid_inception: use the FID-specific pretrained weights instead of
            torchvision's standard ones.
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert (self.last_needed_block <= 3), 'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = _inception_v3(pretrained=True)
        # Block 0: input -> first max pool (64 channels).
        block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: -> second max pool (192 channels).
        if (self.last_needed_block >= 1):
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: -> pre-aux feature maps (768 channels).
        if (self.last_needed_block >= 2):
            block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: -> global average pooling (2048 channels).
        if (self.last_needed_block >= 3):
            block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Return the list of selected block outputs for ``inp`` (N,3,H,W in [0,1])."""
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        if self.normalize_input:
            # Scale [0, 1] -> [-1, 1] as the pretrained network expects.
            x = ((2 * x) - 1)
        for (idx, block) in enumerate(self.blocks):
            x = block(x)
            if (idx in self.output_blocks):
                outp.append(x)
            if (idx == self.last_needed_block):
                # No need to run deeper blocks.
                break
        return outp
class ImageDataset(Dataset):
    """Dataset of single images repeated into 16-frame pseudo-videos, each with
    a text prompt (caption file, fixed prompt, or fallback).

    Fixes: ``__getname__`` is now a ``@staticmethod`` — it was a zero-argument
    function, so the existing ``self.__getname__()`` call in ``__getitem__``
    raised TypeError; and the image-decoding fallback now catches
    ``Exception`` instead of a bare ``except``.
    """

    def __init__(self, tokenizer=None, width: int=256, height: int=256, base_width: int=256, base_height: int=256, use_caption: bool=False, image_dir: str='', single_img_prompt: str='', use_bucketing: bool=False, fallback_prompt: str='', **kwargs):
        self.tokenizer = tokenizer
        # Accepted image file extensions.
        self.img_types = ('.png', '.jpg', '.jpeg', '.bmp')
        self.use_bucketing = use_bucketing
        # Sorted list of image paths, or [''] when image_dir does not exist.
        self.image_dir = self.get_images_list(image_dir)
        self.fallback_prompt = fallback_prompt
        self.use_caption = use_caption
        self.single_img_prompt = single_img_prompt
        self.width = width
        self.height = height

    def get_images_list(self, image_dir):
        """Return sorted '<dir>/<file>' paths of images in ``image_dir``; [''] if the dir is missing."""
        if not os.path.exists(image_dir):
            return ['']
        imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]
        return sorted(f'{image_dir}/{img}' for img in imgs)

    def image_batch(self, index):
        """Load image ``index``, resize (optionally bucketed), repeat to 16 frames, and build its prompt."""
        train_data = self.image_dir[index]
        img = train_data
        try:
            img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)
        except Exception:
            # Fall back to PIL for formats torchvision cannot decode.
            img = T.transforms.PILToTensor()(Image.open(img).convert('RGB'))
        width = self.width
        height = self.height
        if self.use_bucketing:
            (_, h, w) = img.shape
            (width, height) = sensible_buckets(width, height, w, h)
        resize = T.transforms.Resize((height, width), antialias=True)
        img = resize(img)
        img = repeat(img, 'c h w -> f c h w', f=16)
        # NOTE(review): use_caption is passed as the literal True here, ignoring
        # self.use_caption — confirm whether that is intentional.
        prompt = get_text_prompt(file_path=train_data, text_prompt=self.single_img_prompt, fallback_prompt=self.fallback_prompt, ext_types=self.img_types, use_caption=True)
        prompt_ids = get_prompt_ids(prompt, self.tokenizer)
        return (img, prompt, prompt_ids)

    @staticmethod
    def __getname__():
        """Dataset-type tag used for routing in the training loop."""
        return 'image'

    def __len__(self):
        if os.path.exists(self.image_dir[0]):
            return len(self.image_dir)
        return 0

    def __getitem__(self, index):
        (img, prompt, prompt_ids) = self.image_batch(index)
        # Pixel values are scaled from [0, 255] to [-1, 1].
        example = {'pixel_values': ((img / 127.5) - 1.0), 'prompt_ids': prompt_ids[0], 'text_prompt': prompt, 'dataset': self.__getname__()}
        return example
def skip(splits, save_folder, conf):
    """Return True when this preprocessing run can be skipped.

    True only when every requested split file, the ``codes`` folder, and a
    saved options file whose contents equal ``conf`` all exist under
    ``save_folder`` (a pathlib-like path).
    """
    split_files = {'train': TRAIN_JSON, 'valid': VALID_JSON, 'valid_small': VALID_SMALL, 'test': TEST_JSON}
    # Any missing split output forces a re-run.
    if any(not (save_folder / split_files[s]).exists() for s in splits):
        return False
    if not (save_folder / 'codes').exists():
        return False
    save_opt = save_folder / OPT_FILE
    if not save_opt.is_file():
        return False
    # Only skip when the previously saved options match the current config.
    opts_old = load_pkl(save_opt.as_posix())
    return bool(opts_old == conf)
class TransformBase(metaclass=AutodocABCMeta):
    """Abstract base class for time-series transforms with optional inversion.

    Fix: ``proper_inversion``, ``requires_inversion_state`` and
    ``identity_inversion`` are restored as properties, and ``from_dict`` as a
    classmethod — the decorators were evidently lost: the class itself reads
    ``self.proper_inversion`` (in ``invert``) and
    ``self.requires_inversion_state`` (in ``identity_inversion``) *without
    calling them*, which made those checks tautologically truthy and silenced
    the non-invertibility warning.
    """

    def __init__(self):
        # Opaque state captured by __call__ that invert() consumes.
        self.inversion_state = None

    @property
    def proper_inversion(self):
        """Whether invert() exactly recovers the original series."""
        return False

    @property
    def requires_inversion_state(self):
        """Whether invert() needs state recorded by a prior forward call."""
        return True

    @property
    def identity_inversion(self):
        """Whether invert() is a no-op (i.e. no inversion state is needed)."""
        return (not self.requires_inversion_state)

    def to_dict(self):
        """Serialize the constructor arguments (plus the class name) to a plain dict."""
        state = {'name': type(self).__name__}
        for k in inspect.signature(self.__init__).parameters:
            v = getattr(self, k)
            # Enums serialize by name; everything else is deep-copied.
            state[k] = (v.name if isinstance(v, Enum) else deepcopy(v))
        return state

    @classmethod
    def from_dict(cls, state: dict):
        """Inverse of to_dict (the 'name' key must already be removed)."""
        return cls(**state)

    def __getstate__(self):
        return {k: v for (k, v) in self.to_dict().items() if (k != 'name')}

    def __setstate__(self, state):
        self.__init__(**state)

    def train(self, time_series: TimeSeries):
        raise NotImplementedError

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        raise NotImplementedError

    def invert(self, time_series: TimeSeries, retain_inversion_state=False) -> TimeSeries:
        """Apply the inverse transform, consuming (or retaining) the inversion state."""
        if (not self.proper_inversion):
            logger.warning(f'Transform {self} is not strictly invertible. Calling invert() is not guaranteed to recover the original time series exactly!')
        if (self.requires_inversion_state and (self.inversion_state is None)):
            raise RuntimeError('Inversion state not set. Please call this transform on an input time series before calling invert(). If you are trying to call invert() a second time, please supply the option `retain_inversion_state=True` to the first call.')
        inverted = self._invert(time_series)
        if (not retain_inversion_state):
            # Single-use by default so stale state is never reused by accident.
            self.inversion_state = None
        return inverted

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        return time_series

    def __repr__(self):
        kwargs = self.to_dict()
        name = kwargs.pop('name')
        kwargs_str = ', '.join((f'{k}={v}' for (k, v) in sorted(kwargs.items())))
        return f'{name}({kwargs_str})'
def test_pair_tuple_crop():
    """crop() with a single (before, after) pair must trim both ends of every axis."""
    arr = np.arange(45).reshape(9, 5)
    cropped = crop(arr, ((1, 2),))
    assert_equal(cropped.shape, (6, 2))
    assert_array_equal(cropped[0], [6, 7])
    assert_array_equal(cropped[-1], [31, 32])
def _make_taus(batch_size: int, n_quantiles: int, training: bool, device: torch.device) -> torch.Tensor:
if training:
taus = torch.rand(batch_size, n_quantiles, device=device)
else:
taus = torch.linspace(start=0, end=1, steps=n_quantiles, device=device, dtype=torch.float32)
taus = taus.view(1, (- 1)).repeat(batch_size, 1)
return taus |
def load_rgb(path, downscale=1):
    """Load an image from ``path`` as float32 RGB in [0, 1], optionally
    rescaled by ``1/downscale`` (channels preserved, no anti-aliasing)."""
    image = skimage.img_as_float32(imageio.imread(path))
    if downscale != 1:
        image = rescale(image, 1.0 / downscale, anti_aliasing=False, channel_axis=-1)
    return image
# --- BLIP-captioned SUN360 panorama dataset builder ------------------------
# Captions each panorama with BLIP-2, writes a masked "source" and a full
# "target" JPEG pair, and emits train/test JSON manifests.
dataset_dir = '/data/chenziyu/myprojects/PanoData'
root_dir = '/data/chenziyu/myprojects/PanoData/my_sun360'
mask_path = '/data/chenziyu/myprojects/OmniDreamer/assets/90binarymask.png'
split = ['train', 'val', 'test']
train_num = 33000
test_num = 1260
downscale = 2
out_dir = '{}/BLIP_sun360_d{:01}_t{:05}_v{:05}'.format(dataset_dir, downscale, train_num, test_num)
source_dir = os.path.join(out_dir, 'source')
target_dir = os.path.join(out_dir, 'target')
os.makedirs(source_dir, exist_ok=True)
os.makedirs(target_dir, exist_ok=True)
# Every .jpg under root_dir, sorted for a deterministic train/test ordering.
path_dict = sorted([y for x in os.walk(root_dir) for y in glob.glob(os.path.join(x[0], '*.jpg'))])
mask = load_rgb(mask_path, downscale=downscale)
train_data = []
test_data = []
processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b', torch_dtype=torch.float16)
model.to('cuda')
a = 1  # NOTE(review): unused leftover — safe to delete.
for idx in tqdm(range(0, (train_num + test_num))):
    try:
        image = load_rgb(path_dict[idx], downscale=downscale)
        image_BLIP = Image.open(path_dict[idx])
        inputs = processor(images=image_BLIP, return_tensors='pt').to('cuda', torch.float16)
        generated_ids = model.generate(**inputs)
        generated_prompt = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        # Source = image with the binary mask applied; target = full image.
        source = np.multiply(image, mask)
        target = np.clip((image * 255), 0, 255).astype(np.uint8)
        source = np.clip((source * 255), 0, 255).astype(np.uint8)
        if (idx < train_num):
            source_path = '{}/train_{:05}.jpg'.format(source_dir, idx)
            target_path = '{}/train_{:05}.jpg'.format(target_dir, idx)
            # Manifest stores paths relative to out_dir ('source/...', 'target/...').
            image_dict = {'source': '{}/train_{:05}.jpg'.format('source', idx), 'target': '{}/train_{:05}.jpg'.format('target', idx), 'prompt': generated_prompt}
            train_data.append(image_dict)
        elif ((idx >= train_num) and (idx < (train_num + test_num))):
            source_path = '{}/test_{:05}.jpg'.format(source_dir, idx)
            target_path = '{}/test_{:05}.jpg'.format(target_dir, idx)
            image_dict = {'source': '{}/test_{:05}.jpg'.format('source', idx), 'target': '{}/test_{:05}.jpg'.format('target', idx), 'prompt': generated_prompt}
            test_data.append(image_dict)
        else:
            break
        cv2.imwrite(source_path, cv2.cvtColor(source, cv2.COLOR_RGB2BGR))
        cv2.imwrite(target_path, cv2.cvtColor(target, cv2.COLOR_RGB2BGR))
    # NOTE(review): bare except silently skips ANY failure (bad image, CUDA OOM,
    # even KeyboardInterrupt) — consider narrowing to `except Exception`.
    except:
        continue
train_json_path = '{}/train.json'.format(out_dir)
test_json_path = '{}/test.json'.format(out_dir)
with open(train_json_path, 'w') as out_file:
    json.dump(train_data, out_file, sort_keys=True, indent=4, ensure_ascii=False)
with open(test_json_path, 'w') as out_file:
    json.dump(test_data, out_file, sort_keys=True, indent=4, ensure_ascii=False)
def at_loss(forward, x, y, train=True, epsilon=8.0):
    """Adversarial-training (FGSM-style) loss.

    Backprops the clean cross-entropy to obtain the input gradient, perturbs
    ``x`` by ``epsilon`` along the normalized gradient direction, and returns
    the cross-entropy on the perturbed input. Batch statistics are frozen for
    both forward passes.
    """
    clean_loss = cross_entropy(forward(x, train=train, update_batch_stats=False), y)
    clean_loss.backward()
    xp = cuda.get_array_module(x.data)
    direction = get_normalized_vector(x.grad, xp)
    x_adv = x + epsilon * direction
    return cross_entropy(forward(x_adv, train=train, update_batch_stats=False), y)
def train_val_split(base_dataset: 'torchvision.datasets.CIFAR10'):
    """Stratified 90/10 train/validation split over 10 classes.

    ``base_dataset`` is a sequence of integer class labels (e.g. CIFAR-10
    ``targets``); the annotation is stringified so the function is importable
    without torchvision. Returns ``(train_idxs, val_idxs)``, both shuffled.

    Bug fix: the training split previously extended with ``idxs[:]`` (ALL of
    each class's indices, including the validation ones); it now takes
    ``idxs[:train_n]`` so the two splits are disjoint.
    """
    num_classes = 10
    labels = np.array(base_dataset)
    # Per-class number of training samples (90% of the per-class total).
    train_n = int(len(labels) * 0.9 / num_classes)
    train_idxs = []
    val_idxs = []
    for cls in range(num_classes):
        idxs = np.where(labels == cls)[0]
        np.random.shuffle(idxs)
        train_idxs.extend(idxs[:train_n])
        val_idxs.extend(idxs[train_n:])
    np.random.shuffle(train_idxs)
    np.random.shuffle(val_idxs)
    return (train_idxs, val_idxs)
class TreeBeam(object):
    """Beam-search state for grammar-based (tree-structured) decoding.

    Each hypothesis carries a stack of pending non-terminals (with the rule
    and decoder state that produced them); a hypothesis is finished when its
    stack becomes empty.
    """
    def __init__(self, size, cuda, vocabs, rnn_size):
        # size: beam width; vocabs: dict of torchtext-style vocabs
        # ('nt', 'prev_rules', 'next_rules'); rnn_size: decoder hidden size.
        self.size = size
        self.vocabs = vocabs
        self.tt = (torch.cuda if cuda else torch)
        self.rnn_size = rnn_size
        # Cumulative log-prob score of each hypothesis on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # Backpointers and emitted rules per time step.
        self.prevKs = []
        self.nextYs = [self.tt.LongTensor(self.size).fill_(self.vocabs['next_rules'].stoi['<blank>'])]
        self.valid = [[0]]
        self.nextYs[0][0] = self.vocabs['prev_rules'].stoi['<s>']
        self.eosTop = False
        self.attn = []
        self.finished = []
        # One stack of (non-terminal, parent rule, parent decoder state) per hypothesis.
        self.stacks = [[('MemberDeclaration', '<s>', Variable(self.tt.FloatTensor(1, 1, self.rnn_size).zero_(), requires_grad=False))] for i in range(0, self.size)]
    def getCurrentState(self):
        """Build the decoder input batch for the current frontier of the beam."""
        batch = {'nt': self.tt.LongTensor(self.size, 1), 'prev_rules': self.tt.LongTensor(self.size, 1), 'parent_rules': self.tt.LongTensor(self.size, 1), 'parent_states': {}}
        for i in range(0, len(self.nextYs[(- 1)])):
            if (len(self.prevKs) == 0):
                rule = '<s>'
            elif (self.nextYs[(- 1)][i] >= len(self.vocabs['next_rules'])):
                rule = '<unk>'
            else:
                rule = self.vocabs['next_rules'].itos[self.nextYs[(- 1)][i]]
            if (len(self.stacks[i]) == 0):
                # Finished hypothesis: feed the same sentinel used at start.
                (nt, parent_rule, parent_state) = ('MemberDeclaration', '<s>', Variable(self.tt.FloatTensor(1, 1, self.rnn_size).zero_(), requires_grad=False))
            else:
                (nt, parent_rule, parent_state) = self.stacks[i][(- 1)]
            batch['nt'][i][0] = self.vocabs['nt'].stoi[nt]
            batch['prev_rules'][i][0] = self.vocabs['prev_rules'].stoi[CDDataset.getAnonRule(rule)]
            batch['parent_rules'][i][0] = self.vocabs['prev_rules'].stoi[parent_rule]
            batch['parent_states'][i] = {}
            batch['parent_states'][i][0] = parent_state
        return batch
    def getCurrentOrigin(self):
        """Backpointers of the most recent step."""
        return self.prevKs[(- 1)]
    def advance(self, wordLk, attnOut, rnn_output):
        """Advance the beam one step given per-hypothesis rule log-probs ``wordLk``."""
        numWords = wordLk.size(1)
        if (len(self.prevKs) > 0):
            beamLk = (wordLk + self.scores.unsqueeze(1).expand_as(wordLk))
            for i in range(self.nextYs[(- 1)].size(0)):
                if (len(self.stacks[i]) == 0):
                    # Finished hypotheses must not be extended further.
                    beamLk[i] = (- 1e+20)
        else:
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view((- 1))
        (bestScores, bestScoresId) = flatBeamLk.topk(self.size, 0, True, True)
        self.scores = bestScores
        oldStacks = self.stacks
        self.stacks = [[] for i in range(0, self.size)]
        # NOTE(review): integer-tensor true division — on modern PyTorch this
        # likely needs torch.div(..., rounding_mode='floor') (or //) to stay
        # integral; confirm the targeted torch version.
        prevK = (bestScoresId / numWords)
        self.prevKs.append(prevK)
        self.nextYs.append((bestScoresId - (prevK * numWords)))
        self.attn.append(attnOut.index_select(0, prevK))
        self.stacks = [copy.deepcopy(oldStacks[k]) for k in prevK]
        for i in range(0, self.size):
            currentRule = (bestScoresId[i] - (prevK[i] * numWords))
            if (currentRule >= len(self.vocabs['next_rules'])):
                rule = '<unk>'
            else:
                rule = self.vocabs['next_rules'].itos[currentRule]
            try:
                # Pop the non-terminal this rule just expanded.
                self.stacks[i].pop()
            except:
                pass
            if (not CDDataset._is_terminal_rule(rule)):
                if (rule != '<blank>'):
                    # Push RHS non-terminals right-to-left so the leftmost is expanded next.
                    for elem in rhs(rule).split('___')[::(- 1)]:
                        if elem[0].isupper():
                            self.stacks[i].append((elem, rule, rnn_output[prevK[i]].unsqueeze(0)))
        for i in range(self.nextYs[(- 1)].size(0)):
            if (len(self.stacks[i]) == 0):
                # Empty stack => complete derivation; record it.
                s = self.scores[i]
                self.finished.append((s, (len(self.nextYs) - 1), i))
        if (len(self.stacks[0]) == 0):
            # Best-ranked hypothesis finished.
            self.eosTop = True
    def done(self):
        return (self.eosTop and (len(self.finished) >= 1))
    def getFinal(self):
        """Best finished hypothesis (falls back to the current best partial one)."""
        if (len(self.finished) == 0):
            self.finished.append((self.scores[0], (len(self.nextYs) - 1), 0))
        self.finished.sort(key=(lambda a: (- a[0])))
        return self.finished[0]
    def getHyp(self, timestep, k):
        """Walk the backpointers from (timestep, k) and return (rules, attentions)."""
        (hyp, attn) = ([], [])
        for j in range((len(self.prevKs[:timestep]) - 1), (- 1), (- 1)):
            hyp.append(self.nextYs[(j + 1)][k])
            attn.append(self.attn[j][k])
            k = self.prevKs[j][k]
        return (hyp[::(- 1)], torch.stack(attn[::(- 1)]))
class DNNLowPElementwiseLinearOpTest(hu.HypothesisTestCase):
    """Hypothesis test comparing quantized (DNNLOWP) ElementwiseLinear against
    the fp32 reference over random shapes and quantization toggles."""
    # NOTE(review): the next line is not valid Python — it looks like a
    # hypothesis `@given(...)` decorator whose `@given` prefix was lost in
    # extraction; restore it before running this test.
    (N=st.integers(32, 256), D=st.integers(32, 256), empty_batch=st.booleans(), in_quantized=st.booleans(), out_quantized=st.booleans(), **hu.gcs_cpu_only)
    def test_dnnlowp_elementwise_linear_int(self, N, D, empty_batch, in_quantized, out_quantized, gc, dc):
        if empty_batch:
            N = 0
        # Input range spans a full 256-value quantization window.
        min_ = (- 100)
        max_ = (min_ + 255)
        X = np.round(((np.random.rand(N, D) * (max_ - min_)) + min_))
        X = X.astype(np.float32)
        if (N != 0):
            # Pin the extremes so the quantization range is deterministic.
            X[(0, 0)] = min_
            X[(0, 1)] = max_
        a = np.round(((np.random.rand(D) * 255) - 128)).astype(np.float32)
        a[0] = (- 128)
        a[1] = 127
        b = np.round(((np.random.rand(D) * 255) - 128)).astype(np.float32)
        b[0] = (- 128)
        b[1] = 127
        Output = collections.namedtuple('Output', ['Y', 'op_type', 'engine'])
        outputs = []
        # Run the fp32 reference plus the two quantized engine variants.
        op_engine_list = [('ElementwiseLinear', ''), ('ElementwiseLinear', 'DNNLOWP'), ('Int8ElementwiseLinear', 'DNNLOWP')]
        for (op_type, engine) in op_engine_list:
            net = core.Net('test_net')
            do_quantize = (('DNNLOWP' in engine) and in_quantized)
            do_dequantize = (('DNNLOWP' in engine) and out_quantized)
            if do_quantize:
                quantize = core.CreateOperator('Quantize', ['X'], ['X_q'], engine=engine, device_option=gc)
                net.Proto().op.extend([quantize])
            eltwise_linear = core.CreateOperator(op_type, [('X_q' if do_quantize else 'X'), 'a', 'b'], [('Y_q' if do_dequantize else 'Y')], dequantize_output=(not do_dequantize), engine=engine, device_option=gc)
            net.Proto().op.extend([eltwise_linear])
            if do_dequantize:
                dequantize = core.CreateOperator('Dequantize', ['Y_q'], ['Y'], engine=engine, device_option=gc)
                net.Proto().op.extend([dequantize])
            self.ws.create_blob('X').feed(X, device_option=gc)
            self.ws.create_blob('a').feed(a, device_option=gc)
            self.ws.create_blob('b').feed(b, device_option=gc)
            self.ws.run(net)
            outputs.append(Output(Y=self.ws.blobs['Y'].fetch(), op_type=op_type, engine=engine))
        check_quantized_results_close(outputs)
def get_inverse_square_root_decay(optimizer, num_warmup_steps=0, last_epoch=(- 1)):
    """Create a LambdaLR schedule: linear warmup for ``num_warmup_steps``
    steps, then inverse-square-root decay ``sqrt(num_warmup_steps / step)``.

    Bug fix: with the default ``num_warmup_steps=0`` the scheduler's first
    evaluation computed ``(0 / 0) ** 0.5`` and raised ZeroDivisionError; the
    denominator is now clamped to >= 1, which only changes the previously
    crashing step-0 case (all other steps are unaffected since the decay
    branch otherwise runs with current_step >= num_warmup_steps >= 1).
    """
    def lr_lambda(current_step: int):
        if (current_step < num_warmup_steps):
            # Linear warmup from 0 to the base learning rate.
            return (float(current_step) / float(max(1, num_warmup_steps)))
        # Inverse sqrt decay; max(1, ...) guards step 0 when warmup is 0.
        return ((num_warmup_steps / max(1, current_step)) ** 0.5)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
class _TestTorchSubModelRaisingException(torch.nn.Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.lin = torch.nn.Linear(in_features, out_features)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.lin(x)
if (int('1') == 1):
raise _DemoException('uh')
return x |
def gen(n=2, repeatParts=False, separator='-', lists=(LEFT, RIGHT)):
    """Generate a random name from the first ``n`` word lists.

    One entry is drawn per list; falsy draws are rejected, and duplicates are
    re-drawn unless ``repeatParts`` is True. Parts are joined by ``separator``.
    """
    parts = []
    for wordlist in lists[:n]:
        choice = None
        # Re-draw until we get a truthy, (optionally) unused part.
        while (not choice) or (choice in parts and not repeatParts):
            choice = random.choice(wordlist)
        parts.append(choice)
    return separator.join(parts)
def build_backbone(cfg):
    """Instantiate a backbone network from a config dict.

    ``cfg`` must contain a 'name' key naming the backbone class; all remaining
    keys are forwarded to that class's constructor. Raises NotImplementedError
    for unknown names.

    Fix: replaced the 30-branch if/elif chain — which contained a duplicated,
    unreachable 'EfficientNetLite' branch — with a dispatch table. All backbone
    classes are referenced exactly as the original chain referenced them (they
    are expected to be imported at module level).
    """
    backbone_cfg = deepcopy(cfg)
    name = backbone_cfg.pop('name')
    # Dispatch table: backbone name -> class.
    registry = {
        'VGG': VGG,
        'ResNet': ResNet,
        'ResNeXt': ResNeXt,
        'WideResNet': WideResNet,
        'SqueezeNet': SqueezeNet,
        'MobileNetV2': MobileNetV2,
        'MobileNetV3': MobileNetV3,
        'ShuffleNetV2': ShuffleNetV2,
        'EfficientNet': EfficientNet,
        'RegNet': RegNet,
        'ConvNeXt': ConvNeXt,
        'VisionTransformer': VisionTransformer,
        'STDCNet': STDCNet,
        'MSCAN': MSCAN,
        'LSPNetBackbone': LSPNetBackbone,
        'EfficientNetLite': EfficientNetLite,
        'TopFormerBackbone': TopFormerBackbone,
        'MixVisionTransformer': MixVisionTransformer,
        'IncepTransformer': IncepTransformer,
        'CustomCspNet': CustomCspNet,
        'CSPDarknet': CSPDarknet,
        'SGCPNetBackbone': SGCPNetBackbone,
        'LFDResNet': LFDResNet,
        'YOLOv5CSPDarknet': YOLOv5CSPDarknet,
        'YOLOXCSPDarknet': YOLOXCSPDarknet,
        'YOLOv6EfficientRep': YOLOv6EfficientRep,
        'YOLOv7CSPVoVNet': YOLOv7CSPVoVNet,
        'YOLOXPAIEfficientRep': YOLOXPAIEfficientRep,
        'RepVGG': RepVGG,
        'RegSegBackbone': RegSegBackbone,
    }
    backbone_cls = registry.get(name)
    if backbone_cls is None:
        raise NotImplementedError(name)
    return backbone_cls(**backbone_cfg)
class JsonLoader(Loader):
    """Loader for JSON-lines files with optional field selection/renaming.

    ``fields`` maps a source field name to an output name (``None`` keeps the
    original name); when given, only those fields are read. ``dropna`` is
    forwarded to the underlying JSON reader.
    """

    def __init__(self, fields=None, dropna=False):
        super(JsonLoader, self).__init__()
        self.dropna = dropna
        self.fields = None
        self.fields_list = None
        if fields:
            # Normalize: source name -> output name (None means "keep as-is").
            self.fields = {src: (src if dst is None else dst) for (src, dst) in fields.items()}
            self.fields_list = list(self.fields.keys())

    def _load(self, path):
        """Read ``path`` and return a DataSet of Instances."""
        dataset = DataSet()
        for _, record in _read_json(path, fields=self.fields_list, dropna=self.dropna):
            if self.fields:
                record = {self.fields[key]: value for (key, value) in record.items()}
            dataset.append(Instance(**record))
        return dataset
class TFDebertaV2PreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error unless
    the TensorFlow backend is installed."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises with install instructions when the 'tf' backend is missing.
        requires_backends(self, ['tf'])
def group_norm(x, num_groups, weight=None, bias=None, eps=1e-05):
    """Pure-PyTorch group normalization over ``num_groups`` channel groups.

    x: (N, C, *) tensor with C divisible by ``num_groups``. ``weight`` and
    ``bias`` are optional per-channel (C,) affine parameters.

    Bug fix: the affine parameters are now applied independently — previously
    a weight without a bias raised AttributeError (``bias.view`` on None) and
    a bias without a weight was silently ignored.

    NOTE(review): normalization uses Tensor.var's default (unbiased) variance;
    F.group_norm uses the biased estimator — confirm which is intended here.
    """
    input_shape = x.shape
    ndim = len(input_shape)
    (N, C) = input_shape[:2]
    G = num_groups
    assert ((C % G) == 0), 'input channel dimension must divisible by number of groups'
    # Normalize over each group's channels and spatial positions jointly.
    x = x.view(N, G, (- 1))
    mean = x.mean((- 1), keepdim=True)
    var = x.var((- 1), keepdim=True)
    x = ((x - mean) / (var + eps).sqrt())
    x = x.view(input_shape)
    # Broadcast shape (1, C, 1, ..., 1) for the per-channel affine parameters.
    view_shape = ((1, (- 1)) + ((1,) * (ndim - 2)))
    if (weight is not None):
        x = (x * weight.view(view_shape))
    if (bias is not None):
        x = (x + bias.view(view_shape))
    return x
from contextlib import contextmanager

@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Context manager: temporarily install a freshly configured logger, then
    close it and restore the previous ``Logger.CURRENT`` on exit.

    Fix: this function contains a bare ``yield`` and is plainly meant to be
    used as ``with scoped_configure(...):`` — the ``@contextmanager``
    decorator (apparently lost in extraction) is restored, together with the
    import it needs.
    """
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        # Close the temporary logger before reinstating the previous one.
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
class ConcatModel(torch.nn.Module):
    """Minimal module that concatenates its input with itself along dim 0."""

    def __init__(self):
        super(ConcatModel, self).__init__()

    def forward(self, x):
        duplicated = [x, x]
        return torch.concat(duplicated)
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BatchNorm and a (projected when
    needed) skip connection; ReLU after the addition."""
    expansion = 1

    def __init__(self, in_channels, channels, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        out_channels = self.expansion * channels
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # Project the shortcut when the spatial size or channel count changes.
        needs_projection = (stride != 1) or (in_channels != out_channels)
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.downsample = nn.Sequential()

    def forward(self, x):
        residual = self.downsample(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class ROIHeadsPatcher():
    """Patches detectron2 ROI heads with Caffe2-export-friendly mock inference
    implementations (box / keypoint / mask)."""
    def __init__(self, cfg, heads):
        self.heads = heads
        self.use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
    # NOTE(review): this method contains a bare `yield` and is plainly intended
    # as a context manager — an @contextlib.contextmanager decorator appears to
    # have been lost in extraction; confirm against upstream before use.
    def mock_roi_heads(self, tensor_mode=True):
        """While active, replaces box (always) and keypoint/mask (when enabled
        on the heads) inference with export mocks."""
        kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
        mask_head_mod = mask_head.BaseMaskRCNNHead.__module__
        mock_ctx_managers = [mock_fastrcnn_outputs_inference(tensor_mode=tensor_mode, check=True, box_predictor_type=type(self.heads.box_predictor))]
        if getattr(self.heads, 'keypoint_on', False):
            mock_ctx_managers += [mock_keypoint_rcnn_inference(tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint)]
        if getattr(self.heads, 'mask_on', False):
            mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]
        with contextlib.ExitStack() as stack:
            # Enter all mocks at once; ExitStack unwinds them afterwards.
            for mgr in mock_ctx_managers:
                stack.enter_context(mgr)
            (yield)
class CharCNN(nn.Module):
    """Character-level CNN classifier (Zhang et al. style): six 1-D conv
    stages, two dropout-regularized fully-connected layers, and a 4-way
    log-softmax output. Expects input of shape (N, args.num_features, 1014).
    """

    def __init__(self, args):
        super(CharCNN, self).__init__()

        def conv_stage(in_ch, kernel, pooled):
            # Conv1d + ReLU, optionally followed by a stride-3 max pool.
            layers = [nn.Conv1d(in_ch, 256, kernel_size=kernel, stride=1), nn.ReLU()]
            if pooled:
                layers.append(nn.MaxPool1d(kernel_size=3, stride=3))
            return nn.Sequential(*layers)

        self.conv1 = conv_stage(args.num_features, 7, True)
        self.conv2 = conv_stage(256, 7, True)
        self.conv3 = conv_stage(256, 3, False)
        self.conv4 = conv_stage(256, 3, False)
        self.conv5 = conv_stage(256, 3, False)
        self.conv6 = conv_stage(256, 3, True)
        # 8704 = 256 channels x 34 remaining positions for length-1014 input.
        self.fc1 = nn.Sequential(nn.Linear(8704, 1024), nn.ReLU(), nn.Dropout(p=args.dropout))
        self.fc2 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Dropout(p=args.dropout))
        self.fc3 = nn.Linear(1024, 4)
        self.log_softmax = nn.LogSoftmax()

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6):
            x = stage(x)
        x = x.view(x.size(0), -1)
        x = self.fc2(self.fc1(x))
        return self.log_softmax(self.fc3(x))
class ConfigHandler():
    """Prepares the run configuration: loads it, merges extra config files,
    strips empty entries, and hands unrecognised CLI args back to ``sys.argv``.
    """
    args: Namespace
    unknown: List[str]

    def __call__(self) -> Dict[str, Any]:
        """Return the fully prepared configuration dictionary."""
        cfg = self._obtain_config()
        cfg = merge(cfg, self.args.merge_files)
        # Re-expose only the unrecognised arguments to downstream parsers.
        sys.argv = [sys.argv[0]] + self.unknown
        self._edit_config(cfg)
        return cfg

    def _edit_config(self, config):
        """Remove (in place) every key whose value is None or the empty string."""
        doomed = [key for key, value in config.items() if value is None or value == '']
        for key in doomed:
            del config[key]
class ArrayWrapper():
    """Thin wrapper exposing an array through the ``__array_interface__``
    protocol so that ``numpy.asarray`` can consume it without copying logic.

    Extra keyword arguments are accepted (and ignored) for call-site
    compatibility.
    """

    def __init__(self, array, **kwargs):
        self.array = array

    # BUG FIX: numpy looks up ``__array_interface__`` as an *attribute* that
    # must hold the interface dict.  Defined as a plain method it evaluated to
    # a bound callable, so the protocol was never honoured.  Exposing it as a
    # property returns the underlying array's interface dict as required.
    @property
    def __array_interface__(self):
        return self.array.__array_interface__
def get_cfl_setup(CFL=None, dt=None):
    """Build a pre-process hook configuring time stepping from a CFL condition.

    CFL: Courant number used to derive the time step from mesh size,
        advection velocity and diffusion coefficient.
    dt: explicit time step; when given, CFL is ignored.
    Returns ``setup_cfl_condition(problem)``, which mutates ``problem.ts_conf``
    (sets ``dt``, ``n_step`` and ``cour``).
    Raises ValueError when neither CFL nor dt is supplied.
    """
    if ((CFL is None) and (dt is None)):
        raise ValueError('Specifiy either CFL or dt in CFL setup')
    def setup_cfl_condition(problem):
        """Pre-process hook: derive and store time-stepping parameters."""
        ts_conf = problem.ts_conf
        mesh = problem.domain.mesh
        dim = mesh.dim
        first_field = list(problem.fields.values())[0]
        first_field_name = list(problem.fields.keys())[0]
        approx_order = first_field.approx_order
        # Instantiated for its side effect of creating the materials; the
        # return value itself is unused -- TODO confirm.
        mats = problem.create_materials(['a', 'D'])
        try:
            # Maximum advection speed from material 'a', if defined.
            velo = problem.conf_materials['material_a__0'].values['val']
            max_velo = nm.max(nm.linalg.norm(velo))
        except KeyError:
            # No advection material: fall back to unit velocity.
            max_velo = 1
        try:
            # Maximum diffusion coefficient from material 'D', if defined.
            diffusion = problem.conf_materials['material_D__0'].values['val']
            max_diffusion = nm.max(nm.linalg.norm(diffusion))
        except KeyError:
            max_diffusion = None
        # Smallest cell volume serves as the characteristic mesh size dx.
        dx = nm.min(problem.domain.mesh.cmesh.get_volumes(dim))
        output('Preprocess hook - setup_cfl_condition:...')
        output('Approximation order of field {}({}) is {}'.format(first_field_name, first_field.family_name, approx_order))
        output('Space divided into {0} cells, {1} steps, step size {2}'.format(mesh.n_el, len(mesh.coors), dx))
        if (dt is None):
            # Derive dt from both the advection and diffusion CFL limits and
            # take the stricter (smaller) of the two.
            adv_dt = get_cfl_advection(max_velo, dx, approx_order, CFL)
            diff_dt = get_cfl_diffusion(max_diffusion, dx, approx_order, CFL)
            _dt = min(adv_dt, diff_dt)
        else:
            output('CFL coefficient {0} ignored, dt specified directly'.format(CFL))
            _dt = dt
        # Number of steps needed to cover [t0, t1] with step _dt.
        tn = int(nm.ceil(((ts_conf.t1 - ts_conf.t0) / _dt)))
        dtdx = (_dt / dx)
        ts_conf.dt = _dt
        ts_conf.n_step = tn
        # Courant number actually achieved with the chosen dt.
        ts_conf.cour = (max_velo * dtdx)
        output('Time divided into {0} nodes, {1} steps, step size is {2}'.format((tn - 1), tn, _dt))
        output('Courant number c = max(norm(a)) * dt/dx = {0}'.format(ts_conf.cour))
        output('Time stepping solver is {}'.format(ts_conf.kind))
        output('... CFL setup done.')
    return setup_cfl_condition
class RandomStrVar(TemplateVar):
    """Template variable whose value is a fresh random 10-character
    alphanumeric string on every access."""

    def __len__(self):
        # Always reports a single logical value.
        return 1

    def _randomize(self):
        """Return a new random 10-character [A-Za-z0-9] string."""
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(10))

    def __getitem__(self, index):
        # The index is irrelevant: every lookup yields a new random string.
        return self._randomize()

    def __iter__(self):
        # Infinite stream of independent random strings.
        while True:
            yield self._randomize()
class TorchMBBatchRLAlgorithm(MBBatchRLAlgorithm):
    """Model-based batch RL algorithm specialised for torch networks: adds
    device placement and train/eval mode switching across both trainers."""

    def _all_networks(self):
        # Networks of the policy trainer first, then the model trainer --
        # same order the original per-trainer loops used.
        for trainer in (self.trainer, self.model_trainer):
            for net in trainer.networks:
                yield net

    def to(self, device):
        """Move every trainer and model-trainer network to *device*."""
        for net in self._all_networks():
            net.to(device)

    def training_mode(self, mode):
        """Set train (True) / eval (False) mode on every network."""
        for net in self._all_networks():
            net.train(mode)
class AuxiliaryHead(nn.Module):
    """Auxiliary classifier head attached at an intermediate feature map.

    Accepts ``(batch, C, input_size, input_size)`` with ``input_size`` 7 or 8
    and returns logits of shape ``(batch, n_classes)``.
    """

    def __init__(self, input_size, C, n_classes):
        assert (input_size in [7, 8])
        super().__init__()
        feature_layers = [
            nn.ReLU(inplace=True),
            # Pool down to a 2x2 map whether the input is 7x7 or 8x8.
            nn.AvgPool2d(5, stride=(input_size - 5), padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, kernel_size=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            # 2x2 conv collapses the spatial dims to 1x1 with 768 channels.
            nn.Conv2d(128, 768, kernel_size=2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.net = nn.Sequential(*feature_layers)
        self.linear = nn.Linear(768, n_classes)

    def forward(self, x):
        """Return class logits for feature map ``x``."""
        features = self.net(x)
        flattened = features.view(features.size(0), (- 1))
        return self.linear(flattened)
class MotionSyntheticProtocol(Protocol):
    """Evaluation protocol over the Motion-Synthetic dataset.

    Loads the per-video evaluation spec from ``data/MS_protocol.json`` (the
    ``val`` split) and, per video, assembles source images plus self- and
    cross-imitation targets with optional SMPL parameters and keypoints.
    SMPLs and keypoints are lazily loaded and cached per video.
    """

    def __init__(self, data_dir='/home/piaozx/liuwen/p300/human_pose/processed'):
        """data_dir: dataset root; frames live under ``<data_dir>/processed``."""
        super().__init__()
        self.data_dir = data_dir
        self.processed_dir = os.path.join(data_dir, 'processed')
        self.train_ids_file = 'train.txt'
        self.test_ids_file = 'val.txt'
        self.eval_path = 'MS_protocol.json'
        # Protocol JSON is resolved relative to the package, three levels up
        # from this file, under 'data/'.
        full_eval_path = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'data', 'MS_protocol.json')
        self.eval_info = load_json_file(full_eval_path)['val']
        self.vid_names = list(self.eval_info.keys())
        # Per-video caches, filled lazily by get_smpls()/get_kps().
        self._all_vid_smpls = {}
        self._all_vid_offsets = {}
        self._all_vid_kps = {}
        # Defaults; overridden via setup().
        self._num_source = 1
        self._load_smpls = False
        self._load_kps = False

    def __len__(self):
        """Number of videos in the evaluation split."""
        return len(self.vid_names)

    def take_images_paths(self, vid_name, start, end):
        """Return sorted image paths of *vid_name* for frames [start, end] inclusive."""
        vid_path = os.path.join(self.processed_dir, vid_name, 'images')
        vid_images_paths = glob.glob(os.path.join(vid_path, '*'))
        vid_images_paths.sort()
        images_paths = vid_images_paths[start:(end + 1)]
        return images_paths

    def setup(self, num_sources=1, load_smpls=False, load_kps=False):
        """Configure how many source images and which annotations __getitem__ returns."""
        self._num_source = num_sources
        self._load_smpls = load_smpls
        self._load_kps = load_kps

    def __getitem__(self, item):
        """Build the evaluation dict for the item-th video.

        Returns a dict with 'source', 'self_imitation', 'cross_imitation'
        and 'flag' entries; smpls/kps slots are None unless enabled in setup().
        """
        num_sources = self._num_source
        load_smpls = self._load_smpls
        load_kps = self._load_kps
        vid_name = self.vid_names[item]
        vid_info = self.eval_info[vid_name]
        eval_info = dict()
        src_vid_smpls = self.get_smpls(vid_name)
        src_vid_kps = self.get_kps(vid_name)
        src_vid_path = os.path.join(self.processed_dir, vid_name, 'images')
        src_img_paths = glob.glob(os.path.join(src_vid_path, '*'))
        src_img_paths.sort()
        # Source frame names are listed in the protocol keyed by source count;
        # the frame index is the trailing integer in the file name.
        src_img_names = vid_info['s_n'][str(num_sources)]
        src_img_ids = [int(t.split('.')[0].split('_')[(- 1)]) for t in src_img_names]
        eval_info['source'] = {'s_n': num_sources, 'name': vid_name, 'formated_name': self.format_name(vid_name), 'vid_path': os.path.join(self.processed_dir, vid_name, 'images'), 'images': [src_img_paths[t] for t in src_img_ids], 'smpls': (src_vid_smpls[src_img_ids] if load_smpls else None), 'kps': (src_vid_kps[src_img_ids] if load_kps else None)}
        # Self-imitation: target frames come from the same video.
        self_imitation = vid_info['self_imitation']
        eval_info['self_imitation'] = {'name': self_imitation['target'], 'formated_name': self.format_name(self_imitation['target']), 'images': src_img_paths[self_imitation['range'][0]:(self_imitation['range'][1] + 1)], 'smpls': (src_vid_smpls[self_imitation['range'][0]:(self_imitation['range'][1] + 1)] if load_smpls else None), 'kps': (src_vid_kps[self_imitation['range'][0]:(self_imitation['range'][1] + 1)] if load_kps else None), 'self_imitation': True}
        # Cross-imitation: target frames come from a different video.
        cross_imitation = vid_info['cross_imitation']
        target_vid_name = cross_imitation['target']
        target_vid_smpls = self.get_smpls(target_vid_name)
        target_vid_kps = self.get_kps(target_vid_name)
        cross_images_paths = self.take_images_paths(vid_name=target_vid_name, start=cross_imitation['range'][0], end=cross_imitation['range'][1])
        eval_info['cross_imitation'] = {'name': target_vid_name, 'formated_name': self.format_name(target_vid_name), 'images': cross_images_paths, 'smpls': (target_vid_smpls[cross_imitation['range'][0]:(cross_imitation['range'][1] + 1)] if load_smpls else None), 'kps': (target_vid_kps[cross_imitation['range'][0]:(cross_imitation['range'][1] + 1)] if load_kps else None), 'self_imitation': False}
        # 'flag' frames are ground-truth references from the source video and
        # must match the cross-imitation range length exactly.
        eval_info['flag'] = self.take_images_paths(vid_name=vid_name, start=vid_info['flag'][0], end=vid_info['flag'][1])
        assert ((cross_imitation['range'][1] - cross_imitation['range'][0]) == (vid_info['flag'][1] - vid_info['flag'][0]))
        return eval_info

    def get_smpl_path(self, name):
        """Path of the pickled SMPL params (cams/pose/shape) for *name*."""
        smpl_path = os.path.join(self.processed_dir, name, 'pose_shape.pkl')
        return smpl_path

    def get_kps_path(self, name):
        """Path of the pickled keypoints for *name*."""
        smpl_path = os.path.join(self.processed_dir, name, 'kps.pkl')
        return smpl_path

    def get_smpls(self, name):
        """Return per-frame SMPL params (cams|pose|shape concatenated), cached.

        Returns None for names not in the evaluation split.
        """
        smpls = None
        if (name in self.eval_info):
            if (name not in self._all_vid_smpls):
                smpl_path = self.get_smpl_path(name)
                smpl_data = load_pickle_file(smpl_path)
                cams = smpl_data['cams']
                thetas = smpl_data['pose']
                # Single shape vector is broadcast across all frames.
                betas = np.repeat(smpl_data['shape'], cams.shape[0], axis=0)
                smpls = np.concatenate([cams, thetas, betas], axis=1)
                self._all_vid_smpls[name] = smpls
            else:
                smpls = self._all_vid_smpls[name]
        return smpls

    def get_kps(self, name):
        """Return cached keypoints for *name*, or None if not in the split."""
        kps = None
        if (name in self.eval_info):
            if (name not in self._all_vid_kps):
                kps_path = self.get_kps_path(name)
                kps = load_pickle_file(kps_path)
                self._all_vid_kps[name] = kps
            else:
                kps = self._all_vid_kps[name]
        return kps

    def total_frames(self):
        """Total number of entries under every video directory in the split.

        NOTE(review): counts all directory entries, not only image frames --
        confirm directories contain frames only.
        """
        total = 0
        for (vid_name, vid_info) in self.eval_info.items():
            src_vid = os.path.join(self.processed_dir, vid_name)
            length = len(os.listdir(src_vid))
            total += length
        return total
def GenerateSM80_Simt_f64(manifest, args):
    """Register SM80 (Ampere) SIMT double-precision GEMM kernels.

    Enumerates (A, B, C) layout combinations and threadblock tile shapes for
    the f64 multiply-add SIMT math instruction and adds the resulting GEMM
    operations to *manifest* via CreateGemmOperator.  *args* is unused here.
    """
    # All four row/column-major combinations for A and B; C is column-major.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # Scalar (1x1x1) f64 FMA executed on the SIMT cores (no tensor cores).
    math_instructions = [MathInstruction([1, 1, 1], DataType.f64, DataType.f64, DataType.f64, OpcodeClass.Simt, MathOperation.multiply_add)]
    # Target compute capability range: SM80 and newer.
    min_cc = 80
    max_cc = 1024
    alignment_constraints = [1]
    for math_inst in math_instructions:
        # Tile shape [M, N, K], pipeline stages, and warp arrangement per tile.
        tile_descriptions = [TileDescription([128, 128, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), TileDescription([32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc)]
        # Element types for A, B, accumulator, and epilogue output.
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
class DealCorpus(object):
    """Corpus reader for the Deal-or-No-Deal negotiation dataset.

    Reads train/val/test dialogue files, parses each raw line into turn
    sequences with goal and outcome annotations, and builds word, goal and
    outcome vocabularies from the training split.
    """

    def __init__(self, config):
        """config: object providing ``train_path``, ``val_path``, ``test_path``."""
        self.config = config
        self.train_corpus = self._read_file(self.config.train_path)
        self.val_corpus = self._read_file(self.config.val_path)
        self.test_corpus = self._read_file(self.config.test_path)
        self._extract_vocab()
        self._extract_goal_vocab()
        self._extract_outcome_vocab()
        print('Loading corpus finished.')

    def _read_file(self, path):
        """Read raw dialogue lines from *path* and parse them."""
        with open(path, 'r') as f:
            data = f.readlines()
        return self._process_dialogue(data)

    def _process_dialogue(self, data):
        """Parse raw lines into Pack(dlg=turns, goal=…, out=…) records.

        Each raw line contains <dialogue>…</dialogue>, <partner_input>…
        </partner_input> and <output>…</output> sections.  Turns are split on
        EOS tokens and attributed to USR/SYS by their leading speaker token.
        """
        def transform(token_list):
            # Split the flat token stream into alternating USR/SYS turns;
            # each turn runs up to and including its EOS token.
            (usr, sys) = ([], [])
            ptr = 0
            while (ptr < len(token_list)):
                turn_ptr = ptr
                turn_list = []
                while True:
                    cur_token = token_list[turn_ptr]
                    turn_list.append(cur_token)
                    turn_ptr += 1
                    if (cur_token == EOS):
                        ptr = turn_ptr
                        break
                all_sent_lens.append(len(turn_list))
                # First token of a turn names the speaker.
                if (turn_list[0] == USR):
                    usr.append(Pack(utt=turn_list, speaker=USR))
                elif (turn_list[0] == SYS):
                    sys.append(Pack(utt=turn_list, speaker=SYS))
                else:
                    raise ValueError('Invalid speaker')
            all_dlg_lens.append((len(usr) + len(sys)))
            return (usr, sys)
        new_dlg = []
        all_sent_lens = []
        all_dlg_lens = []
        for raw_dlg in data:
            raw_words = raw_dlg.split()
            cur_dlg = []
            words = raw_words[(raw_words.index('<dialogue>') + 1):raw_words.index('</dialogue>')]
            words += [EOS]
            # Prepend a dummy begin-of-dialogue turn for the opposite speaker
            # so the dialogue always alternates starting from a known side.
            usr_first = True
            if (words[0] == SYS):
                words = ([USR, BOD, EOS] + words)
                usr_first = True
            elif (words[0] == USR):
                words = ([SYS, BOD, EOS] + words)
                usr_first = False
            else:
                print('FATAL ERROR!!! ({})'.format(words))
                exit((- 1))
            (usr_utts, sys_utts) = transform(words)
            # Interleave turns in original speaking order.
            for (usr_turn, sys_turn) in zip(usr_utts, sys_utts):
                if usr_first:
                    cur_dlg.append(usr_turn)
                    cur_dlg.append(sys_turn)
                else:
                    cur_dlg.append(sys_turn)
                    cur_dlg.append(usr_turn)
            # One side may have a trailing unmatched final turn.
            if ((len(usr_utts) - len(sys_utts)) == 1):
                cur_dlg.append(usr_utts[(- 1)])
            elif ((len(sys_utts) - len(usr_utts)) == 1):
                cur_dlg.append(sys_utts[(- 1)])
            # Goals and outcomes are fixed-length (6-token) annotations.
            cur_goal = raw_words[(raw_words.index('<partner_input>') + 1):raw_words.index('</partner_input>')]
            if (len(cur_goal) != 6):
                print('FATAL ERROR!!! ({})'.format(cur_goal))
                exit((- 1))
            cur_out = raw_words[(raw_words.index('<output>') + 1):raw_words.index('</output>')]
            if (len(cur_out) != 6):
                print('FATAL ERROR!!! ({})'.format(cur_out))
                exit((- 1))
            new_dlg.append(Pack(dlg=cur_dlg, goal=cur_goal, out=cur_out))
        print(('Max utt len = %d, mean utt len = %.2f' % (np.max(all_sent_lens), float(np.mean(all_sent_lens)))))
        print(('Max dlg len = %d, mean dlg len = %.2f' % (np.max(all_dlg_lens), float(np.mean(all_dlg_lens)))))
        return new_dlg

    def _extract_vocab(self):
        """Build the word vocabulary (specials first) from the training split."""
        all_words = []
        for dlg in self.train_corpus:
            for turn in dlg.dlg:
                all_words.extend(turn.utt)
        vocab_count = Counter(all_words).most_common()
        raw_vocab_size = len(vocab_count)
        discard_wc = np.sum([c for (t, c) in vocab_count])
        print(((('vocab size of train set = %d,\n' % (raw_vocab_size,)) + ('cut off at word %s with frequency = %d,\n' % (vocab_count[(- 1)][0], vocab_count[(- 1)][1]))) + ('OOV rate = %.2f' % ((1 - (float(discard_wc) / len(all_words))),))))
        self.vocab = (SPECIAL_TOKENS_DEAL + [t for (t, cnt) in vocab_count if (t not in SPECIAL_TOKENS_DEAL)])
        self.vocab_dict = {t: idx for (idx, t) in enumerate(self.vocab)}
        self.unk_id = self.vocab_dict[UNK]
        global DECODING_MASKED_TOKENS
        from string import ascii_letters, digits
        letter_set = set(list((ascii_letters + digits)))
        vocab_list = [t for (t, cnt) in vocab_count]
        # Words with no alphanumeric characters (pure punctuation/symbols)
        # are collected as "masked" special words.
        masked_words = []
        for word in vocab_list:
            tmp_set = set(list(word))
            if (len((letter_set & tmp_set)) == 0):
                masked_words.append(word)
        print('Take care of {} special words (masked).'.format(len(masked_words)))

    def _extract_goal_vocab(self):
        """Build the goal-token vocabulary (UNK first) from the training split."""
        all_goal = []
        for dlg in self.train_corpus:
            all_goal.extend(dlg.goal)
        vocab_count = Counter(all_goal).most_common()
        raw_vocab_size = len(vocab_count)
        discard_wc = np.sum([c for (t, c) in vocab_count])
        print(((('goal vocab size of train set = %d, \n' % (raw_vocab_size,)) + ('cut off at word %s with frequency = %d, \n' % (vocab_count[(- 1)][0], vocab_count[(- 1)][1]))) + ('OOV rate = %.2f' % ((1 - (float(discard_wc) / len(all_goal))),))))
        self.goal_vocab = ([UNK] + [g for (g, cnt) in vocab_count])
        self.goal_vocab_dict = {t: idx for (idx, t) in enumerate(self.goal_vocab)}
        self.goal_unk_id = self.goal_vocab_dict[UNK]

    def _extract_outcome_vocab(self):
        """Build the outcome-token vocabulary (UNK first) from the training split."""
        all_outcome = []
        for dlg in self.train_corpus:
            all_outcome.extend(dlg.out)
        vocab_count = Counter(all_outcome).most_common()
        raw_vocab_size = len(vocab_count)
        discard_wc = np.sum([c for (t, c) in vocab_count])
        print(((('outcome vocab size of train set = %d, \n' % (raw_vocab_size,)) + ('cut off at word %s with frequency = %d, \n' % (vocab_count[(- 1)][0], vocab_count[(- 1)][1]))) + ('OOV rate = %.2f' % ((1 - (float(discard_wc) / len(all_outcome))),))))
        self.outcome_vocab = ([UNK] + [o for (o, cnt) in vocab_count])
        self.outcome_vocab_dict = {t: idx for (idx, t) in enumerate(self.outcome_vocab)}
        self.outcome_unk_id = self.outcome_vocab_dict[UNK]

    def get_corpus(self):
        """Return the three splits converted to id sequences."""
        id_train = self._to_id_corpus('Train', self.train_corpus)
        id_val = self._to_id_corpus('Valid', self.val_corpus)
        id_test = self._to_id_corpus('Test', self.test_corpus)
        return (id_train, id_val, id_test)

    def _to_id_corpus(self, name, data):
        """Map token-level records in *data* to vocabulary-id records."""
        results = []
        for dlg in data:
            # Skip empty dialogues.
            if (len(dlg.dlg) < 1):
                continue
            id_dlg = []
            for turn in dlg.dlg:
                id_turn = Pack(utt=self._sent2id(turn.utt), speaker=turn.speaker)
                id_dlg.append(id_turn)
            id_goal = self._goal2id(dlg.goal)
            id_out = self._outcome2id(dlg.out)
            results.append(Pack(dlg=id_dlg, goal=id_goal, out=id_out))
        return results

    def _sent2id(self, sent):
        """Map utterance tokens to word ids (unknowns -> unk_id)."""
        return [self.vocab_dict.get(t, self.unk_id) for t in sent]

    def _goal2id(self, goal):
        """Map goal tokens to goal-vocabulary ids."""
        return [self.goal_vocab_dict.get(g, self.goal_unk_id) for g in goal]

    def _outcome2id(self, outcome):
        """Map outcome tokens to outcome-vocabulary ids."""
        return [self.outcome_vocab_dict.get(o, self.outcome_unk_id) for o in outcome]

    def sent2id(self, sent):
        """Public wrapper around _sent2id."""
        return self._sent2id(sent)

    def goal2id(self, goal):
        """Public wrapper around _goal2id."""
        return self._goal2id(goal)

    def outcome2id(self, outcome):
        """Public wrapper around _outcome2id."""
        return self._outcome2id(outcome)

    def id2sent(self, id_list):
        """Map word ids back to tokens."""
        return [self.vocab[i] for i in id_list]

    def id2goal(self, id_list):
        """Map goal ids back to goal tokens."""
        return [self.goal_vocab[i] for i in id_list]

    def id2outcome(self, id_list):
        """Map outcome ids back to outcome tokens."""
        return [self.outcome_vocab[i] for i in id_list]
def test_model(test_set):
    """Run the pretrained cross-document event/entity coreference models on
    *test_set*, write the predicted clusters, and score with the CoNLL scorer.
    """
    run_device = torch.device('cuda:0' if args.use_cuda else 'cpu')
    # Load both pretrained cross-document models and move them to the device.
    event_model = load_check_point(config_dict['cd_event_model_path'])
    entity_model = load_check_point(config_dict['cd_entity_model_path'])
    event_model.to(run_device)
    entity_model.to(run_device)
    # Within-document entity clusters that seed the entity side.
    doc_to_entity_mentions = load_entity_wd_clusters(config_dict)
    # Returned scores are not used here; clusters are written to out_dir.
    test_models(test_set, event_model, entity_model, run_device, config_dict,
                write_clusters=True, out_dir=args.out_dir,
                doc_to_entity_mentions=doc_to_entity_mentions,
                analyze_scores=True)
    run_conll_scorer()
def quadratic_L_function__numerical(n, d, num_terms=1000):
    """Numerically approximate L(n, chi_d) by direct Dirichlet-series summation.

    The sum uses ``num_terms`` partial terms and is computed in the real field
    of *n* when *n* belongs to one, otherwise in the default RealField.
    Raises ValueError for n < 0, where the series does not converge.
    """
    if isinstance(n.parent(), sage.rings.abc.RealField):
        field = n.parent()
    else:
        from sage.rings.real_mpfr import RealField
        field = RealField()
    if (n < 0):
        raise ValueError('the Dirichlet series does not converge here')
    # The character chi_d is the Kronecker symbol of the fundamental
    # discriminant attached to d.
    disc = fundamental_discriminant(d)
    total = field.zero()
    for i in range(1, num_terms):
        total += field(kronecker_symbol(disc, i) / (field(i) ** n))
    return total
def mockenv_context(*remove, **update):
    """Temporarily apply additions/removals to ``os.environ``.

    Generator-based context: the environment is modified up to the ``yield``
    and fully restored afterwards, even if the body raises.

    remove: variable names to delete from the environment while active.
    update: name -> value pairs to set while active.
    """
    env = os.environ
    update = (update or {})
    remove = (remove or [])
    # Keys we will overwrite or delete that already exist -> restore on exit.
    stomped = ((set(update.keys()) | set(remove)) & set(env.keys()))
    update_after = {k: env[k] for k in stomped}
    # Keys introduced by `update` that did not exist before -> delete on exit.
    remove_after = frozenset((k for k in update if (k not in env)))
    try:
        env.update(update)
        # FIX: plain loops instead of list comprehensions used purely for
        # their side effects (the built lists were discarded).
        for k in remove:
            env.pop(k, None)
        (yield)
    finally:
        env.update(update_after)
        for k in remove_after:
            env.pop(k)
def gcd(*values):
    """Return the greatest common divisor of one or more positive integers.

    Raises:
        ValueError: if no values are given, or any value is not positive.
        TypeError: if any value is not an int.
    """
    # Check emptiness first so the intended error is reached explicitly
    # (previously this relied on any() over an empty tuple being falsy).
    if (not values):
        raise ValueError('must give at least 1 value.')
    if any(((not isinstance(v, int)) for v in values)):
        raise TypeError('value must be integers.')
    if any(((v <= 0) for v in values)):
        raise ValueError('arguments must be positive.')
    if (len(values) == 1):
        return values[0]
    if (len(values) > 2):
        # Fold pairwise: gcd(a, b, c) == gcd(gcd(a, b), c).
        return reduce(gcd, values)
    (a, b) = values
    # Euclid's algorithm.
    while b:
        (a, b) = (b, (a % b))
    return a
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.