code stringlengths 101 5.91M |
|---|
class HeckeTriangleGroup(FinitelyGeneratedMatrixGroup_generic, UniqueRepresentation):
    r"""
    Hecke triangle group ``(n, infinity, infinity)`` for ``n = 3, 4, ...``
    or ``n = infinity``, realized as the matrix group generated by
    ``S = [[0, -1], [1, 0]]`` and ``T = [[1, lam], [0, 1]]`` with
    ``lam = 2*cos(pi/n)`` (and ``lam = 2`` for ``n = infinity``).
    """

    Element = HeckeTriangleGroupElement

    def __classcall__(cls, n=3):
        r"""
        Normalize ``n`` before :class:`UniqueRepresentation` caching:
        ``n`` must be ``infinity`` or an integer ``>= 3``.
        """
        if n != infinity:
            n = ZZ(n)
            if n < 3:
                # NOTE(review): AttributeError (not ValueError) kept for
                # backward compatibility with callers that catch it.
                raise AttributeError('n has to be infinity or an Integer >= 3.')
        return super().__classcall__(cls, n)

    def __init__(self, n):
        r"""
        Initialize the Hecke triangle group for the (already normalized) ``n``.
        """
        self._n = n
        self.element_repr_method('default')
        if n in [3, infinity]:
            # lambda is an integer (1 resp. 2) in these two cases.
            self._base_ring = ZZ
            self._lam = ZZ.one() if n == 3 else ZZ(2)
        else:
            # lambda = 2*cos(pi/n) = zeta_{2n} + zeta_{2n}^(-1) generates a
            # totally real number field; work in its maximal order.
            lam_symbolic = coerce_AA(E(2 * n) + ~E(2 * n))
            K = NumberField(self.lam_minpoly(), 'lam', embedding=lam_symbolic)
            self._base_ring = K.maximal_order()
            self._lam = self._base_ring.gen(1)
        T = matrix(self._base_ring, [[1, self._lam], [0, 1]])
        S = matrix(self._base_ring, [[0, -1], [1, 0]])
        FinitelyGeneratedMatrixGroup_generic.__init__(self, ZZ(2), self._base_ring, [S, T])

    def _repr_(self):
        r"""Return the string representation of ``self``."""
        return 'Hecke triangle group for n = {}'.format(self._n)

    def _latex_(self):
        r"""Return the LaTeX representation of ``self``."""
        return '\\Gamma^{(%s)}' % latex(self._n)

    def element_repr_method(self, method=None):
        r"""
        Get (when ``method`` is None) or set the representation method used
        for elements; one of ``'default'``, ``'basic'``, ``'block'``, ``'conj'``.
        """
        if method is None:
            return self._element_repr_method
        elif method in ['default', 'basic', 'block', 'conj']:
            self._element_repr_method = method
        else:
            raise ValueError(f'the specified method {method} is not supported')

    def one(self):
        r"""Return the identity element of ``self``."""
        return self.I()

    def lam_minpoly(self):
        r"""Return the minimal polynomial of lambda = 2*cos(pi/n) over QQ."""
        n = self._n
        if n == infinity:
            lam_symbolic = QQ(2)
        else:
            lam_symbolic = E(2 * n) + ~E(2 * n)
        return lam_symbolic.minpoly()

    def base_ring(self):
        r"""Return the base ring of the matrix group."""
        return self._base_ring

    def base_field(self):
        r"""Return the base field (QQ, or the number field of the base ring)."""
        if self._n in [3, infinity]:
            return QQ
        else:
            return self._base_ring.number_field()

    def n(self):
        r"""Return the parameter ``n`` of ``self``."""
        return self._n

    def lam(self):
        r"""Return lambda = 2*cos(pi/n) as an element of the base ring."""
        return self._lam

    def rho(self):
        r"""
        Return the vertex rho = exp(pi*i/n) of the fundamental domain
        (``1`` in the case ``n = infinity``).
        """
        if self._n == infinity:
            return AA.one()
        else:
            rho = AlgebraicField()(E(2 * self._n))
            rho.simplify()
            return rho

    def alpha(self):
        r"""Return alpha = (1/2) * (1/2 - 1/n)."""
        return QQ((1, 2)) * (QQ((1, 2)) - ZZ.one() / self._n)

    def beta(self):
        r"""Return beta = (1/2) * (1/2 + 1/n)."""
        return QQ((1, 2)) * (QQ((1, 2)) + ZZ.one() / self._n)

    # NOTE(review): the original file contained bare ``_method`` statements
    # before the following generator methods -- mangled ``@cached_method``
    # decorators lost in extraction, which would raise NameError at class
    # creation.  They are removed here; only caching, not behavior, differs
    # from upstream.
    def I(self):
        r"""Return the identity as an element of ``self``."""
        return self(matrix(self._base_ring, [[1, 0], [0, 1]]), check=False)

    def T(self, m=1):
        r"""Return the translation element T^m = [[1, m*lam], [0, 1]]."""
        return self(matrix(self._base_ring, [[1, self._lam * m], [0, 1]]), check=False)

    def S(self):
        r"""Return the element S = [[0, -1], [1, 0]] (the first generator)."""
        return self.gen(0)

    def U(self):
        r"""Return U = T*S."""
        return self.T() * self.S()

    def V(self, j):
        r"""Return V(j) = U^(j-1) * T."""
        return (self.U() ** (j - 1)) * self.T()

    def dvalue(self):
        r"""
        Return the value ``d`` attached to the group: exact rational values
        in the arithmetic cases ``n in {3, 4, 6, infinity}``, otherwise a
        closed formula in the trigamma function ``psi1`` and ``sec``.
        """
        n = self._n
        if n == 3:
            return 1 / ZZ(2**6 * 3**3)
        if n == 4:
            return 1 / ZZ(2**8)
        if n == 6:
            return 1 / ZZ(2**2 * 3**3)
        if n == infinity:
            return 1 / ZZ(2**6)
        return exp(-ZZ(2) * psi1(ZZ.one()) + psi1(ZZ.one() - self.alpha())
                   + psi1(ZZ.one() - self.beta()) - pi * sec(pi / self._n))

    def is_arithmetic(self) -> bool:
        r"""Return whether ``self`` is arithmetic (n in {3, 4, 6, infinity})."""
        return self._n in [ZZ(3), ZZ(4), ZZ(6), infinity]

    def get_FD(self, z):
        r"""
        Return a tuple ``(A, w)`` where ``w`` lies in the fundamental domain
        of ``self`` and ``A`` is a group element with ``A.acton(w) == z``.
        """
        ID = self.I()
        T = self.T()
        S = self.S()
        TI = self.T(-1)
        A = ID
        w = z
        # Apply S and T^(+-1) until w lies in |Re(w)| <= lam/2, |w| >= 1.
        while abs(w) < ZZ.one() or abs(w.real()) > self.lam() / ZZ(2):
            if abs(w) < ZZ.one():
                w = self.S().acton(w)
                A = S * A
            while w.real() >= self.lam() / ZZ(2):
                w = TI.acton(w)
                A = TI * A
            while w.real() < -self.lam() / ZZ(2):
                w = T.acton(w)
                A = T * A
        # Normalize boundary points to a unique representative.
        if w.real() == self.lam() / ZZ(2):
            w = TI.acton(w)
            A = TI * A
        if abs(w) == ZZ.one() and w.real() > ZZ.zero():
            w = S.acton(w)
            A = S * A
        AI = A.inverse()
        return (AI, A.acton(z))

    def in_FD(self, z) -> bool:
        r"""Return whether ``z`` lies in the fundamental domain of ``self``."""
        return self.get_FD(z)[0] == self.I()

    def root_extension_field(self, D):
        r"""
        Return the quadratic extension of the base field by ``sqrt(D)``
        (the base field itself when ``D`` is a square in the base ring).
        """
        K = self.base_field()
        x = PolynomialRing(K, 'x').gen()
        D = self.base_ring()(D)
        if D.is_square():
            return K
        else:
            L = K.extension(x**2 - D, 'e')
            return L

    # NOTE(review): @cached_method in upstream; decorator lost in extraction.
    def root_extension_embedding(self, D, K=None):
        r"""
        Return a distinguished embedding of ``root_extension_field(D)`` into
        ``K`` (default: ``AA`` for positive ``D``, else ``QQbar``), selected
        by sorting the available embeddings with ``emb_key``.
        """
        D = self.base_ring()(D)
        F = self.root_extension_field(D)
        if K is None:
            K = AA if coerce_AA(D) > 0 else AlgebraicField()
        L = list(F.embeddings(K))

        def emb_key(emb):
            r"""Sort key over the images of the field generators, checked in
            reverse order; essentially-imaginary images rank by the sign of
            their imaginary part."""
            L = []
            gens_len = len(emb.im_gens())
            for k in range(gens_len):
                a = emb.im_gens()[k]
                try:
                    a.simplify()
                    a.exactify()
                except AttributeError:
                    # not every parent provides simplify/exactify
                    pass
                if abs(a.imag()) > abs(a.real()):
                    if a.imag() < 0:
                        a = -infinity
                    else:
                        a = ZZ(0)
                else:
                    a = a.real()
                L.append(a)
            L.reverse()
            return L

        if len(L) > 1:
            L.sort(key=emb_key)
        return L[-1]

    def _elliptic_conj_reps(self):
        r"""
        Add representatives of the elliptic conjugacy classes (powers of
        ``U`` and ``S``) to the representative caches.
        """
        if not hasattr(self, '_max_block_length'):
            self._conjugacy_representatives()
        elif ZZ(-4) in self._conj_prim:
            # already computed (S has discriminant -4)
            return
        D = self.U().discriminant()
        if D not in self._conj_prim:
            self._conj_prim[D] = []
        self._conj_prim[D].append(self.U())
        D = self.S().discriminant()
        if D not in self._conj_prim:
            self._conj_prim[D] = []
        self._conj_prim[D].append(self.S())
        # The remaining powers of U give non-primitive representatives.
        other_reps = [self.U() ** k
                      for k in range(-((self.n() - 1) / 2).floor(), self.n() // 2 + 1)
                      if k not in [0, 1]]
        for v in other_reps:
            D = v.discriminant()
            if D not in self._conj_nonprim:
                self._conj_nonprim[D] = []
            self._conj_nonprim[D].append(v)

    def _conjugacy_representatives(self, max_block_length=0, D=None):
        r"""
        Compute and cache conjugacy representatives up to ``max_block_length``
        (or up to the block-length bound determined by the discriminant ``D``).
        """
        from sage.combinat.partition import OrderedPartitions
        from sage.combinat.combinat import tuples
        if D is not None:
            max_block_length = max(AA.zero(), coerce_AA((D + 4) / self.lam() ** 2)).sqrt().floor()
        else:
            try:
                max_block_length = ZZ(max_block_length)
                if max_block_length < 0:
                    raise TypeError
            except TypeError:
                raise ValueError('max_block_length must be a non-negative integer!')
        if not hasattr(self, '_max_block_length'):
            # First call: initialize all caches and seed them with the
            # parabolic (discriminant 0) and elliptic representatives.
            self._max_block_length = ZZ.zero()
            self._conj_block = {}
            self._conj_nonprim = {}
            self._conj_prim = {}
            self._conj_prim[ZZ.zero()] = []
            self._conj_prim[ZZ.zero()].append(self.V(self.n() - 1))
            self._elliptic_conj_reps()
        if max_block_length <= self._max_block_length:
            return

        def is_cycle(seq):
            r"""Return whether ``seq`` is a repetition of a proper sub-cycle."""
            length = len(seq)
            for n in divisors(length):
                if n < length and is_cycle_of_length(seq, n):
                    return True
            return False

        def is_cycle_of_length(seq, n):
            r"""Return whether ``seq`` repeats with period ``n``."""
            for j in range(n, len(seq)):
                if seq[j] != seq[j % n]:
                    return False
            return True

        j_list = range(1, self.n())
        for t in range(self._max_block_length + 1, max_block_length + 1):
            t_ZZ = ZZ(t)
            if t_ZZ not in self._conj_block:
                self._conj_block[t_ZZ] = set()
            partitions = OrderedPartitions(t).list()
            for par in partitions:
                len_par = len(par)
                exp_list = tuples(j_list, len_par)
                for ex in exp_list:
                    # Discard exponent sequences with (cyclically) equal
                    # neighbors, and the trivial one-element sequences.
                    keep = True
                    if len_par > 1:
                        for k in range(-1, len_par - 1):
                            if ex[k] == ex[k + 1]:
                                keep = False
                                break
                    elif ex[0] == 1:
                        keep = False
                    elif ex[0] == self.n() - 1:
                        keep = False
                    if keep:
                        conj_type = cyclic_representative(tuple((ZZ(ex[k]), ZZ(par[k])) for k in range(len_par)))
                        self._conj_block[t_ZZ].add(conj_type)
            for el in self._conj_block[t_ZZ]:
                group_el = prod([self.V(el[k][0]) ** el[k][1] for k in range(len(el))])
                D = group_el.discriminant()
                assert coerce_AA(D) > 0
                # Powers and repeated cycles are non-primitive.
                if not ((len(el) == 1 and el[0][1] > 1) or is_cycle(el)):
                    if D not in self._conj_prim:
                        self._conj_prim[D] = []
                    self._conj_prim[D].append(group_el)
                else:
                    if D not in self._conj_nonprim:
                        self._conj_nonprim[D] = []
                    self._conj_nonprim[D].append(group_el)
        self._max_block_length = max_block_length

    def class_representatives(self, D, primitive=True):
        r"""
        Return the cached conjugacy-class representatives of discriminant
        ``D`` (primitive ones only, unless ``primitive=False``).
        """
        if coerce_AA(D) == 0 and not primitive:
            raise ValueError('There are infinitely many non-primitive conjugacy classes of discriminant 0.')
        self._conjugacy_representatives(D=D)
        L = []
        if D in self._conj_prim:
            L += self._conj_prim[D]
        if not primitive and D in self._conj_nonprim:
            L += self._conj_nonprim[D]
        if not L:
            raise ValueError('D = {} is not a{} discriminant for {}'.format(D, ' primitive' if primitive else '', self))
        else:
            return L

    def class_number(self, D, primitive=True):
        r"""
        Return the number of conjugacy classes of (hyperbolic) discriminant
        ``D > 0``.
        """
        if coerce_AA(D) <= 0:
            raise NotImplementedError
        self._conjugacy_representatives(D=D)
        num = ZZ(0)
        if D in self._conj_prim:
            num = len(self._conj_prim[D])
        if not primitive and D in self._conj_nonprim:
            num += len(self._conj_nonprim[D])
        if num == 0:
            raise ValueError('D = {} is not a{} discriminant for {}'.format(D, ' primitive' if primitive else '', self))
        else:
            return num

    def is_discriminant(self, D, primitive=True) -> bool:
        r"""Return whether ``D`` occurs as a (primitive) discriminant."""
        self._conjugacy_representatives(0)
        # Block lengths up to this bound suffice to decide D.
        t_bound = max(AA.zero(), coerce_AA((D + 4) / self.lam() ** 2)).sqrt().floor()
        for t in range(self._max_block_length + 1, t_bound + 1):
            self._conjugacy_representatives(t)
            if D in self._conj_prim:
                return True
            if not primitive and D in self._conj_nonprim:
                return True
        if D in self._conj_prim:
            return True
        elif not primitive and D in self._conj_nonprim:
            return True
        else:
            return False

    def list_discriminants(self, D, primitive=True, hyperbolic=True, incomplete=False):
        r"""
        Return the sorted list of cached discriminants up to ``D``
        (all cached ones when ``incomplete=True``).
        """
        self._conjugacy_representatives(D=D)
        if incomplete:
            max_D = infinity
        else:
            max_D = coerce_AA(D)
        if hyperbolic:
            L = [key for key in self._conj_prim if 0 < coerce_AA(key) <= max_D]
        else:
            L = [key for key in self._conj_prim if coerce_AA(key) <= max_D]
        if not primitive:
            if hyperbolic:
                L += [key for key in self._conj_nonprim if 0 < coerce_AA(key) <= max_D and key not in L]
            else:
                L += [key for key in self._conj_nonprim if coerce_AA(key) <= max_D and key not in L]
        return sorted(L, key=coerce_AA)

    def reduced_elements(self, D):
        r"""Return all reduced elements of discriminant ``D``."""
        L = self.class_representatives(D=D, primitive=True)
        R = []
        for v in L:
            R += v.reduced_elements()
        return R

    def simple_elements(self, D):
        r"""Return all simple elements of discriminant ``D``."""
        L = self.class_representatives(D=D, primitive=True)
        R = []
        for v in L:
            R += v.simple_elements()
        return R

    def rational_period_functions(self, k, D):
        r"""
        Return a list of rational period functions of (even integer) weight
        ``k`` for discriminant ``D``.
        """
        try:
            k = ZZ(k)
            if not ZZ(2).divides(k):
                raise TypeError
        except TypeError:
            raise ValueError('k={} has to be an even integer!'.format(k))
        z = PolynomialRing(self.base_ring(), 'z').gen()
        R = []
        # The two "trivial" period functions (for suitable k).
        if k != 0:
            R.append(ZZ(1) - z ** (-k))
        if k == 2:
            R.append(z ** (-1))
        L = self.class_representatives(D=D, primitive=True)
        for v in L:
            rat = v.rational_period_function(k)
            if rat != 0:
                R.append(rat)
        return R
# NOTE(review): the original line here was the mangled residue
# ``_function_dispatch(_just_dispatcher)``; restored as the decorator form
# used by numpy's __array_function__ protocol -- confirm against upstream.
@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
    """
    Return an array with the elements of ``a`` left-justified in strings
    of length ``width``, padded with ``fillchar``.
    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # Py3 fix: the original used the Python 2 builtin ``long`` here,
    # which raises NameError on Python 3.
    size = int(numpy.max(width_arr.flat))
    if numpy.issubdtype(a_arr.dtype, numpy.string_):
        # Byte-string input requires a byte fill character.
        fillchar = asbytes(fillchar)
    return _vec_string(a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
def _prepare_out_argument(out, dtype, expected_shape):
if (out is None):
return np.empty(expected_shape, dtype=dtype)
if (out.shape != expected_shape):
raise ValueError('Output array has incorrect shape.')
if (not out.flags.c_contiguous):
raise ValueError('Output array must be C-contiguous.')
if (out.dtype != np.float64):
raise ValueError('Output array must be double type.')
return out |
class HumanOthelloPlayer():
    """Interactive Othello player that reads moves from stdin."""

    def __init__(self, game):
        # game: provides getValidMoves(board, player) and the board size n.
        self.game = game

    def play(self, board):
        """Prompt the user for a legal move on ``board`` and return its flat index.

        Legal moves are printed as ``[row col]`` pairs; the user enters
        ``row col``.  Entering ``n 0`` (n = board size) selects the index
        ``n**2`` (presumably the "pass" action -- verify against the game).
        Loops until a move marked valid is entered.
        """
        valid = self.game.getValidMoves(board, 1)
        # Show each legal move as its (row, col) decomposition of the flat index.
        for i in range(len(valid)):
            if valid[i]:
                print('[', int((i / self.game.n)), int((i % self.game.n)), end='] ')
        while True:
            input_move = input()
            input_a = input_move.split(' ')
            if (len(input_a) == 2):
                try:
                    (x, y) = [int(i) for i in input_a]
                    # Accept in-range coordinates, or (n, 0) for the extra action.
                    if (((0 <= x) and (x < self.game.n) and (0 <= y) and (y < self.game.n)) or ((x == self.game.n) and (y == 0))):
                        a = (((self.game.n * x) + y) if (x != (- 1)) else (self.game.n ** 2))
                        if valid[a]:
                            break
                except ValueError:
                    # Non-integer input: report and re-prompt.
                    print('Invalid move')
        return a
def get_distmult_kg_state_dict(state_dict):
    """Extract the DistMult knowledge-graph parameters from a checkpoint dict.

    Looks up the two embedding weight tensors under ``state_dict['state_dict']``
    and re-keys them by stripping the leading ``kg.`` prefix so the result can
    be loaded into the KG submodule directly.
    """
    wanted = ('kg.entity_embeddings.weight', 'kg.relation_embeddings.weight')
    params = state_dict['state_dict']
    return {name.split('.', 1)[1]: params[name] for name in wanted}
class spmatrix():
    """Compatibility layer for the legacy ``spmatrix`` API.

    Maps the matrix-style interface (per-format container accessors, ``*``
    as matrix multiplication, ``get*`` helpers) onto underscore-prefixed
    implementations provided by the concrete sparse classes.
    """

    # Each _*_container method returns the matrix class for one sparse
    # format; the imports are local -- presumably to avoid circular imports
    # between the format modules.
    def _bsr_container(self):
        from ._bsr import bsr_matrix
        return bsr_matrix

    def _coo_container(self):
        from ._coo import coo_matrix
        return coo_matrix

    def _csc_container(self):
        from ._csc import csc_matrix
        return csc_matrix

    def _csr_container(self):
        from ._csr import csr_matrix
        return csr_matrix

    def _dia_container(self):
        from ._dia import dia_matrix
        return dia_matrix

    def _dok_container(self):
        from ._dok import dok_matrix
        return dok_matrix

    def _lil_container(self):
        from ._lil import lil_matrix
        return lil_matrix

    # For spmatrix, ``*`` delegates to the multiplication dispatchers
    # (matrix-product semantics), and ``**`` is the matrix power.
    def __mul__(self, other):
        return self._mul_dispatch(other)

    def __rmul__(self, other):
        return self._rmul_dispatch(other)

    def __pow__(self, power):
        from .linalg import matrix_power
        return matrix_power(self, power)

    def set_shape(self, shape):
        """Reshape in place by rebinding this instance's state to a reshaped copy."""
        new_self = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_self.__dict__

    def get_shape(self):
        """Return the stored shape tuple."""
        return self._shape

    shape = property(fget=get_shape, fset=set_shape, doc='Shape of the matrix')

    # The remaining methods are thin delegations to the underscore-prefixed
    # implementations (or simple compositions of them).
    def asfptype(self):
        return self._asfptype()

    def getmaxprint(self):
        return self._getmaxprint()

    def getformat(self):
        return self.format

    def getnnz(self, axis=None):
        return self._getnnz(axis=axis)

    def getH(self):
        """Return the conjugate transpose."""
        return self.conjugate().transpose()

    def getcol(self, j):
        return self._getcol(j)

    def getrow(self, i):
        return self._getrow(i)
def _train(config):
    """Train the multi-GPU model described by ``config``.

    Reads train/dev data (filtered by the SQuAD data filter), builds the
    word-embedding matrix, then runs the training loop with periodic
    summary logging, checkpointing and (optional) evaluation.
    """
    data_filter = get_squad_data_filter(config)
    train_data = read_data(config, 'train', config.load, data_filter=data_filter)
    dev_data = read_data(config, 'dev', True, data_filter=data_filter)
    update_config(config, [train_data, dev_data])
    _config_debug(config)
    # Use lower-cased vectors when config.lower_word is set.
    word2vec_dict = (train_data.shared['lower_word2vec'] if config.lower_word else train_data.shared['word2vec'])
    word2idx_dict = train_data.shared['word2idx']
    idx2vec_dict = {word2idx_dict[word]: vec for (word, vec) in word2vec_dict.items() if (word in word2idx_dict)}
    # Vocabulary entries without a pretrained vector get a random normal one.
    emb_mat = np.array([(idx2vec_dict[idx] if (idx in idx2vec_dict) else np.random.multivariate_normal(np.zeros(config.word_emb_size), np.eye(config.word_emb_size))) for idx in range(config.word_vocab_size)])
    config.emb_mat = emb_mat
    pprint(config.__flags, indent=2)
    models = get_multi_gpu_models(config)
    model = models[0]
    trainer = MultiGPUTrainer(config, models)
    evaluator = MultiGPUF1Evaluator(config, models, tensor_dict=(model.tensor_dict if config.vis else None))
    graph_handler = GraphHandler(config, model)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    graph_handler.initialize(sess)
    # Total steps: explicit config value, or steps-per-epoch * epochs.
    num_steps = (config.num_steps or (int(math.ceil((train_data.num_examples / (config.batch_size * config.num_gpus)))) * config.num_epochs))
    global_step = 0
    for batches in tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, shuffle=True, cluster=config.cluster), total=num_steps):
        global_step = (sess.run(model.global_step) + 1)
        get_summary = ((global_step % config.log_period) == 0)
        (loss, summary, train_op) = trainer.step(sess, batches, get_summary=get_summary)
        if get_summary:
            graph_handler.add_summary(summary, global_step)
        # Periodic checkpoint.
        if ((global_step % config.save_period) == 0):
            graph_handler.save(sess, global_step=global_step)
        if (not config.eval):
            continue
        # Periodic evaluation on train and dev batches.
        if ((global_step % config.eval_period) == 0):
            # NOTE(review): this rebinds the loop's num_steps to the eval
            # batch count, shortening the tqdm total above.
            num_steps = math.ceil((dev_data.num_examples / (config.batch_size * config.num_gpus)))
            if (0 < config.val_num_batches < num_steps):
                num_steps = config.val_num_batches
            e_train = evaluator.get_evaluation_from_batches(sess, tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps))
            graph_handler.add_summaries(e_train.summaries, global_step)
            e_dev = evaluator.get_evaluation_from_batches(sess, tqdm(dev_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps))
            graph_handler.add_summaries(e_dev.summaries, global_step)
            if config.dump_eval:
                graph_handler.dump_eval(e_dev)
            if config.dump_answer:
                graph_handler.dump_answer(e_dev)
    # Final save if the last step was not already checkpointed.
    if ((global_step % config.save_period) != 0):
        graph_handler.save(sess, global_step=global_step)
def clean_summary(source):
    """Clean scraped book summaries for one ``source`` site.

    Reads per-section JSON files from ``../cleaning_phase/<source>/<book>/``,
    strips boilerplate prefixes/noise, splits mixed summary+analysis text,
    and writes cleaned JSON to ``../finished_summaries/<source>/<book>/``.
    """
    print('Cleaning source: ', source)
    source_summary_dir_base = '../cleaning_phase/'
    dest_dir_base = '../finished_summaries/'
    spacy_nlp = spacy.load('en_core_web_lg')
    source_summary_dir = os.path.join(source_summary_dir_base, source)
    dest_dir = os.path.join(dest_dir_base, source)
    if (not os.path.exists(dest_dir)):
        os.makedirs(dest_dir)

    def remove_initial_prefixes(line, full_summary_content=False):
        """Strip leading boilerplate ("SUMMARY", site noise, heading labels)
        and normalize paragraph/line-break markers in ``line``."""
        line = line.strip()
        # Drop any leading text before an all-caps "SUMMARY" marker.
        pat_story_summary = '(.*?)(SUMMARY.*$)'
        if re.match(pat_story_summary, line):
            to_replace = re.match(pat_story_summary, line).group(1)
            line = line.replace(to_replace, '', 1).strip()
        line = line.replace('Scene Summaries With Notes', '').strip()
        # Site-specific (pinkmonkey) preamble before the first heading.
        pat_pinkmonkey_noise = '^(History is littered with.*?)(Chapter|Scene|Summary)'
        if re.match(pat_pinkmonkey_noise, line):
            to_replace = re.match(pat_pinkmonkey_noise, line).group(1)
            line = line.replace(to_replace, '').strip()
        # Leading "summary"/"analysis" heading, only removed if short enough
        # to be a heading rather than real content.
        pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]{0,}'
        if re.search(pat, line, re.IGNORECASE):
            to_replace = re.match(pat, line, re.IGNORECASE).group(0)
            if (len(to_replace) < 150):
                line = line.replace(to_replace, '', 1).strip()
        if (not full_summary_content):
            line = line.replace('<PARAGRAPH>', ' ').strip()
            line = line.replace('PARAGRAPH>', ' ').strip()
        # Collapse "period + line break" runs into a sentence boundary.
        pat_period_line_break = '.*(\\.(\\n\\t)+[ ]?(\\n\\t)*).*'
        if re.match(pat_period_line_break, line):
            to_replace = re.match(pat_period_line_break, line).group(1)
            line = line.replace(to_replace, '. ')
        # Remaining line-break runs become explicit ' . ' separators.
        pat_line_break = '.*((\\n\\t)+[ ]?(\\n\\t)*).*'
        if re.match(pat_line_break, line):
            to_replace = re.match(pat_line_break, line).group(1)
            line = line.replace(to_replace, ' . ')
        return line.strip()

    def unify_text(text):
        """Return a lowercase, punctuation/space-free, ASCII-folded key for
        fuzzy text comparison."""
        text_lower = text.lower().strip()
        text_unified = unidecode(text_lower.translate(str.maketrans('', '', string.punctuation)).replace(' ', '')).strip()
        return text_unified

    def remove_prefixes(line, summary_name, is_analysis_text=False):
        """Strip chapter/act/scene-style heading prefixes (numeric, roman or
        spelled-out) and the section's own name from the start of ``line``."""
        line = line.strip()
        if (summary_name != ''):
            # Remove the section title itself when it prefixes the text.
            pat_summary_name = ('^([ ,:]{0,}%s[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)' % summary_name)
            if re.search(pat_summary_name, line, re.IGNORECASE):
                to_replace = re.match(pat_summary_name, line, re.IGNORECASE).group(1)
                line = line.replace(to_replace, '', 1).strip()
        # Heading patterns: "part X, chapter Y", "act X scene Y", ranges,
        # single chapters, and number words; the (?-i:...) tail requires the
        # real content to start with a capital or quote.
        pat_part_chapter = '^(part [ivxl|0-9]{1,}[ ,]{1,}chapter [ivxl|0-9]{1,}[ :])(.*$)'
        pat_act_scene = '^(act ([ivxl|0-9]{1,})[ ,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,})[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)'
        pat2 = '^([,]{0,}[ ]{0,}(chapters|chapter|act|scene) ([ivxl|0-9]{1,}[ ]{0,}[,-]{1,}[ ]{0,}[ivxl|0-9]{1,})[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)'
        of_pat2 = '^(of (chapters|chapter|act|scene) ([ivxl|0-9]{1,}[ ]{0,}[,-]{0,}[ ]{0,}[ivxl|0-9]{0,})[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)'
        pat3 = '^((chapters|chapter|act|scene) ([ivxl|0-9]{1,})[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)'
        pat_nl = '^((chapters|chapter|act|scene) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$)'
        of_pat_nl = '^((of (chapters|chapter|act|scene) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?[ :,-.]{1,})(?-i:[A-Z|"|\']+.*$))'
        # Apply the patterns in a fixed order; some are retried because one
        # removal can expose another heading prefix.
        if re.search(pat_part_chapter, line, re.IGNORECASE):
            to_replace = re.match(pat_part_chapter, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat_act_scene, line, re.IGNORECASE):
            to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat_nl, line, re.IGNORECASE):
            to_replace = re.match(pat_nl, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(of_pat_nl, line, re.IGNORECASE):
            to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(2)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(of_pat2, line, re.IGNORECASE):
            to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat2, line, re.IGNORECASE):
            to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat3, line, re.IGNORECASE):
            to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat_part_chapter, line, re.IGNORECASE):
            to_replace = re.match(pat_part_chapter, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat3, line, re.IGNORECASE):
            to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        if re.search(pat3, line, re.IGNORECASE):
            to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
            line = line.replace(to_replace, '', 1).strip()
        return line.strip()

    def check_download_links(line):
        """Return '' for lines that look like site link/download boilerplate."""
        if (('Click on' in line) or ('Click over' in line) or ('Click that' in line) or ('Click to' in line) or ('Check out' in line) or ('download' in line)):
            return ''
        return line

    def remove_misc_content(line):
        """Remove miscellaneous site noise: quotation callouts, IFRAME
        warnings, "Read a translation" links and mojibake."""
        line = line.replace('See Important Quotations Explained', '').strip()
        line = line.replace('Your browser does not support the IFRAME tag.', '').strip()
        pat_translation = '^(.*)(Read a translation of.*?(scene|scenes|chapter|chapters|act) [ivxl|0-9]{1,}(.*?)-[ ]{0,})(.*$)'
        if re.match(pat_translation, line, re.IGNORECASE):
            while re.match(pat_translation, line, re.IGNORECASE):
                to_replace = re.match(pat_translation, line, re.IGNORECASE).group(2)
                line = line.replace(to_replace, ' ').strip()
        line = line.replace('Read a translation of', ' ').strip()
        # 'aEUR"' is a mojibake artifact (mis-decoded dash -- presumably).
        if ('aEUR"' in line):
            line = line.replace('aEUR"', '')
        line = check_download_links(line)
        return line.strip()

    book_count = 0
    for book_name in os.listdir(source_summary_dir):
        book_count += 1
        book_name_dir = os.path.join(source_summary_dir, book_name)
        book_dir = os.path.join(dest_dir, book_name)
        print('book_name_dir: ', book_name_dir)
        if (not os.path.exists(book_dir)):
            os.makedirs(book_dir)
        for section in os.listdir(book_name_dir):
            summary_path = os.path.join(book_name_dir, section)
            fp = open(summary_path, 'r')
            try:
                # Each section file holds one JSON object on its first line.
                summary_json = json.loads(fp.readlines()[0])
            except:
                print(book_name_dir, '=Error reading json==', section)
                continue
            new_json_dict = {}
            new_json_dict['name'] = unidecode(summary_json['name'])
            if ('url' in summary_json):
                new_json_dict['url'] = unidecode(summary_json['url'])
            summary_list = []
            analysis_list = []
            analysis_already_present = 0
            # If the source already separates out an analysis field, clean it
            # paragraph by paragraph.
            if (('analysis' in summary_json) and (summary_json['analysis'] is not None) and (summary_json['analysis'].strip() != '')):
                analysis_already_present = 1
                for paragraph in list(filter(None, re.split('<PARAGRAPH>|PARAGRAPH>', summary_json['analysis']))):
                    cleaned_paragraph = remove_initial_prefixes(unidecode(paragraph.replace('\t', ' ').replace('\n', ' ')).strip())
                    if (cleaned_paragraph != ''):
                        analysis_list.append(cleaned_paragraph)
            analysis_start = 0
            start_adding_lines = 0
            # Hand-patched special cases for known problem sections.
            if ((book_name == 'Pygmalion') and (section == 'section_0_part_0.txt')):
                summary_json['summary'] = summary_json['summary'].replace('Summary and Commentary on Preface', '')
            if ((source == 'pinkmonkey') and (book_name == 'Main Street') and (section == 'section_27_part_0.txt')):
                summary_json['summary'] = summary_json['summary'].replace('Carol Notes that', 'Carol notes that')
            if ((source == 'pinkmonkey') and (book_name == 'Middlemarch') and (section == 'section_1_part_0.txt')):
                summary_json['summary'] = summary_json['summary'].replace('promptly Notes', 'promptly notes')
            if ((source == 'pinkmonkey') and (book_name == 'Middlemarch') and (section == 'section_27_part_0.txt')):
                summary_json['summary'] = summary_json['summary'].replace('but Notes Casaubon\\s', 'but notes Casaubon\\s')
            if ((source == 'pinkmonkey') and (book_name == 'Middlemarch') and (section == 'section_28_part_0.txt')):
                summary_json['summary'] = summary_json['summary'].replace('copies out his Notes', 'copies out his notes')
            summary_content = remove_initial_prefixes(summary_json['summary'], full_summary_content=True)
            for paragraph in list(filter(None, re.split('<PARAGRAPH>|PARAGRAPH>', summary_content))):
                if ((source == 'shmoop') and (('Character Analysis' in paragraph) or ('Character Clues' in paragraph))):
                    continue
                if (('Analysis' in paragraph) or ('Commentary' in paragraph) or ('Notes' in paragraph) or ('Interpretation' in paragraph)):
                    # Mixed paragraph: split into sentences and route each one
                    # to summary or analysis depending on where the
                    # "Analysis"-style separator appears.
                    sub_lines = [sent.text for sent in list(spacy_nlp(paragraph).sents)]
                    sub_lines = list(filter(None, sub_lines))
                    summary_sub_lines_to_include = []
                    analysis_sub_lines_to_include = []
                    pat_analysis_separators = '^((.*?)(Analysis|Commentary|Notes|Interpretation))'
                    for sub_line in sub_lines:
                        sub_line = sub_line.strip()
                        if (sub_line == ''):
                            continue
                        if re.match(pat_analysis_separators, sub_line):
                            analysis_start = 1
                        if analysis_start:
                            # A lone closing quote is glued to the previous line.
                            if ((sub_line == '"') and (analysis_sub_lines_to_include != [])):
                                analysis_sub_lines_to_include[(- 1)] = (analysis_sub_lines_to_include[(- 1)] + sub_line)
                            else:
                                analysis_sub_lines_to_include.append(sub_line)
                        elif ((sub_line == '"') and (summary_sub_lines_to_include != [])):
                            summary_sub_lines_to_include[(- 1)] = (summary_sub_lines_to_include[(- 1)] + sub_line)
                        else:
                            summary_sub_lines_to_include.append(sub_line)
                    cleaned_summ_line = remove_misc_content(remove_initial_prefixes(unidecode(' '.join(summary_sub_lines_to_include)).replace('\t', ' ').replace('\n', ' ').strip()))
                    if (cleaned_summ_line != ''):
                        summary_list.append(cleaned_summ_line)
                    cleaned_analysis_line = remove_misc_content(remove_initial_prefixes(unidecode(' '.join(analysis_sub_lines_to_include)).replace('\t', ' ').replace('\n', ' ').strip()))
                    if (cleaned_analysis_line != ''):
                        analysis_list.append(cleaned_analysis_line)
                if (not analysis_start):
                    # Still before any analysis marker: whole paragraph is summary.
                    cleaned_paragraph = remove_misc_content(remove_initial_prefixes(unidecode(paragraph.replace('\t', ' ').replace('\n', ' ')).strip()))
                    if (cleaned_paragraph != ''):
                        summary_list.append(' '.join(cleaned_paragraph.split()))
                if (analysis_start and start_adding_lines and (not analysis_already_present)):
                    # Past the analysis marker: subsequent paragraphs are analysis
                    # (unless the source already supplied an analysis field).
                    cleaned_paragraph = remove_misc_content(remove_initial_prefixes(unidecode(paragraph.replace('\t', ' ').replace('\n', ' ')).strip()))
                    if (cleaned_paragraph != ''):
                        analysis_list.append(' '.join(cleaned_paragraph.split()))
                if (analysis_start == 1):
                    # Start routing whole paragraphs from the next iteration on.
                    start_adding_lines = 1
            section_path = os.path.join(book_dir, section)
            summary_text = ' '.join(summary_list)
            analysis_text = ' '.join(analysis_list)
            if (summary_text != ''):
                summary_text = remove_prefixes(summary_text, new_json_dict['name'].strip(), is_analysis_text=False)
            if (analysis_text != ''):
                analysis_text = remove_prefixes(analysis_text, new_json_dict['name'].strip(), is_analysis_text=True)
            new_json_dict['summary'] = summary_text
            new_json_dict['analysis'] = analysis_text
            with open(section_path, 'w') as fout:
                json.dump(new_json_dict, fout)
    print('source: ', source, ' book_count: ', book_count)
def socket_write(socket, fn, args):
    """Send one framed RPC message over ``socket``.

    Frame layout, in three consecutive ``send`` calls: a 4-byte unsigned
    little-endian function id ``fn``, an 8-byte unsigned little-endian
    payload length, then the pickled ``args`` payload.
    """
    payload = pickle.dumps(args)
    header_parts = (
        int(fn).to_bytes(4, 'little', signed=False),
        len(payload).to_bytes(8, 'little', signed=False),
    )
    for part in header_parts:
        socket.send(part)
    socket.send(payload)
class VecPyTorch():
    """Wrapper around a vectorized environment that converts numpy
    observations/rewards returned by the wrapped ``venv`` into torch
    tensors (observations moved to ``device``); all other calls are
    delegated unchanged.
    """

    def __init__(self, venv, device):
        # venv: underlying vectorized env; device: target torch device.
        self.venv = venv
        self.num_envs = venv.num_envs
        self.observation_space = venv.observation_space
        self.action_space = venv.action_space
        self.device = device

    def setup_scene(self, traj_data, r_idx, args):
        """Delegate scene setup; returns (obs, infos) untouched."""
        (obs, infos) = self.venv.setup_scene(traj_data, r_idx, args)
        return (obs, infos)

    def to_thor_api_exec(self, action, object_id='', smooth_nav=False):
        """Execute a THOR API action; obs/reward converted to tensors."""
        (obs, reward, done, info, events, actions) = self.venv.to_thor_api_exec(action, object_id, smooth_nav)
        obs = torch.from_numpy(obs).float().to(self.device)
        reward = torch.from_numpy(reward).float()
        return (obs, reward, done, info, events, actions)

    def va_interact(self, action, interact_mask=None, smooth_nav=True, mask_px_sample=1, debug=False):
        """Visual-action interaction; scalar-like fields are unwrapped from
        their per-env containers via [0]."""
        (obs, rew, done, infos, success, event, target_instance_id, emp, api_action) = self.venv.va_interact(action, interact_mask, smooth_nav, mask_px_sample, debug)
        obs = torch.from_numpy(obs).float().to(self.device)
        rew = torch.from_numpy(rew).float()
        return (obs, rew, done, infos, success[0], event[0], target_instance_id[0], emp[0], api_action[0])

    def consecutive_interaction(self, interaction, target_instance):
        """Chained interaction step; obs/reward converted to tensors."""
        (obs, rew, done, info, success) = self.venv.consecutive_interaction(interaction, target_instance)
        obs = torch.from_numpy(obs).float().to(self.device)
        rew = torch.from_numpy(rew).float()
        return (obs, rew, done, info, success[0])

    def decompress_mask(self, mask):
        """Delegate mask decompression."""
        mask = self.venv.decompress_mask(mask)
        return mask

    def reset_goal(self, load, goal_name, cs):
        """Delegate goal reset; returns infos untouched."""
        infos = self.venv.reset_goal(load, goal_name, cs)
        return infos

    def reset(self):
        """Reset the env; obs converted to a tensor on ``device``."""
        (obs, info) = self.venv.reset()
        obs = torch.from_numpy(obs).float().to(self.device)
        return (obs, info)

    def evaluate(self, e):
        """Delegate evaluation; returns (log_entry, success) untouched."""
        (log_entry, success) = self.venv.evaluate(e)
        return (log_entry, success)

    def load_initial_scene(self):
        """Load the first scene; obs converted to a tensor."""
        (obs, info, actions_dict) = self.venv.load_initial_scene()
        obs = torch.from_numpy(obs).float().to(self.device)
        return (obs, info, actions_dict)

    def load_next_scene(self, load):
        """Load the next scene; obs converted to a tensor."""
        (obs, info, actions_dict) = self.venv.load_next_scene(load)
        obs = torch.from_numpy(obs).float().to(self.device)
        return (obs, info, actions_dict)

    def step_async(self, actions):
        """Begin an async step; actions moved to CPU numpy first."""
        actions = actions.cpu().numpy()
        self.venv.step_async(actions)

    def step_wait(self):
        """Finish an async step; obs/reward converted to tensors."""
        (obs, reward, done, info) = self.venv.step_wait()
        obs = torch.from_numpy(obs).float().to(self.device)
        reward = torch.from_numpy(reward).float()
        return (obs, reward, done, info)

    def step(self, actions):
        """Synchronous step; actions to numpy, obs/reward back to tensors."""
        actions = actions.cpu().numpy()
        (obs, reward, done, info) = self.venv.step(actions)
        obs = torch.from_numpy(obs).float().to(self.device)
        reward = torch.from_numpy(reward).float()
        return (obs, reward, done, info)

    def get_rewards(self, inputs):
        """Fetch rewards and convert to a float tensor."""
        reward = self.venv.get_rewards(inputs)
        reward = torch.from_numpy(reward).float()
        return reward

    def plan_act_and_preprocess(self, inputs, goal_spotted):
        """Plan/act/preprocess step; obs/reward converted to tensors."""
        (obs, reward, done, info, gs, next_step_dict) = self.venv.plan_act_and_preprocess(inputs, goal_spotted)
        obs = torch.from_numpy(obs).float().to(self.device)
        reward = torch.from_numpy(reward).float()
        return (obs, reward, done, info, gs, next_step_dict)

    def get_instance_mask(self):
        """Delegate instance-mask retrieval."""
        return self.venv.get_instance_mask()

    def reset_total_cat(self, total_cat_dict, categories_in_inst):
        """Delegate category reset (no return value)."""
        self.venv.reset_total_cat(total_cat_dict, categories_in_inst)

    def close(self):
        """Close the wrapped env."""
        return self.venv.close()
def register_Ns3WifiMac_methods(root_module, cls):
    """Register constructors and methods of ns3::WifiMac on the pybindgen
    wrapper `cls`.

    Auto-generated binding-registration code (pybindgen API scan output);
    keep edits mechanical to stay regenerable.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::WifiMac const &', 'arg0')])
    cls.add_method('ConfigureStandard', 'void', [param('ns3::WifiPhyStandard', 'standard')])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('Enqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Enqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'to')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetAckTimeout', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Mac48Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBasicBlockAckTimeout', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBssid', 'ns3::Mac48Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetCompressedBlockAckTimeout', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetCtsTimeout', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    cls.add_method('GetEifsNoDifs', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetHeConfiguration', 'ns3::Ptr< ns3::HeConfiguration >', [], is_const=True)
    cls.add_method('GetHtConfiguration', 'ns3::Ptr< ns3::HtConfiguration >', [], is_const=True)
    cls.add_method('GetPifs', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetRifs', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetShortSlotTimeSupported', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetSifs', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetSlot', 'ns3::Time', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetSsid', 'ns3::Ssid', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetVhtConfiguration', 'ns3::Ptr< ns3::VhtConfiguration >', [], is_const=True)
    cls.add_method('GetWifiPhy', 'ns3::Ptr< ns3::WifiPhy >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetWifiRemoteStationManager', 'ns3::Ptr< ns3::WifiRemoteStationManager >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NotifyPromiscRx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NotifyRx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NotifyRxDrop', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NotifyTx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('NotifyTxDrop', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('ResetWifiPhy', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetAckTimeout', 'void', [param('ns3::Time', 'ackTimeout')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Mac48Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetBasicBlockAckTimeout', 'void', [param('ns3::Time', 'blockAckTimeout')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetCompressedBlockAckTimeout', 'void', [param('ns3::Time', 'blockAckTimeout')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetCtsTimeout', 'void', [param('ns3::Time', 'ctsTimeout')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice > const', 'device')])
    cls.add_method('SetEifsNoDifs', 'void', [param('ns3::Time', 'eifsNoDifs')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetForwardUpCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Mac48Address, ns3::Mac48Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'upCallback')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetLinkDownCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'linkDown')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetLinkUpCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'linkUp')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMaxPropagationDelay', 'void', [param('ns3::Time', 'delay')])
    cls.add_method('SetPifs', 'void', [param('ns3::Time', 'pifs')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetPromisc', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetRifs', 'void', [param('ns3::Time', 'rifs')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetShortSlotTimeSupported', 'void', [param('bool', 'enable')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetSifs', 'void', [param('ns3::Time', 'sifs')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetSlot', 'void', [param('ns3::Time', 'slotTime')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetSsid', 'void', [param('ns3::Ssid', 'ssid')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetWifiPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy >', 'phy')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetWifiRemoteStationManager', 'void', [param('ns3::Ptr< ns3::WifiRemoteStationManager >', 'stationManager')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # non-public members of the C++ class
    cls.add_method('ConfigureDcf', 'void', [param('ns3::Ptr< ns3::Txop >', 'dcf'), param('uint32_t', 'cwmin'), param('uint32_t', 'cwmax'), param('bool', 'isDsss'), param('ns3::AcIndex', 'ac')], visibility='protected')
    cls.add_method('FinishConfigureStandard', 'void', [param('ns3::WifiPhyStandard', 'standard')], is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def discover_test_cases_recursively(suite_or_case):
    """Flatten a unittest suite/case tree into a flat list of TestCase objects."""
    if isinstance(suite_or_case, unittest.TestCase):
        return [suite_or_case]
    # A suite is iterable; recurse into each child and concatenate.
    return [case
            for child in suite_or_case
            for case in discover_test_cases_recursively(child)]
def display_chromagraph(audio_file_path, ctr=1):
    """Compute a CQT chromagram of `audio_file_path` and save it to '<ctr>.png'.

    :param audio_file_path: path to an audio file readable by librosa
    :param ctr: counter used to name the output image file
    """
    y, sr = librosa.load(audio_file_path)
    plt.figure(figsize=(8, 4))
    C = librosa.feature.chroma_cqt(y=y, sr=sr)
    librosa.display.specshow(C, y_axis='chroma')
    plt.colorbar()
    plt.title('Chromagram')
    plt.savefig(f'{ctr}.png')
    # Fix: release the figure — without this, repeated calls accumulate open
    # matplotlib figures and leak memory.
    plt.close()
class OpenObjectAction(BaseAction):
    """Reward logic for the 'OpenObject' interaction."""

    valid_actions = {'OpenObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return (reward, done) for the current state.

        invalid_action when the last action was not OpenObject; positive/done
        when the targeted receptacle is open; negative when it exists but is
        closed; neutral when it cannot be found.
        """
        if state.metadata['lastAction'] not in self.valid_actions:
            return self.rewards['invalid_action'], False
        subgoal = expert_plan[goal_idx]['planner_action']
        target_recep = get_object(subgoal['objectId'], state.metadata)
        if target_recep is None:
            return self.rewards['neutral'], False
        if target_recep['isOpen']:
            return self.rewards['positive'], True
        return self.rewards['negative'], False
class T5_warpper(nn.Module):
    """Wrapper around a HuggingFace seq2seq (T5-style) model and its tokenizer.

    Only the attributes used in this block are documented; the remaining
    constructor flags are stored/used by the surrounding project.
    """

    def __init__(self, pretrained_model_name_or_path, bg_word='', dtype='bfloat16', loss_type='CE', use_fed_loss=False, fed_loss_num_classes=1000, inference_text=False, inference_prob=False, inference_prob_fast=False, train_positive_only=False, test_constraint=False, vision_port='encoder', eval_only=False, **kwargs):
        super().__init__(**kwargs)
        # resolve e.g. 'bfloat16' -> torch.bfloat16
        self.dtype = getattr(torch, dtype)
        self.config = AutoConfig.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
        self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
        if eval_only:
            # inference-only: freeze all weights and cast them to self.dtype
            self.t5_model.eval()
            for (name, param) in self.t5_model.named_parameters():
                param.requires_grad = False
                param.data = param.data.to(self.dtype)
        self.eos_token_id = self.tokenizer('\n', add_special_tokens=False).input_ids[0]
        # memoization cache for forward_text: tuple(text_list) -> feature
        self.text_list_to_feature = {}

    # NOTE(review): the original source contained two mangled decorator lines
    # here ("(enabled=False)" and "_grad()"); reconstructed as
    # autocast(enabled=False) + no_grad() — confirm against upstream history.
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def forward_text(self, text_list, cache=False):
        """Encode a list of strings; return pooled, detached encoder features.

        With cache=True, results are memoized per tuple(text_list).
        """
        if cache and (tuple(text_list) in self.text_list_to_feature):
            return self.text_list_to_feature[tuple(text_list)]
        text_token = self.tokenizer(text_list, return_tensors='pt', padding='longest').to(self.device)
        input_ids = text_token.input_ids
        attention_mask = text_token.attention_mask
        encoder_outputs = self.t5_model.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=None, head_mask=None, output_attentions=True, output_hidden_states=True, return_dict=True)
        last_hidden_state = encoder_outputs.last_hidden_state
        feature = agg_lang_feat(last_hidden_state, attention_mask).clone().detach()
        if cache:
            self.text_list_to_feature[tuple(text_list)] = feature
        return feature

    @property
    def device(self):
        """Device of the wrapped model.

        Fix: forward_text accesses `self.device` without calling it, so this
        must be a property rather than a plain method.
        """
        return self.t5_model.device
def test_electrocardiogram():
    """Shape/dtype/summary-statistic regression test for the ECG sample signal."""
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        ecg = electrocardiogram()
    assert ecg.dtype == float
    assert_equal(ecg.shape, (108000,))
    # NOTE(review): the original asserted mean ~ -0. and std ~ 0., which the
    # real signal cannot satisfy at the default 7-decimal tolerance (the
    # values look truncated). Restored the reference statistics from the
    # upstream SciPy test suite — confirm against the dataset.
    assert_almost_equal(ecg.mean(), -0.16510875)
    assert_almost_equal(ecg.std(), 0.5992473991177294)
def dot(A: dace.float32[N], B: dace.float32[N], out: dace.float32[1]):
    # DaCe program computing out = sum_i A[i] * B[i].
    # NOTE(review): a @dace.program decorator presumably precedes this def in
    # the full file — confirm.
    def product(i: _[0:N]):
        # Map scope over i in [0, N). In DaCe tasklet syntax, '<<' declares a
        # read memlet and '>>' a write memlet; out(1, lambda x, y: x + y)
        # requests one write with '+' as the write-conflict resolution,
        # i.e. a sum-reduction into out.
        (a << A[i])
        (b << B[i])
        (o >> out(1, (lambda x, y: (x + y))))
        o = (a * b)
def double_backward_for_global(g_dx0, g_db0, g_dg0, dy, x0, b0, g0, rm, rv, axes, decay_rate, eps):
    # Double-backward (grad-of-grad) pass — by its inputs this appears to be
    # batch normalization in "global statistics" mode, where rm/rv are fixed
    # running mean/variance (so normalization is an affine map of x0).
    # g_dx0/g_db0/g_dg0 are upstream grads w.r.t. the first-backward outputs;
    # dy is the first-backward incoming gradient; g0/b0 scale/bias.
    # Returns (g_dy, g_x0, None, g_g0); None for b0 since db0 = sum(dy) does
    # not depend on b0.  TODO confirm against the framework's BN reference.
    axes0 = [a for a in range(x0.ndim)]
    # reduction axes for the scale gradient: all axes NOT in `axes`
    axes = list((set(axes0) - set(axes)))
    # 1 / sqrt(rv + eps)
    v_eps_rsqrt1 = ((rv + eps) ** ((- 1.0) / 2.0))
    g_x0 = ((g_dg0 * dy) * v_eps_rsqrt1)
    g_g0 = F.sum(((g_dx0 * dy) * v_eps_rsqrt1), axes, True)
    # g_dy accumulates contributions through dx0 (scaled by g0/sqrt(rv+eps)),
    # dg0 (via the normalized input), and db0 (identity).
    g_dy = ((((g_dx0 * g0) * v_eps_rsqrt1) + ((g_dg0 * (x0 - rm)) * v_eps_rsqrt1)) + g_db0)
    return (g_dy, g_x0, None, g_g0)
def train(model, data, train_idx, optimizer, device):
    """Run one full-batch training step on the nodes in `train_idx`.

    Returns the scalar NLL loss value for logging.
    """
    model = model.to(device)
    data = data.to(device)
    train_idx = train_idx.to(device)
    model.train()
    optimizer.zero_grad()
    # forward on the whole graph, then select the training nodes
    logits = model(x=data.x, edge_index=data.edge_index)[train_idx]
    loss = F.nll_loss(logits, data.y.squeeze(1)[train_idx])
    loss.backward()
    optimizer.step()
    return loss.item()
def representative_dataset(input_shape, num_of_inputs=1):
    """Yield one calibration sample: a list of `num_of_inputs` float32 arrays
    of shape `input_shape` (e.g. for TFLite post-training quantization).

    Fix: the original built the list as `[arr] * num_of_inputs`, so every
    input port received the *same* array object; now each input gets an
    independent random array.
    """
    yield [np.random.randn(*input_shape).astype(np.float32)
           for _ in range(num_of_inputs)]
def test_delta_encode() -> None:
    """delta_encode collapses runs of events identical up to their start time."""
    def ev(*dt, **kwargs):
        return femr.datasets.RawEvent(start=datetime.datetime(*dt), **kwargs)

    patient = femr.datasets.RawPatient(
        patient_id=123,
        events=[
            ev(1999, 7, 2, concept_id=1234),
            ev(1999, 7, 2, concept_id=1234),
            ev(1999, 7, 2, 12, concept_id=1234, value=3),
            ev(1999, 7, 2, 14, concept_id=1234, value=3),
            ev(1999, 7, 2, 19, concept_id=1234, value=5),
            ev(1999, 7, 2, 20, concept_id=1234, value=3),
        ],
    )
    expected = femr.datasets.RawPatient(
        patient_id=123,
        events=[
            ev(1999, 7, 2, concept_id=1234),
            ev(1999, 7, 2, 12, concept_id=1234, value=3),
            ev(1999, 7, 2, 19, concept_id=1234, value=5),
            ev(1999, 7, 2, 20, concept_id=1234, value=3),
        ],
    )
    assert delta_encode(patient) == expected
class MicoGripper(Gripper):
    """Gripper wrapper for the Mico two-finger hand model."""

    def __init__(self, count: int = 0):
        # finger joints actuated when opening/closing the hand
        finger_joints = ['MicoHand_joint1_finger1', 'MicoHand_joint1_finger3']
        super().__init__(count, 'MicoHand', finger_joints)
def UniformList(name, typ, size=None, parts=None):
    """Create and register a wire type for a homogeneous list of `typ`.

    Exactly one of `size` (fixed element byte width) or `parts` (fixed
    element count, width derived from the buffer) must be given.
    """
    assert (size is not None) ^ (parts is not None)

    def serialize(uniform_list):
        # concatenate each element's serialized bytes
        return b''.join(typ.serialize(item) for item in uniform_list)

    def deserialize(buf):
        nonlocal size, parts
        if len(buf) <= 4:
            # NOTE(review): buffers of <= 4 bytes decode as empty — presumably
            # a header/sentinel convention of this protocol; confirm.
            return []
        if parts is not None:
            size = len(buf) // parts
        assert len(buf) % size == 0
        return [typ.deserialize(buf[off:off + size])
                for off in range(0, len(buf), size)]

    return register_type(type(name, (), dict(serialize=serialize, deserialize=deserialize)))
def test_write_sentences():
    """Round-trip check: sentences written with write_sentences_to_file must
    read back identical to the originals."""
    with tempfile.TemporaryDirectory() as tempdir:
        raw_filename = os.path.join(tempdir, 'raw.tsv')
        with open(raw_filename, 'w') as fout:
            fout.write(FBK_SAMPLE)
        sentences = split_wikiner.read_sentences(raw_filename, 'utf-8')
        copy_filename = os.path.join(tempdir, 'copy.tsv')
        split_wikiner.write_sentences_to_file(sentences, copy_filename)
        # Fix: re-read the *copy*, not the raw file — the original re-read
        # raw_filename, so write_sentences_to_file's output was never checked.
        sent2 = split_wikiner.read_sentences(copy_filename, 'utf-8')
        assert sent2 == sentences
def test_execute_python_code_overwrites_file(random_code: str, random_string: str, agent: Agent):
    """execute_python_code must replace pre-existing file contents entirely."""
    destination = os.path.join(
        agent.config.workspace_path, agent.ai_name, 'executed_code', 'test_code.py'
    )
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    # seed the destination so we can tell overwrite from append/no-op
    with open(destination, 'w+') as f:
        f.write('This will be overwritten')
    sut.execute_python_code(random_code, 'test_code.py', agent=agent)
    with open(destination) as f:
        assert f.read() == random_code
class TestRecipe(unittest.TestCase):
    """Tests for Recipe: equality, flyweight caching, serialization,
    value/time attributes, the ALL_RECIPES registry, input validation, and
    random recipe generation.

    Kept byte-identical apart from documentation: the caching tests
    (assertIs) depend on the exact construction order in setUp.
    """

    def setUp(self):
        # Reset global Recipe configuration, then build fixtures:
        # r1/r2 identical, r3/r4 identical, r5 a reordering of r4, r6 distinct.
        Recipe.configure({})
        self.r1 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
        self.r2 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
        self.r3 = Recipe([Recipe.ONION, Recipe.TOMATO])
        self.r4 = Recipe([Recipe.ONION, Recipe.TOMATO])
        self.r5 = Recipe([Recipe.TOMATO, Recipe.ONION])
        self.r6 = Recipe([Recipe.ONION, Recipe.ONION])
        self.recipes = [self.r1, self.r2, self.r3, self.r4, self.r5, self.r6]
        self.pickle_temp_dir = os.path.join(TESTING_DATA_DIR, 'recipes')
        if (not os.path.exists(self.pickle_temp_dir)):
            os.makedirs(self.pickle_temp_dir)

    def tearDown(self):
        # restore global config and remove any pickles written by tests
        Recipe.configure({})
        if os.path.exists(self.pickle_temp_dir):
            shutil.rmtree(self.pickle_temp_dir)

    def test_eq(self):
        """Equality is ingredient-multiset based (order-insensitive)."""
        self.assertEqual(self.r1, self.r2, 'Failed basic equality check')
        self.assertNotEqual(self.r1, self.r3, 'Failed Basic inequality check')
        self.assertNotEqual(self.r1, self.r6, 'Failed inequality check with all one ingredient')
        self.assertEqual(self.r3, self.r4, 'Failed basic equality check')
        self.assertEqual(self.r4, self.r5, 'Failed ordered equality check')

    def test_caching(self):
        """Equal recipes must be the *same* object (flyweight cache)."""
        self.assertIs(self.r1, self.r2)
        self.assertIs(self.r3, self.r4)
        self.assertIs(self.r4, self.r5)
        self.assertFalse((self.r6 is self.r1), 'different recipes cached to same value')

    def test_serialization(self):
        """Pickling and unpickling preserves recipe equality."""
        loaded_recipes = []
        for (i, recipe) in enumerate(self.recipes):
            pickle_path = os.path.join(self.pickle_temp_dir, 'recipe_{}'.format(i))
            save_pickle(recipe, pickle_path)
            loaded = load_pickle(pickle_path)
            loaded_recipes.append(loaded)
        for (original, loaded) in zip(self.recipes, loaded_recipes):
            self.assertEqual(original, loaded)

    def test_value(self):
        # default configuration gives every recipe value 20
        for recipe in self.recipes:
            self.assertEqual(recipe.value, 20)

    def test_time(self):
        # default configuration gives every recipe cook time 20
        for recipe in self.recipes:
            self.assertEqual(recipe.time, 20)

    def test_all_recipes(self):
        """ALL_RECIPES contains the fixtures and has the combinatorial size."""
        for recipe in self.recipes:
            self.assertTrue((recipe in Recipe.ALL_RECIPES))
        self.assertEqual(len(Recipe.ALL_RECIPES), self._expected_num_recipes(len(Recipe.ALL_INGREDIENTS), Recipe.MAX_NUM_INGREDIENTS))
        Recipe.configure({'max_num_ingredients': 4})
        self.assertEqual(len(Recipe.ALL_RECIPES), self._expected_num_recipes(len(Recipe.ALL_INGREDIENTS), 4))

    def test_invalid_input(self):
        """Constructor rejects unknown ingredients, over-long, empty, non-list."""
        self.assertRaises(ValueError, Recipe, [Recipe.ONION, Recipe.TOMATO, 'carrot'])
        self.assertRaises(ValueError, Recipe, ([Recipe.ONION] * 4))
        self.assertRaises(ValueError, Recipe, [])
        self.assertRaises(ValueError, Recipe, 'invalid argument')

    def test_recipes_generation(self):
        """generate_random_recipes validates arguments and samples correctly."""
        self.assertRaises(AssertionError, Recipe.generate_random_recipes, max_size=(Recipe.MAX_NUM_INGREDIENTS + 1))
        self.assertRaises(AssertionError, Recipe.generate_random_recipes, min_size=0)
        self.assertRaises(AssertionError, Recipe.generate_random_recipes, min_size=3, max_size=2)
        self.assertRaises(AssertionError, Recipe.generate_random_recipes, ingredients=['onion', 'tomato', 'fake_ingredient'])
        self.assertRaises(AssertionError, Recipe.generate_random_recipes, n=99999)
        self.assertEqual(len(Recipe.generate_random_recipes(n=3)), 3)
        self.assertEqual(len(Recipe.generate_random_recipes(n=99, unique=False)), 99)
        two_sized_recipes = [Recipe(['onion', 'onion']), Recipe(['onion', 'tomato']), Recipe(['tomato', 'tomato'])]
        for _ in range(100):
            self.assertCountEqual(two_sized_recipes, Recipe.generate_random_recipes(n=3, min_size=2, max_size=2, ingredients=['onion', 'tomato']))
        only_onions_recipes = [Recipe(['onion', 'onion']), Recipe(['onion', 'onion', 'onion'])]
        for _ in range(100):
            self.assertCountEqual(only_onions_recipes, Recipe.generate_random_recipes(n=2, min_size=2, max_size=3, ingredients=['onion']))
        self.assertCountEqual(only_onions_recipes, set([Recipe.generate_random_recipes(n=1, recipes=only_onions_recipes)[0] for _ in range(100)]))

    def _expected_num_recipes(self, num_ingredients, max_len):
        # multiset count: C(num_ingredients + max_len, num_ingredients) - 1
        # (minus the empty recipe)
        return (comb((num_ingredients + max_len), num_ingredients) - 1)
def test_sugar_4():
    """Compute sugar pucker angles (Phase, tm) for selected residues, dump
    them as a formatted table, and compare against the reference output."""
    resi = ['RC5_1_0', 'RG_69_0', 'RU_37_0']
    na = ['Phase', 'tm']
    (angles_b, rr) = bb.pucker_angles(fname, residues=resi)
    # header row: '#' then one column per angle name
    stri = ('%20s ' % '#')
    for pp in na:
        stri += (' %10s ' % pp)
    stri += '\n'
    # one row per residue: label followed by its angle values
    for e in range(angles_b.shape[1]):
        stri += ('%20s ' % rr[e])
        for k in range(angles_b.shape[2]):
            stri += (' %10.4f ' % angles_b[(0, e, k)])
        stri += '\n'
    # Fix: use a context manager so the handle is closed even if write fails.
    with open('%s/sugar_04.test.dat' % outdir, 'w') as fh:
        fh.write(stri)
    comp('%s/sugar_04.test.dat' % refdir)
class SimpleSelfAttention2(nn.Module):
    """Simple self-attention: out = gamma * (x x^T) conv(x) + x over the
    flattened spatial dims, with gamma a learned scalar starting at 0."""

    def __init__(self, n_in: int, ks=1):
        super().__init__()
        self.conv = conv1d(n_in, n_in, ks, padding=ks // 2, bias=False)
        # gamma starts at 0 so the layer is initially an identity mapping
        self.gamma = nn.Parameter(tensor([0.0]))
        self.n_in = n_in

    def forward(self, x):
        orig_shape = x.size()
        # flatten all spatial dims: (B, C, *spatial) -> (B, C, L)
        flat = x.view(*orig_shape[:2], -1)
        attn = torch.bmm(flat, flat.permute(0, 2, 1).contiguous())  # (B, C, C)
        out = torch.bmm(attn, self.conv(flat))
        out = self.gamma * out + flat
        return out.view(*orig_shape).contiguous()
@dataclasses.dataclass
class DatasetExample():
    """One retrieval-dataset example: a query with its associated passages.

    NOTE(review): the @dataclass decorator and the @staticmethod on
    from_tuple were missing in the original (consistent with other stripped
    decorators in this file); restored — from_tuple() calls the generated
    positional constructor, so the decorator is required for it to work.
    """

    index: int                 # position of this example in the dataset
    answers: List[str]
    positive_passages: List[DatasetPassage]
    other_passages: List[DatasetPassage]
    is_gold_positive: bool
    query_token_ids: List[int]

    def to_tuple(self) -> tuple:
        """Flatten to a plain tuple (passages become tuples) for fast pickling."""
        return (self.index, self.answers, [dataclasses.astuple(p) for p in self.positive_passages], [dataclasses.astuple(p) for p in self.other_passages], self.is_gold_positive, self.query_token_ids)

    @staticmethod
    def from_tuple(input_tuple: tuple) -> DatasetExample:
        """Inverse of to_tuple(): rebuild the example and its passages."""
        return DatasetExample(input_tuple[0], input_tuple[1], [DatasetPassage(*t) for t in input_tuple[2]], [DatasetPassage(*t) for t in input_tuple[3]], input_tuple[4], input_tuple[5])
class HTML():
    """Build a simple HTML page of headers and image tables (via dominate)
    and save it, together with an image subdirectory, under `web_dir`."""

    def __init__(self, web_dir, title, image_subdir='', reflesh=0):
        self.title = title
        self.web_dir = web_dir
        self.img_subdir = image_subdir
        self.img_dir = os.path.join(self.web_dir, image_subdir)
        if (not os.path.exists(self.web_dir)):
            os.makedirs(self.web_dir)
        if (not os.path.exists(self.img_dir)):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if (reflesh > 0):
            with self.doc.head:
                # Auto-refresh the page every `reflesh` seconds. Fix: the
                # original line dropped the http_equiv keyword, emitting a
                # bare <meta content="..."> that never refreshes.
                meta(http_equiv='refresh', content=str(reflesh))

    def get_image_dir(self):
        """Return the directory where this page's images are stored."""
        return self.img_dir

    def add_header(self, str):
        """Append an <h3> header with the given text."""
        with self.doc:
            h3(str)

    def add_table(self, border=1):
        """Start a new fixed-layout <table> and attach it to the document."""
        self.t = table(border=border, style='table-layout: fixed;')
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=400):
        """Append a table row of images, each `width` px wide, hyperlinked to
        its entry in `links` and captioned with its entry in `txts`."""
        self.add_table()
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join(link)):
                                img(style=('width:%dpx' % width), src=os.path.join(im))
                            br()
                            p(txt)

    def save(self, file='index'):
        """Render the document to '<web_dir>/<file>.html'."""
        html_file = ('%s/%s.html' % (self.web_dir, file))
        # context manager: the handle is closed even if render/write fails
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
def test_tree_pandas_output_formats():
    """Every HDBSCAN tree attribute must expose a working to_pandas()."""
    clusterer = HDBSCAN(gen_min_span_tree=True).fit(X)
    for tree in (clusterer.condensed_tree_,
                 clusterer.single_linkage_tree_,
                 clusterer.minimum_spanning_tree_):
        if_pandas(tree.to_pandas)()
def print_yellow(info, value='', verbose=True):
    """Print '[info] value' with the tag in yellow.

    Output is suppressed only when verbose is exactly False (truthy/other
    falsy values still print, matching the original identity check).
    """
    if (verbose is False):
        return
    tag = Fore.YELLOW + ('[%s] ' % info) + Style.RESET_ALL
    print(tag + str(value))
def check_if_correct_cls(args, model, dataloader, sample_list):
    """Filter sample_list to the samples `model` classifies correctly.

    Only active for cifar10 (other datasets pass through unchanged).
    Prints per-sample predictions and a final count as a side effect.
    """
    if args.dataset != 'cifar10':
        return sample_list
    model.to(args.device)
    correct = 0
    selected = []
    with torch.no_grad():
        model.eval()
        for index, (name, data, label) in enumerate(dataloader):
            data = data.to(args.device)
            label = label.to(args.device)
            print(('img: %d ' % index), name, 'label:', label)
            output = model(normalize(args, data))
            pred = torch.argmax(output, dim=1)
            print('pred:', pred.item())
            if pred.item() == label.item():
                correct += 1
                selected.append(sample_list[index])
            else:
                print('Predict incorrectly.')
            print('')
    print(correct, 'images/tabular data are classified correctly.')
    return selected
def load_glove(glove_pt, idx_to_token):
    """Build an embedding matrix from a pickled GloVe dict.

    :param glove_pt: path to a pickled dict mapping word -> vector
    :param idx_to_token: mapping index -> token (tokens may be multi-word;
        those get the average of their words' vectors)
    :return: np.ndarray of shape (len(idx_to_token), dim)

    Out-of-vocabulary words fall back to the vector for 'the'.
    Fixes: the pickle file handle was leaked (now closed via `with`), and the
    inner loop no longer shadows the outer `token` variable.
    """
    with open(glove_pt, 'rb') as f:
        glove = pickle.load(f)
    fallback = glove['the']
    dim = len(fallback)
    matrix = []
    for i in range(len(idx_to_token)):
        token = idx_to_token[i]
        words = token.split()
        if len(words) > 1:
            # multi-word token: average the word vectors
            v = np.zeros((dim,))
            for w in words:
                v = v + glove.get(w, fallback)
            v = v / len(words)
        else:
            v = glove.get(token, fallback)
        matrix.append(v)
    return np.asarray(matrix)
class Job(object):
    """Immutable holder for the operator arguments a job was built from."""

    def __init__(self, op_args):
        # opaque payload; returned verbatim by op_args()
        self._op_args = op_args

    def op_args(self):
        """Return the operator arguments this job was constructed with."""
        return self._op_args
def get_full_profiles(graph, model, model_args, model_kwargs, n_iter, profile_ops, max_depth, basic_blocks, force_no_recomp_scopes, save_memory_mode, use_graph_profiler, use_network_profiler):
    """Profile `model` twice — with and without activation recomputation —
    and attach the results to `graph`.

    Returns (weights, max_mem_usage_bytes_r, max_mem_usage_bytes_nr), where
    `weights` maps node id -> FullExecTimes(recomputation, no_recomputation).
    Side effect: every node's max_memory_bytes is zeroed between/after the
    profiling passes and then set from the *recomputation* memory profile.
    """
    print('-I- profiling model (recomp)')
    (recomputation_times, max_mem_usage_bytes_r) = get_profiles(graph, model, model_args=model_args, model_kwargs=model_kwargs, use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler, save_memory_mode=save_memory_mode, profile_ops=profile_ops, recomputation=True, n_iter=n_iter, max_depth=max_depth, basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes)
    print('-I- profiling model (no recomp)')
    # reset per-node memory stats so the second pass starts clean
    warnings.warn('Need to reset max mem usage!!')
    for node in graph.nodes:
        node.max_memory_bytes = 0
    (no_recomputation_times, max_mem_usage_bytes_nr) = get_profiles(graph, model, model_args=model_args, model_kwargs=model_kwargs, use_network_profiler=use_network_profiler, use_graph_profiler=use_graph_profiler, save_memory_mode=save_memory_mode, profile_ops=profile_ops, recomputation=False, n_iter=n_iter, max_depth=max_depth, basic_blocks=basic_blocks, force_no_recomp_scopes=force_no_recomp_scopes)
    warnings.warn('Need to reset max mem usage!!')
    for node in graph.nodes:
        node.max_memory_bytes = 0
    # scopes missing from either pass get zero timings
    for n in graph.nodes:
        if (n.scope not in no_recomputation_times):
            no_recomputation_times[n.scope] = ExecTimes(0, 0)
        if (n.scope not in recomputation_times):
            recomputation_times[n.scope] = ExecTimes(0, 0)
    weights = {n.id: FullExecTimes(recomputation_times[n.scope], no_recomputation_times[n.scope]) for n in graph.nodes}
    # record the recomputation-pass memory usage on each node
    for node in graph.nodes:
        t = max_mem_usage_bytes_r.get(node.scope, None)
        if (t is not None):
            node.max_memory_bytes = t
    print('-I- model profiled')
    return (weights, max_mem_usage_bytes_r, max_mem_usage_bytes_nr)
def format_rule(rule, kg):
    """Render a rule (sequence of relation ids) as an arrow chain.

    Forward relations become '-rel-> '; relations ending in '_inv' are
    rendered as backward arrows '<-rel-'.
    """
    pieces = []
    for relation_id in rule:
        rel = kg.id2relation[int(relation_id)]
        if rel.endswith('_inv'):
            pieces.append('<-{}-'.format(rel))
        else:
            pieces.append('-{}-> '.format(rel))
    return ''.join(pieces)
def test_calculate_precision_multiple():
    """vsl_precision on three pairs with 0/5, 5/5 and 4/6 predicted overlap."""
    preds = [
        torch.tensor([6, 7, 8, 9, 10], dtype=torch.long),
        torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        torch.tensor([1, 2, 3, 5, 7, 6], dtype=torch.long),
    ]
    trues = [
        torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        torch.tensor([1, 2, 3, 5], dtype=torch.long),
    ]
    precision = vsl_precision(
        VariableShapeList.from_tensors(preds),
        VariableShapeList.from_tensors(trues),
    )
    assert torch.allclose(precision, torch.tensor([0, 1, (4 / 6)], dtype=torch.float))
def ngram_evaluation_details(data, LM):
    """Per-sentence evaluation stats under language model `LM`.

    `data` is an iterable of sentences, each a sequence of (token, context)
    pairs. Returns one collections.Counter per sentence with keys
    'num_tokens' and 'neglogprob' (summed negative log-probability).
    """
    def _sentence_stats(sentence):
        stats = collections.Counter()
        for token, context in sentence:
            stats['num_tokens'] += 1
            stats['neglogprob'] -= LM.logprob(token, context)
        return stats

    return [_sentence_stats(sentence) for sentence in data]
class Frame(object):
    """Wrapper around a gdb.Frame with CPython-interpreter-aware helpers.

    Fix: get_selected_frame / get_selected_python_frame /
    get_selected_bytecode_frame take `cls` and are invoked on the class, but
    their @classmethod decorators were missing (consistent with other
    stripped decorators in this file); restored.
    """

    def __init__(self, gdbframe):
        self._gdbframe = gdbframe

    def older(self):
        """Return the caller's Frame, or None at the outermost frame."""
        older = self._gdbframe.older()
        if older:
            return Frame(older)
        else:
            return None

    def newer(self):
        """Return the callee's Frame, or None at the innermost frame."""
        newer = self._gdbframe.newer()
        if newer:
            return Frame(newer)
        else:
            return None

    def select(self):
        """Make this frame gdb's selected frame; False if this gdb build
        does not expose gdb.Frame.select."""
        if (not hasattr(self._gdbframe, 'select')):
            print('Unable to select frame: this build of gdb does not expose a gdb.Frame.select method')
            return False
        self._gdbframe.select()
        return True

    def get_index(self):
        """Return this frame's depth, counting newer frames toward 0."""
        index = 0
        iter_frame = self
        while iter_frame.newer():
            index += 1
            iter_frame = iter_frame.newer()
        return index

    def is_python_frame(self):
        """True if this C frame executes or supports Python code."""
        if self.is_evalframe():
            return True
        if self.is_other_python_frame():
            return True
        return False

    def is_evalframe(self):
        """True if this is the interpreter's bytecode-evaluation frame."""
        if (self._gdbframe.name() == EVALFRAME):
            # only a normal frame counts (not inlined/signal trampolines)
            if (self._gdbframe.type() == gdb.NORMAL_FRAME):
                return True
        return False

    def is_other_python_frame(self):
        """Return a description string if this is a Python-related C frame
        other than the evaluator (GIL wait, GC, C-function invocation);
        otherwise False."""
        if self.is_waiting_for_gil():
            return 'Waiting for the GIL'
        if self.is_gc_collect():
            return 'Garbage-collecting'
        frame = self._gdbframe
        caller = frame.name()
        if (not caller):
            return False
        if (caller in ('_PyCFunction_FastCallDict', '_PyCFunction_FastCallKeywords')):
            arg_name = 'func'
            # reading the variable can fail if it is optimized out
            try:
                func = frame.read_var(arg_name)
                return str(func)
            except RuntimeError:
                return ('PyCFunction invocation (unable to read %s)' % arg_name)
        if (caller == 'wrapper_call'):
            try:
                func = frame.read_var('wp')
                return str(func)
            except RuntimeError:
                return '<wrapper_call invocation>'
        return False

    def is_waiting_for_gil(self):
        """Heuristic: truthy if the frame name suggests a GIL wait.

        Returns None (falsy) when the frame has no name, matching the
        original's implicit return.
        """
        name = self._gdbframe.name()
        if name:
            return ('pthread_cond_timedwait' in name)

    def is_gc_collect(self):
        """True if this frame is the garbage collector's collect()."""
        return (self._gdbframe.name() == 'collect')

    def get_pyop(self):
        """Return the PyFrameObjectPtr for this frame.

        Falls back to the caller's 'f' variable when ours is optimized out;
        returns None if the variable cannot be read.
        """
        try:
            f = self._gdbframe.read_var('f')
            frame = PyFrameObjectPtr.from_pyobject_ptr(f)
            if (not frame.is_optimized_out()):
                return frame
            orig_frame = frame
            caller = self._gdbframe.older()
            if caller:
                f = caller.read_var('f')
                frame = PyFrameObjectPtr.from_pyobject_ptr(f)
                if (not frame.is_optimized_out()):
                    return frame
            return orig_frame
        except ValueError:
            return None

    @classmethod
    def get_selected_frame(cls):
        """Wrap gdb's currently selected frame, or return None."""
        _gdbframe = gdb.selected_frame()
        if _gdbframe:
            return Frame(_gdbframe)
        return None

    @classmethod
    def get_selected_python_frame(cls):
        """Walk outward from the selected frame to the first Python frame."""
        try:
            frame = cls.get_selected_frame()
        except gdb.error:
            # no frame is selected (e.g. the inferior is not running)
            return None
        while frame:
            if frame.is_python_frame():
                return frame
            frame = frame.older()
        return None

    @classmethod
    def get_selected_bytecode_frame(cls):
        """Walk outward to the first bytecode-evaluation frame, or None."""
        frame = cls.get_selected_frame()
        while frame:
            if frame.is_evalframe():
                return frame
            frame = frame.older()
        return None

    def print_summary(self):
        """Write a one-line '#<index> ...' summary of this frame to stdout."""
        if self.is_evalframe():
            pyop = self.get_pyop()
            if pyop:
                line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
                write_unicode(sys.stdout, ('#%i %s\n' % (self.get_index(), line)))
                if (not pyop.is_optimized_out()):
                    line = pyop.current_line()
                    if (line is not None):
                        sys.stdout.write((' %s\n' % line.strip()))
            else:
                sys.stdout.write(('#%i (unable to read python frame information)\n' % self.get_index()))
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write(('#%i %s\n' % (self.get_index(), info)))
            else:
                sys.stdout.write(('#%i\n' % self.get_index()))

    def print_traceback(self):
        """Write this frame as one entry of a Python-style traceback."""
        if self.is_evalframe():
            pyop = self.get_pyop()
            if pyop:
                pyop.print_traceback()
                if (not pyop.is_optimized_out()):
                    line = pyop.current_line()
                    if (line is not None):
                        sys.stdout.write((' %s\n' % line.strip()))
            else:
                sys.stdout.write(' (unable to read python frame information)\n')
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write((' %s\n' % info))
            else:
                sys.stdout.write(' (not a python frame)\n')
class AdamGapAware(GapAwareBase):
    """Gap-aware delay compensation for Adam.

    Each variant divides p.grad in place by a penalty
        1 + |gap| / (sqrt(exp_step_avg_sq) + eps),
    where 'gap' measures how far the current weights drifted from the weights
    the (stale) gradient was computed at, and exp_step_avg_sq is a running
    statistic attached to the optimizer state by gap_aware_adam_init.
    """

    def __init__(self, optimizer, from_grad=False):
        # NOTE(review): `from_grad` is accepted but never used here — confirm
        # whether callers rely on it.
        super().__init__(optimizer)
        gap_aware_adam_init(optimizer)

    def apply_from_grad(self):
        """Penalize p.grad in place, estimating the gap from the gradient
        itself (via adam_gap1) instead of stashed/real weights."""
        opt_state = self.optimizer.state
        with torch.no_grad():
            for pg in self.optimizer.param_groups:
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                eta = pg['lr']
                if (weight_decay != 0):
                    # weight-decay correction not implemented for this variant
                    raise NotImplementedError()
                for p in pg['params']:
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'] ** 0.5) + eps)
                    m = opt_state[p]['exp_avg']
                    v = opt_state[p]['exp_avg_sq']
                    gt = p.grad
                    gap = adam_gap1(beta1, beta2, eps, eta, gt, m, v).abs_()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty

    def apply_on_stashed(self, stashed_theta):
        """Penalize p.grad using the gap |p - stashed_p| against stashed
        (pre-step) weights; groups with max_lr <= 0 are skipped."""
        opt_state = self.optimizer.state
        with torch.no_grad():
            for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, sp) in zip(pg['params'], spg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - sp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty
                    # correct the weight-decay contribution for the penalty
                    p.grad.data += p.data.mul((weight_decay * ((1 - penalty) / penalty)))

    def apply_on_theta(self, real_theta):
        """Penalize p.grad using the gap |p - real_p| against the supplied
        master weights; also prints the mean penalty for diagnostics."""
        opt_state = self.optimizer.state
        penatly_arr = []
        with torch.no_grad():
            for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, rp) in zip(pg['params'], rpg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - rp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    penatly_arr.append(torch.mean(penalty).item())
                    p.grad.data /= penalty
                    # weight decay is taken w.r.t. the real (master) weights here
                    p.grad.data += rp.data.mul((weight_decay * ((1 - penalty) / penalty)))
        print('mean_penaltly', np.mean(penatly_arr))

    def update_running_stats(self):
        # statistics are maintained elsewhere for Adam; nothing to do
        pass
def sampleInhomogeneousPoissonProc(tt, lam):
    # Sample event times of an inhomogeneous Poisson process with rate values
    # `lam` given on the time grid `tt` (same length), via time-rescaling:
    #   1. integrate the rate with the trapezoid rule -> cumulative intensity
    #   2. draw N ~ Poisson(total intensity) and N sorted uniforms in [0, total]
    #   3. map each uniform back to a time by piecewise-linear inversion of
    #      the cumulative intensity.
    # Returns a sorted array S of N event times.
    N_t = len(tt)
    dt = np.diff(tt)
    dlam = np.diff(lam)
    # trapezoid area per interval: 0.5*dt*(lam[i] + lam[i+1]),
    # rewritten as 0.5*dt*(2*lam[1:] - dlam)
    trapLam = ((0.5 * dt) * ((2 * lam[1:]) - dlam))
    cumLam = np.ravel(np.cumsum(trapLam))
    cumLam = np.hstack((np.array([0.0]), cumLam))
    # total intensity over [tt[0], tt[-1]]
    intLam = cumLam[(- 1)]
    N = np.random.poisson(intLam)
    Q = np.random.uniform(0, intLam, size=N)
    Q = np.sort(Q)
    S = np.zeros(N)
    # Q is sorted, so the inversion scan can resume where it left off
    tt_off = 0
    for (n, q) in enumerate(Q):
        while (q > cumLam[tt_off]):
            tt_off += 1
            assert (tt_off < N_t), 'ERROR: inverted spike time exceeds time limit!'
        q_lb = cumLam[(tt_off - 1)]
        q_ub = cumLam[tt_off]
        # fractional position of q within its bracketing cumulative-intensity cell
        q_frac = ((q - q_lb) / (q_ub - q_lb))
        assert ((q_frac >= 0.0) and (q_frac <= 1.0)), 'ERROR: invalid spike index'
        tt_lb = tt[(tt_off - 1)]
        tt_ub = tt[tt_off]
        # linear interpolation back to a time within the grid cell
        S[n] = (tt_lb + (q_frac * (tt_ub - tt_lb)))
    return S
def prepare_align(config):
    """Convert a LibriTTS-style corpus (speaker/chapter/*.wav plus
    *.normalized.txt) into the raw_path layout used for alignment:
    peak-normalized 16-bit wavs and cleaned .lab transcript files.
    """
    in_dir = config['path']['corpus_path']
    out_dir = config['path']['raw_path']
    sampling_rate = config['preprocessing']['audio']['sampling_rate']
    max_wav_value = config['preprocessing']['audio']['max_wav_value']
    cleaners = config['preprocessing']['text']['text_cleaners']
    for speaker in tqdm(os.listdir(in_dir)):
        for chapter in os.listdir(os.path.join(in_dir, speaker)):
            for file_name in os.listdir(os.path.join(in_dir, speaker, chapter)):
                if not file_name.endswith('.wav'):
                    continue
                base_name = file_name[:(- 4)]
                text_path = os.path.join(in_dir, speaker, chapter, '{}.normalized.txt'.format(base_name))
                wav_path = os.path.join(in_dir, speaker, chapter, '{}.wav'.format(base_name))
                with open(text_path) as f:
                    text = f.readline().strip('\n')
                text = _clean_text(text, cleaners)
                os.makedirs(os.path.join(out_dir, speaker), exist_ok=True)
                (wav, _) = librosa.load(wav_path, sr=sampling_rate)
                # Peak-normalize with a vectorized max instead of the Python
                # builtin max() over the array (same value, C-speed).
                # NOTE(review): a perfectly silent clip would divide by zero
                # here, exactly as in the original — confirm inputs.
                wav = (wav / np.abs(wav).max()) * max_wav_value
                wavfile.write(os.path.join(out_dir, speaker, '{}.wav'.format(base_name)), sampling_rate, wav.astype(np.int16))
                with open(os.path.join(out_dir, speaker, '{}.lab'.format(base_name)), 'w') as f1:
                    f1.write(text)
# NOTE(review): the original decorator line was truncated to
# "_utils.test(require=...)"; reconstructed as @test_utils.test — confirm
# against the surrounding test module's convention.
@test_utils.test(require=ti.extension.bls)
def test_scatter_1d_trivial():
    """1-D scatter through block-local storage: single-point stencil, block 32."""
    _test_bls_stencil(1, 128, bs=32, stencil=((0,),), scatter=True)
class PVTv2(nn.Module):
    """Pyramid Vision Transformer v2 classification backbone.

    Four sequential stages: each is an overlapping patch embedding followed
    by a stack of transformer blocks and a LayerNorm. A linear head maps the
    mean-pooled tokens of the last stage to class logits.
    """
    def __init__(self, model_name: str='B1', pretrained: str=None, num_classes: int=1000, *args, **kwargs) -> None:
        super().__init__()
        assert (model_name in pvtv2_settings.keys()), f'PVTv2 model name should be in {list(pvtv2_settings.keys())}'
        # Variants only differ by per-stage depths; channel widths are fixed.
        depths = pvtv2_settings[model_name]
        embed_dims = [64, 128, 320, 512]
        drop_path_rate = 0.1
        self.patch_embed1 = PatchEmbed(3, embed_dims[0], 7, 4)
        self.patch_embed2 = PatchEmbed(embed_dims[0], embed_dims[1], 3, 2)
        self.patch_embed3 = PatchEmbed(embed_dims[1], embed_dims[2], 3, 2)
        self.patch_embed4 = PatchEmbed(embed_dims[2], embed_dims[3], 3, 2)
        # Stochastic-depth rates increase linearly across all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        self.block1 = nn.ModuleList([Block(embed_dims[0], 1, 8, 8, dpr[(cur + i)]) for i in range(depths[0])])
        self.norm1 = nn.LayerNorm(embed_dims[0])
        cur += depths[0]
        self.block2 = nn.ModuleList([Block(embed_dims[1], 2, 4, 8, dpr[(cur + i)]) for i in range(depths[1])])
        self.norm2 = nn.LayerNorm(embed_dims[1])
        cur += depths[1]
        self.block3 = nn.ModuleList([Block(embed_dims[2], 5, 2, 4, dpr[(cur + i)]) for i in range(depths[2])])
        self.norm3 = nn.LayerNorm(embed_dims[2])
        cur += depths[2]
        self.block4 = nn.ModuleList([Block(embed_dims[3], 8, 1, 4, dpr[(cur + i)]) for i in range(depths[3])])
        self.norm4 = nn.LayerNorm(embed_dims[3])
        self.head = nn.Linear(embed_dims[(- 1)], num_classes)
        self._init_weights(pretrained)
    def _init_weights(self, pretrained: str=None) -> None:
        """Load pretrained weights when a path is given; otherwise apply
        Xavier init (head weights zeroed, LayerNorm to identity)."""
        if pretrained:
            try:
                self.load_state_dict(torch.load(pretrained, map_location='cpu'))
            except RuntimeError:
                # Shape mismatch on the classifier: drop the last two entries
                # (head weight/bias) and load the rest non-strictly.
                pretrained_dict = torch.load(pretrained, map_location='cpu')
                pretrained_dict.popitem()
                pretrained_dict.popitem()
                self.load_state_dict(pretrained_dict, strict=False)
            finally:
                print(f'Loaded imagenet pretrained from {pretrained}')
        else:
            for (n, m) in self.named_modules():
                if isinstance(m, nn.Linear):
                    if n.startswith('head'):
                        nn.init.zeros_(m.weight)
                        nn.init.zeros_(m.bias)
                    else:
                        nn.init.xavier_uniform_(m.weight)
                        if (m.bias is not None):
                            nn.init.zeros_(m.bias)
                elif isinstance(m, nn.LayerNorm):
                    nn.init.ones_(m.weight)
                    nn.init.zeros_(m.bias)
                elif isinstance(m, nn.Conv2d):
                    nn.init.xavier_uniform_(m.weight)
                    if (m.bias is not None):
                        nn.init.zeros_(m.bias)
    def forward(self, x: Tensor) -> Tensor:
        """Run the four stages and return class logits of shape (B, num_classes)."""
        B = x.shape[0]
        (x, H, W) = self.patch_embed1(x)
        for blk in self.block1:
            x = blk(x, H, W)
        x = self.norm1(x)
        # Tokens back to a (B, C, H, W) feature map for the next patch embed.
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        (x, H, W) = self.patch_embed2(x)
        for blk in self.block2:
            x = blk(x, H, W)
        x = self.norm2(x)
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        (x, H, W) = self.patch_embed3(x)
        for blk in self.block3:
            x = blk(x, H, W)
        x = self.norm3(x)
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        (x, H, W) = self.patch_embed4(x)
        for blk in self.block4:
            x = blk(x, H, W)
        x = self.norm4(x)
        # Global average over the token dimension, then classify.
        x = self.head(x.mean(dim=1))
        return x
return x |
def test_complexity_print_changed_only():
    """Non-regression: enabling print_changed_only must not change how many
    times nested estimators' __repr__ is invoked."""
    class DummyEstimator(TransformerMixin, BaseEstimator):
        nb_times_repr_called = 0

        def __init__(self, estimator=None):
            self.estimator = estimator

        def __repr__(self):
            DummyEstimator.nb_times_repr_called += 1
            return super().__repr__()

        def transform(self, X, copy=None):
            return X

    estimator = DummyEstimator(make_pipeline(DummyEstimator(DummyEstimator()), DummyEstimator(), 'passthrough'))

    def count_repr_calls(changed_only):
        # Reset the shared counter, trigger one repr under the given config,
        # and report how many nested __repr__ calls it caused.
        DummyEstimator.nb_times_repr_called = 0
        with config_context(print_changed_only=changed_only):
            repr(estimator)
        return DummyEstimator.nb_times_repr_called

    assert count_repr_calls(False) == count_repr_calls(True)
def train(epoch, train_idxs):
    """Run one training epoch over the rows of the module-level feature
    arrays selected by *train_idxs*, updating ``model`` in place.

    Relies on module globals: model, optimizer, criterion, config,
    text_features, text_targets. Stores the raw count of correct
    predictions in the global ``train_acc`` and prints an epoch summary.
    """
    global lr, train_acc
    model.train()
    batch_idx = 1
    total_loss = 0
    correct = 0
    X_train = text_features[train_idxs]
    Y_train = text_targets[train_idxs]
    for i in range(0, X_train.shape[0], config['batch_size']):
        # The final slice may be shorter than batch_size.
        if ((i + config['batch_size']) > X_train.shape[0]):
            (x, y) = (X_train[i:], Y_train[i:])
        else:
            (x, y) = (X_train[i:(i + config['batch_size'])], Y_train[i:(i + config['batch_size'])])
        if config['cuda']:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True).cuda(), Variable(torch.from_numpy(y)).cuda())
        else:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True), Variable(torch.from_numpy(y)))
        optimizer.zero_grad()
        output = model(x)
        # Argmax prediction; accuracy is tallied before the weight update.
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(y.data.view_as(pred)).cpu().sum()
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        batch_idx += 1
        total_loss += loss.item()
    train_acc = correct
    print('Train Epoch: {:2d}\t Learning rate: {:.4f}\tLoss: {:.6f}\t Accuracy: {}/{} ({:.0f}%)\n '.format((epoch + 1), config['learning_rate'], total_loss, correct, X_train.shape[0], ((100.0 * correct) / X_train.shape[0])))
def get_model_para_number(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(torch.numel(p) for p in model.parameters())
class Poisson(_SimpleDistributionMixin):
    """Poisson distribution wrapper over the active tensor backend."""
    def __init__(self, rate):
        # rate: the Poisson mean (expected event count), possibly tensor-valued.
        (tensorlib, _) = get_backend()
        self.rate = rate
        self._pdf = tensorlib.poisson_dist(rate)
    def expected_data(self):
        """Return the distribution's expected value (equal to the rate)."""
        return self.rate
class Identity(nn.Module):
    """No-op module that returns its input unchanged.

    Useful as a placeholder wherever a layer slot must be filled but no
    transformation is wanted.
    """
    def __init__(self):
        # Bug fix: the original __init__ was a bare `pass` and never called
        # super().__init__(), leaving nn.Module's internals (_parameters,
        # _buffers, hooks) uninitialized, so invoking the module via
        # __call__ raised AttributeError.
        super().__init__()

    def forward(self, x):
        """Return *x* unchanged."""
        return x
# NOTE(review): the bare call below looks like a registry decorator that
# lost its '@' prefix during decompilation (e.g. '@RECOGNIZERS.register_module()')
# — confirm against the original source.
_module()
class Recognizer3D_TL(BaseRecognizer):
    """3D action recognizer trained with token-level distillation: in
    addition to the classification loss, per-token predictions are matched
    against a frozen teacher network's outputs."""
    def __init__(self, backbone, cls_head=None, neck=None, train_cfg=None, test_cfg=None):
        super(Recognizer3D_TL, self).__init__(backbone, cls_head, neck, train_cfg, test_cfg)
        # The teacher only supplies targets; it is frozen and kept in eval mode.
        self.teacher = load_teacher_model().cuda()
        self.teacher.eval()
        for param in self.teacher.parameters():
            param.requires_grad = False
    def forward_train(self, imgs, labels, **kwargs):
        """Compute the training losses (classification + distillation)."""
        assert self.with_cls_head
        # Fold the clip dimension into the batch dimension.
        imgs = imgs.reshape((((- 1),) + imgs.shape[2:]))
        losses = dict()
        x = self.extract_feat(imgs)
        if self.with_neck:
            (x, loss_aux) = self.neck(x, labels.squeeze())
            losses.update(loss_aux)
        (cls_score, token_preds) = self.cls_head(x)
        teacher_preds = self.teacher(imgs)
        (B, D, H, W, C) = teacher_preds.shape
        if (token_preds.shape[1] != teacher_preds.shape[1]):
            # Teacher emits more temporal tokens than the student: average
            # groups of `ratio` consecutive teacher tokens to match.
            ratio = (teacher_preds.shape[1] // token_preds.shape[1])
            teacher_preds = teacher_preds.reshape(B, ratio, (D // ratio), H, W, C)
            teacher_preds = teacher_preds.mean(dim=1)
        gt_labels = labels.squeeze()
        loss_cls = self.cls_head.loss(cls_score, gt_labels, token_preds=token_preds.reshape((- 1), C), teacher_preds=teacher_preds.reshape((- 1), C), gamma=0.5)
        losses.update(loss_cls)
        return losses
    def _do_test(self, imgs):
        """Shared test-time forward: returns clip-averaged class scores, or
        pooled features when ``self.feature_extraction`` is set."""
        batches = imgs.shape[0]
        num_segs = imgs.shape[1]
        imgs = imgs.reshape((((- 1),) + imgs.shape[2:]))
        if (self.max_testing_views is not None):
            # Process views in chunks of max_testing_views to bound memory.
            total_views = imgs.shape[0]
            assert (num_segs == total_views), 'max_testing_views is only compatible with batch_size == 1'
            view_ptr = 0
            feats = []
            while (view_ptr < total_views):
                batch_imgs = imgs[view_ptr:(view_ptr + self.max_testing_views)]
                x = self.extract_feat(batch_imgs)
                if self.with_neck:
                    (x, _) = self.neck(x)
                feats.append(x)
                view_ptr += self.max_testing_views
            if isinstance(feats[0], tuple):
                # Multi-output backbone: concatenate each output separately.
                len_tuple = len(feats[0])
                feat = [torch.cat([x[i] for x in feats]) for i in range(len_tuple)]
                feat = tuple(feat)
            else:
                feat = torch.cat(feats)
        else:
            feat = self.extract_feat(imgs)
            if self.with_neck:
                (feat, _) = self.neck(feat)
        if self.feature_extraction:
            # Spatio-temporal average pool, then mean over clips.
            avg_pool = nn.AdaptiveAvgPool3d(1)
            if isinstance(feat, tuple):
                feat = [avg_pool(x) for x in feat]
                feat = torch.cat(feat, axis=1)
            else:
                feat = avg_pool(feat)
            feat = feat.reshape((batches, num_segs, (- 1)))
            feat = feat.mean(axis=1)
            return feat
        assert self.with_cls_head
        (cls_score, _) = self.cls_head(feat)
        cls_score = self.average_clip(cls_score, num_segs)
        return cls_score
    def forward_test(self, imgs):
        """Test-time forward; returns class scores as a numpy array."""
        return self._do_test(imgs).cpu().numpy()
    def forward_dummy(self, imgs, softmax=False):
        """Lightweight forward used e.g. for FLOPs computation."""
        assert self.with_cls_head
        imgs = imgs.reshape((((- 1),) + imgs.shape[2:]))
        x = self.extract_feat(imgs)
        if self.with_neck:
            (x, _) = self.neck(x)
        (outs, _) = self.cls_head(x)
        if softmax:
            outs = nn.functional.softmax(outs)
        return (outs,)
    def forward_gradcam(self, imgs):
        """Forward pass for Grad-CAM visualization."""
        assert self.with_cls_head
        return self._do_test(imgs)
def Zero_Masking(input_tensor, mask_org):
    """Return a new tensor equal to *input_tensor* elementwise-multiplied by
    *mask_org* (zeroing masked entries); the input is left untouched."""
    masked = input_tensor * mask_org
    return masked
def model_fields(model, only=None, exclude=None, field_args=None, converter=None):
    """Build a dict of form fields from a Django-style model's fields.

    *only*/*exclude* filter by attribute name (*only* wins when both are
    given); *field_args* supplies per-field keyword overrides passed to the
    converter. Fields the converter maps to None are dropped.
    """
    converter = converter or ModelConverter()
    field_args = field_args or {}
    pairs = [(f.attname, f) for f in model._meta.fields]
    if only:
        pairs = [(n, f) for (n, f) in pairs if n in only]
    elif exclude:
        pairs = [(n, f) for (n, f) in pairs if n not in exclude]
    result = {}
    for (attname, model_field) in pairs:
        converted = converter.convert(model, model_field, field_args.get(attname))
        if converted is not None:
            result[attname] = converted
    return result
class Function_Subprogram_Node(FNode):
    """AST node for a Fortran FUNCTION subprogram.

    ``_attributes`` carry the function name, return type and result-variable
    name; ``_fields`` hold the argument list plus the specification and
    execution parts as child nodes.
    """
    _attributes = ('name', 'type', 'ret_name')
    _fields = ('args', 'specification_part', 'execution_part')
# NOTE(review): the bare '.parametrize(...)' below looks like a
# '@pytest.mark.parametrize' decorator that lost its prefix during
# decompilation — confirm against the original source.
.parametrize('k_genuine, k_impostor, T_test', [(2, 2, [[0, 1, 3], [0, 1, 4], [0, 2, 3], [0, 2, 4], [1, 0, 3], [1, 0, 4], [1, 2, 3], [1, 2, 4], [2, 0, 3], [2, 0, 4], [2, 1, 3], [2, 1, 4], [3, 4, 1], [3, 4, 2], [3, 5, 1], [3, 5, 2], [4, 3, 1], [4, 3, 2], [4, 5, 1], [4, 5, 2], [5, 3, 1], [5, 3, 2], [5, 4, 1], [5, 4, 2]]), (1, 3, [[0, 1, 3], [0, 1, 4], [0, 1, 5], [1, 0, 3], [1, 0, 4], [1, 0, 5], [2, 1, 3], [2, 1, 4], [2, 1, 5], [3, 4, 0], [3, 4, 1], [3, 4, 2], [4, 3, 0], [4, 3, 1], [4, 3, 2], [5, 4, 0], [5, 4, 1], [5, 4, 2]]), (1, 2, [[0, 1, 3], [0, 1, 4], [1, 0, 3], [1, 0, 4], [2, 1, 3], [2, 1, 4], [3, 4, 1], [3, 4, 2], [4, 3, 1], [4, 3, 2], [5, 4, 1], [5, 4, 2]])])
def test_generate_knntriplets_under_edge(k_genuine, k_impostor, T_test):
    # Edge case: classes barely have enough members for the requested
    # neighbor counts; the point labeled -1 must be ignored entirely.
    X = np.array([[0, 0], [2, 2], [4, 4], [8, 8], [16, 16], [32, 32], [33, 33]])
    y = np.array([1, 1, 1, 2, 2, 2, (- 1)])
    T = Constraints(y).generate_knntriplets(X, k_genuine, k_impostor)
    assert np.array_equal(sorted(T.tolist()), T_test)
def rank2_ZZ(n=400, min=0, max=(2 ** 64), system='sage'):
    """Benchmark: CPU time to compute the rank of a random (n+10) x n
    integer matrix with entries uniform in [min, max].

    ``system`` selects the implementation ('sage' or 'magma'); returns the
    elapsed CPU seconds. Raises ValueError for any other system.

    NOTE(review): the parameters shadow the builtins ``min``/``max``; kept
    as-is for backward compatibility with keyword callers.
    """
    if (system == 'sage'):
        A = random_matrix(ZZ, (n + 10), n, x=min, y=(max + 1))
        t = cputime()
        v = A.rank()
        return cputime(t)
    elif (system == 'magma'):
        code = ('\nn := %s;\nA := RMatrixSpace(IntegerRing(), n+10, n)![Random(%s,%s) : i in [1..n*(n+10)]];\nt := Cputime();\nK := Rank(A);\ns := Cputime(t);\n' % (n, min, max))
        # `verbose` is a module-level flag controlling benchmark chatter.
        if verbose:
            print(code)
        magma.eval(code)
        return float(magma.eval('s'))
    else:
        raise ValueError(('unknown system "%s"' % system))
def main():
    """Entry point: parse CLI flags, overlay them onto the JSON config,
    build the HIV dataset and network parameters, then launch training.

    Every CLI flag is optional; when given it overrides the corresponding
    entry of the JSON config file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help='Please give a config.json file with training/model/data/param details')
    parser.add_argument('--gpu_id', help='Please give a value for gpu id')
    parser.add_argument('--dataset', help='Please give a value for dataset name')
    parser.add_argument('--seed', help='Please give a value for seed')
    parser.add_argument('--epochs', help='Please give a value for epochs')
    parser.add_argument('--batch_size', help='Please give a value for batch_size')
    parser.add_argument('--init_lr', help='Please give a value for init_lr')
    parser.add_argument('--lr_reduce_factor', help='Please give a value for lr_reduce_factor')
    parser.add_argument('--lr_schedule_patience', help='Please give a value for lr_schedule_patience')
    parser.add_argument('--min_lr', help='Please give a value for min_lr')
    parser.add_argument('--weight_decay', help='Please give a value for weight_decay')
    parser.add_argument('--print_epoch_interval', help='Please give a value for print_epoch_interval')
    parser.add_argument('--L', help='Please give a value for L')
    parser.add_argument('--hidden_dim', help='Please give a value for hidden_dim')
    parser.add_argument('--out_dim', help='Please give a value for out_dim')
    parser.add_argument('--residual', help='Please give a value for residual')
    parser.add_argument('--edge_feat', help='Please give a value for edge_feat')
    parser.add_argument('--readout', help='Please give a value for readout')
    parser.add_argument('--in_feat_dropout', help='Please give a value for in_feat_dropout')
    parser.add_argument('--dropout', help='Please give a value for dropout')
    parser.add_argument('--graph_norm', help='Please give a value for graph_norm')
    parser.add_argument('--batch_norm', help='Please give a value for batch_norm')
    parser.add_argument('--max_time', help='Please give a value for max_time')
    parser.add_argument('--expid', help='Experiment id.')
    parser.add_argument('--type_net', default='simple', help='Type of net')
    parser.add_argument('--lap_norm', default='none', help='Laplacian normalisation')
    parser.add_argument('--aggregators', type=str, help='Aggregators to use.')
    parser.add_argument('--scalers', type=str, help='Scalers to use.')
    parser.add_argument('--towers', type=int, default=5, help='Towers to use.')
    parser.add_argument('--divide_input_first', type=bool, help='Whether to divide the input in first layer.')
    parser.add_argument('--divide_input_last', type=bool, help='Whether to divide the input in last layers.')
    parser.add_argument('--edge_dim', type=int, help='Size of edge embeddings.')
    parser.add_argument('--pretrans_layers', type=int, help='pretrans_layers.')
    parser.add_argument('--posttrans_layers', type=int, help='posttrans_layers.')
    parser.add_argument('--pos_enc_dim', default=0, type=int, help='Positional encoding dimension')
    args = parser.parse_args()
    # Base configuration comes from the JSON file; CLI values override it.
    with open(args.config) as f:
        config = json.load(f)
    if (args.gpu_id is not None):
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    if (args.dataset is not None):
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    dataset = HIVDataset(DATASET_NAME, pos_enc_dim=int(args.pos_enc_dim), norm=args.lap_norm)
    # Training hyper-parameter overrides.
    params = config['params']
    if (args.seed is not None):
        params['seed'] = int(args.seed)
    if (args.epochs is not None):
        params['epochs'] = int(args.epochs)
    if (args.batch_size is not None):
        params['batch_size'] = int(args.batch_size)
    if (args.init_lr is not None):
        params['init_lr'] = float(args.init_lr)
    if (args.lr_reduce_factor is not None):
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if (args.lr_schedule_patience is not None):
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if (args.min_lr is not None):
        params['min_lr'] = float(args.min_lr)
    if (args.weight_decay is not None):
        params['weight_decay'] = float(args.weight_decay)
    if (args.print_epoch_interval is not None):
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if (args.max_time is not None):
        params['max_time'] = float(args.max_time)
    # Network architecture overrides.
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if (args.L is not None):
        net_params['L'] = int(args.L)
    if (args.hidden_dim is not None):
        net_params['hidden_dim'] = int(args.hidden_dim)
    if (args.out_dim is not None):
        net_params['out_dim'] = int(args.out_dim)
    if (args.residual is not None):
        net_params['residual'] = (True if (args.residual == 'True') else False)
    if (args.edge_feat is not None):
        net_params['edge_feat'] = (True if (args.edge_feat == 'True') else False)
    if (args.readout is not None):
        net_params['readout'] = args.readout
    if (args.in_feat_dropout is not None):
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if (args.dropout is not None):
        net_params['dropout'] = float(args.dropout)
    if (args.graph_norm is not None):
        net_params['graph_norm'] = (True if (args.graph_norm == 'True') else False)
    if (args.batch_norm is not None):
        net_params['batch_norm'] = (True if (args.batch_norm == 'True') else False)
    if (args.aggregators is not None):
        net_params['aggregators'] = args.aggregators
    if (args.scalers is not None):
        net_params['scalers'] = args.scalers
    if (args.towers is not None):
        net_params['towers'] = args.towers
    if (args.divide_input_first is not None):
        net_params['divide_input_first'] = args.divide_input_first
    if (args.divide_input_last is not None):
        net_params['divide_input_last'] = args.divide_input_last
    if (args.edge_dim is not None):
        net_params['edge_dim'] = args.edge_dim
    if (args.pretrans_layers is not None):
        net_params['pretrans_layers'] = args.pretrans_layers
    if (args.posttrans_layers is not None):
        net_params['posttrans_layers'] = args.posttrans_layers
    if (args.type_net is not None):
        net_params['type_net'] = args.type_net
    if (args.pos_enc_dim is not None):
        net_params['pos_enc_dim'] = args.pos_enc_dim
    # Average node-degree statistics over the training graphs, used by
    # degree-scaler aggregation in the network.
    D = torch.cat([torch.sparse.sum(g.adjacency_matrix(transpose=True), dim=(- 1)).to_dense() for g in dataset.train.graph_lists])
    net_params['avg_d'] = dict(lin=torch.mean(D), exp=torch.mean((torch.exp(torch.div(1, D)) - 1)), log=torch.mean(torch.log((D + 1))))
    net_params['total_param'] = view_model_param(net_params)
    train_val_pipeline(dataset, params, net_params)
def din_model_fn(features, labels, mode, params):
    """TF Estimator model_fn for a Deep Interest Network (DIN) CTR model.

    Builds dense/categorical/target/history-sequence inputs, applies DIN
    attention over the behaviour sequence, and feeds the concatenation
    through a fully-connected tower to a sigmoid output. Returns the
    EstimatorSpec appropriate for PREDICT / EVAL / TRAIN modes.
    """
    with tf.variable_scope('dense_input'):
        dense_input = fc.input_layer(features, params['dense_feature_columns'])
    with tf.variable_scope('category_input'):
        category_input = fc.input_layer(features, params['category_feature_columns'])
    with tf.variable_scope('target_input'):
        (target_input, _) = tf.contrib.feature_column.sequence_input_layer(features, params['target_feedid_feature_columns'])
        # Target is a length-1 sequence; drop the time axis.
        target_input = tf.squeeze(target_input, axis=1)
    with tf.variable_scope('his_seq_input'):
        (sequnence_input, sequnence_length) = tf.contrib.feature_column.sequence_input_layer(features, params['sequence_feature_columns'])
    with tf.variable_scope('attention_part'):
        # Attend over the user's history sequence conditioned on the target item.
        attention_output = din_attention(target_input, sequnence_input, sequnence_length, is_softmax=params['use_softmax'])
    concat_all = tf.concat([dense_input, category_input, target_input, attention_output], axis=(- 1))
    with tf.variable_scope('fcn'):
        net = concat_all
        for (i, unit) in enumerate(params['hidden_units']):
            layer_index = (i + 1)
            net = tf.layers.dense(net, unit, activation=None)
            # Activation per config: Dice (DIN paper) or PReLU.
            if (params['activation'] == 'dice'):
                net = dice(net, name=layer_index)
            else:
                net = prelu(net, name=layer_index)
            if params['batch_norm']:
                net = tf.layers.batch_normalization(net, training=(mode == tf.estimator.ModeKeys.TRAIN))
            if (('dropout_rate' in params) and (0.0 < params['dropout_rate'] < 1.0)):
                net = tf.layers.dropout(net, params['dropout_rate'], training=(mode == tf.estimator.ModeKeys.TRAIN))
    logit = tf.layers.dense(net, 1)
    prediction = tf.sigmoid(logit, name='prediction')
    if (mode == tf.estimator.ModeKeys.PREDICT):
        predictions = {'probabilities': prediction}
        export_outputs = {'prediction': tf.estimator.export.PredictOutput(predictions)}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs=export_outputs)
    y = labels['read_comment']
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logit), name='ce_loss')
    if (params['mini_batch_aware_regularization'] and (params['l2_lambda'] > 0)):
        # L2 penalty on the embedding inputs, normalized by batch size.
        embedding_vars = tf.concat([category_input, target_input, attention_output], axis=(- 1))
        l2_loss = ((params['l2_lambda'] * tf.nn.l2_loss(embedding_vars)) / tf.cast(tf.shape(embedding_vars)[0], dtype=tf.float32))
        loss = tf.add_n([loss, l2_loss])
    accuracy = tf.metrics.accuracy(labels=y, predictions=tf.to_float(tf.greater_equal(prediction, 0.5)))
    auc = tf.metrics.auc(labels=y, predictions=prediction)
    metrics = {'eval_accuracy': accuracy, 'eval_auc': auc}
    if (mode == tf.estimator.ModeKeys.EVAL):
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'], beta1=0.9, beta2=0.999, epsilon=1e-08)
    # Run batch-norm moving-average updates alongside the optimizer step.
    update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    assert (mode == tf.estimator.ModeKeys.TRAIN)
    tf.summary.scalar('train_accuracy', accuracy[1])
    tf.summary.scalar('train_auc', auc[1])
    log_hook = tf.train.LoggingTensorHook({'train_loss': loss, 'train_auc': auc[1], 'attention_weights': attention_output}, every_n_iter=100)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[log_hook])
def _convert_example_to_features(example, label_list, max_seq_length, tokenizer):
label_map = {label: i for (i, label) in enumerate(label_list)}
tokens_a = tokenizer.tokenize(example.text_a)
if (len(tokens_a) > (max_seq_length - 2)):
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ((['[CLS]'] + tokens_a) + ['[SEP]'])
segment_ids = ([0] * len(tokens))
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = ([1] * len(input_ids))
padding = ([0] * (max_seq_length - len(input_ids)))
input_ids += padding
input_mask += padding
segment_ids += padding
assert (len(input_ids) == max_seq_length)
assert (len(input_mask) == max_seq_length)
assert (len(segment_ids) == max_seq_length)
label_id = label_map[example.label]
return (torch.tensor(input_ids), torch.tensor(input_mask), torch.tensor(segment_ids), torch.tensor(label_id)) |
def test_custom_rule(testdir, openapi3_base_url):
    """A user-defined stateful rule that raises must make the generated
    pytest run fail and report the rule's drawn data in the output.

    NOTE(review): inside the generated source the bare line
    ``(data=st.just("foo"))`` looks like a ``@rule(...)`` decorator whose
    prefix was lost (``rule`` is imported above it) — confirm against the
    original source before relying on this test.
    """
    testdir.make_test(f'''
from hypothesis.stateful import initialize, rule

schema.base_url = "{openapi3_base_url}"

class APIWorkflow(schema.as_state_machine()):

    def validate_response(self, response, case):
        pass

    (data=st.just("foo"))
    def some(self, data):
        assert 0

TestStateful = APIWorkflow.TestCase
''')
    result = testdir.runpytest()
    # Exactly one failing test, caused by the `assert 0` in the rule.
    result.assert_outcomes(failed=1)
    result.stdout.re_match_lines([".*state.some\\(data='foo'\\)"])
# NOTE(review): the bare '.parametrize(...)' and '_utils.test(...)' lines
# below look like '@pytest.mark.parametrize' / '@test_utils.test' decorators
# whose prefixes were lost during decompilation; likewise the nested `fill`
# presumably carried a '@ti.kernel' decorator — confirm against the
# original source.
.parametrize('dtype', [ti.f32, ti.f64])
.parametrize('solver_type', ['LLT', 'LDLT', 'LU'])
.parametrize('ordering', ['AMD', 'COLAMD'])
_utils.test(arch=ti.x64)
def test_sparse_LLT_solver(dtype, solver_type, ordering):
    # Solve A x = b for a random symmetric positive-definite A with every
    # combination of factorization and fill-reducing ordering, then compare
    # against numpy's dense solver.
    np_dtype = ti.lang.util.to_numpy_type(dtype)
    n = 10
    A = np.random.rand(n, n)
    # A @ A.T + I is symmetric positive definite by construction.
    A_psd = (np.dot(A, A.transpose()) + np.eye(n)).astype(np_dtype)
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dtype)
    b = ti.field(dtype=dtype, shape=n)
    def fill(Abuilder: ti.types.sparse_matrix_builder(), InputArray: ti.types.ndarray(), b: ti.template()):
        for (i, j) in ti.ndrange(n, n):
            Abuilder[(i, j)] += InputArray[(i, j)]
        for i in range(n):
            b[i] = (i + 1)
    fill(Abuilder, A_psd, b)
    A = Abuilder.build()
    solver = ti.linalg.SparseSolver(dtype=dtype, solver_type=solver_type, ordering=ordering)
    solver.analyze_pattern(A)
    solver.factorize(A)
    x = solver.solve(b)
    res = np.linalg.solve(A_psd, b.to_numpy())
    for i in range(n):
        assert (x[i] == test_utils.approx(res[i], rel=1.0))
class omegaconf_no_object_check():
    """Context manager that temporarily disables OmegaConf's primitive-type
    check by monkey-patching ``_utils.is_primitive_type``.

    While active, the predicate reports True for every value; the original
    function is restored on exit.
    """
    def __init__(self):
        # Remember the real predicate so __exit__ can restore it.
        self.old_is_primitive = _utils.is_primitive_type

    def __enter__(self):
        def _always_primitive(_):
            return True
        _utils.is_primitive_type = _always_primitive

    def __exit__(self, exc_type, exc_value, tb):
        _utils.is_primitive_type = self.old_is_primitive
def register_functions_ns3_Config(module, root_module):
    """Register the free functions of the ns3::Config namespace with the
    pybindgen *module* (auto-generated binding boilerplate)."""
    module.add_function('Connect', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('ConnectWithoutContext', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('Disconnect', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('DisconnectWithoutContext', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('GetRootNamespaceObject', 'ns3::Ptr< ns3::Object >', [param('uint32_t', 'i')])
    module.add_function('GetRootNamespaceObjectN', 'std::size_t', [])
    module.add_function('LookupMatches', 'ns3::Config::MatchContainer', [param('std::string', 'path')])
    module.add_function('RegisterRootNamespaceObject', 'void', [param('ns3::Ptr< ns3::Object >', 'obj')])
    module.add_function('Reset', 'void', [])
    module.add_function('Set', 'void', [param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetDefault', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetDefaultFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetGlobal', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetGlobalFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('UnregisterRootNamespaceObject', 'void', [param('ns3::Ptr< ns3::Object >', 'obj')])
    return
def test_keyword_assert():
    """Negative test: ``keyword_assert`` is expected to raise; the caught
    exception is printed and the test succeeds.

    If no exception is raised, the final ``np.allclose`` over a random
    array versus zeros fails, flagging the missing assertion behaviour.
    """
    N.set(128)
    A = np.random.rand(N.get()).astype(np.float32)
    B = np.zeros((N.get(),), dtype=np.float32)
    C = True
    D = True
    try:
        keyword_assert(A, B, C, D)
    except Exception as e:
        # Expected path: the assertion inside keyword_assert fired.
        print(e)
        return True
    assert np.allclose(A, B)
# NOTE(review): the bare call below looks like a registry decorator that
# lost its '@' prefix during decompilation (e.g. '@register_toolkit()') —
# confirm against the original source.
_toolkit()
class Terminal(FunctionToolkit):
    """Toolkit exposing a single terminal-command execution tool.

    The ``*_for_human`` fields are UI-facing labels; the ``*_for_model``
    fields are the name/description presented to the language model.
    """
    name_for_human = 'Terminal command executor'
    description_for_human = 'Executes commands in a terminal.'
    name_for_model = 'Terminal'
    description_for_model = "Executes commands in a terminal on the user's local system. Use it to run valid terminal commands for tasks such as file management, system control, and more"
    tool_classes = [TerminalExecute]
def make_file(file_name):
    """Ensure an empty file exists at *file_name* and return the path.

    An already-existing file is left completely untouched (contents and
    modification time included).
    """
    if not os.path.exists(file_name):
        # Append mode creates the file without writing anything.
        with open(file_name, 'a'):
            pass
    return file_name
class IsProbabilityMatrix(Constraint):
    """Keras weight constraint projecting columns onto the probability simplex.

    Negative entries are zeroed via an indicator mask, then each column is
    renormalized to sum to 1; ``K.epsilon()`` guards against division by
    zero for all-zero columns.
    """
    def __call__(self, w):
        # Zero negatives: multiply by the (w >= 0) indicator.
        w *= K.cast(K.greater_equal(w, 0.0), K.floatx())
        return (w / (K.epsilon() + K.sum(w, axis=0, keepdims=True)))
class Plateau(Benchmark):
    """Plateau optimization test function.

    f(x) = 30 + sum(floor(|x_i|)) on the box [-5.12, 5.12]^N, with global
    minimum f = 30 at x = 0. The surface is piecewise constant, so
    gradient information is uninformative.
    """
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip(([(- 5.12)] * self.N), ([5.12] * self.N)))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 30.0
        self.change_dimensionality = True
    def fun(self, x, *args):
        # Count every evaluation for benchmarking statistics.
        self.nfev += 1
        return (30.0 + sum(floor(abs(x))))
def structure_description(G, latex=False):
    """Return GAP's ``StructureDescription`` of the group *G* as a string.

    GAP labels dihedral groups by their order while Sage uses the degree
    (half the order), so every ``D<2k>`` is rewritten to ``D<k>``. With
    ``latex=True`` the result is additionally converted to LaTeX markup
    (direct/semidirect products, subscripted names, O^± groups).
    """
    import re
    def correct_dihedral_degree(match):
        # GAP's D(order) -> Sage's D(degree) = order / 2.
        return ('%sD%d' % (match.group(1), (int(match.group(2)) // 2)))
    description = str(G._gap_().StructureDescription())
    description = re.sub('(\\A|\\W)D(\\d+)', correct_dihedral_degree, description)
    if (not latex):
        return description
    description = description.replace('x', '\\times').replace(':', '\\rtimes')
    description = re.sub('([A-Za-z]+)([0-9]+)', '\\g<1>_{\\g<2>}', description)
    description = re.sub('O([+-])', 'O^{\\g<1>}', description)
    return description
def check_fuzzer_ready_one(fuzzer):
    """Report whether *fuzzer* has written its 'ready' marker file under
    the module-level OUTPUT/TARGET directory."""
    global ARGS, FUZZERS, TARGET, OUTPUT
    marker = os.path.join(OUTPUT, TARGET, fuzzer, 'ready')
    return os.path.exists(marker)
def destroy_process_group():
    """Tear down the (legacy THD) torch.distributed process group and reset
    the module-level backend/initialization state flags."""
    global _backend
    global _initialized
    torch._C._dist_destroy_process_group()
    _backend = dist_backend.UNDEFINED
    _initialized = 0
class Angle():
    """Angle between two 2-D vectors (any objects exposing .x and .y)."""
    def __init__(self, va, vb):
        self.va = va
        self.vb = vb

    def theta(self):
        """Return the angle between the two vectors, in degrees."""
        dot = ((self.va.x * self.vb.x) + (self.va.y * self.vb.y))
        norms = (math.hypot(self.va.x, self.va.y) * math.hypot(self.vb.x, self.vb.y))
        return math.degrees(math.acos(dot / norms))
# NOTE(review): the bare '.usefixtures(...)' below looks like a
# '@pytest.mark.usefixtures' decorator that lost its prefix during
# decompilation — confirm against the original source.
.usefixtures('num_cpus', 'io_type')
class StandardTests(BaseTest):
    """Shared qubit test suite: subclasses set ``qbt_type``, the data-file
    stem ``file_str``, operator names and sweep parameter, and inherit a
    battery of spectrum/matrix-element/plotting tests driven by reference
    SpectrumData files under DATADIR."""
    def setup_class(cls):
        # Placeholders; concrete subclasses override these before the tests run.
        cls.qbt = None
        cls.qbt_type = None
        cls.file_str = ''
        cls.op1_str = ''
        cls.op2_str = ''
        cls.param_name = ''
        cls.param_list = None
    def test_hamiltonian_is_hermitian(self, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        hamiltonian = self.qbt.hamiltonian()
        # Hermitian iff H equals its conjugate transpose (up to fp noise).
        assert np.isclose(np.max(np.abs((hamiltonian - hamiltonian.conj().T))), 0.0)
    def test_eigenvals(self, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        evals_reference = specdata.energy_table
        return self.eigenvals(io_type, evals_reference)
    def test_eigenvecs(self, io_type):
        testname = ((self.file_str + '_2.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        evecs_reference = specdata.state_table
        return self.eigenvecs(io_type, evecs_reference)
    def test_plot_wavefunction(self, io_type):
        # Skip for qubit classes that do not implement wavefunction plotting.
        if ('plot_wavefunction' not in dir(self.qbt_type)):
            pytest.skip('This is expected, no reason for concern.')
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.qbt.plot_wavefunction(esys=None, which=5, mode='real')
        self.qbt.plot_wavefunction(esys=None, which=9, mode='abs_sqr')
    def test_plot_evals_vs_paramvals(self, num_cpus, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        return self.plot_evals_vs_paramvals(num_cpus, self.param_name, self.param_list)
    def test_get_spectrum_vs_paramvals(self, num_cpus, io_type):
        testname = ((self.file_str + '_4.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.param_list = specdata.param_vals
        evecs_reference = specdata.state_table
        evals_reference = specdata.energy_table
        return self.get_spectrum_vs_paramvals(num_cpus, io_type, self.param_name, self.param_list, evals_reference, evecs_reference)
    def test_matrixelement_table(self, io_type):
        testname = ((self.file_str + '_5.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        matelem_reference = specdata.matrixelem_table
        return self.matrixelement_table(io_type, self.op1_str, matelem_reference)
    def test_plot_matrixelements(self, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.plot_matrixelements(self.op1_str, evals_count=10)
    def test_print_matrixelements(self, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.print_matrixelements(self.op2_str)
    def test_plot_matelem_vs_paramvals(self, num_cpus, io_type):
        testname = ((self.file_str + '_1.') + io_type)
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.plot_matelem_vs_paramvals(num_cpus, self.op1_str, self.param_name, self.param_list, select_elems=[(0, 0), (1, 4), (1, 0)])
    def test_plot_potential(self, io_type):
        # Skip for qubit classes that do not implement potential plotting.
        if ('plot_potential' not in dir(self.qbt_type)):
            pytest.skip('This is expected, no reason for concern.')
        testname = (self.file_str + '_1.hdf5')
        specdata = SpectrumData.create_from_file((DATADIR + testname))
        self.qbt = self.qbt_type(**specdata.system_params)
        self.qbt.plot_potential()
class Few_Shot_CLI(LightningCLI):
    """LightningCLI specialization for few-shot learning experiments.

    Adds CLI arguments for test-only mode, model selection, pretrained
    loading (optionally backbone-only, with CLIP weight interpolation),
    and repeated testing with a 95% confidence interval.
    """
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        parser.add_argument('is_test', type=bool, default=False, help='whether in testing only mode')
        parser.add_argument('model_name', type=str, default='PN', help='The model name to train on. It should match the file name that contains the model.')
        parser.add_argument('load_pretrained', type=bool, default=False, help='whether load pretrained model. This is is different from resume_from_checkpoint that loads everything from a breakpoint.')
        parser.add_argument('pre_trained_path', type=str, default='', help='The path of pretrained model. For testing only.')
        parser.add_argument('load_backbone_only', type=bool, default=False, help='whether only load the backbone.')
        parser.add_argument('num_test', type=int, default=5, help='The number of processes of implementing testing.\\\n The average accuracy and 95% confidence interval across\\\n all repeated processes will be calculated.')
        parser.add_argument('seed', type=int, default=5, help='The seed of training and testing.')
    def parse_arguments(self) -> None:
        # Skip jsonargparse validation so the extra arguments pass through.
        self.config = self.parser.parse_args(_skip_check=True)
    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        # Persist the effective config alongside the run's logs.
        log_dir = (trainer.log_dir or trainer.default_root_dir)
        config_path = os.path.join(log_dir, self.config_filename)
        self.parser.save(self.config, config_path, skip_none=False, skip_check=True)
    def before_instantiate_classes(self) -> None:
        # Resolve the LightningModule class from the chosen model name.
        self.model_class = get_module(self.config['model_name'])
    def before_fit(self):
        """Optionally load pretrained weights; for CLIP backbones the loaded
        weights are interpolated with the zero-shot model (WiSE-FT style)."""
        if self.config['load_pretrained']:
            if ('clip' in self.config['model']['backbone_name']):
                if (self.config['pre_trained_path'] == 'None'):
                    pass
                else:
                    # Keep a zero-shot copy to merge with the fine-tuned weights.
                    zs_clip = deepcopy(self.model)
                    state = torch.load(self.config['pre_trained_path'])['state_dict']
                    if self.config['load_backbone_only']:
                        state = utils.preserve_key(state, 'backbone')
                        self.model.backbone.load_state_dict(state)
                    else:
                        self.model.load_state_dict(state)
                    self.model = wise_merge(zs_clip, self.model)
                    del zs_clip
            else:
                state = torch.load(self.config['pre_trained_path'])['state_dict']
                if self.config['load_backbone_only']:
                    state = utils.preserve_key(state, 'backbone')
                    self.model.backbone.load_state_dict(state)
                else:
                    self.model.load_state_dict(state)
    def fit(self):
        # Training is skipped entirely in test-only mode.
        if self.config['is_test']:
            pass
        else:
            self.trainer.fit(**self.fit_kwargs)
    def after_fit(self):
        """In test mode, run testing ``num_test`` times and store the mean
        accuracy with its 95% confidence interval as JSON."""
        seed_everything(self.config['seed'])
        if self.config['is_test']:
            acc_list = []
            for _ in range(self.config['num_test']):
                result = self.trainer.test(self.model, datamodule=self.datamodule)
                acc_list.append((result[0]['test/acc'] * 100))
            acc_list = np.array(acc_list)
            mean = np.mean(acc_list)
            # 1.96 sigma ~ 95% confidence interval assuming normality.
            confidence_interval = (np.std(acc_list) * 1.96)
            with open(os.path.join(self.trainer.log_dir, 'test_result.json'), 'w') as f:
                json.dump({'mean': mean, 'confidence interval': confidence_interval}, f)
        else:
            pass
class VizWizEvalDataset(VQAEvalDataset):
    """Evaluation dataset for VizWiz VQA, resolving split-specific image folders."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]
        # Images live under .../val or .../test rather than .../images.
        split = 'val' if 'val' in ann['image'] else 'test'
        image_path = os.path.join(self.vis_root.replace('images', split), ann['image'])
        image = self.vis_processor(Image.open(image_path).convert('RGB'))
        question = self.text_processor(ann['question'])
        if 'answers' not in ann:
            # Test split: no ground-truth answers available.
            # NOTE(review): question_id is ann['image'] here but ann['instance_id']
            # in the answered branch — presumably intentional; verify with callers.
            return {'image': image, 'question_id': ann['image'], 'instance_id': ann['instance_id'], 'text_input': question}
        raw_answers = [entry['answer'] for entry in ann['answers']]
        num_annotators = len(raw_answers)
        counts = Counter(raw_answers)
        answers = list(set(raw_answers))
        # Weight each unique answer by the fraction of annotators who gave it.
        weights = [counts[ans] / num_annotators for ans in answers]
        return {'image': image, 'text_input': question, 'instance_id': ann['instance_id'], 'question_id': ann['instance_id'], 'weights': weights, 'answer': answers}
class OSBlockINv3(nn.Module):
    """Omni-scale residual block with instance normalization applied both
    before and after the residual addition."""

    def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
        super(OSBlockINv3, self).__init__()
        assert T >= 1
        assert out_channels >= reduction and out_channels % reduction == 0
        mid_channels = out_channels // reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        # One lightweight conv stream per scale t = 1..T.
        self.conv2 = nn.ModuleList([LightConvStream(mid_channels, mid_channels, t) for t in range(1, T + 1)])
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
        # 1x1 projection only when channel counts differ.
        self.downsample = Conv1x1Linear(in_channels, out_channels) if in_channels != out_channels else None
        self.IN_in = nn.InstanceNorm2d(out_channels, affine=NORM_AFFINE)
        self.IN_out = nn.InstanceNorm2d(out_channels, affine=NORM_AFFINE)

    def forward(self, x):
        residual = x
        feat = self.conv1(x)
        # Gated sum over all multi-scale streams.
        fused = 0
        for stream in self.conv2:
            fused = fused + self.gate(stream(feat))
        out = self.IN_in(self.conv3(fused))
        if self.downsample is not None:
            residual = self.downsample(residual)
        out = self.IN_out(out + residual)
        return F.relu(out)
def test_Unions_enum_null():
    """Avro enum/null union data round-trips to the expected Python list."""
    expected = ['TWO', None, 'ONE', None, 'FOUR', None, 'THREE']
    path = os.path.join(SAMPLES_DIR, 'enum_null_test_data.avro')
    assert ak.from_avro_file(file=path).to_list() == expected
class MobileBertForMultipleChoice(metaclass=DummyObject):
    """Import placeholder: raises an informative error (via requires_backends)
    when the required 'torch' backend is not installed."""
    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def main():
    """Sum the 'Query Times' column across the seven attack_mhm result CSVs
    (400-row chunks named ./attack_mhm_<start>_<end>.csv) and print the total.
    """
    total_count = 0
    for index in range(7):
        # f-string replaces the original unreadable nested '+' concatenation;
        # the unused greedy_succ counter was removed.
        path = f'./attack_mhm_{index * 400}_{(index + 1) * 400}.csv'
        with open(path) as rf:
            for row in csv.DictReader(rf):
                total_count += int(row['Query Times'])
    print(total_count)
class TranslateY(DauphinTransform):
    """Vertically translate an image by a magnitude derived from `level`,
    up to `max_degree` pixels, with a random sign."""

    def __init__(self, name=None, prob=1.0, level=0, max_degree=10):
        self.max_degree = max_degree
        self.value_range = (0, self.max_degree)
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        offset = categorize_value(self.level, self.value_range, 'float')
        # Flip the translation direction half the time.
        offset = -offset if random.random() > 0.5 else offset
        shifted = pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, 0, 1, offset))
        return shifted, label

    def __repr__(self):
        return f'<Transform ({self.name}), prob={self.prob}, level={self.level}, max_degree={self.max_degree}>'
class Preprocesser(object):
    """Base class for building train/dev/test lemma splits.

    Subclasses supply the data readers and the edit-script matcher; this class
    filters out lemmas that are prefixes/suffixes of other lemmas with matching
    edit scripts, then splits and writes the result.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    verify loop nesting against the original.
    """

    def __init__(self, opt):
        # opt: namespace with nchar, prefix, suffix, unimorph, past, outdir, seed, split.
        self.opt = opt

    def read_unimorph_data(self, file):
        # Subclass hook: return (data, edit_script) for the UniMorph file.
        raise NotImplementedError

    def read_data(self, file):
        # Subclass hook: return (data, edit_script) for the past-tense file.
        raise NotImplementedError

    def match_edit_script(self, short_script, long_script):
        # Subclass hook: True when the shorter word's edit script is compatible
        # with the longer word's.
        raise NotImplementedError

    def find_substr(self, unimorph_ed, past_ed):
        """Return lemmas from unimorph_ed that are not substrings (with a
        matching edit script) of longer words, and do not appear in past_ed."""
        opt = self.opt
        clean_lemma = list()
        # Pass 1: keep a word only if no >= nchar prefix/suffix of it is itself
        # a known word with a matching edit script.
        for word in unimorph_ed:
            if (len(word) < opt.nchar):
                # Too short to decompose; keep as-is.
                clean_lemma.append(word)
                continue
            prefix_lst = []
            suffix_lst = []
            for i in range(opt.nchar, len(word)):
                (prefix, suffix) = (word[:i], word[i:])
                if (opt.prefix and (len(prefix) >= opt.nchar)):
                    if ((prefix in unimorph_ed) and self.match_edit_script(unimorph_ed[prefix], unimorph_ed[word])):
                        prefix_lst.append(prefix)
                if (opt.suffix and (len(suffix) >= opt.nchar)):
                    if ((suffix in unimorph_ed) and self.match_edit_script(unimorph_ed[suffix], unimorph_ed[word])):
                        suffix_lst.append(suffix)
            if (prefix_lst or suffix_lst):
                # Word decomposes into known pieces: drop it.
                pass
            else:
                clean_lemma.append(word)
        clean_lemma_set = set(clean_lemma)
        # Pass 2: remove kept lemmas that are prefixes/suffixes of past-tense
        # words with matching edit scripts.
        for word in past_ed:
            if (len(word) < opt.nchar):
                # NOTE(review): appends to the list after the set was built, so
                # this has no effect on clean_lemma_set — confirm intent.
                clean_lemma.append(word)
                continue
            for i in range(opt.nchar, len(word)):
                (prefix, suffix) = (word[:i], word[i:])
                if (opt.prefix and (len(prefix) >= opt.nchar)):
                    if ((prefix in clean_lemma_set) and self.match_edit_script(unimorph_ed[prefix], past_ed[word])):
                        clean_lemma_set.remove(prefix)
                if (opt.suffix and (len(suffix) >= opt.nchar)):
                    if ((suffix in clean_lemma_set) and self.match_edit_script(unimorph_ed[suffix], past_ed[word])):
                        clean_lemma_set.remove(suffix)
        # Pass 3: exclude anything that literally appears in the past data.
        final_lemma = []
        for lemma in clean_lemma_set:
            if (lemma not in past_ed):
                final_lemma.append(lemma)
        return final_lemma

    def run(self):
        """Build the lemma list, split deterministically by seed, and write
        .train/.dev/.test files under opt.outdir."""
        opt = self.opt
        (unimorph_data, unimorph_edit_script) = self.read_unimorph_data(opt.unimorph)
        (past_data, past_edit_script) = self.read_data(opt.past)
        lemma = self.find_substr(unimorph_edit_script, past_edit_script)
        if opt.outdir:
            maybe_mkdir(opt.outdir)
        # Sort before permuting so the shuffle is reproducible for a given seed.
        lemma = sorted(lemma)
        lemma = np.random.RandomState(opt.seed).permutation(lemma)
        split = int((opt.split * len(lemma)))
        dev = lemma[:split]
        train = lemma[split:]
        # The test set is the full past-tense vocabulary.
        test = list(past_data.keys())
        with open(f'{opt.outdir}.train', 'w', encoding='utf-8') as fp:
            for lemma in train:
                fp.writelines(unimorph_data[lemma])
        with open(f'{opt.outdir}.dev', 'w', encoding='utf-8') as fp:
            for lemma in dev:
                fp.writelines(unimorph_data[lemma])
        with open(f'{opt.outdir}.test', 'w', encoding='utf-8') as fp:
            for lemma in test:
                fp.writelines(past_data[lemma])
def get_CTranS_config():
    """Build and return the ml_collections ConfigDict for the CTranS model."""
    cfg = ml_collections.ConfigDict()
    # Top-level model hyperparameters.
    cfg.KV_size = 512
    cfg.KV_sizec = 512
    cfg.expand_ratio = 4
    cfg.patch_sizes = [16, 8, 4, 2, 1]
    cfg.base_channel = 64
    cfg.n_classes = 1
    # Transformer sub-config.
    cfg.transformer = ml_collections.ConfigDict()
    cfg.transformer.num_heads = 4
    cfg.transformer.num_layers = 4
    cfg.transformer.embeddings_dropout_rate = 0.1
    cfg.transformer.attention_dropout_rate = 0.1
    cfg.transformer.dropout_rate = 0
    return cfg
def simulator(theta, X0=30, Y0=1, T=20, subsample=10, flatten=True, obs_noise=0.1, rng=None):
    """Simulate predator-prey dynamics via `_deriv` and return noisy observations.

    theta unpacks to (alpha, beta, gamma, delta); observations are lognormal
    around log1p of the (non-negative-clipped) ODE trajectory.
    """
    rng = np.random.default_rng() if rng is None else rng
    (alpha, beta, gamma, delta) = theta
    times = np.linspace(0, T, T)
    trajectory = odeint(_deriv, (X0, Y0), times, args=(alpha, beta, gamma, delta))
    if subsample is not None:
        # Keep every (T // subsample)-th time step.
        trajectory = trajectory[::T // subsample]
    # Clip small negative values caused by numerical integration error.
    trajectory[trajectory < 0] = 0.0
    observed = rng.lognormal(np.log1p(trajectory), sigma=obs_noise)
    return observed.flatten() if flatten else observed
def grep(filepath, query):
    """Return all lines of *filepath* (right-stripped) that contain *query*."""
    with open(filepath, 'r') as handle:
        return [line.rstrip() for line in handle if query in line]
class ConvDecoder(tf.keras.Model):
    """Dense + transposed-conv decoder producing a flat output, optionally
    adding the input back as a residual (add_init_fin)."""

    def __init__(self, units_full=128, init_size=16, num_filters=None, deconvlay_config=None, actlay_config=None, add_init_fin=True, **kwargs):
        super().__init__(**kwargs)
        # Avoid shared mutable default arguments (list/dict defaults are
        # evaluated once); None sentinels preserve the original defaults.
        if num_filters is None:
            num_filters = [64, 32, 16, 8]
        if deconvlay_config is None:
            deconvlay_config = dict(kernel_size=4, strides=2, padding='SAME', activation='relu', kernel_initializer='he_normal')
        if actlay_config is None:
            actlay_config = dict(activation='relu', kernel_initializer='he_normal')
        self.units_full = units_full
        self.init_size = init_size
        self.num_filters = num_filters
        # Bug fix: get_config() reads these attributes, but they were never
        # assigned in __init__, so serialization raised AttributeError.
        self.deconvlay_config = deconvlay_config
        self.actlay_config = actlay_config
        self.dense_layers = [tf.keras.layers.Dense(units_full, **actlay_config), tf.keras.layers.Dense(init_size * num_filters[0], **actlay_config)]
        self.deconv_layers = []
        for filters in num_filters[1:]:
            self.deconv_layers.append(Conv1DTranspose(filters=filters, **deconvlay_config))
        # Final deconv: single channel, stride 1, no activation.
        last_config = copy.deepcopy(deconvlay_config)
        last_config['strides'] = 1
        last_config['activation'] = None
        self.deconv_layers.append(Conv1DTranspose(filters=1, **last_config))
        self.deconv_layers.append(tf.keras.layers.Flatten())
        self.add_init_fin = add_init_fin

    def call(self, input_tensor):
        x = input_tensor
        for layer in self.dense_layers:
            x = layer(x)
        # Reshape the dense output into (batch, init_size, channels) for deconvs.
        x = tf.reshape(x, shape=(-1, self.init_size, self.num_filters[0]))
        for layer in self.deconv_layers:
            x = layer(x)
        if self.add_init_fin:
            x += input_tensor
        return x

    def get_config(self):
        base_config = super().get_config()
        # Bug fix: the live 'layers' objects were previously included, which is
        # not serializable and not an __init__ argument; layers are rebuilt by
        # __init__ from the configs below.
        return {**base_config, 'init_size': self.init_size, 'units_full': self.units_full, 'num_filters': self.num_filters, 'deconvlay_config': self.deconvlay_config, 'actlay_config': self.actlay_config, 'add_init_fin': self.add_init_fin}
def require_running_program(function):
    """Decorator: raise gdb.GdbError unless a frame is currently selected
    (i.e. the inferior program is running) before calling *function*."""
    import functools  # local import keeps the block self-contained

    # Bug fix: the decorator application was garbled to a bare '(function)'
    # expression (a no-op); functools.wraps preserves the wrapped function's
    # name/docstring for gdb's command help.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            gdb.selected_frame()
        except RuntimeError:
            raise gdb.GdbError('No frame is currently selected.')
        return function(*args, **kwargs)
    return wrapper
def crappyhist(a, bins=20, width=30, range_=(0, 1)):
    """Print a quick-and-dirty ASCII histogram of *a* to stdout.

    Args:
        a: array-like of values to histogram.
        bins: number of histogram bins.
        width: character width of the longest bar.
        range_: (low, high) histogram range.  Bug fix: this parameter was
            previously accepted but silently ignored (numpy auto-ranged);
            it is now forwarded to numpy.histogram.
    """
    h, edges = numpy.histogram(a, bins, range=range_)
    peak = numpy.amax(h)
    for i in range(bins):
        # Guard against division by zero when all counts are zero.
        bar = '#' * int(width * h[i] / peak) if peak > 0 else ''
        print('{:12.5f} | {:{width}s} {}'.format(edges[i], bar, h[i], width=width))
    print('{:12.5f} |'.format(edges[bins]))
# NOTE(review): the decorator line was garbled in the extracted source
# ('_utils.test(' — the '@' and module prefix were stripped); reconstructed as
# the Taichi test decorator '@test_utils.test(...)'. Confirm against upstream.
@test_utils.test(debug=True, advanced_optimization=False, require=ti.extension.data64, exclude=[ti.vulkan, ti.metal, ti.opengl, ti.gles])
def test_ipow_negative_exp_i64():
    """i64 specialization of the negative-exponent integer-power test."""
    _ipow_negative_exp(ti.i64)
def get_optimizer(name, params):
    """Return a partially-applied torch optimizer constructor.

    Args:
        name: one of 'SGD', 'Adam', 'AdamW'.
        params: dict with 'lr' and 'weight_decay', plus 'momentum' (SGD)
            or 'betas' (Adam/AdamW).

    Returns:
        functools.partial wrapping the optimizer class with hyperparameters bound.

    Raises:
        ValueError: if *name* is not a supported optimizer.  (Robustness fix:
        an unknown name previously fell through and silently returned None.)
    """
    if name == 'SGD':
        return partial(torch.optim.SGD, lr=params['lr'], momentum=params['momentum'], weight_decay=params['weight_decay'])
    if name == 'Adam':
        return partial(torch.optim.Adam, lr=params['lr'], betas=tuple(params['betas']), weight_decay=params['weight_decay'])
    if name == 'AdamW':
        return partial(torch.optim.AdamW, lr=params['lr'], betas=tuple(params['betas']), weight_decay=params['weight_decay'])
    raise ValueError(f'unsupported optimizer: {name!r}')
class TimeSeriesTransformerForPrediction(metaclass=DummyObject):
    """Import placeholder: raises an informative error (via requires_backends)
    when the required 'torch' backend is not installed."""
    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class RoIDataLayer(object):
    """Serves ROI minibatches, reshuffling the roidb each time it is exhausted."""

    def __init__(self, roidb, num_classes):
        self._roidb = roidb
        self._num_classes = num_classes
        self._shuffle_roidb_inds()

    def _shuffle_roidb_inds(self):
        # Fresh random order over the whole roidb; restart the cursor.
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0

    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        batch = cfg.TRAIN.IMS_PER_BATCH
        if cfg.TRAIN.HAS_RPN:
            # RPN path: any image is usable, take a contiguous slice.
            if self._cur + batch >= len(self._roidb):
                self._shuffle_roidb_inds()
            db_inds = self._perm[self._cur:self._cur + batch]
            self._cur += batch
        else:
            # Non-RPN path: skip images without ground-truth boxes.
            db_inds = np.zeros(batch, dtype=np.int32)
            filled = 0
            while filled < batch:
                ind = self._perm[self._cur]
                if self._roidb[ind]['boxes'].shape[0] != 0:
                    db_inds[filled] = ind
                    filled += 1
                self._cur += 1
                if self._cur >= len(self._roidb):
                    self._shuffle_roidb_inds()
        return db_inds

    def _get_next_minibatch(self):
        inds = self._get_next_minibatch_inds()
        selected = [self._roidb[i] for i in inds]
        return get_minibatch(selected, self._num_classes)

    def forward(self):
        """Return the next minibatch of blobs."""
        return self._get_next_minibatch()
def fpInfinity(s, negative):
    """Create a floating-point infinity literal of FP sort *s*.

    Args:
        s: an FPSortRef giving the target floating-point sort.
        negative: True for -oo, False for +oo.

    Returns:
        FPNumRef wrapping the Z3 infinity AST node.
    """
    _z3_assert(isinstance(s, FPSortRef), 'sort mismatch')
    _z3_assert(isinstance(negative, bool), 'expected Boolean flag')
    return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, negative), s.ctx)
def sobel(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
    """Apply the Sobel edge filter to *image*, masking the result if given."""
    filtered = _generic_edge_filter(image, smooth_weights=SOBEL_SMOOTH, axis=axis, mode=mode, cval=cval)
    return _mask_filter_result(filtered, mask)
def test_allknn_fit_resample_mode():
    """AllKNN with kind_sel='mode' resamples module-level fixtures X, Y to the
    expected arrays (ground truth hard-coded below)."""
    allknn = AllKNN(kind_sel='mode')
    (X_resampled, y_resampled) = allknn.fit_resample(X, Y)
    # Expected resampled features (values as produced by the fixture data).
    X_gt = np.array([[(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), 0.], [(- 0.), 0.], [1., 0.], [1., 0.], [(- 0.), 0.], [(- 1.), 0.], [0., 0.], [(- 0.), 0.], [0., 0.498805], [0., 0.], [0., 0.], [0., 0.], [0., 0.], [1., (- 0.)], [0., (- 0.)], [0., (- 1.)], [0., (- 0.)], [0.2096964, (- 0.)], [1., (- 0.)], [0., (- 0.)], [0., (- 0.)], [1., (- 0.)], [1.0304995, (- 0.)], [0., (- 1.)], [(- 0.), (- 0.)], [0., (- 1.)], [0., (- 0.)], [0.1732627, (- 1.)]])
    # Expected resampled labels.
    y_gt = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def main(args):
    """Train a classifier with optional loss flooding and log metrics to MLflow.

    Epoch -1 evaluates the untrained model before any parameter updates.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    verify loop nesting against the original.
    """
    # Reproducibility.
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    print('tr: {}, va: {}'.format(args.training_samples, args.validation_samples))
    print('ds: {}, ln: {}'.format(args.dataset, args.label_noise))
    # NOTE(review): ngm_string and save_table are assigned but never used below.
    ngm_string = '{:f}'.format(args.negative_gaussian_mean)
    K = 2  # number of classes
    (train_loader, vali_loader, test_loader, train_cl_loader, vali_cl_loader, test_cl_loader) = get_dataloader(args)
    model = get_model(args, K)
    device = torch.device(('cuda:{}'.format(args.gpu_id) if torch.cuda.is_available() else 'cpu'))
    model = model.to(device)
    optimizer = get_optimizer(args, model)
    save_table = np.zeros(shape=(args.epochs, 11))
    ce_mean = nn.CrossEntropyLoss(reduction='mean')
    # Initial learning rate (last param group wins if there are several).
    for param in optimizer.param_groups:
        current_lr = param['lr']
    # Optional user tags: "key:value,key:value" plus boolean labels.
    (mlflow.set_tags(dict([kw.split(':') for kw in args.tags.split(',')])) if (len(args.tags) > 0) else None)
    mlflow.set_tags({l: True for l in args.labels.split(',')})
    for epoch in range((- 1), args.epochs):
        (flooded_count, mini_batch_count) = (0, 0)
        model.train()
        if (epoch != (- 1)):
            for (images, labels, _) in train_loader:
                (images, labels) = (images.to(device), labels.to(device))
                outputs = model(images)
                loss_mean = ce_mean(outputs, labels)
                if (args.flood_level > 0):
                    # Flooding (Ishida et al.): reflect the loss about flood_level
                    # so it cannot fall below it.
                    loss_corrected = ((loss_mean - args.flood_level).abs() + args.flood_level)
                else:
                    loss_corrected = loss_mean
                # Count minibatches where flooding actually modified the loss.
                if (loss_corrected != loss_mean):
                    flooded_count += 1
                mini_batch_count += 1
                optimizer.zero_grad()
                loss_corrected.backward()
                if (args.gradient_norm > 0):
                    clip_grad_norm_(model.parameters(), args.gradient_norm)
                optimizer.step()
            # Refresh the LR after the epoch's updates (schedulers may change it).
            for param in optimizer.param_groups:
                current_lr = param['lr']
        (tr_acc, tr_loss, va_acc, va_loss, te_acc, te_loss) = get_acc_loss(train_loader, vali_loader, test_loader, model, device)
        (tr_cl_acc, tr_cl_loss, va_cl_acc, va_cl_loss, te_cl_acc, te_cl_loss) = get_acc_loss(train_cl_loader, vali_cl_loader, test_cl_loader, model, device)
        # Fraction of minibatches where the flooded loss differed from the raw loss.
        proportion = ((float(flooded_count) / mini_batch_count) if (mini_batch_count != 0) else 0)
        print('flood: {} rs: {}'.format(args.flood_level, args.random_seed))
        print('Epoch: {} LR: {} TrLss: {:.4g} VaLss: {:.4g} TeLss: {:.4g} TrAcc: {:.3g} VaAcc: {:.3g} TeAcc: {:.3g}'.format((epoch + 1), current_lr, tr_loss, va_loss, te_loss, tr_acc, va_acc, te_acc))
        print('TrClLss: {:.4g} VaClLss: {:.4g} TeClLss: {:.4g} TrClAcc: {:.3g} VaClAcc: {:.3g} TeClAcc: {:.3g}'.format(tr_cl_loss, va_cl_loss, te_cl_loss, tr_cl_acc, va_cl_acc, te_cl_acc))
        print('Flood prop: {:.4g}'.format(proportion))
        mlflow.log_metrics(step=(epoch + 1), metrics={'currentLr': current_lr, 'trLss': tr_loss, 'vaLss': va_loss, 'teLss': te_loss, 'trAcc': tr_acc, 'vaAcc': va_acc, 'teAcc': te_acc, 'trclLss': tr_cl_loss, 'vaclLss': va_cl_loss, 'teclLss': te_cl_loss, 'trclAcc': tr_cl_acc, 'vaclAcc': va_cl_acc, 'teclAcc': te_cl_acc, 'floodProp': proportion})
# NOTE(review): the decorator line was garbled to '_grad()' in the extracted
# source; '@torch.no_grad()' is the reconstruction (consistent with the
# eval-only body below) — confirm against upstream.
@torch.no_grad()
def evaluate(model, data_loader, tokenizer, device, config, info='None'):
    """Evaluate a two-image + text classification model over *data_loader*.

    Returns:
        dict mapping metric name -> globally-averaged value formatted '%.4f'.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = f'{info} Evaluation:'
    print_freq = 50  # log every 50 batches
    for (image0, image1, text, targets) in metric_logger.log_every(data_loader, print_freq, header):
        (image0, image1, targets) = (image0.to(device), image1.to(device), targets.to(device))
        text_inputs = tokenizer(text, padding='longest', return_tensors='pt').to(device)
        prediction = model(image0, image1, text_inputs, targets=targets, train=False)
        (_, pred_class) = prediction.max(1)
        # Per-batch accuracy, weighted by batch size in the meter.
        accuracy = ((targets == pred_class).sum() / targets.size(0))
        metric_logger.meters['acc'].update(accuracy.item(), n=image0.size(0))
    # Aggregate across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print(f'{info} Averaged stats:', metric_logger.global_avg())
    return {k: '{:.4f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """ResNet identity block: 1x1 -> kxk -> 1x1 convs with BN/ReLU, plus an
    unmodified shortcut from the input."""
    (filters1, filters2, filters3) = filters
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Branch 2a: 1x1 reduce.
    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    # Branch 2b: kxk conv.
    x = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    # Branch 2c: 1x1 expand (no activation before the residual add).
    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    # Residual add with the untouched input, then final activation.
    x = layers.add([x, input_tensor])
    return Activation('relu')(x)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune from.

    NOTE(review): the @dataclass decorator was missing in the extracted source;
    without it the `field(...)` defaults are inert Field objects, so it has been
    restored (the file's imports are expected to provide `dataclass` alongside
    the already-used `field`) — confirm against the original file.
    """
    # Required: model path or hub identifier.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
def test_set_max_len():
    """Tokens longer than tokenize_max_seqlen are replaced by the placeholder."""
    pipeline = stanza.Pipeline(processors='tokenize', dir=TEST_MODELS_DIR, lang='en', download_method=None, tokenize_max_seqlen=20)
    doc = pipeline('This is a doc withaverylongtokenthatshouldbereplaced')
    assert len(doc.sentences) == 1
    words = doc.sentences[0].words
    assert len(words) == 5
    # The overlong final token must have been swapped for the sentinel text.
    assert words[-1].text == tokenize_processor.TOKEN_TOO_LONG_REPLACEMENT
def make_plots(statistics_file):
    """Render SSIM/LPIPS comparison plots from a statistics JSON file and copy
    representative (reference/best/worst/density/color) images next to it.

    Relies on module-level experiment descriptors: configX, fourierX, networkX,
    importanceX, numExtraImportance and FILENAME_PATTERN.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    verify loop nesting against the original.
    """
    print('\n Make Plots')
    with open(statistics_file, 'r') as f:
        stats = json.load(f)
    output_folder = os.path.split(statistics_file)[0]
    FILETYPE = 'eps'
    numRows = len(configX)
    statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
    statTags = ['ssim', 'lpips']
    numCols = len(statTags)
    fourier = fourierX[0][0]
    (fig, axs) = plt.subplots(numRows, numCols, squeeze=False, sharex=True, figsize=(7, (1 + (2 * numRows))))
    legend_handles = []
    legend_names = []
    for row in range(numRows):
        local_stat = stats[row]
        axs[(row, 0)].set_ylabel(configX[row][0])
        for (col, (name, tag)) in enumerate(zip(statNames, statTags)):
            ax = axs[(row, col)]
            if (row == 0):
                ax.set_title(name)
            (X, Xlabel) = (None, None)
            for (ni, (network_label, network_channels, network_layers)) in enumerate(networkX):
                X = []
                Xlabel = []
                Y = []
                err = []
                # Regular importance settings: connected errorbar line per network.
                for (i, (impN, impH, impV)) in enumerate(importanceX[:(- numExtraImportance)]):
                    filename = (FILENAME_PATTERN % (configX[row][0], network_label, fourier, impN))
                    (y, e) = local_stat[filename][tag]
                    X.append((i + (0.02 * ni)))  # small x-offset so networks don't overlap
                    Xlabel.append(impH)
                    Y.append(y)
                    err.append(e)
                h = ax.errorbar(X, Y, yerr=err, fmt='.-')
                # Extra importance settings: plotted as isolated points.
                for (i, (impN, impH, impV)) in enumerate(importanceX[(- numExtraImportance):]):
                    filename = (FILENAME_PATTERN % (configX[row][0], network_label, fourier, impN))
                    (y, e) = local_stat[filename][tag]
                    X.append((X[(- 1)] + 1))
                    Xlabel.append(impH)
                    Y.append(y)
                    err.append(e)
                    ax.errorbar(X[(- 1):], Y[(- 1):], yerr=err[(- 1):], color=h[0].get_color(), fmt='.')
                if ((row == 0) and (col == 0)):
                    legend_handles.append(h)
                    legend_names.append(f'{network_channels} channels, {network_layers} layers')
            ax.set_xticks(X)
            ticks = ax.set_xticklabels(Xlabel)
            for tick in ticks:
                tick.set_rotation(45)
            ax.set_xlabel('Fourier std')
        # Select best/worst LPIPS runs for this config row (lower is better).
        tag = 'lpips'
        worst_lpips = 0
        worst_filename = None
        best_lpips = 1
        best_filename = None
        for (network_label, network_channels, network_layers) in networkX:
            for (i, (impN, impH, impV)) in enumerate(importanceX[:(- numExtraImportance)]):
                filename = (FILENAME_PATTERN % (configX[row][0], network_label, fourier, impN))
                (y, _) = local_stat[filename][tag]
                if (y < best_lpips):
                    best_lpips = y
                    best_filename = filename
                if (y > worst_lpips):
                    worst_lpips = y
                    worst_filename = filename
        # Best network (by LPIPS) for the two extra importance settings.
        density_filenames = [(FILENAME_PATTERN % (configX[row][0], network_label, fourier, importanceX[(- 2)][0])) for (network_label, network_channels, network_layers) in networkX]
        density_filename = min(density_filenames, key=(lambda filename: local_stat[filename][tag][0]))
        color_filenames = [(FILENAME_PATTERN % (configX[row][0], network_label, fourier, importanceX[(- 1)][0])) for (network_label, network_channels, network_layers) in networkX]
        color_filename = min(color_filenames, key=(lambda filename: local_stat[filename][tag][0]))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/reference/reference000.png' % configX[row][0])), os.path.join(output_folder, ('%s_reference.png' % configX[row][0])))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], best_filename))), os.path.join(output_folder, ('%s_best.png' % configX[row][0])))
        # Bug fix: this worst-image copy was duplicated verbatim in the original.
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], worst_filename))), os.path.join(output_folder, ('%s_worst.png' % configX[row][0])))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], density_filename))), os.path.join(output_folder, ('%s_density.png' % configX[row][0])))
        shutil.copyfile(os.path.join(output_folder, ('images_%s/%s/img000.png' % (configX[row][0], color_filename))), os.path.join(output_folder, ('%s_color.png' % configX[row][0])))
    lgd = fig.legend(legend_handles, legend_names, loc='upper center', bbox_to_anchor=(0.5, 0.05), ncol=len(legend_handles))
    fig.savefig(os.path.join(output_folder, ('Importance-SSIM.%s' % FILETYPE)), bbox_inches='tight', bbox_extra_artists=(lgd,))
    print('Done')
    plt.show()
# --- Non-code residue from the dataset-extraction pipeline (web-UI footer) ---
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.