code stringlengths 101 5.91M |
|---|
class Similarity():
    """Sentence-similarity scorer backed by either a BERT sentence encoder or
    a Word2Vec model.

    similarity_type: SimilarityType.COSINE or SimilarityType.WMD.
    embedding_type: EmbeddingType.BERT or EmbeddingType.WORD2VEC.
    WMD is only defined over word vectors, so choosing WMD forces the
    embedding type to WORD2VEC.
    """
    def __init__(self, model_name_or_path='shibing624/text2vec-base-chinese', similarity_type=SimilarityType.COSINE, embedding_type=EmbeddingType.BERT, encoder_type=EncoderType.MEAN, max_seq_length=256):
        # Fall back to safe defaults (with a warning) rather than failing on
        # unrecognized enum values.
        if (embedding_type not in [EmbeddingType.BERT, EmbeddingType.WORD2VEC]):
            logger.warning('embedding_type set error, use default bert')
            embedding_type = EmbeddingType.BERT
        if (similarity_type not in [SimilarityType.COSINE, SimilarityType.WMD]):
            logger.warning('similarity_type set error, use default cosine')
            similarity_type = SimilarityType.COSINE
        # WMD needs word vectors, so override the embedding type if needed.
        if ((similarity_type == SimilarityType.WMD) and (embedding_type != EmbeddingType.WORD2VEC)):
            logger.warning('If use wmd sim type, emb type must be w2v')
            embedding_type = EmbeddingType.WORD2VEC
        self.similarity_type = similarity_type
        self.embedding_type = embedding_type
        self.encoder_type = encoder_type
        self.jieba_tokenizer = JiebaTokenizer()
        if (self.embedding_type == EmbeddingType.WORD2VEC):
            try:
                self.model = Word2Vec(model_name_or_path)
            except ValueError as e:
                logger.error(f'{model_name_or_path} is not Word2Vec model')
                raise ValueError(e)
        elif (self.embedding_type == EmbeddingType.BERT):
            try:
                self.model = SentenceModel(model_name_or_path, encoder_type=self.encoder_type, max_seq_length=max_seq_length)
            except ValueError as e:
                logger.error(f'{model_name_or_path} is not Bert model')
                raise ValueError(e)
        else:
            raise ValueError('embedding_type error')
    def __str__(self):
        return f'<Similarity> model : {self.model}, similarity_type: {self.similarity_type}, embedding_type: {self.embedding_type}'
    def get_score(self, sentence1: str, sentence2: str) -> float:
        """Return a similarity score for two sentences; 0.0 if either is blank."""
        res = 0.0
        sentence1 = sentence1.strip()
        sentence2 = sentence2.strip()
        if ((not sentence1) or (not sentence2)):
            return res
        if (self.similarity_type == SimilarityType.COSINE):
            emb1 = self.model.encode(sentence1)
            emb2 = self.model.encode(sentence2)
            # BERT embeddings go through cos_sim; w2v through cosine_distance.
            res = (cos_sim(emb1, emb2)[0] if (self.embedding_type == EmbeddingType.BERT) else cosine_distance(emb1, emb2))
            res = float(res)
        elif (self.similarity_type == SimilarityType.WMD):
            token1 = self.jieba_tokenizer.tokenize(sentence1)
            token2 = self.jieba_tokenizer.tokenize(sentence2)
            # Map WMD (a distance; 0 means identical) into a (0, 1] similarity.
            res = (1.0 / (1.0 + self.model.w2v.wmdistance(token1, token2)))
        return res
    def get_scores(self, sentences1: List[str], sentences2: List[str], only_aligned: bool=False) -> ndarray:
        """Return a len(sentences1) x len(sentences2) matrix of cosine scores.

        With only_aligned=True (and equal-length inputs) only the diagonal
        (i, i) pairs are scored; other entries stay 0.
        NOTE(review): only cosine scoring is implemented here — WMD is not
        handled by this batch method.
        """
        if ((not sentences1) or (not sentences2)):
            return np.array([])
        if (only_aligned and (len(sentences1) != len(sentences2))):
            logger.warning('Sentences size not equal, auto set is_aligned=False')
            only_aligned = False
        embs1 = self.model.encode(sentences1)
        embs2 = self.model.encode(sentences2)
        if (self.embedding_type == EmbeddingType.BERT):
            scores = cos_sim(embs1, embs2).numpy()
        else:
            scores = np.zeros((len(sentences1), len(sentences2)), dtype=np.float32)
            if only_aligned:
                for (i, e) in enumerate(zip(embs1, embs2)):
                    scores[i][i] = cosine_distance(e[0], e[1])
            else:
                for (i, e1) in enumerate(embs1):
                    for (j, e2) in enumerate(embs2):
                        scores[i][j] = cosine_distance(e1, e2)
        return scores
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Return the average episode reward over ``eval_episodes`` rollouts.

    NOTE(review): actions are drawn from ``action_space.sample()`` — the
    ``policy`` argument is currently unused (random baseline?); confirm.
    """
    env = gym.make(env_name)
    # Offset the seed so evaluation episodes differ from training ones.
    env.seed(seed + 100)
    total_reward = 0.0
    for _ in range(eval_episodes):
        state = env.reset()
        done = False
        while not done:
            action = env.action_space.sample()
            state, reward, done, _ = env.step(action)
            total_reward += reward
    total_reward /= eval_episodes
    print('')
    print(f'Evaluation over {eval_episodes} episodes: {total_reward:.3f}')
    print('')
    return total_reward
def generate_types(package_name: str, file_name: str, values_indices: T.Mapping[(str, T.Dict[(str, IndexEntry)])], use_eigen_types: bool, shared_types: T.Mapping[(str, str)]=None, scalar_type: str='double', output_dir: T.Openable=None, lcm_bindings_output_dir: T.Openable=None, templates: template_util.TemplateList=None) -> TypesCodegenData:
    """Generate LCM type definitions (and bindings) for the given values
    indices, returning the collected codegen metadata.

    When ``output_dir`` is None a temporary directory is created.  When an
    external ``templates`` list is supplied, rendering and LCM binding
    generation are left to the caller.
    """
    if (output_dir is None):
        output_dir = Path(tempfile.mkdtemp(prefix=f'sf_codegen_types_{package_name}_', dir='/tmp'))
        logger.debug(f'Creating temp directory: {output_dir}')
    elif (not isinstance(output_dir, Path)):
        output_dir = Path(output_dir)
    lcm_type_dir = (output_dir / 'lcmtypes')
    # Track whether templates were passed in; if not, we render them ourselves
    # at the end.
    using_external_templates = True
    if (templates is None):
        templates = template_util.TemplateList()
        using_external_templates = False
    types_dict = build_types_dict(package_name=package_name, values_indices=values_indices, shared_types=shared_types)
    # Context dictionary handed to the jinja template.
    data = {'T': T, 'Values': Values, 'list': list, 'tuple': tuple, 'issubclass': issubclass, 'name': package_name, 'scalar_type': scalar_type, 'types_dict': types_dict, 'to_set': set, 'DataBuffer': sf.DataBuffer}
    types_util = {'np.prod': np.prod}
    # Only generate types not namespaced elsewhere (no '.' in the name).
    types_to_generate = []
    for typename in types_dict:
        if ('.' in typename):
            continue
        types_to_generate.append(typename)
    lcm_files = []
    if (len(types_to_generate) > 0):
        logger.debug(f'Creating LCM type at: "{lcm_type_dir}"')
        lcm_file_name = f'{file_name}.lcm'
        lcm_files.append(lcm_file_name)
        templates.add(template_path='types.lcm.jinja', data=dict(data, types_to_generate=types_to_generate, types_util=types_util, use_eigen_types=use_eigen_types), config=RenderTemplateConfig(), template_dir=template_util.LCM_TEMPLATE_DIR, output_path=(lcm_type_dir / lcm_file_name))
    # Map each value name to its generated (or shared) typename and namespace.
    typenames_dict = {}
    namespaces_dict = {}
    for name in values_indices.keys():
        if ((shared_types is not None) and (name in shared_types)):
            typenames_dict[name] = shared_types[name].split('.')[(- 1)]
            # A dotted shared type lives in an external namespace.
            if ('.' in shared_types[name]):
                namespaces_dict[name] = shared_types[name].split('.')[0]
            else:
                namespaces_dict[name] = package_name
        else:
            typenames_dict[name] = f'{name}_t'
            namespaces_dict[name] = package_name
    # Same mapping for subtypes referenced inside each generated type.
    # NOTE(review): this loop rebinds `data` (the template context above);
    # harmless since `data` is not used afterwards, but fragile.
    for (typename, data) in types_dict.items():
        unformatted_typenames = T.cast(T.List[str], data['unformatted_typenames'])
        for unformatted_typename in unformatted_typenames:
            name = unformatted_typename.split('.')[(- 1)]
            if ((shared_types is not None) and (name in shared_types)):
                name = shared_types[name]
            if ('.' in name):
                typenames_dict[name] = name.split('.')[(- 1)]
                namespaces_dict[name] = name.split('.')[0]
            else:
                typenames_dict[name] = f'{name}_t'
                namespaces_dict[name] = package_name
    codegen_data = TypesCodegenData(package_name=package_name, values_indices=values_indices, shared_types=shared_types, scalar_type=scalar_type, output_dir=output_dir, lcm_type_dir=lcm_type_dir, lcm_bindings_output_dir=lcm_bindings_output_dir, lcm_files=lcm_files, types_dict=types_dict, typenames_dict=typenames_dict, namespaces_dict=namespaces_dict)
    if (not using_external_templates):
        # We own the template list: render now and build the LCM bindings.
        templates.render()
        codegen_data.lcm_bindings_dirs = codegen_util.generate_lcm_types(lcm_type_dir, lcm_files, lcm_bindings_output_dir)
    return codegen_data
def _mul_fateman_mul(f, g):
    """Multiply two polynomials via Fateman's Kronecker-substitution trick.

    Clears denominators to land in ZZ[x], evaluates both polynomials at a
    power of two large enough that packed coefficients do not overlap,
    multiplies the two big integers, and unpacks the product's digits back
    into polynomial coefficients (rescaling into QQ when needed).
    """
    # Work over the rationals, then scale to integer polynomials.
    f = f.change_ring(QQ)
    g = g.change_ring(QQ)
    f_list = f.list()
    g_list = g.list()
    # Content (gcd of coefficients); its denominator clears all fractions.
    fgcd = f_list[0].content(f_list)
    ggcd = g_list[0].content(g_list)
    z_poly_f = (f * fgcd.denominator()).change_ring(ZZ)
    z_poly_g = (g * ggcd.denominator()).change_ring(ZZ)
    # Factor that undoes the scaling on the product's coefficients.
    div = (1 / (fgcd.denominator() * ggcd.denominator()))
    z_poly_f_list = z_poly_f.coefficients(sparse=False)
    z_poly_g_list = z_poly_g.coefficients(sparse=False)
    # Bit width per packed coefficient so the product's digits stay separated.
    padding = _mul_fateman_to_int2(z_poly_f_list, z_poly_g_list)
    # Evaluate at 2**padding: packs each polynomial into one big integer.
    n_f = z_poly_f((1 << padding))
    n_g = z_poly_g((1 << padding))
    if (div == 1):
        return _mul_fateman_to_poly((n_f * n_g), padding)
    else:
        l = _mul_fateman_to_poly((n_f * n_g), padding)
        return [QQ((i * div)) for i in l]
class MultigraphDecoder():
    """Greedy coreference decoder over a multigraph of pairwise relations.

    Each mention is linked to its highest-weight admissible antecedent;
    linked mentions inherit (or create) the antecedent's cluster id
    (``set_id``).
    """
    def __init__(self, multigraph_creator):
        self.coref_multigraph_creator = multigraph_creator
    def decode(self, corpus):
        """Decode every document in ``corpus`` in place (sets ``set_id``s)."""
        for doc in corpus:
            for mention in doc.system_mentions:
                mention.attributes['set_id'] = None
            # The first mention is skipped (presumably a dummy/root mention —
            # confirm against system_mentions construction).
            self.decode_for_one_document(doc.system_mentions[1:])
    def decode_for_one_document(self, mentions):
        multigraph = self.coref_multigraph_creator.construct_graph_from_mentions(mentions)
        for mention in mentions:
            antecedent = self.compute_antecedent(mention, multigraph)
            if (antecedent is not None):
                # New cluster ids are the antecedent's index in the mention list.
                if (antecedent.attributes['set_id'] is None):
                    antecedent.attributes['set_id'] = mentions.index(antecedent)
                mention.attributes['set_id'] = antecedent.attributes['set_id']
                mention.document.antecedent_decisions[mention.span] = antecedent.span
    def compute_antecedent(self, mention, multigraph):
        """Return the admissible antecedent with the highest positive weight,
        or None when no candidate has positive weight.

        BUG FIX: ``self`` was missing from the signature, so every
        ``self.compute_antecedent(...)`` call raised TypeError.
        """
        weights = []
        for antecedent in multigraph.edges[mention]:
            # Candidates with any negative relation are inadmissible.
            if (not multigraph.edges[mention][antecedent]['negative_relations']):
                weights.append((multigraph.get_weight(mention, antecedent), antecedent))
        # Sort once (the original sorted twice); ties fall back to comparing
        # antecedents, matching the original sorted()-based behavior.
        ranked = sorted(weights)
        if (ranked and (ranked[(- 1)][0] > 0)):
            return ranked[(- 1)][1]
        return None
class KirillovReshetikhinTableaux(CrystalOfWords):
    """Kirillov-Reshetikhin (KR) tableaux for an affine Cartan type, indexed
    by a classical Dynkin node ``r`` and a width ``s``.

    ``__classcall_private__`` dispatches construction to the concrete
    implementation appropriate for each affine family.
    """
    def __classcall_private__(cls, cartan_type, r, s):
        """Dispatch to the appropriate KR tableaux class for the Cartan type."""
        ct = CartanType(cartan_type)
        if (not ct.is_affine()):
            raise ValueError('The Cartan type must be affine')
        typ = ct.type()
        if ct.is_untwisted_affine():
            if (typ == 'A'):
                return KRTableauxRectangle(ct, r, s)
            if (typ == 'B'):
                # The spin node of type B has its own implementation.
                if (r == ct.classical().rank()):
                    return KRTableauxBn(ct, r, s)
                return KRTableauxTypeVertical(ct, r, s)
            if (typ == 'C'):
                if (r == ct.classical().rank()):
                    return KRTableauxRectangle(ct, r, s)
                return KRTableauxTypeHorizonal(ct, r, s)
            if (typ == 'D'):
                # The two spin nodes of type D.
                if ((r == ct.classical().rank()) or (r == (ct.classical().rank() - 1))):
                    return KRTableauxSpin(ct, r, s)
                return KRTableauxTypeVertical(ct, r, s)
            if (typ == 'E'):
                return KRTableauxTypeFromRC(ct, r, s)
        else:
            # Twisted types: dispatch on the type, then on the dual's type.
            if (typ == 'BC'):
                return KRTableauxTypeBox(ct, r, s)
            typ = ct.dual().type()
            if (typ == 'BC'):
                return KRTableauxTypeHorizonal(ct, r, s)
            if (typ == 'B'):
                return KRTableauxTypeVertical(ct, r, s)
            if (typ == 'C'):
                if (r == ct.dual().classical().rank()):
                    return KRTableauxDTwistedSpin(ct, r, s)
                return KRTableauxTypeBox(ct, r, s)
            if (typ == 'G'):
                if (r == 1):
                    return KRTableauxTypeBox(ct, r, s)
                return KRTableauxTypeFromRC(ct, r, s)
        raise NotImplementedError
    def __init__(self, cartan_type, r, s):
        """Initialize the parent crystal and build its module generators."""
        self._r = r
        self._s = s
        self._cartan_type = cartan_type
        Parent.__init__(self, category=KirillovReshetikhinCrystals())
        self.letters = CrystalOfLetters(cartan_type.classical())
        self.module_generators = self._build_module_generators()
    def _repr_(self):
        return 'Kirillov-Reshetikhin tableaux of type {} and shape ({}, {})'.format(self._cartan_type, self._r, self._s)
    def __iter__(self):
        """Iterate via breadth-first search along the classical operators f_i."""
        index_set = self._cartan_type.classical().index_set()
        from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet
        return RecursivelyEnumeratedSet(self.module_generators, (lambda x: [x.f(i) for i in index_set]), structure='graded').breadth_first_search_iterator()
    def module_generator(self, i=None, **options):
        """Return a module generator selected by index, shape, or weight.

        Accepts exactly one of ``shape``, ``column_shape``, ``weight`` or
        ``classical_weight``; with no options, returns the generator of the
        default (level-adjusted) weight s*Lambda_r.
        """
        if (i is not None):
            return self.module_generators[i]
        n = self._cartan_type.classical().rank()
        if ('shape' in options):
            # Pad the shape with zeros up to the classical rank.
            shape = list(options['shape'])
            if (len(shape) < n):
                shape.extend(([0] * (n - len(shape))))
            for mg in self.module_generators:
                if (list(mg.classical_weight().to_vector()) == shape):
                    return mg
            return None
        if ('column_shape' in options):
            # A column shape is the conjugate of the corresponding partition.
            shape = list(Partition(options['column_shape']).conjugate())
            if (len(shape) < n):
                shape.extend(([0] * (n - len(shape))))
            for mg in self.module_generators:
                if (list(mg.classical_weight().to_vector()) == shape):
                    return mg
            return None
        if ('weight' in options):
            wt = options['weight']
            for mg in self.module_generators:
                if (mg.weight() == wt):
                    return mg
            return None
        if ('classical_weight' in options):
            wt = options['classical_weight']
            for mg in self.module_generators:
                if (mg.classical_weight() == wt):
                    return mg
            return None
        # Default: the generator whose weight is s*Lambda_r minus the
        # level-proportional multiple of Lambda_0.
        R = self.weight_lattice_realization()
        Lambda = R.fundamental_weights()
        r = self.r()
        s = self.s()
        weight = ((s * Lambda[r]) - (((s * Lambda[0]) * Lambda[r].level()) / Lambda[0].level()))
        for b in self.module_generators:
            if (b.weight() == weight):
                return b
        assert False
    # NOTE(review): the bare "_method" statements below look like stripped
    # decorators (likely "@abstract_method"); as written they would raise
    # NameError when the class body executes — confirm against the original.
    _method
    def _build_module_generators(self):
        _method(optional=True)
    def from_kirillov_reshetikhin_crystal(self, krc):
        """Convert an element of the corresponding KR crystal (abstract hook)."""
    def _element_constructor_(self, *lst, **options):
        """Build an element, converting from a KR crystal element if given one."""
        if isinstance(lst[0], KirillovReshetikhinGenericCrystalElement):
            if ((lst[0].cartan_type() != self.cartan_type()) or (lst[0].parent().r() != self._r) or (lst[0].parent().s() != self._s)):
                raise ValueError('the Kirillov-Reshetikhin crystal must have the same Cartan type and (r,s)')
            return self.from_kirillov_reshetikhin_crystal(lst[0])
        return self.element_class(self, list(lst), **options)
    def r(self):
        """The Dynkin node r."""
        return self._r
    def s(self):
        """The width s."""
        return self._s
    _method
    def kirillov_reshetikhin_crystal(self):
        """The Kashiwara-Nakashima model of the same KR crystal."""
        return KashiwaraNakashimaTableaux(self._cartan_type, self._r, self._s)
    def classical_decomposition(self):
        """Delegate to the Kashiwara-Nakashima model's decomposition."""
        return self.kirillov_reshetikhin_crystal().classical_decomposition()
    def tensor(self, *crystals, **options):
        """Tensor product; stays in the KR-tableaux model when all factors
        are KR (or tensor products of KR) of the same Cartan type."""
        ct = self._cartan_type
        from sage.combinat.rigged_configurations.tensor_product_kr_tableaux import TensorProductOfKirillovReshetikhinTableaux
        if all(((isinstance(B, (KirillovReshetikhinTableaux, TensorProductOfKirillovReshetikhinTableaux)) and (B.cartan_type() == ct)) for B in crystals)):
            dims = [[self._r, self._s]]
            for B in crystals:
                if isinstance(B, TensorProductOfKirillovReshetikhinTableaux):
                    dims += B.dims
                elif isinstance(B, KirillovReshetikhinTableaux):
                    dims.append([B._r, B._s])
            return TensorProductOfKirillovReshetikhinTableaux(ct, dims)
        return super().tensor(*crystals, **options)
    # NOTE(review): bare "_attribute" — likely a stripped "@lazy_attribute".
    _attribute
    def _tableau_height(self):
        return self._r
class A000213(SloaneSequence):
    """Tribonacci numbers: a(n) = a(n-1) + a(n-2) + a(n-3), cached lazily."""
    def __init__(self):
        SloaneSequence.__init__(self, offset=0)
        self._b = []  # cache of computed terms
        self._precompute()
    def _repr_(self):
        return 'Tribonacci numbers: a(n) = a(n-1) + a(n-2) + a(n-3).'
    def _precompute(self, how_many=20):
        """Extend the cache by ``how_many`` terms from the shared generator."""
        # Create the term generator lazily on first use.
        if not hasattr(self, '_f'):
            self._f = recur_gen3(1, 1, 1, 1, 1, 1)
        gen = self._f
        self._b.extend(next(gen) for _ in range(how_many))
    def _eval(self, n):
        """Return a(n), extending the cache just far enough if needed."""
        shortfall = (n - len(self._b)) + 1
        if shortfall > 0:
            self._precompute(shortfall)
        return self._b[n]
    def list(self, n):
        """Return the first n terms."""
        self._eval(n)
        return self._b[:n]
class Latex(LatexCall):
    """Render LaTeX snippets to PNG and manage LaTeX-related preferences
    (delimiters, macros, preamble, engine) stored in ``_Latex_prefs``.
    """
    def __init__(self, debug=False, slide=False, density=150, engine=None):
        self.__debug = debug
        self.__slide = slide
        self.__engine = engine
        self.__density = density
    def _relation_symbols(self):
        """Map Python comparison operators to their LaTeX symbols."""
        import operator
        return {operator.lt: ' < ', operator.le: ' \\leq ', operator.eq: ' = ', operator.ne: ' \\neq ', operator.ge: ' \\geq ', operator.gt: ' > '}
    def _latex_preparse(self, s, locals):
        """Replace each \\sage{expr} in ``s`` with the LaTeX of the evaluated expr.

        Evaluation failures are printed and rendered as "[expr undefined]".
        """
        from sage.misc.sage_eval import sage_eval
        i0 = (- 1)
        while True:
            i = s.find('\\sage{')
            # Stop when no marker remains, or when no progress was made
            # (guards against looping forever on a pathological input).
            if ((i == (- 1)) or (i == i0)):
                return s
            i0 = i
            t = s[(i + 6):]
            j = t.find('}')
            if (j == (- 1)):
                return s
            var = t[:j]
            try:
                k = str(latex(sage_eval(var, locals)))
            except Exception as msg:
                print(msg)
                k = ('\\mbox{\\rm [%s undefined]}' % var)
            s = ((s[:i] + k) + t[(j + 1):])
    def eval(self, x, globals, strip=False, filename=None, debug=None, density=None, engine=None, locals={}):
        """Typeset ``x`` and write ``<filename>.png`` beside the original path.

        Returns '' on success and None when latex reported an error.
        ``strip`` and ``globals`` are accepted but unused in this body.
        NOTE(review): ``locals={}`` is a mutable default argument; it is only
        read here, but ``locals=None`` plus a fallback would be safer.
        """
        MACROS = latex_extra_preamble()
        if (density is None):
            density = self.__density
        if (filename is None):
            # Random name for anonymous snippets.
            filename = ('sage%s' % random.randint(1, 100))
        else:
            filename = os.path.splitext(filename)[0]
        result = None
        with TemporaryDirectory() as base:
            (orig_base, filename) = os.path.split(os.path.abspath(filename))
            if (len(filename.split()) > 1):
                raise ValueError('filename must contain no spaces')
            if (debug is None):
                debug = self.__debug
            x = self._latex_preparse(x, locals)
            O = open(os.path.join(base, (filename + '.tex')), 'w')
            # Slides use a different header and omit the trailing newline.
            if self.__slide:
                O.write(SLIDE_HEADER)
                O.write(MACROS)
                O.write('\\begin{document}\n\n')
            else:
                O.write(LATEX_HEADER)
                O.write(MACROS)
                O.write('\\begin{document}\n')
            O.write(x)
            if self.__slide:
                O.write('\n\n\\end{document}')
            else:
                O.write('\n\n\\end{document}\n')
            O.close()
            # Engine priority: explicit argument, instance setting, global pref.
            if (engine is None):
                if (self.__engine is None):
                    engine = _Latex_prefs._option['engine']
                else:
                    engine = self.__engine
            e = _run_latex_(os.path.join(base, (filename + '.tex')), debug=debug, density=density, engine=engine, png=True)
            # Copy the PNG out of the temp dir only when latex succeeded.
            if (e.find('Error') == (- 1)):
                shutil.copy(os.path.join(base, (filename + '.png')), os.path.join(orig_base, (filename + '.png')))
                result = ''
        return result
    def blackboard_bold(self, t=None):
        """Get (no args) or set whether \\Bold renders as \\mathbb or \\mathbf."""
        if (t is None):
            return _Latex_prefs._option['blackboard_bold']
        from .latex_macros import sage_configurable_latex_macros
        old = _Latex_prefs._option['blackboard_bold']
        _Latex_prefs._option['blackboard_bold'] = bool(t)
        # Swap the registered \Bold macro only when the setting changes.
        if (bool(old) != bool(t)):
            if old:
                old_macro = '\\newcommand{\\Bold}[1]{\\mathbb{#1}}'
            else:
                old_macro = '\\newcommand{\\Bold}[1]{\\mathbf{#1}}'
            if bool(t):
                macro = '\\newcommand{\\Bold}[1]{\\mathbb{#1}}'
            else:
                macro = '\\newcommand{\\Bold}[1]{\\mathbf{#1}}'
            sage_configurable_latex_macros.remove(old_macro)
            sage_configurable_latex_macros.append(macro)
    def matrix_delimiters(self, left=None, right=None):
        """Get (no args) or set the delimiters used when typesetting matrices."""
        if ((left is None) and (right is None)):
            return _Latex_prefs._option['matrix_delimiters']
        else:
            if (left is not None):
                _Latex_prefs._option['matrix_delimiters'][0] = left
            if (right is not None):
                _Latex_prefs._option['matrix_delimiters'][1] = right
    def vector_delimiters(self, left=None, right=None):
        """Get (no args) or set the delimiters used when typesetting vectors."""
        if ((left is None) and (right is None)):
            return _Latex_prefs._option['vector_delimiters']
        else:
            if (left is not None):
                _Latex_prefs._option['vector_delimiters'][0] = left
            if (right is not None):
                _Latex_prefs._option['vector_delimiters'][1] = right
    def matrix_column_alignment(self, align=None):
        """Get (no args) or set the column alignment used for matrices."""
        if (align is None):
            return _Latex_prefs._option['matrix_column_alignment']
        else:
            _Latex_prefs._option['matrix_column_alignment'] = align
    # NOTE(review): bare "_method" — looks like a stripped decorator
    # (possibly "@cached_method"); would raise NameError as written.
    _method
    def has_file(self, file_name) -> bool:
        """True if kpsewhich can locate ``file_name`` in the TeX installation."""
        assert isinstance(file_name, str)
        try:
            retcode = call(('kpsewhich %s' % file_name), shell=True, stdout=PIPE, stderr=PIPE)
            return (retcode == 0)
        except OSError:
            # kpsewhich itself could not be run.
            return False
    _method
    def check_file(self, file_name, more_info=''):
        """Print a warning (plus optional extra info) when a TeX file is missing."""
        assert isinstance(file_name, str)
        if (not self.has_file(file_name)):
            print("\nWarning: `{}` is not part of this computer's TeX installation.".format(file_name))
            if more_info:
                print(more_info)
    def extra_macros(self, macros=None):
        """Get (no args) or replace the user-defined macro string."""
        if (macros is None):
            return _Latex_prefs._option['macros']
        else:
            _Latex_prefs._option['macros'] = macros
    def add_macro(self, macro):
        """Append ``macro`` unless it is already present."""
        current = latex.extra_macros()
        if (current.find(macro) == (- 1)):
            _Latex_prefs._option['macros'] += macro
    def extra_preamble(self, s=None):
        """Get (no args) or replace the extra preamble string."""
        if (s is None):
            return _Latex_prefs._option['preamble']
        else:
            _Latex_prefs._option['preamble'] = s
    def add_to_preamble(self, s):
        """Append ``s`` to the preamble unless it is already present."""
        current = latex.extra_preamble()
        if (current.find(s) == (- 1)):
            _Latex_prefs._option['preamble'] += s
    def add_package_to_preamble_if_available(self, package_name):
        """Add \\usepackage{package_name} to the preamble if its .sty exists."""
        assert isinstance(package_name, str)
        if self.has_file((package_name + '.sty')):
            self.add_to_preamble(('\\usepackage{%s}\n' % package_name))
    def engine(self, e=None):
        """Get (no args) or set the LaTeX engine; raises on unknown engines."""
        if (e is None):
            return _Latex_prefs._option['engine']
        if (e == 'latex'):
            _Latex_prefs._option['engine'] = 'latex'
            _Latex_prefs._option['engine_name'] = 'LaTeX'
        elif (e == 'pdflatex'):
            _Latex_prefs._option['engine'] = 'pdflatex'
            _Latex_prefs._option['engine_name'] = 'PDFLaTeX'
        elif (e == 'xelatex'):
            _Latex_prefs._option['engine'] = e
            _Latex_prefs._option['engine_name'] = 'XeLaTeX'
        elif (e == 'lualatex'):
            _Latex_prefs._option['engine'] = e
            _Latex_prefs._option['engine_name'] = 'LuaLaTeX'
        else:
            raise ValueError(('%s is not a supported LaTeX engine. Use latex, pdflatex, xelatex, or lualatex' % e))
def _get_qmu_qsqrt(kernel, inducing_variable):
    """Return (q_mu, q_sqrt) initialized from the kernel at the inducing points.

    q_sqrt is the Cholesky factor of the (jittered) K(Z, Z) matrix and q_mu
    is one random draw q_sqrt @ eps with eps ~ N(0, I), shape (len(Z), 1).
    """
    Z = inducing_variable.Z.numpy()
    Kzz = kernel(Z, full_cov=True).numpy()
    # Jitter the diagonal so the Cholesky factorization is numerically stable.
    q_sqrt = np.linalg.cholesky(Kzz + (default_jitter() * np.eye(len(Z))))
    # BUG FIX: the matmul operator was missing ("q_sqrt np.random.randn(...)"),
    # which is a SyntaxError.
    q_mu = q_sqrt @ np.random.randn(len(Z), 1)
    return (q_mu, q_sqrt)
class LinkData():
    """Plain record describing one link annotation: a category, a half-open
    span [start, end), the mention text, and associated metadata."""
    def __init__(self, category, start, end, mention, comp, value, name, link_feat, gl_pos=None):
        self.gl_pos = gl_pos
        self.category = category
        self.start = start
        self.end = end
        self.mention = mention
        self.comp = comp
        self.value = str(value)  # always stored as text, whatever the input type
        self.name = name
        self.link_feat = link_feat
    def serialize(self):
        """Return the fields as (name, value) pairs; gl_pos leads when set."""
        field_names = ('category', 'start', 'end', 'mention', 'comp', 'value', 'name', 'link_feat')
        pairs = [(field, getattr(self, field)) for field in field_names]
        if self.gl_pos is not None:
            pairs.insert(0, ('gl_pos', self.gl_pos))
        return pairs
    def display(self):
        """Render a one-line human-readable summary of the link."""
        pieces = []
        if self.gl_pos is not None:
            pieces.append('#%02d ' % self.gl_pos)
        pieces.append('%s: [%d, %d) (%s) %s %s ' % (self.category, self.start, self.end, self.mention, self.comp, self.value))
        if self.name != '':
            pieces.append('(%s) ' % self.name)
        pieces.append(str(self.link_feat))
        return ''.join(pieces)
# NOTE(review): the leading "@pytest.mark" appears to have been stripped from
# this decorator line — ".parametrize(...)" alone is not valid at module level.
.parametrize('y_pred', [np.array(y_pred_list), y_pred_list])
def test_gamma_conformity_score_consistency(y_pred: NDArray) -> None:
    """Signed conformity scores must invert back to the observed targets."""
    gamma_conf_score = GammaConformityScore()
    signed_conf_scores = gamma_conf_score.get_signed_conformity_scores(X_toy, y_toy, y_pred)
    # Applying the estimation distribution to the scores should reconstruct y.
    y_obs = gamma_conf_score.get_estimation_distribution(X_toy, y_pred, signed_conf_scores)
    np.testing.assert_allclose(y_obs, y_toy)
class SetAttentionBlock(nn.Module):
    """Self-attention over a set: multi-head attention where the feature set
    attends to itself (query == key == feat)."""
    def __init__(self, d_model, num_heads, d_head, d_ff, dropouth=0.0, dropouta=0.0):
        super().__init__()
        self.mha = MultiHeadAttention(d_model, num_heads, d_head, d_ff, dropouth=dropouth, dropouta=dropouta)
    def forward(self, feat, lengths):
        # Same tensor (and lengths) supplied as both query and key sides.
        return self.mha(feat, feat, lengths, lengths)
def grid2list(grid):
    """Expand a grid (list of per-dimension value lists) into all combinations.

    This is a Cartesian product, but ordered so that EARLIER dimensions vary
    fastest — the opposite of itertools.product, so the manual expansion is
    kept.
    """
    combos = [[]]
    for values in grid:
        # Append each new value to every prefix built so far; iterating
        # values in the outer position preserves the original ordering.
        combos = [prefix + [value] for value in values for prefix in combos]
    return combos
def test_dataset():
    """Smoke-test that each known dataset name initializes to a core.Dataset."""
    for name in ('esc50', 'urbansound8k', 'urbansed'):
        dataset = soundata.initialize(name)
        assert isinstance(dataset, core.Dataset)
    # Print the last dataset initialized ('urbansed').
    print(dataset)
class Lighting(object):
    """PCA-based color jitter (AlexNet-style lighting noise) for 3-channel
    CHW images.

    alphastd: stddev of the per-channel normal noise; 0 disables the transform.
    eigval/eigvec: PCA eigenvalues and eigenvectors of the RGB channels.
    """
    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)
    def __call__(self, img):
        # Disabled: pass the image through untouched.
        if self.alphastd == 0:
            return img
        # Per-channel random scale for the principal components.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        offset = (self.eigvec.type_as(img).clone()
                  .mul(alpha.view(1, 3).expand(3, 3))
                  .mul(self.eigval.view(1, 3).expand(3, 3))
                  .sum(1).squeeze())
        # Broadcast the per-channel offset across the whole image.
        return img.add(offset.view(3, 1, 1).expand_as(img))
class loss_count(torch.autograd.Function):
    """Spike-count loss helper: converts the difference between output spike
    counts and target counts into a per-timestep delta tensor.
    """
    def forward(ctx, outputs, target, network_config, layer_config):
        # Target spike counts for neurons that should / should not fire.
        desired_count = network_config['desired_count']
        undesired_count = network_config['undesired_count']
        shape = outputs.shape
        n_steps = shape[4]  # time dimension is axis 4
        # Total spikes per neuron, error averaged per timestep.
        out_count = torch.sum(outputs, dim=4)
        delta = ((out_count - target) / n_steps)
        # NOTE(review): each masking pass below zeroes `delta` where mask == 1,
        # i.e. it KEEPS the error only where mask was reset to 0 in that pass;
        # confirm the mask polarity matches the intended loss definition.
        mask = torch.ones_like(out_count)
        mask[(target == undesired_count)] = 0
        mask[(delta < 0)] = 0
        delta[(mask == 1)] = 0
        mask = torch.ones_like(out_count)
        mask[(target == desired_count)] = 0
        mask[(delta > 0)] = 0
        delta[(mask == 1)] = 0
        # Repeat the per-neuron delta across all timesteps (restores axis 4).
        delta = delta.unsqueeze_((- 1)).repeat(1, 1, 1, 1, n_steps)
        return delta
    def backward(ctx, grad):
        # Pass the incoming gradient straight through to `outputs`; the other
        # three forward inputs (target and the config dicts) get no gradient.
        return (grad, None, None, None)
def tqdm_with_logging(iterable=None, desc=None, total=None, leave=True, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, logging_on_close=True, logging_on_update=False):
    """tqdm-compatible progress bar that also reports progress via logging.

    All standard tqdm keyword arguments are forwarded unchanged to
    TqdmToLogger; ``logging_on_close`` / ``logging_on_update`` choose whether
    a log record is emitted when the bar closes and/or on each update.
    """
    return TqdmToLogger(iterable=iterable, desc=desc, total=total, leave=leave, ncols=ncols, mininterval=mininterval, maxinterval=maxinterval, miniters=miniters, ascii=ascii, disable=disable, unit=unit, unit_scale=unit_scale, dynamic_ncols=dynamic_ncols, smoothing=smoothing, bar_format=bar_format, initial=initial, position=position, postfix=postfix, logging_on_close=logging_on_close, logging_on_update=logging_on_update)
def basewise_bits(motif):
    """Per-position entropy (in bits) of a position weight matrix.

    motif: array of shape (length, 4) holding per-base probabilities.
    Returns a 1-D array of length motif.shape[0].
    """
    epsilon = 0.001
    assert (motif.shape[1] == 4)
    # Vectorized replacement for the original np.apply_along_axis + lambda:
    # same math, one C-level pass. clip keeps log2 finite for probabilities
    # at (or near) 0 and 1; the unclipped probabilities still weight the sum.
    clipped = np.clip(motif, epsilon, (1 - epsilon))
    ent = -np.sum(motif * np.log2(clipped), axis=1)
    return ent
def test_toarrow_ListArray_RegularArray():
    """Round-trip ListOffset/Regular/List arrays through Arrow and back."""
    words = ak.highlevel.Array(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']).layout
    word_offsets = ak.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
    word_lists = ak.contents.ListOffsetArray(word_offsets, words)
    assert (word_lists.to_arrow().to_pylist() == [['one', 'two', 'three'], [], ['four', 'five'], ['six'], ['seven', 'eight', 'nine']])
    floats = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1]))
    float_offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
    float_lists = ak.contents.ListOffsetArray(float_offsets, floats)
    regular = ak.contents.RegularArray(float_lists, 2, zeros_length=0)
    starts = ak.index.Index64(np.array([0, 1], dtype=np.int64))
    stops = ak.index.Index64(np.array([2, 3], dtype=np.int64))
    nested = ak.contents.ListArray(starts, stops, regular)
    # ListArray converts to Arrow's LargeListArray storage...
    assert isinstance(nested.to_arrow().storage, pyarrow.LargeListArray)
    assert (nested.to_arrow().to_pylist() == [[[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]]], [[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]])
    assert (nested[1:].to_arrow().to_pylist() == [[[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]])
    # ...while RegularArray (fixed inner size) maps to FixedSizeListArray.
    assert isinstance(regular.to_arrow().storage, pyarrow.FixedSizeListArray)
    assert (regular.to_arrow().to_pylist() == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]])
    assert (regular[1:].to_arrow().to_pylist() == [[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]])
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False, min_var_est=_eps):
    """Return the MMD^2 estimate and its variance-normalized ratio."""
    mmd2, var_est = _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=const_diagonal, biased=biased)
    # Floor the variance estimate so the ratio stays finite and stable.
    denom = tf.sqrt(tf.maximum(var_est, min_var_est))
    return (mmd2, mmd2 / denom)
def check_wv_tok_in_nlu_tok(wv_tok1, nlu_t1):
    """Locate each where-value token sequence inside the (lowercased) NL tokens.

    Returns a list of [start, end] index pairs, one per where-value.
    """
    nlu_lower = [token.lower() for token in nlu_t1]
    spans = []
    for value_tokens in wv_tok1:
        value_lower = [token.lower() for token in value_tokens]
        # find_sub_list returns candidate (start, end) matches; take the first.
        start, end = find_sub_list(value_lower, nlu_lower)[0]
        spans.append([start, end])
    return spans
class DSTProcessor(DataProcessor):
    """Reader for dialogue-state-tracking data (slot names suggest MultiWOZ —
    confirm).

    Each example pairs the running dialog history (text_a) with the current
    system+user turn (text_b) and the turn's slot labels (multi-label).
    """
    def __init__(self, root):
        # D[0]/D[1]/D[2] hold train / dev / test data respectively.
        self.D = [[], [], []]
        self.D[0] = self.load_data(os.path.join(root, 'train_dials.json'))
        self.D[1] = self.load_data(os.path.join(root, 'dev_dials.json'))
        self.D[2] = self.load_data(os.path.join(root, 'test_dials.json'))
    def load_data(self, path):
        """Parse one dialogs JSON file into [text_a, text_b, label_list] triples."""
        multi_label_data = []
        labels = self.get_labels()
        with open(path) as f:
            data = json.load(f)
        for dial in data:
            dialog_history = ''
            for (idx, turn) in enumerate(dial['dialogue']):  # NOTE(review): idx is unused
                label_list = []
                turn_domain = turn['domain']  # NOTE(review): unused
                # History accumulated so far vs. the current exchange.
                text_a = dialog_history
                text_b = ((turn['system_transcript'] + ' ') + turn['transcript'])
                # Skip turns containing any slot outside the known label set.
                flag = False
                for label in turn['turn_label']:
                    if (label[0] not in labels):
                        flag = True
                        break
                    label_list.append(label[0])
                # History is extended regardless of whether the turn is kept.
                dialog_history = ((((dialog_history + ' ') + turn['system_transcript']) + ' ') + turn['transcript'])
                dialog_history = dialog_history.strip()
                if (not flag):
                    multi_label_data.append([text_a.strip(), text_b.strip(), label_list])
        return multi_label_data
    def get_train_examples(self, data_dir):
        # data_dir is ignored; data was loaded eagerly in __init__.
        return self._create_examples(self.D[0], 'train')
    def get_test_examples(self, data_dir):
        return self._create_examples(self.D[2], 'test')
    def get_dev_examples(self, data_dir):
        return self._create_examples(self.D[1], 'dev')
    def get_labels(self):
        """The closed set of domain-slot labels recognized by this processor."""
        return ['attraction-area', 'attraction-name', 'attraction-type', 'hotel-area', 'hotel-book day', 'hotel-book people', 'hotel-book stay', 'hotel-internet', 'hotel-name', 'hotel-parking', 'hotel-pricerange', 'hotel-stars', 'hotel-type', 'restaurant-area', 'restaurant-book day', 'restaurant-book people', 'restaurant-book time', 'restaurant-food', 'restaurant-name', 'restaurant-pricerange', 'taxi-arriveby', 'taxi-departure', 'taxi-destination', 'taxi-leaveat', 'train-arriveby', 'train-book people', 'train-day', 'train-departure', 'train-destination', 'train-leaveat']
    def _create_examples(self, data, set_type):
        """Wrap raw triples into InputExample objects (set_type is unused here)."""
        examples = []
        for d in data:
            text_a = d[0]
            text_b = d[1]
            label = d[2]
            examples.append(InputExample(text_a=text_a, text_b=text_b, label=label))
        return examples
def evaluate_single(postprocessed_sql_str, g_str, db_dir, db_id, kmaps):
    """Score one predicted SQL string against its gold SQL (appears to follow
    the Spider text-to-SQL evaluation — confirm).

    Returns (exact_match_score, partial_scores, hardness).
    """
    p_str = postprocessed_sql_str
    db_name = db_id
    db = os.path.join(db_dir, db_id, (db_id + '.sqlite'))
    schema = Schema(get_schema(db))
    g_sql = get_sql(schema, g_str)
    evaluator = Evaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    # Per-difficulty counters; only this single query contributes here.
    scores = {}
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.0}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0.0, 'rec': 0.0, 'f1': 0.0, 'acc_count': 0, 'rec_count': 0}
    eval_err_num = 0
    hardness = evaluator.eval_hardness(g_sql)
    scores[hardness]['count'] += 1
    scores['all']['count'] += 1
    try:
        p_sql = get_sql(schema, p_str)
    except:
        # Unparsable prediction: fall back to an empty SQL structure so the
        # evaluation can still proceed (and scores the prediction as wrong).
        p_sql = {'except': None, 'from': {'conds': [], 'table_units': []}, 'groupBy': [], 'having': [], 'intersect': None, 'limit': None, 'orderBy': [], 'select': [False, []], 'union': None, 'where': []}
        eval_err_num += 1
        print('eval_err_num:{}'.format(eval_err_num))
    # Normalize values and column units in both queries via the foreign-key map.
    kmap = kmaps[db_name]
    g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
    g_sql = rebuild_sql_val(g_sql)
    g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
    p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
    p_sql = rebuild_sql_val(p_sql)
    p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
    exact_score = evaluator.eval_exact_match(p_sql, g_sql)
    partial_scores = evaluator.partial_scores
    scores[hardness]['exact'] += exact_score
    scores['all']['exact'] += exact_score
    # Accumulate component scores under both this hardness bucket and 'all'.
    for type_ in partial_types:
        if (partial_scores[type_]['pred_total'] > 0):
            scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
            scores[hardness]['partial'][type_]['acc_count'] += 1
        if (partial_scores[type_]['label_total'] > 0):
            scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
            scores[hardness]['partial'][type_]['rec_count'] += 1
        scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
        if (partial_scores[type_]['pred_total'] > 0):
            scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
            scores['all']['partial'][type_]['acc_count'] += 1
        if (partial_scores[type_]['label_total'] > 0):
            scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
            scores['all']['partial'][type_]['rec_count'] += 1
        scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
    return (exact_score, partial_scores, hardness)
class TypeConvertTransformer(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer casting the given columns to a target dtype."""
    def __init__(self, columns, dtype):
        self.columns = columns
        self.dtype = dtype
    def fit(self, X, *args):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X):
        """Cast each configured column of X in place and return X."""
        for column in self.columns:
            X[column] = X[column].astype(self.dtype)
        return X
def test_polyder():
    """Run check_polyder over scalar, 1-D, and stacked polynomial cases."""
    cases = [
        ([5], 0, [5]),
        ([5], 1, [0]),
        ([3, 2, 1], 0, [3, 2, 1]),
        ([3, 2, 1], 1, [6, 2]),
        ([3, 2, 1], 2, [6]),
        ([3, 2, 1], 3, [0]),
        ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
        ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
        ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
        ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
    ]
    for coeffs, order, expected in cases:
        # Transposed: polynomials are stored column-wise for check_polyder.
        check_polyder(np.array(coeffs).T, order, np.array(expected).T)
class ParsedData():
    """Container for a parsed payload: named parameters plus an optional body.

    NOTE(review): the bare annotations below suggest this was a @dataclass
    whose decorator was lost; as written, ``parameters`` is only a class-level
    annotation and ``body`` a class attribute defaulting to NOT_SET — confirm.
    """
    # Mapping of parameter name -> value.
    parameters: dict[(str, Any)]
    # Payload; the NOT_SET sentinel distinguishes "absent" from None.
    body: Any = NOT_SET
    def __hash__(self) -> int:
        """Hash the parameter items, folding in the body when present."""
        value = hash(tuple(self.parameters.items()))
        if (self.body is not NOT_SET):
            if isinstance(self.body, (dict, list)):
                # Unhashable containers are hashed via a canonical JSON dump.
                value ^= hash(json.dumps(self.body, sort_keys=True))
            else:
                value ^= hash(self.body)
        return value
class Capture(object):
    """Context manager snapshotting captured stdout/stderr from pytest's capfd."""
    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ''
        self.err = ''
    def __enter__(self):
        # Drain anything already captured so we only see the block's output.
        self.capfd.readouterr()
        return self
    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()
    def __eq__(self, other):
        """Compare captured stdout with ``other``; record the explanation on mismatch."""
        wrapped = Output(self.out)
        if wrapped == other:
            return True
        self.explanation = wrapped.explanation
        return False
    def __str__(self):
        return self.out
    def __contains__(self, item):
        return item in self.out
    def unordered(self):
        """Captured stdout wrapped for order-insensitive comparison."""
        return Unordered(self.out)
    def stderr(self):
        """Captured stderr wrapped as an Output."""
        return Output(self.err)
def parse_module(module_name: str, query_type4py: bool=False) -> _ModuleParseResult:
    """Import a module and, when possible, parse its source into an AST.

    Falls back to a tree-less result (linenos == -1) when the source cannot
    be retrieved, e.g. for builtin or C-extension modules.
    """
    module = importlib.import_module(module_name)
    type4py_data: (Type4pyData | None) = None
    syntax_tree: (astroid.Module | None) = None
    linenos: int = (- 1)
    try:
        source_file = inspect.getsourcefile(module)
        source_code = inspect.getsource(module)
        path = '' if (source_file is None) else source_file
        syntax_tree = astroid.parse(code=source_code, module_name=module_name, path=path)
        linenos = len(source_code.splitlines())
        if query_type4py:
            type4py_data = query_type4py_api(module_name, source_code)
    except (TypeError, OSError) as error:
        # No retrievable Python source for this module.
        LOGGER.debug(f'Could not retrieve source code for module {module_name} ({error}). Cannot derive syntax tree to allow Pynguin using more precise analysis.')
    return _ModuleParseResult(linenos=linenos, module_name=module_name, module=module, syntax_tree=syntax_tree, type4py_data=type4py_data)
def dla60x_c(pretrained=None, **kwargs):
    """Construct the DLA-60x-c variant, optionally loading ImageNet weights.

    NOTE(review): mutates the BottleneckX class attribute globally — affects
    any other model built afterwards; presumably intentional in this codebase.
    """
    BottleneckX.expansion = 2
    net = DLA([1, 1, 1, 2, 3, 1], [16, 32, 64, 64, 128, 256], block=BottleneckX, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
    return net
_args('v', 'f', 'i')
def dropout(g, input, p, train):
    """ONNX symbolic for dropout.

    Args:
        g: the ONNX graph being built.
        input: the input value.
        p: drop probability.
        train: training-mode flag (must match the exporter's training mode).
    """
    sym_help.assert_training_mode(train, 'dropout')
    # Dropout is the identity in inference mode, so pass the input through.
    if (not sym_help._training_mode):
        return input
    warnings.warn('Dropout is a training op and should not be exported in inference mode. Make sure to call eval() on the model, and to export it with param training=False.')
    # ONNX Dropout yields (output, mask); only the output is needed.
    (r, _) = g.op('Dropout', input, ratio_f=p, outputs=2)
    return r
def assert_model_downloaded(checkpoint_path, url, use_wget=False):
    """Ensure a model checkpoint exists locally, downloading it if absent.

    Uses gdown (Google-Drive-aware) by default; wget when ``use_wget`` is set.
    """
    if Path(checkpoint_path).exists():
        log.debug(f'[+] Model already present at {checkpoint_path}!')
        return
    log.info(f'[-] Model not found at {checkpoint_path}! Will download it')
    # Both downloaders want a plain string path.
    target = str(checkpoint_path)
    if use_wget:
        wget.download(url=url, out=target)
    else:
        gdown.download(url=url, output=target, quiet=False, fuzzy=True)
def _check_timit_folders(uppercase, data_folder):
if uppercase:
test_str = '/TEST/DR1'
train_str = '/TRAIN/DR1'
else:
test_str = '/test/dr1'
train_str = '/train/dr1'
if (not os.path.exists((data_folder + test_str))):
err_msg = ('the folder %s does not exist (it is expected in the TIMIT dataset)' % (data_folder + test_str))
raise FileNotFoundError(err_msg)
if (not os.path.exists((data_folder + train_str))):
err_msg = ('the folder %s does not exist (it is expected in the TIMIT dataset)' % (data_folder + train_str))
raise FileNotFoundError(err_msg) |
def test_folder():
    """Smoke-test CaptionDataset: load saved infos, build the train subset,
    and fetch one item to exercise the full loading path.

    Fix: the original called ``open(...)`` without ever closing the file
    handle; a ``with`` block closes it deterministically.
    """
    with open('log_trans/infos_trans.pkl', 'rb') as f:
        infos = pickle_load(f)
    dataset = CaptionDataset(infos['opt'])
    train_subset = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
    # Indexing triggers the dataset's __getitem__ machinery.
    train_subset[0]
def test_comment_encoding_when_reindent():
    """Reindenting a statement whose only formatting is a trailing comment
    must leave the SQL unchanged."""
    statement = u'select foo -- Comment containing Umlauts\nfrom bar'
    assert sqlparse.format(statement, reindent=True) == statement
class TVQADataset(BaseDataset):
    """TVQA multiple-choice video question answering dataset.

    Each sample pairs frames sampled from a timestamped clip span with a
    question and five answer options. Frames were extracted at 3 fps.
    """

    def __init__(self, *args, split='', **kwargs):
        assert (split in ['train', 'val', 'test'])
        self.split = split
        self.metadata = None
        self._load_metadata()
        if (split == 'train'):
            names = ['tvqa_train']
        elif (split == 'val'):
            names = ['tvqa_val']
        elif (split == 'test'):
            names = ['tvqa_test']
        super().__init__(*args, **kwargs, names=names, text_column_name='caption')
        # Per-sample detection bookkeeping; reset at the top of __getitem__.
        self.only_use_relevant_dets = True
        if self.only_use_relevant_dets:
            self.relevant_dets = []
            self.relevant_dets_classes = []
        # Frame-extraction rate of the TVQA release (frames per second).
        self.fps = 3

    def _load_metadata(self):
        """Load the per-split jsonl annotation file into a DataFrame."""
        metadata_dir = './meta_data/tvqa'
        split_files = {'train': 'tvqa_train.jsonl', 'val': 'tvqa_val.jsonl', 'test': 'tvqa_test_public.jsonl'}
        target_split_fp = split_files[self.split]
        metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
        self.metadata = metadata

    def _get_image_path(self, sample):
        """Return (absolute, relative) frame-directory paths for a sample."""
        # Show name is encoded as the first token of vid_name.
        dir_name = sample['vid_name'].split('_')[0]
        # Unknown shows fall back to the 'bbt' directory.
        if (dir_name not in ['bbt', 'castle', 'friends', 'grey', 'house', 'met']):
            dir_name = 'bbt'
        rel_fp = os.path.join('frames/raw_frames/frames_hq/', (dir_name + '_frames'), sample['vid_name'])
        return (os.path.join(self.data_dir, rel_fp), rel_fp)

    def _get_caption(self, sample):
        return sample[0]

    def _get_video_len(self, dir):
        # Frame count == number of files in the clip's frame directory.
        return len(os.listdir(dir))

    def get_raw_video(self, sample):
        """Read self.num_frames frames spanning the sample's 'ts' time range.

        Returns a (C, T, H, W) uint8 tensor.
        """
        (abs_fp, rel_fp) = self._get_image_path(sample)
        # 'ts' is a "start-end" span in seconds.
        [beg_time, end_time] = sample['ts'].split('-')
        clip_len = int(((float(end_time) - float(beg_time)) * self.fps))
        # Guarantee enough frames to sample from even for very short spans.
        clip_len = max(clip_len, (2 * self.num_frames))
        rel_frame_index = sample_frames(self.num_frames, clip_len)
        # Frame files are 1-based.
        begin_frame_index = max(1, int((float(beg_time) * self.fps)))
        video_len = self._get_video_len(abs_fp)
        frames = []
        for index in rel_frame_index:
            abs_index = (begin_frame_index + index)
            # Clamp so a span running past the clip end repeats the last frame.
            abs_index = min(video_len, abs_index)
            image_rel_path = f'{abs_index:05}'
            img = cv2.imread(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path)))
            if (img is None):
                # Missing frame: log and let the subsequent torch call raise,
                # which __getitem__ catches and retries with another index.
                print(sample['vid_name'])
                print(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path)))
            frame = torch.from_numpy(img).byte()
            frame = frame.permute(2, 0, 1)
            frames.append(frame)
        # (T, C, H, W) -> (C, T, H, W)
        frames = torch.stack(frames).permute(1, 0, 2, 3)
        return frames

    def get_text(self, sample):
        """Build five (raw_text, encoding) pairs, one per answer option.

        Each text is question + all options + one candidate answer.
        """
        question = self.get_question(sample)
        qa_texts = []
        options = ' '.join((sample['a{}'.format(i)] for i in range(5)))
        for i in range(5):
            raw_text = ((((question + 'Options: ') + options) + 'Answer: ') + sample['a{}'.format(i)])
            qa_encoding = self.tokenizer(raw_text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
            qa_texts.append((raw_text, qa_encoding))
        return qa_texts

    def get_answer_label(self, sample):
        # Index (0-4) of the correct answer option.
        answer = int(sample['answer_idx'])
        return answer

    def get_question(self, sample):
        return sample['q']

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, index):
        """Fetch one QA sample; on any failure, retry with a random index."""
        result = None
        while (result is None):
            sample = self.metadata.iloc[index]
            try:
                self.relevant_dets = []
                self.relevant_dets_classes = []
                answer = self.get_answer_label(sample)
                ret = {'img_index': index, 'cap_index': index, 'raw_index': index, 'answer': answer}
                qa_texts = self.get_text(sample)
                ret['text'] = qa_texts[0]
                # Remaining options go in as options_text_0..n-2.
                # NOTE(review): self.draw_options_text is not set in __init__;
                # presumably provided by BaseDataset — confirm.
                for i in range((self.draw_options_text - 1)):
                    ret.update({f'options_text_{i}': qa_texts[(i + 1)]})
                video_tensor = self.get_video(sample)
                ret['image'] = video_tensor
                result = True
            except Exception as e:
                print(f'Error while read file idx {sample.name} in {self.names[0]} -> {e}')
                print('time stamp is: {}'.format(sample['ts']))
                index = random.randint(0, (len(self.metadata) - 1))
        return ret
_args('v', 'i')
def _dim_arange(g, like, dim):
    """ONNX symbolic producing arange(0, like.shape[dim])."""
    like_shape = g.op('Shape', like)
    # Pull the size of dimension `dim` out of the shape tensor.
    stop = g.op('Gather', like_shape, g.op('Constant', value_t=torch.tensor(dim)), axis_i=0)
    if (sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK):
        # Caffe2-specific Range op is available under ATen-fallback export.
        return g.op('_caffe2::Range', stop)
    else:
        # Delegate to the arange symbolic with only `stop` set; the literal 4
        # is a dtype selector (presumably int64) — TODO confirm against
        # arange's parameter order.
        return arange(g, stop, 4, None, None, None)
def dir_mat(path):
    """List the variables stored in a MATLAB .mat file.

    Returns scipy's whosmat() output: a list of (name, shape, dtype) tuples.
    """
    import scipy.io as sio
    resolved = get_absolute_path(path)
    return sio.whosmat(resolved)
def filter_newstyle_options(func, **options):
    """Keep only the options *func* accepts, trying each known prefix.

    Every prefixed variant of a key that appears among the function's option
    names is included, so one input key may yield several output entries.
    """
    allowed = get_options_from_function(func).keys()
    return {
        prefix + key: value
        for key, value in options.items()
        for prefix in ('', 'use_', 'opt_', 'opt_allow_')
        if (prefix + key) in allowed
    }
class Learner(BaseLearner):
    """Trainer that runs per-batch TF1 sessions for either a basic seq2seq
    model or a graph-augmented encoder-decoder, chosen by model class name."""

    def __init__(self, data, model, evaluator, batch_size=1, verbose=False):
        super(Learner, self).__init__(data, model, evaluator, batch_size, verbose)
        # Bind the batch runner matching the concrete model type.
        if (type(self.model).__name__ == 'BasicEncoderDecoder'):
            setattr(self, '_run_batch', self._run_batch_basic)
        elif (type(self.model).__name__ == 'GraphEncoderDecoder'):
            setattr(self, '_run_batch', self._run_batch_graph)

    def _get_feed_dict(self, batch, encoder_init_state=None, graph_data=None, graphs=None, copy=False, init_checklists=None, encoder_nodes=None, decoder_nodes=None, matched_items=None):
        """Assemble the TF feed dict for one batch.

        When `copy` is set, target/matched-item token ids are remapped into
        the extended copy vocabulary via the graphs object. Graph-specific
        entries are only added when `graph_data` is provided.
        """
        if copy:
            targets = graphs.copy_targets(batch['targets'], self.vocab.size)
            matched_items = graphs.copy_targets(np.reshape(matched_items, [(- 1), 1]), self.vocab.size)
            matched_items = np.reshape(matched_items, [(- 1)])
        else:
            targets = batch['targets']
        encoder_args = {'inputs': batch['encoder_inputs'], 'last_inds': batch['encoder_inputs_last_inds'], 'init_state': encoder_init_state}
        decoder_args = {'inputs': batch['decoder_inputs'], 'last_inds': batch['decoder_inputs_last_inds'], 'targets': targets}
        kwargs = {'encoder': encoder_args, 'decoder': decoder_args}
        if (graph_data is not None):
            # Graph model: wire entity updates, utterance embeddings, and
            # checklist/selection inputs into both encoder and decoder.
            encoder_args['update_entities'] = graph_data['encoder_entities']
            decoder_args['update_entities'] = graph_data['decoder_entities']
            encoder_args['utterances'] = graph_data['utterances']
            kwargs['graph_embedder'] = graph_data
            decoder_args['init_checklists'] = init_checklists
            encoder_args['entities'] = encoder_nodes
            decoder_args['entities'] = decoder_nodes
            decoder_args['cheat_selection'] = decoder_nodes
            decoder_args['encoder_entities'] = encoder_nodes
        feed_dict = self.model.get_feed_dict(**kwargs)
        return feed_dict

    def _run_batch_graph(self, dialogue_batch, sess, summary_map, test=False):
        """Run one dialogue (sequence of turn batches) through the graph model.

        The encoder state and utterance embeddings are threaded turn-to-turn.
        In test mode only losses are computed; otherwise the train op runs too.
        """
        encoder_init_state = None
        utterances = None
        graphs = dialogue_batch['graph']
        matched_items = dialogue_batch['matched_items']
        for (i, batch) in enumerate(dialogue_batch['batch_seq']):
            graph_data = graphs.get_batch_data(batch['encoder_tokens'], batch['decoder_tokens'], batch['encoder_entities'], batch['decoder_entities'], utterances, self.vocab)
            init_checklists = graphs.get_zero_checklists(1)
            feed_dict = self._get_feed_dict(batch, encoder_init_state, graph_data, graphs, self.data.copy, init_checklists, graph_data['encoder_nodes'], graph_data['decoder_nodes'], matched_items)
            if test:
                (logits, final_state, utterances, loss, seq_loss, total_loss, sel_loss) = sess.run([self.model.decoder.output_dict['logits'], self.model.final_state, self.model.decoder.output_dict['utterances'], self.model.loss, self.model.seq_loss, self.model.total_loss, self.model.select_loss], feed_dict=feed_dict)
            else:
                (_, logits, final_state, utterances, loss, seq_loss, sel_loss, gn) = sess.run([self.train_op, self.model.decoder.output_dict['logits'], self.model.final_state, self.model.decoder.output_dict['utterances'], self.model.loss, self.model.seq_loss, self.model.select_loss, self.grad_norm], feed_dict=feed_dict)
            # Carry the decoder's final state into the next turn's encoder.
            encoder_init_state = final_state
            if self.verbose:
                preds = np.argmax(logits, axis=2)
                if self.data.copy:
                    # Map copy-vocabulary ids back to real tokens for printing.
                    preds = graphs.copy_preds(preds, self.data.mappings['vocab'].size)
                self._print_batch(batch, preds, seq_loss)
            if test:
                # total_loss is (summed loss, token count) for proper averaging.
                logstats.update_summary_map(summary_map, {'total_loss': total_loss[0], 'num_tokens': total_loss[1]})
            else:
                logstats.update_summary_map(summary_map, {'loss': loss})
                logstats.update_summary_map(summary_map, {'sel_loss': sel_loss})
                logstats.update_summary_map(summary_map, {'grad_norm': gn})

    def _run_batch_basic(self, dialogue_batch, sess, summary_map, test=False):
        """Run one dialogue through the basic encoder-decoder (no graph)."""
        encoder_init_state = None
        matched_items = dialogue_batch['matched_items']
        for batch in dialogue_batch['batch_seq']:
            feed_dict = self._get_feed_dict(batch, encoder_init_state, matched_items=matched_items)
            if test:
                (logits, final_state, loss, seq_loss, total_loss) = sess.run([self.model.decoder.output_dict['logits'], self.model.final_state, self.model.loss, self.model.seq_loss, self.model.total_loss], feed_dict=feed_dict)
            else:
                (_, logits, final_state, loss, seq_loss, gn) = sess.run([self.train_op, self.model.decoder.output_dict['logits'], self.model.final_state, self.model.loss, self.model.seq_loss, self.grad_norm], feed_dict=feed_dict)
            # Thread the final state into the next turn.
            encoder_init_state = final_state
            if self.verbose:
                preds = np.argmax(logits, axis=2)
                self._print_batch(batch, preds, seq_loss)
            if test:
                logstats.update_summary_map(summary_map, {'total_loss': total_loss[0], 'num_tokens': total_loss[1]})
            else:
                logstats.update_summary_map(summary_map, {'loss': loss})
                logstats.update_summary_map(summary_map, {'grad_norm': gn})
def plot_benchmark(output_path_root: str='tmp-/') -> None:
    """Collect per-run result pickles under *output_path_root* and plot them.

    For each run folder containing stats.csv, an existing results.pkl is
    reused; otherwise one is derived from the CSV before plotting.
    """
    csv_files = glob((os.path.join(output_path_root, '*/') + 'stats.csv'), recursive=True)
    pkl_files = []
    for csv_file in csv_files:
        pkl_file = os.path.join(os.path.dirname(csv_file), 'results.pkl')
        if (not os.path.exists(csv_file)) and (not os.path.exists(pkl_file)):
            # Neither artifact present (race with deletion): keep the pkl path
            # as-is, mirroring the original fall-through behaviour.
            pass
        elif not os.path.exists(pkl_file):
            pkl_file = write_pkl_from_csv(csv_file)
        pkl_files.append(pkl_file)
    plot_benchmark_whole_pkl(pkl_files, output_path_root)
class MeshInstance():
    """Python-side handle over a Taichi core mesh object.

    Wraps _ti_core mesh setters/getters and owns the element fields
    (verts/edges/faces/cells) plus the set of registered relations.
    """

    def __init__(self):
        self.mesh_ptr = _ti_core.create_mesh()
        # Relation types registered via set_relation_fixed/dynamic.
        self.relation_set = set()
        self.verts = MeshElementField(self, MeshElementType.Vertex, {}, {}, {})
        self.edges = MeshElementField(self, MeshElementType.Edge, {}, {}, {})
        self.faces = MeshElementField(self, MeshElementType.Face, {}, {}, {})
        self.cells = MeshElementField(self, MeshElementType.Cell, {}, {}, {})

    def get_position_as_numpy(self):
        """Return vertex positions cached at load time; raise if absent."""
        if hasattr(self, '_vert_position'):
            return self._vert_position
        raise TaichiSyntaxError('Position info is not in the file.')

    def set_owned_offset(self, element_type: MeshElementType, owned_offset: ScalarField):
        # Per-patch offsets of elements owned by each patch.
        _ti_core.set_owned_offset(self.mesh_ptr, element_type, owned_offset.vars[0].ptr.snode())

    def set_total_offset(self, element_type: MeshElementType, total_offset: ScalarField):
        # Per-patch offsets including ghost (non-owned) elements.
        _ti_core.set_total_offset(self.mesh_ptr, element_type, total_offset.vars[0].ptr.snode())

    def set_index_mapping(self, element_type: MeshElementType, conv_type: ConvType, mapping: ScalarField):
        # Index conversion table (e.g. local<->global) for an element type.
        _ti_core.set_index_mapping(self.mesh_ptr, element_type, conv_type, mapping.vars[0].ptr.snode())

    def set_num_patches(self, num_patches: int):
        _ti_core.set_num_patches(self.mesh_ptr, num_patches)

    def set_patch_max_element_num(self, element_type: MeshElementType, max_element_num: int):
        _ti_core.set_patch_max_element_num(self.mesh_ptr, element_type, max_element_num)

    def set_relation_fixed(self, rel_type: MeshRelationType, value: ScalarField):
        """Register a fixed-arity element-to-element relation."""
        self.relation_set.add(rel_type)
        _ti_core.set_relation_fixed(self.mesh_ptr, rel_type, value.vars[0].ptr.snode())

    def set_relation_dynamic(self, rel_type: MeshRelationType, value: ScalarField, patch_offset: ScalarField, offset: ScalarField):
        """Register a variable-arity relation backed by offset tables."""
        self.relation_set.add(rel_type)
        _ti_core.set_relation_dynamic(self.mesh_ptr, rel_type, value.vars[0].ptr.snode(), patch_offset.vars[0].ptr.snode(), offset.vars[0].ptr.snode())

    def add_mesh_attribute(self, element_type, snode, reorder_type):
        _ti_core.add_mesh_attribute(self.mesh_ptr, element_type, snode, reorder_type)

    def get_relation_size(self, from_index, to_element_type):
        """Number of neighbors of `to_element_type` for the given element."""
        return _ti_core.get_relation_size(self.mesh_ptr, from_index.ptr, to_element_type, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()))

    def get_relation_access(self, from_index, to_element_type, neighbor_idx_ptr):
        """Access the neighbor_idx-th neighbor of `to_element_type`."""
        return _ti_core.get_relation_access(self.mesh_ptr, from_index.ptr, to_element_type, neighbor_idx_ptr, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()))
def check_tree(tree):
    """Validate that *tree* is a binary tree of scalars.

    A valid tree is either a scalar leaf or a 2-tuple whose elements are
    themselves valid trees.

    Returns:
        True for a valid tree.
    Raises:
        Exception: with message 'Not a tree!' for any other structure.
    """
    def _check_node(node):
        if np.isscalar(node):
            return True
        if isinstance(node, tuple) and (len(node) == 2):
            # Fix: use logical `and` instead of bitwise `&` — both operands
            # are always True here (or raise), so behavior is identical, but
            # `and` short-circuits and is the idiomatic boolean operator.
            return _check_node(node[0]) and _check_node(node[1])
        raise Exception('Not a tree!')
    return _check_node(tree)
class CustomTrain(CustomBase):
    """Training split of a custom image dataset, built from a text file that
    lists one image path per line."""

    def __init__(self, size, training_images_list_file):
        super().__init__()
        with open(training_images_list_file, 'r') as file_handle:
            image_paths = file_handle.read().splitlines()
        # No random cropping for this split; images are resized to `size`.
        self.data = ImagePaths(paths=image_paths, size=size, random_crop=False)
def randn_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, mu=0, sigma=1, shape=(), seed=(- 1)):
    """Backward pass for randn: random sampling is non-differentiable, so
    every gradient slot (one per grad_input and per input) is None.

    mu/sigma/shape/seed mirror the forward op's signature and are unused.
    Fix: the `shape` default was a mutable list (`[]`), which Python shares
    across calls; an empty tuple is equivalent and safe.
    """
    return [None] * (len(grad_inputs) + len(inputs))
class LazySet(set):
    """A set whose contents are filled lazily from an iterable on first use.

    __new__ builds a fresh per-instance subclass whose set operations are all
    shimmed: the first operation drains the captured iterable into the set
    (under a lock), strips the shims off the subclass, then delegates to the
    real set method. Subsequent calls hit plain set methods directly.
    """
    # Every method that must trigger the lazy fill before delegating to set.
    _props = ('__str__', '__repr__', '__unicode__', '__hash__', '__sizeof__', '__cmp__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', '__contains__', '__len__', '__nonzero__', '__getitem__', '__setitem__', '__delitem__', '__iter__', '__sub__', '__and__', '__xor__', '__or__', '__rsub__', '__rand__', '__rxor__', '__ror__', '__isub__', '__iand__', '__ixor__', '__ior__', 'add', 'clear', 'copy', 'difference', 'difference_update', 'discard', 'intersection', 'intersection_update', 'isdisjoint', 'issubset', 'issuperset', 'pop', 'remove', 'symmetric_difference', 'symmetric_difference_update', 'union', 'update')

    def __new__(cls, fill_iter=None):
        # No iterable: nothing to defer.
        # NOTE(review): this returns a plain `set`, not a LazySet — callers
        # doing isinstance(x, LazySet) get False here; confirm intentional.
        if (fill_iter is None):
            return set()
        # A fresh subclass per instance, so deleting the shims below only
        # affects this one object's class.
        class LazySet(set):
            pass
        # Box the iterable in a list: popping it doubles as the "already
        # filled" flag for the closure below.
        fill_iter = [fill_iter]
        def lazy(name):
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if (len(fill_iter) > 0):
                        # First access: drain the iterable into the set...
                        for i in fill_iter.pop():
                            set.add(self, i)
                        # ...then remove every shim so later calls go straight
                        # to the built-in set implementations.
                        for method_name in cls._props:
                            delattr(LazySet, method_name)
                finally:
                    _fill_lock.release()
                # Delegate to the real set method.
                return getattr(set, name)(self, *args, **kw)
            return _lazy
        for name in cls._props:
            setattr(LazySet, name, lazy(name))
        new_set = LazySet()
        return new_set
def dice_coef(y_tru, y_prd):
    """Soft Dice coefficient between labels and logits.

    Both tensors are reshaped to (2, -1) — the leading 2 looks like a
    hard-coded batch size; verify against callers. `_smooth` guards the
    division when the union is empty.
    """
    labels = tf.reshape(y_tru, [2, (- 1)])
    logits = tf.reshape(y_prd, [2, (- 1)])
    probs = tf.sigmoid(logits)
    intersection = tf.reduce_sum((probs * labels), 0)
    union = (tf.reduce_sum(probs, 0) + tf.reduce_sum(labels, 0))
    dice = (((2.0 * intersection) + _smooth) / (union + _smooth))
    return tf.reduce_mean(dice)
class SentenceREDataset(data.Dataset):
    """Sentence-level relation extraction dataset (jsonl input).

    Lines sharing the exact same token sequence are merged into one item
    carrying an entity_list and an entity_pair_list of (head_idx, tail_idx,
    relation_id) triples. __getitem__ produces token ids, attention mask,
    a (1, n_rel, L, L) one-hot relation grid, and per-token entity labels.
    """

    def __init__(self, path, rel2id, tokenizer, kwargs, sort=False, sort_reverse=False):
        super().__init__()
        self.path = path
        self.tokenizer = tokenizer
        self.rel2id = rel2id
        self.kwargs = kwargs
        self.sort = sort
        self.data = []
        # Relation id of the "no relation" label, -1 if absent.
        self.NA_id = (- 1)
        for r in rel2id:
            # NOTE(review): membership test `r in NA` suggests NA is a
            # collection (or string) of NA label names defined elsewhere.
            if (r in NA):
                self.NA_id = rel2id[r]
        self.load(sort, sort_reverse)

    def load(self, sort, sort_reverse):
        """Read the jsonl file, merging duplicate sentences into one item.

        sentense_idx buckets sentences by token count so the dedup lookup
        dict stays small; sentences longer than 512 tokens are dropped.
        """
        f = open(self.path)
        # One {'sentence text': data index} dict per token-count bucket.
        sentense_idx = [{} for _ in range(1000)]
        for (i, line) in enumerate(f.readlines()):
            line = line.rstrip()
            if (len(line) > 0):
                _d = json.loads(line)
                if ('token' not in _d):
                    _d['token'] = _d['text'].split(' ')
                if (len(_d['token']) > 512):
                    continue
                if (' '.join(_d['token']) in sentense_idx[len(_d['token'])]):
                    # Duplicate sentence: append the new entity pair to the
                    # already-stored item instead of creating a new one.
                    idx = sentense_idx[len(_d['token'])][' '.join(_d['token'])]
                    entity_ids = [(- 1), (- 1)]
                    # NOTE(review): inner `i` shadows the enumerate index above.
                    for (i, k) in enumerate(['h', 't']):
                        try:
                            entity_ids[i] = self.data[idx]['entity_list'].index(_d[k])
                        except ValueError:
                            # Entity not seen before for this sentence: register it.
                            self.data[idx]['entity_list'].append(_d[k])
                            entity_ids[i] = (len(self.data[idx]['entity_list']) - 1)
                    assert (min(entity_ids) > (- 1))
                    self.data[idx]['entity_pair_list'].append([entity_ids[0], entity_ids[1], self.rel2id[_d['relation']]])
                else:
                    # First occurrence: tokenize and store the full item.
                    _d['entity_list'] = [_d['h'], _d['t']]
                    _d['entity_pair_list'] = [[0, 1, self.rel2id[_d['relation']]]]
                    del _d['h']
                    del _d['t']
                    (indexed_tokens, att_mask, new_index) = self.tokenizer(_d)
                    _d['indexed_tokens'] = indexed_tokens
                    _d['att_mask'] = att_mask
                    # new_index maps word positions to subword positions.
                    _d['new_index'] = new_index
                    _d['seq_len'] = indexed_tokens.shape[(- 1)]
                    if (indexed_tokens.shape[(- 1)] <= 512):
                        self.data.append(_d)
                        sentense_idx[len(_d['token'])][' '.join(_d['token'])] = (len(self.data) - 1)
        if sort:
            self.data.sort(key=(lambda x: x['seq_len']), reverse=sort_reverse)
        else:
            random.shuffle(self.data)
        f.close()

    def set_max_words(self, w_max=100):
        """Drop items longer than w_max words."""
        self.data = [d for d in self.data if (len(d['token']) <= w_max)]

    def remove_na(self):
        """Drop NA-labelled pairs, then items left with no pairs at all."""
        new_list = []
        for d in self.data:
            d['entity_pair_list'] = [tuple(ep) for ep in d['entity_pair_list'] if (ep[2] != self.NA_id)]
            if (len(d['entity_pair_list']) > 0):
                new_list.append(d)
        self.data = new_list

    def remove_repeat(self):
        """Deduplicate identical (head, tail, relation) triples per item."""
        for d in self.data:
            epl = d['entity_pair_list']
            epl = list(set([tuple(ep) for ep in epl]))
            epl = [list(ep) for ep in epl]
            d['entity_pair_list'] = epl

    def char_idx_to_word_idx(self):
        """Convert entity positions from character offsets to word offsets.

        Assumes words are separated by single spaces; end positions become
        exclusive word indices.
        """
        for d in self.data:
            idx = []
            word_i = 0
            for (i, c) in enumerate(d['text']):
                idx.append(word_i)
                if (c == ' '):
                    word_i += 1
            # Sentinel so an end offset one past the last char still resolves.
            idx.append(word_i)
            for e in d['entity_list']:
                e['pos'][0] = idx[e['pos'][0]]
                e['pos'][1] = (idx[e['pos'][1]] + 1)

    def split(self):
        """Expand multi-pair items into one item per entity pair, then shuffle."""
        new_data = []
        for d in self.data:
            if (len(d['entity_pair_list']) == 1):
                new_data.append(d)
            else:
                for i in range(len(d['entity_pair_list'])):
                    # Shallow copy: entity_list etc. remain shared across splits.
                    d1 = d.copy()
                    d1['entity_pair_list'] = [d['entity_pair_list'][i]]
                    new_data.append(d1)
        self.data = new_data
        random.shuffle(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return (token ids, attention mask, relation grid, token labels)."""
        item = self.data[index]
        seq_len = item['indexed_tokens'].shape[(- 1)]
        # One (L, L) grid per relation type; cell (i, j) marks head-span i
        # with tail-span j under that relation.
        predicate_one_hot_labels = torch.zeros((1, len(self.rel2id), seq_len, seq_len))
        for epl in item['entity_pair_list']:
            predicate_one_hot_labels = self.merge_ont_hot_labels(predicate_one_hot_labels, item['entity_list'][epl[0]], item['entity_list'][epl[1]], epl[2], item['new_index'], seq_len)
        # Overlapping pairs can push counts above 1: clamp back to one-hot.
        if (predicate_one_hot_labels.max().item() > 1):
            predicate_one_hot_labels = (predicate_one_hot_labels > 0).float()
        # NOTE(review): `epl` here is whatever pair the loop ended on, so the
        # token labels only cover the LAST entity pair — verify this is the
        # intended behavior for multi-pair items.
        em = SentenceREDataset.get_entity_mask(item['entity_list'][epl[0]], item['entity_list'][epl[1]], item['new_index'], seq_len)
        token_labels = (em.sum(dim=0) + em.sum(dim=1))
        token_labels = (token_labels > 0).long()
        return (item['indexed_tokens'], item['att_mask'], predicate_one_hot_labels, token_labels)

    def merge_ont_hot_labels(self, predicate_one_hot_labels, pos_head, pos_tail, rel_id, new_index, seq_len):
        """Add one entity pair's (L, L) mask onto its relation's grid."""
        em = SentenceREDataset.get_entity_mask(pos_head, pos_tail, new_index, seq_len)
        predicate_one_hot_labels[0][rel_id] += em
        return predicate_one_hot_labels

    # NOTE(review): called via the class, so the missing @staticmethod works,
    # but `h` is effectively the head entity (not self).
    def get_entity_mask(h, t, new_index, seq_len):
        """(L, L) mask with 1 where row is in the head span and column in the
        tail span (positions remapped word->subword via new_index)."""
        pos_head = h['pos']
        pos_tail = t['pos']
        pos_head = [new_index.index(i) for i in pos_head]
        pos_tail = [new_index.index(i) for i in pos_tail]
        entity_1 = torch.zeros(seq_len)
        entity_1[pos_head[0]:pos_head[1]] = 1
        entity_2 = torch.zeros(seq_len)
        entity_2[pos_tail[0]:pos_tail[1]] = 1
        res = entity_1.unsqueeze(1).repeat_interleave(seq_len, dim=1)
        res += entity_2.unsqueeze(1).repeat_interleave(seq_len, dim=1).t()
        # Both conditions met exactly where head-row AND tail-column overlap.
        res = (res == 2).float()
        return res

    # NOTE(review): also used without @staticmethod; `data` is the batch list.
    def collate_fn(data):
        """Pad a batch of __getitem__ outputs to a common sequence length."""
        data = list(zip(*data))
        subject_labels = data[(- 1)]
        batch_subject_labels = pad_sequence(subject_labels, batch_first=True, padding_value=0)
        labels = list(data[(- 2)])
        seq_len = batch_subject_labels.shape[(- 1)]
        for i in range(len(labels)):
            # Zero-pad the relation grids on the last two (L, L) dims.
            concat_shape = list(labels[i].shape)
            concat_shape[(- 1)] = (seq_len - concat_shape[(- 1)])
            labels[i] = torch.cat([labels[i], torch.zeros(concat_shape)], dim=(- 1))
            concat_shape = list(labels[i].shape)
            concat_shape[(- 2)] = (seq_len - concat_shape[(- 2)])
            labels[i] = torch.cat([labels[i], torch.zeros(concat_shape)], dim=(- 2))
        batch_labels = torch.cat(labels, 0)
        # Pad token ids and attention masks to the batch length.
        seqs = data[0:2]
        batch_seqs = []
        for seq in seqs:
            seq = list(seq)
            for i in range(len(seq)):
                seq[i] = torch.cat([seq[i], torch.zeros((1, (seq_len - seq[i].shape[(- 1)])), dtype=seq[i].dtype)], dim=(- 1))
            batch_seqs.append(torch.cat(seq, dim=0))
        return (batch_seqs + [batch_labels, batch_subject_labels])

    def information(self):
        """Print dataset statistics: normal/multi-label/overlapping sentence
        counts, triple-count histogram, and NA-pair count."""
        normal_count = 0
        multi_label_count = 0
        over_lapping_count = 0
        triples_num_count = ([0] * 4)
        triples_count = 0
        NA_count = 0
        for d in self.data:
            epl = d['entity_pair_list'].copy()
            # NA_count accumulates how many pairs were filtered as NA.
            NA_count += len(epl)
            epl = [ep for ep in epl if (ep[2] != self.NA_id)]
            NA_count -= len(epl)
            triples_count += len(epl)
            normal_count += (1 if (is_normal(epl) and (len(epl) > 0)) else 0)
            multi_label_count += (1 if is_multi_label(epl) else 0)
            over_lapping_count += (1 if is_over_lapping(epl) else 0)
            # Histogram bucket 3 aggregates "3 or more" triples.
            count = (len(epl) if (len(epl) < 3) else 3)
            triples_num_count[count] += 1
        print(('data: %s\nnormal_count: %d\nmulti_label_count: %d\nover_lapping_count: %d' % (self.path, normal_count, multi_label_count, over_lapping_count)))
        print('triples_count :')
        for (i, tc) in enumerate(triples_num_count):
            print(('%d : %d' % (i, tc)))
        print(('NA_count : %d' % NA_count))
        print(('data len %d ' % len(self.data)))
        print(('triples count %d ' % triples_count))
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None):
    """Decorator factory that appends a ready-to-run usage example (and
    optional output documentation) to a model class method's docstring.

    The example template is chosen from the class name (TF vs PyTorch, task
    head vs base model) and formatted with the tokenizer/checkpoint names.
    """
    def docstring_decorator(fn):
        model_class = fn.__qualname__.split('.')[0]
        is_tf_class = model_class.startswith('TF')
        # Order matters: 'Model' is the catch-all and must be tried last.
        samples = (
            ('SequenceClassification', TF_SEQUENCE_CLASSIFICATION_SAMPLE, PT_SEQUENCE_CLASSIFICATION_SAMPLE),
            ('QuestionAnswering', TF_QUESTION_ANSWERING_SAMPLE, PT_QUESTION_ANSWERING_SAMPLE),
            ('TokenClassification', TF_TOKEN_CLASSIFICATION_SAMPLE, PT_TOKEN_CLASSIFICATION_SAMPLE),
            ('MultipleChoice', TF_MULTIPLE_CHOICE_SAMPLE, PT_MULTIPLE_CHOICE_SAMPLE),
            ('MaskedLM', TF_MASKED_LM_SAMPLE, PT_MASKED_LM_SAMPLE),
            ('LMHead', TF_CAUSAL_LM_SAMPLE, PT_CAUSAL_LM_SAMPLE),
            ('Model', TF_BASE_MODEL_SAMPLE, PT_BASE_MODEL_SAMPLE),
        )
        for marker, tf_sample, pt_sample in samples:
            if marker in model_class:
                code_sample = tf_sample if is_tf_class else pt_sample
                break
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")
        output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else ''
        built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
        fn.__doc__ = (fn.__doc__ or '') + ''.join(docstr) + output_doc + built_doc
        return fn
    return docstring_decorator
.operations('invalid')
def test_invalid_operation_suggestion(cli, cli_args, snapshot_cli):
    """Running the CLI against a schema containing an invalid operation
    should produce output matching the stored snapshot (which includes the
    suggestion text)."""
    assert (cli.run(*cli_args, '--validate-schema=true') == snapshot_cli)
def get_a_expr_field_value(node: A_Expr):
    """Extract (column_name, value) from a binary expression ``col <op> const``.

    The ColumnRef operand may be on either side of the operator; the other
    side must be an A_Const.

    Fixes:
    - The original elif tested ``isinstance(node.rexpr, ColumnRef) or
      isinstance(node.rexpr, ColumnRef)`` — the same condition twice; a single
      test is sufficient.
    - When neither operand was a ColumnRef, the original fell through and
      crashed with an opaque UnboundLocalError; raise a descriptive
      ValueError instead.
    """
    if isinstance(node.lexpr, ColumnRef):
        column, value = node.lexpr, node.rexpr
    elif isinstance(node.rexpr, ColumnRef):
        column, value = node.rexpr, node.lexpr
    else:
        raise ValueError('A_Expr has no ColumnRef operand')
    assert isinstance(value, A_Const)
    column_name = column.fields[0].sval
    value_res = value.val
    return (column_name, value_res)
def train(small_dataset, n_items, constructor, actor_reg_loss_scaler=0.0, n_epochs_pred_only=0, n_epochs_ac_only=0, n_epochs_pred_and_ac=0, break_early=False, lr_actor=0.001, lr_critic=0.0001, lr_ac=2e-06, batch_size=100, min_kl=0.0, max_kl=0.2, batches_to_anneal_over=200000, verbose=False):
    """Train an actor-critic recommender in three phases and report NDCG/recall.

    Phases (by epoch count): actor-only pretraining (n_epochs_pred_only),
    critic-only training (n_epochs_ac_only), then joint actor-critic updates
    (n_epochs_pred_and_ac). The KL loss weight is annealed linearly from
    min_kl to max_kl over batches_to_anneal_over batches. `constructor`
    builds the model graph from the batched dataset tensors.
    """
    print('boom, top of train')
    # Fixed seeds for reproducibility of both numpy and TF ops.
    np.random.seed(98765)
    tf.set_random_seed(98765)
    n_epochs = ((n_epochs_pred_only + n_epochs_ac_only) + n_epochs_pred_and_ac)
    tf.reset_default_graph()
    tf_dataset = csr_to_tfdataset(small_dataset, batch_size)
    print(tf_dataset.output_types, tf_dataset.output_shapes)
    # Reinitializable iterator: re-run training_init_op to restart each pass.
    data_iterator = tf.data.Iterator.from_structure(tf_dataset.output_types, tf_dataset.output_shapes)
    (batch_of_users, good_indices, bad_indices) = data_iterator.get_next()
    training_init_op = data_iterator.make_initializer(tf_dataset)
    model = constructor(batch_of_users, good_indices, bad_indices, input_dim=n_items, batch_size=batch_size, lr_actor=lr_actor, lr_critic=lr_critic, lr_ac=lr_ac, actor_reg_loss_scaler=actor_reg_loss_scaler)
    print(model)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        best_score = (- np.inf)
        best_recall = (- np.inf)
        best_epoch = 0
        # Global batch counter driving the KL annealing schedule.
        batches_seen = 0
        for epoch in range(n_epochs):
            training_phase = None
            print('Starting epoch {}'.format(epoch))
            # --- training pass (iterator exhaustion ends the loop) ---
            try:
                sess.run(training_init_op)
                mean_critic_errors = []
                mean_true_ndcg = []
                bnum = 0
                while True:
                    batches_seen += 1
                    bnum += 1
                    _print(bnum, verbose)
                    if break_early:
                        # Debug mode: stop after 25 batches per pass.
                        if (bnum >= 25):
                            print('breaking early')
                            break
                    kl_scaler = calc_kl_scaler_by_batch(batches_seen, min_kl, max_kl, batches_to_anneal_over)
                    feed_dict = {model.keep_prob_ph: 0.5, model.kl_loss_scaler: kl_scaler, model.stddev_effect_on_latent_dim_scaler: 1.0, model.train_batch_norm: False, model.epoch: epoch}
                    to_run = {}
                    to_run['true_ndcg'] = model.true_ndcg
                    to_run['mean_critic_error'] = model.mean_critic_error
                    to_run['critic_regularization_loss'] = model.critic_regularization_loss
                    to_run['ac_input'] = model.ac_input
                    to_run['critic_output'] = model.critic_output
                    # NOTE(review): train_batch_norm is set False above, then
                    # immediately overwritten to True here — confirm intent.
                    feed_dict[model.train_batch_norm] = True
                    t = time.time()
                    if (epoch < n_epochs_pred_only):
                        # Phase 1: actor-only pretraining.
                        training_phase = 'ACTOR'
                        feed_dict[model.train_batch_norm] = True
                        to_run['_'] = model.actor_train_op
                        sess_return = sess.run(to_run, feed_dict=feed_dict)
                        _print('Time taken for n_epochs_pred_only batch: {}'.format((time.time() - t)), verbose)
                        feed_dict[model.train_batch_norm] = False
                    elif (n_epochs_pred_only <= epoch < (n_epochs_pred_only + n_epochs_ac_only)):
                        # Phase 2: critic-only training.
                        training_phase = 'CRITIC'
                        to_run['_'] = model.critic_train_op
                        sess_return = sess.run(to_run, feed_dict=feed_dict)
                        _print('Time taken for n_epochs_ac_only batch: {}'.format((time.time() - t)), verbose)
                    else:
                        # Phase 3: joint actor-critic updates.
                        training_phase = 'AC'
                        to_run['_'] = model.ac_train_op
                        to_run['__'] = model.critic_train_op
                        sess_return = sess.run(to_run, feed_dict=feed_dict)
                        _print('Time taken for n_epochs_pred_ac batch: {}'.format((time.time() - t)), verbose)
                    mean_critic_errors.append(sess_return['mean_critic_error'])
                    mean_true_ndcg.append(sess_return['true_ndcg'])
            except tf.errors.OutOfRangeError:
                # Iterator exhausted: summarize the training pass.
                print('{} batches in total'.format(bnum))
                actual_mean_critic_error = np.asarray(mean_critic_errors).mean()
                print('Mean Critic Error for Training: {}'.format(actual_mean_critic_error))
                actual_mean_true_ndcg = np.asarray(mean_true_ndcg).mean()
                print('Mean True NDCG, calculated our way... {}'.format(actual_mean_true_ndcg))
                pass
            # --- evaluation pass ---
            # NOTE(review): re-runs training_init_op, so validation metrics are
            # computed over the same (training) dataset — confirm intended.
            try:
                sess.run(training_init_op)
                bnum = 0
                ndcgs = []
                recalls = []
                while True:
                    batches_seen += 1
                    bnum += 1
                    _print(bnum, verbose)
                    if break_early:
                        if (bnum >= 25):
                            print('breaking early')
                            break
                    # Eval settings: no dropout, deterministic latents.
                    feed_dict = {model.keep_prob_ph: 1.0, model.stddev_effect_on_latent_dim_scaler: 0.0, model.train_batch_norm: False, model.epoch: epoch}
                    to_run = {'vad_true_ndcg': model.vad_true_ndcg, 'vad_true_recall': model.vad_true_recall}
                    result = sess.run(to_run, feed_dict=feed_dict)
                    ndcgs.append(result['vad_true_ndcg'].mean())
                    recalls.append(result['vad_true_recall'].mean())
            except tf.errors.OutOfRangeError:
                mean_ndcg = np.asarray(ndcgs).mean()
                print('NDCG MEAN: {}'.format(mean_ndcg))
                mean_recall = np.asarray(recalls).mean()
                print('recall MEAN: {}'.format(mean_recall))
                # Track the best epoch by NDCG; recall tracked independently.
                if (mean_ndcg > best_score):
                    best_epoch = epoch
                    best_score = mean_ndcg
                if (mean_recall > best_recall):
                    best_recall = mean_recall
        print('All done! Best score achieved: {}'.format(best_score))
        print('All done! Best recall achieved: {}'.format(best_recall))
        print('Best NDCG score happended at epoch {}'.format(best_epoch))
class TrainEngine(object):
    """Minimal hook-driven episodic training loop.

    Callers register callbacks under the hook names created in __init__;
    each hook receives the mutable `state` dict and may flip
    'early_stopping_triggered' to stop training after the current epoch.
    """

    def __init__(self):
        hook_names = ['on_start', 'on_start_epoch', 'on_end_epoch', 'on_start_episode', 'on_end_episode', 'on_end']
        # Default every hook to a no-op.
        self.hooks = {name: (lambda state: None) for name in hook_names}

    def train(self, loss_func, train_loader, val_loader, epochs, n_episodes, **kwargs):
        """Run `epochs` x `n_episodes` episodes, firing hooks around each."""
        state = {'train_loader': train_loader, 'val_loader': val_loader, 'loss_func': loss_func, 'sample': None, 'epoch': 1, 'total_episode': 1, 'epochs': epochs, 'n_episodes': n_episodes, 'best_val_loss': np.inf, 'early_stopping_triggered': False}
        self.hooks['on_start'](state)
        for _ in range(state['epochs']):
            self.hooks['on_start_epoch'](state)
            for _ in tqdm(range(state['n_episodes'])):
                support, query, labels = train_loader.get_next_episode()
                state['sample'] = (support, query, labels)
                self.hooks['on_start_episode'](state)
                self.hooks['on_end_episode'](state)
                state['total_episode'] += 1
            self.hooks['on_end_epoch'](state)
            state['epoch'] += 1
            if state['early_stopping_triggered']:
                print('Early stopping triggered!')
                break
        self.hooks['on_end'](state)
        logging.info('Training succeed!')
_module(name='Pretrained')
class PretrainedInit():
    """Weight initializer that loads parameters from a pretrained checkpoint.

    Attributes:
        checkpoint: path or URL of the checkpoint to load.
        prefix: optional key prefix to select a sub-state-dict.
        map_location: device mapping passed to the checkpoint loader.
    """

    def __init__(self, checkpoint: str, prefix: Optional[str]=None, map_location: Optional[str]=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module: nn.Module) -> None:
        # No-op here; presumably the actual loading is implemented elsewhere
        # (or stripped from this excerpt) — TODO confirm.
        pass

    def _get_init_info(self) -> str:
        """Human-readable summary used by the registry's init logging."""
        info = f'{self.__class__.__name__}: load from {self.checkpoint}'
        return info
def cleanUrl(url):
    """Decode percent-escapes and plus signs in *url*, then strip apostrophes."""
    decoded = unquote_plus(url)
    return removeAllapostrophe(decoded)
def pause(info='Press any key to continue ...', err=False):
    """Interactive "press any key" prompt; silently skipped off-TTY.

    Ctrl-C / EOF while waiting are treated the same as a keypress.
    """
    if not (isatty(sys.stdin) and isatty(sys.stdout)):
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            pass
    finally:
        if info:
            # Always finish the prompt line, even on unexpected errors.
            echo(err=err)
def test_fc_dropseq_dataset(save_path: str):
    """Download/load the frontal-cortex Drop-seq dataset and train one epoch."""
    dataset = scvi.data.frontalcortex_dropseq(save_path=save_path)
    unsupervised_training_one_epoch(dataset)
def save_obj_data(model, filename):
    """Write a mesh dictionary to a Wavefront OBJ file.

    model keys (numpy arrays; absent or empty entries are skipped):
      'v'  vertices (required, Nx3)      'vn' vertex normals (Nx3)
      'vt' texture coordinates (Nx2)     'f'  triangle vertex indices (Mx3)
      'ft' per-face UV indices, 'fn' per-face normal indices (both Mx3).
    Indices are 0-based in memory and written 1-based, per the OBJ format.

    Fixes vs. the original: the `np.copy(...) + 1` calls copied twice
    (array + scalar already allocates a fresh array), and the 'v' presence
    check was repeated after it had already been asserted.
    """
    def _has(key):
        # A section is written only when present and non-empty.
        return (key in model) and (model[key].size != 0)

    assert _has('v')
    with open(filename, 'w') as fp:
        for v in model['v']:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
        if _has('vn'):
            for vn in model['vn']:
                fp.write('vn %f %f %f\n' % (vn[0], vn[1], vn[2]))
        if _has('vt'):
            for vt in model['vt']:
                fp.write('vt %f %f\n' % (vt[0], vt[1]))
        if _has('f'):
            if _has('fn') and _has('ft'):
                # Full v/vt/vn face records.
                assert (model['f'].size == model['fn'].size)
                assert (model['f'].size == model['ft'].size)
                for (f_, ft_, fn_) in zip(model['f'], model['ft'], model['fn']):
                    f, ft, fn = f_ + 1, ft_ + 1, fn_ + 1
                    fp.write('f %d/%d/%d %d/%d/%d %d/%d/%d\n' % (f[0], ft[0], fn[0], f[1], ft[1], fn[1], f[2], ft[2], fn[2]))
            elif _has('fn'):
                # v//vn records (no texture coordinates).
                assert (model['f'].size == model['fn'].size)
                for (f_, fn_) in zip(model['f'], model['fn']):
                    f, fn = f_ + 1, fn_ + 1
                    fp.write('f %d//%d %d//%d %d//%d\n' % (f[0], fn[0], f[1], fn[1], f[2], fn[2]))
            elif _has('ft'):
                # v/vt records (no normals).
                assert (model['f'].size == model['ft'].size)
                for (f_, ft_) in zip(model['f'], model['ft']):
                    f, ft = f_ + 1, ft_ + 1
                    fp.write('f %d/%d %d/%d %d/%d\n' % (f[0], ft[0], f[1], ft[1], f[2], ft[2]))
            else:
                # Vertex-only face records.
                for f_ in model['f']:
                    f = f_ + 1
                    fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
class HashMissing(HashError):
    """Raised in --require-hashes mode when a requirement has no hash pinned."""
    # Sort order of this error relative to other HashError subclasses.
    order = 2
    head = 'Hashes are required in --require-hashes mode, but they are missing from some requirements. Here is a list of those requirements along with the hashes their downloaded archives actually had. Add lines like these to your requirements files to prevent tampering. (If you did not enable --require-hashes manually, note that it turns on automatically when any package has a hash.)'
    def __init__(self, gotten_hash):
        # Hash actually computed for the downloaded archive.
        self.gotten_hash = gotten_hash
    def body(self):
        """Return a requirements-style line the user can paste to pin this hash."""
        # Imported here, not at module level, presumably to avoid a circular import — confirm.
        from pip._internal.utils.hashes import FAVORITE_HASH
        package = None
        if self.req:
            # NOTE(review): assumes self.req is populated by the HashError machinery — confirm.
            package = (self.req.original_link if self.req.original_link else getattr(self.req, 'req', None))
        return ' {} --hash={}:{}'.format((package or 'unknown package'), FAVORITE_HASH, self.gotten_hash) |
def iter10x10y(num):
    """Yield (row, col) pairs over a num x num grid, both axes in descending order."""
    for row in reversed(range(num)):
        for col in reversed(range(num)):
            yield (row, col)
def log_images_from_w(ws, G, names):
    """Log one image per (name, latent) pair, moving each latent to the configured device first."""
    for name, latent in zip(names, ws):
        log_image_from_w(latent.to(global_config.device), G, name)
# NOTE(review): the two '.parametrize' lines below look like truncated pytest
# decorators (e.g. @pytest.mark.parametrize) — confirm against upstream.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_atan_forward_backward(seed, ctx, func_name):
    """Check forward/backward of F.atan2 against np.arctan2.

    NOTE(review): the name says 'atan' but atan2 is exercised — confirm.
    """
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Two same-shaped random inputs: the y and x arguments of atan2.
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 1)]
    inputs += [(rng.randn(2, 3, 4).astype(np.float32) * 1)]
    function_tester(rng, F.atan2, np.arctan2, inputs, ctx=ctx, func_name=func_name, atol_f=0.001, atol_b=0.01) |
def print_header_ps(s):
    """Append the standard auto-generated PostScript comment header to *s* and return it."""
    header = (
        '%% --- Auto-generated PostScript ---\n\n\n'
        '%% Generated on: \n'
        '%%' + time.asctime() + '\n'
    )
    return s + header
def validate_config_dict(workflow_config_dict):
    """Validate *workflow_config_dict* against the module-level ``config_schema``.

    Raises:
        SchemaError: propagated unchanged when validation fails.
    """
    # The previous try/except only re-raised the same SchemaError; letting it
    # propagate naturally preserves the original traceback with less noise.
    config_schema.validate(workflow_config_dict)
def load_pointcloud_ply(filename):
    """Load a binary little-endian PLY point cloud with a fixed x/y/z + r/g/b header.

    Returns:
        (locs, cols): channel-stacked arrays built from the x/y/z and r/g/b fields.
    """
    with open(filename, 'rb') as fh:
        # Validate the exact expected header, line by line.
        assert (fh.readline().rstrip() == b'ply')
        assert (fh.readline().rstrip() == b'format binary_little_endian 1.0')
        # 'element vertex N' -> take the trailing count token.
        nr_elements = int(fh.readline().strip().decode('ascii').split(' ')[(- 1)])
        assert (fh.readline().rstrip() == b'property float x')
        assert (fh.readline().rstrip() == b'property float y')
        assert (fh.readline().rstrip() == b'property float z')
        assert (fh.readline().rstrip() == b'property uchar red')
        assert (fh.readline().rstrip() == b'property uchar green')
        assert (fh.readline().rstrip() == b'property uchar blue')
        assert (fh.readline().rstrip() == b'end_header')
        # NOTE(review): the dtype describes one 15-byte record per point, so
        # 'count' is a number of records; 6 * nr_elements looks suspicious
        # (nr_elements would be expected) — confirm against the files read here.
        data = np.fromfile(fh, dtype={'names': ('x', 'y', 'z', 'r', 'g', 'b'), 'formats': ('f4', 'f4', 'f4', 'u1', 'u1', 'u1')}, count=(6 * nr_elements))
        data = np.rec.array(data, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('r', 'u1'), ('g', 'u1'), ('b', 'u1')])
        data = data.reshape(1, (- 1))
        # Stack per-channel rows: positions and colors.
        locs = np.concatenate((data['x'], data['y'], data['z']), axis=0)
        cols = np.concatenate((data['r'], data['g'], data['b']), axis=0)
        return (locs, cols) |
def test_vaihingen():
    """The pseudo Vaihingen dataset fixture should expose exactly one sample."""
    here = osp.dirname(__file__)
    dataset = ISPRSDataset(
        pipeline=[],
        img_dir=osp.join(here, '../data/pseudo_vaihingen_dataset/img_dir'),
        ann_dir=osp.join(here, '../data/pseudo_vaihingen_dataset/ann_dir'),
    )
    assert len(dataset) == 1
def profile(n):
    """Decorator factory: run the wrapped callable inside a ProfileKV(n) context.

    Args:
        n: profiling key passed to ProfileKV.

    Returns:
        A decorator that wraps a function so each call is profiled under *n*.
    """
    import functools

    def decorator_with_name(func):
        # functools.wraps preserves __name__/__doc__ of the wrapped function,
        # which the original wrapper lost.
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            with ProfileKV(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector from *cfg*; the outer train_cfg/test_cfg args are deprecated.

    The configs may be given either in the model config or via the (legacy)
    outer arguments, but never in both places at once.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    # Reject duplicate specification in both the outer args and the model cfg.
    assert cfg.get('train_cfg') is None or train_cfg is None, 'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, 'test_cfg specified in both outer field and model field '
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
class Flatten(Module):
    """Flattens a contiguous range of dimensions of the input tensor.

    Attributes:
        start_dim: first dim to flatten (default 1, i.e. keep the batch dim).
        end_dim: last dim to flatten (default -1).
    """
    __constants__ = ['start_dim', 'end_dim']
    start_dim: int
    end_dim: int

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super(Flatten, self).__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        # Delegate directly to Tensor.flatten over the configured range.
        return input.flatten(self.start_dim, self.end_dim)

    def extra_repr(self) -> str:
        return f'start_dim={self.start_dim}, end_dim={self.end_dim}'
def read_json(fn):
    """Load and return the JSON document stored at path *fn* (UTF-8).

    Raises:
        sb.errors.SmartBugsError: wrapping any I/O or parse failure.
    """
    try:
        with open(fn, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise sb.errors.SmartBugsError(e) from e
class VideoRetrievalCollator(object):
    """Collate-function object turning video-retrieval samples into a model batch."""
    def __init__(self, tokenizer, max_length=40):
        # tokenizer: assumed HuggingFace-style (has batch_encode_plus) — TODO confirm.
        self.tokenizer = tokenizer
        self.max_length = max_length
    def collate_batch(self, batch):
        """Collate a list of sample dicts.

        Each sample is assumed to carry 'vid', 'examples' (list of text-example
        dicts with 'text_str' and optionally 'itm_label'/'id'), 'n_examples'
        and optionally 'vid_id' — confirm against the dataset class.
        """
        v_collate = default_collate
        visual_inputs = v_collate([d['vid'] for d in batch])
        # Flatten the per-video example lists into one text batch.
        text_examples = flat_list_of_lists([d['examples'] for d in batch])
        n_examples_list = [d['n_examples'] for d in batch]
        text_str_list = [d['text_str'] for d in text_examples]
        # Tokenize all captions at once, padded/truncated to max_length.
        batch_enc = self.tokenizer.batch_encode_plus(text_str_list, max_length=self.max_length, padding='max_length', return_tensors='pt', truncation=True)
        text_input_ids = batch_enc.input_ids
        text_input_mask = batch_enc.attention_mask
        # Optional image-text-matching labels and caption ids.
        if ('itm_label' in text_examples[0]):
            itm_labels = default_collate([d['itm_label'] for d in text_examples])
        else:
            itm_labels = None
        if ('id' in text_examples[0]):
            caption_ids = [d['id'] for d in text_examples]
        else:
            caption_ids = None
        collated_batch = dict(visual_inputs=visual_inputs, text_input_ids=text_input_ids, text_input_mask=text_input_mask, caption_ids=caption_ids, labels=itm_labels, n_examples_list=n_examples_list)
        # Keep the video id only for single-sample (inference-style) batches.
        if (('vid_id' in batch[0]) and (len(batch) == 1)):
            collated_batch['vid_id'] = batch[0]['vid_id']
        return collated_batch |
def convert_bn2affine_model(module, process_group=None, channel_last=False, merge=True):
    """Recursively replace BatchNorm layers with frozen AffineChannel2d layers.

    When *merge* is True, the BN running statistics are folded into the affine
    weight/bias so the replacement reproduces BN's inference-mode output.
    process_group and channel_last are accepted but unused here — presumably
    kept for signature parity with sibling converters; confirm.
    """
    mod = module
    # MixtureBatchNorm2d is deliberately left untouched.
    if (isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and (not isinstance(module, ops.MixtureBatchNorm2d))):
        mod = ops.AffineChannel2d(module.num_features)
        mod.weight.data = module.weight.data.clone().detach()
        mod.bias.data = module.bias.data.clone().detach()
        freeze_params(mod)
        if merge:
            gamma = module.weight.data.clone().detach().numpy()
            beta = module.bias.data.clone().detach().numpy()
            mu = module.running_mean.data.clone().detach().numpy()
            var = module.running_var.data.clone().detach().numpy()
            eps = module.eps
            # Fold running stats: y = gamma*(x-mu)/sqrt(var+eps) + beta
            new_gamma = (gamma / np.power((var + eps), 0.5))
            new_beta = (beta - ((gamma * mu) / np.power((var + eps), 0.5)))
            mod.weight.data = torch.from_numpy(new_gamma)
            mod.bias.data = torch.from_numpy(new_beta)
    # Recurse into children, re-attaching the converted submodules.
    for (name, child) in module.named_children():
        mod.add_module(name, convert_bn2affine_model(child, process_group=process_group, channel_last=channel_last, merge=merge))
    del module
    return mod |
def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction='mean'):
lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float32)
return F.nll_loss(lprobs, target, ignore_index=ignore_index, reduction=reduction) |
def _make_balanced_sampler(labels):
class_counts = np.bincount(labels)
class_weights = (1.0 / class_counts)
weights = class_weights[labels]
return WeightedRandomSampler(weights, len(weights)) |
def exists(S, P):
    """Search *S* for an element satisfying predicate *P*.

    Returns:
        (True, witness) for the first satisfying element, else (False, None).
    """
    for candidate in S:
        if P(candidate):
            return (True, candidate)
    return (False, None)
class Parser(ABC):
    """Abstract interface for parsing language-model responses into states.

    All methods are stubs returning None; concrete parsers override them.
    Method semantics below are inferred from names/signatures — confirm
    against the concrete implementations.
    """
    def parse_aggregation_answer(self, states: List[Dict], texts: List[str]) -> Union[(Dict, List[Dict])]:
        """Parse responses to an aggregation prompt into merged state(s)."""
        pass
    def parse_improve_answer(self, state: Dict, texts: List[str]) -> Dict:
        """Parse responses to an improve prompt into an updated state."""
        pass
    def parse_generate_answer(self, state: Dict, texts: List[str]) -> List[Dict]:
        """Parse responses to a generate prompt into new states."""
        pass
    def parse_validation_answer(self, state: Dict, texts: List[str]) -> bool:
        """Parse responses to a validation prompt into a validity flag."""
        pass
    def parse_score_answer(self, states: List[Dict], texts: List[str]) -> List[float]:
        """Parse responses to a scoring prompt into one score per state."""
        pass |
class Differential_multigraded(Differential):
    """Differential on a multi-graded DG algebra with a single well-defined multidegree."""
    def __init__(self, A, im_gens):
        Differential.__init__(self, A, im_gens)
        # Collect the multidegree shift on every generator with nonzero image;
        # all shifts must agree for the differential to be well-defined.
        diff_deg = []
        for x in A.gens():
            y = self(x)
            if (y != 0):
                diff_deg.append((y.degree() - x.degree()))
        if (len(set(diff_deg)) > 1):
            raise ValueError('the differential does not have a well-defined degree')
        self._degree_of_differential = diff_deg[0]
    # NOTE(review): '_method' below looks like a truncated decorator
    # (e.g. @cached_method) — confirm against the original source.
    _method
    def differential_matrix_multigraded(self, n, total=False):
        """Matrix of the differential from multidegree n to n + deg(d).

        With total=True (or integer n) fall back to the total-degree matrix.
        """
        if (total or (n in ZZ)):
            return Differential.differential_matrix(self, total_degree(n))
        A = self.domain()
        G = AdditiveAbelianGroup(([0] * A._grading_rank))
        n = G(vector(n))
        dom = A.basis(n)
        cod = A.basis((n + self._degree_of_differential))
        # Basis keys of the codomain, used to locate matrix columns.
        cokeys = [next(iter(a.lift().dict().keys())) for a in cod]
        m = matrix(self.base_ring(), len(dom), len(cod))
        for (i, domi) in enumerate(dom):
            im = self(domi)
            dic = im.lift().dict()
            for j in dic.keys():
                k = cokeys.index(j)
                m[(i, k)] = dic[j]
        m.set_immutable()
        return m
    def coboundaries(self, n, total=False):
        """Space of coboundaries in multidegree n (image of d from n - deg(d))."""
        if (total or (n in ZZ)):
            return Differential.coboundaries(self, total_degree(n))
        A = self.domain()
        G = AdditiveAbelianGroup(([0] * A._grading_rank))
        n = G(vector(n))
        F = A.base_ring()
        # No coboundaries can exist in total degrees 0 and 1.
        if (total_degree(n) == 0):
            return VectorSpace(F, 0)
        if (total_degree(n) == 1):
            return VectorSpace(F, 0)
        M = self.differential_matrix_multigraded((n - self._degree_of_differential))
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.image()
    def cocycles(self, n, total=False):
        """Space of cocycles in multidegree n (kernel of d)."""
        if (total or (n in ZZ)):
            return Differential.cocycles(self, total_degree(n))
        A = self.domain()
        G = AdditiveAbelianGroup(([0] * A._grading_rank))
        n = G(vector(n))
        F = A.base_ring()
        if (total_degree(n) == 0):
            return VectorSpace(F, 1)
        M = self.differential_matrix_multigraded(n)
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.kernel()
    def cohomology_raw(self, n, total=False):
        """Cohomology in multidegree n as a quotient vector space."""
        return self.cocycles(n, total).quotient(self.coboundaries(n, total))
    def cohomology(self, n, total=False):
        """Cohomology in multidegree n as a free module on cohomology classes."""
        H = self.cohomology_raw(n, total)
        H_basis_raw = (H.lift(H.basis()[i]) for i in range(H.dimension()))
        A = self.domain()
        B = A.basis(n, total)
        H_basis = (sum(((c * b) for (c, b) in zip(coeffs, B))) for coeffs in H_basis_raw)
        H_basis_brackets = [CohomologyClass(b, A) for b in H_basis]
        return CombinatorialFreeModule(A.base_ring(), H_basis_brackets, sorting_key=sorting_keys, monomial_reverse=True)
    # Alias: with this convention homology and cohomology coincide.
    homology = cohomology |
def get_large_hourglass_net(num_layers, heads, head_conv):
    """Construct the large hourglass network with 2 stacks.

    num_layers and head_conv are accepted for factory-signature parity but
    are not used by this variant.
    """
    return HourglassNet(heads, 2)
class GumbelSoftmax(nn.Module):
    """Gumbel-softmax relaxation over logits, optionally straight-through (STE)."""
    def __init__(self, ste=False, log_softmax_enable: bool=True, eps: float=1e-06):
        super(GumbelSoftmax, self).__init__()
        # eps guards the double logarithm against log(0).
        self.eps = eps
        self.u = Uniform(0, 1)
        self.softmax = nn.Softmax(dim=(- 1))
        self.log_softmax_enable = log_softmax_enable
        self.log_softmax = nn.LogSoftmax(dim=(- 1))
        # STE: forward one-hot, backward soft gradients.
        self.ste = ste
    def forward(self, pi, temperature, batch_size: int=1, noise_disable: float=False):
        """Sample a (relaxed) categorical from logits *pi*.

        Returns a tensor of shape (batch_size, *pi.shape). When noise_disable
        is truthy the Gumbel noise is skipped and temperature 1 is used.
        NOTE(review): noise_disable is annotated float but used as a bool — confirm.
        """
        g = torch.zeros([batch_size, *[i for i in pi.shape]])
        t = 1
        if (not noise_disable):
            # Gumbel(0, 1) noise: -log(-log(U)).
            g = (- torch.log(((- torch.log((self.u.sample([batch_size, *[i for i in pi.shape]]) + self.eps))) + self.eps)))
            t = temperature
        if pi.is_cuda:
            g = g.cuda()
        search_matrix = pi.reshape(1, (- 1))
        if self.log_softmax_enable:
            search_matrix = self.log_softmax(search_matrix)
        g = g.reshape(batch_size, (- 1))
        # Softmax over (logits + noise) / temperature.
        s = ((search_matrix + g) / t)
        p = self.softmax(s).reshape([batch_size, *pi.shape])
        if self.ste:
            # Build the hard one-hot, then add the detached gap so the forward
            # value is one-hot while gradients flow through the soft sample.
            p_flatten = p.reshape([batch_size, (- 1)])
            p_onehot = torch.FloatTensor(batch_size, p_flatten.shape[1]).cuda()
            p_onehot.zero_()
            p_onehot.scatter_(1, p_flatten.argmax(dim=(- 1)).reshape([(- 1), 1]), 1)
            p_onehot = p_onehot.reshape([batch_size, *pi.shape])
            error = (p_onehot - p).detach()
            p = (p + error)
        return p |
# NOTE(review): '_toolkit()' below looks like a truncated registration
# decorator (e.g. @register_toolkit()) — confirm against the toolkit registry.
_toolkit()
class Amazon(FunctionToolkit):
    """Function-calling toolkit descriptor for common Amazon shopping tasks."""
    # Display names/descriptions: one pair for humans, one for the model.
    name_for_human = 'Amazon'
    description_for_human = 'Toolkit for common online shopping tasks on Amazon.'
    name_for_model = 'Amazon'
    description_for_model = 'An Amazon toolkit to perform common online shopping tasks like searching for products, viewing product details, managing the shopping cart and wish list, placing orders, and posting reviews. It also allows users to view their saved addresses and payment methods, and search their order history.'
    # Tool classes bundled under this toolkit.
    tool_classes = [AmazonSearchProducts, AmazonGetProductDetails, AmazonAddToCart, AmazonViewCart, AmazonRemoveFromCart, AmazonPlaceOrder, AmazonSearchOrderHistory, AmazonViewOrderDetails, AmazonManageWishlist, AmazonViewSavedAddresses, AmazonViewSavedPaymentMethods, AmazonPostReview] |
# NOTE(review): '_model' below looks like a truncated decorator
# (e.g. @register_model) — confirm against the model registry.
_model
def swsl_resnet50(pretrained=True, **kwargs):
    """Build a ResNet-50 ('swsl' config) and optionally load pretrained weights."""
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    model.default_cfg = default_cfgs['swsl_resnet50']
    if pretrained:
        # num_classes=0 / in_chans=3 defaults mirror load_pretrained's expectations.
        load_pretrained(model, num_classes=kwargs.get('num_classes', 0), in_chans=kwargs.get('in_chans', 3))
    return model |
class BaseFeatureExtractor(nn.Module):
    """Abstract base for feature extractors; subclasses implement forward/output_num."""

    def __init__(self):
        super(BaseFeatureExtractor, self).__init__()

    def forward(self, *input):
        # To be overridden by subclasses.
        pass

    def output_num(self):
        # Dimensionality of the produced features; overridden by subclasses.
        pass
def get_pk_from_identity(obj):
    """Join the primary-key values of *obj*'s SQLAlchemy identity key with ':'."""
    pk_values = identity_key(instance=obj)[1]
    return ':'.join(text_type(value) for value in pk_values)
def load_url(url, model_dir='~/.torch/proxyless_nas', map_location=None):
    """Download (with caching) a checkpoint from *url* and torch.load it.

    Args:
        url: remote checkpoint URL.
        model_dir: cache directory handed to download_url.
        map_location: forwarded to torch.load. When None and CUDA is
            unavailable, tensors are remapped to CPU.
    """
    cached_file = download_url(url, model_dir)
    # Bug fix: the previous code collapsed every caller-supplied map_location
    # to None. Preserve it, and only force CPU when nothing was requested and
    # no GPU is available.
    if map_location is None and not torch.cuda.is_available():
        map_location = 'cpu'
    return torch.load(cached_file, map_location=map_location)
class NumberedListState(Enum):
    """Parser state while tracking numbering across consecutive list items.

    NOTE(review): member semantics are inferred from the names — confirm
    against the scanner that consumes this enum.
    """
    NO_NUM = 0
    CONSECUTIVE = 1
    DOWN = 2
    UP = 3
    UNKNOWN = 4
def eval_vehicle_id_(model, valid_loader, query_length, cfg):
    """Evaluate a vehicle re-ID model; returns (mean CMC curve, mean mAP) over 10 gallery resplits."""
    metric = Clck_R1_mAP(query_length, max_rank=cfg.test.max_rank, rerank=cfg.test.rerank, remove_junk=cfg.test.remove_junk, feat_norm=cfg.test.feat_norm, output_path=cfg.output_dir, lambda_=cfg.test.lambda_)
    model.eval()
    with torch.no_grad():
        for batch in tqdm(valid_loader):
            # Move every tensor field of the batch to the GPU.
            for (name, item) in batch.items():
                if isinstance(item, torch.Tensor):
                    batch[name] = item.to('cuda')
            output = model(**batch)
            global_feat = output['global_feat']
            local_feat = output['local_feat']
            vis_score = output['vis_score']
            # Accumulate features and metadata on the CPU side.
            metric.update((global_feat.detach().cpu(), local_feat.detach().cpu(), vis_score.cpu(), batch['id'].cpu(), batch['cam'].cpu(), batch['image_path']))
    mAPs = []
    cmcs = []
    # VehicleID-style protocol: average metrics over 10 random gallery resplits.
    for i in range(10):
        metric.resplit_for_vehicleid()
        metric_output = metric.compute()
        cmc = metric_output['cmc']
        mAP = metric_output['mAP']
        mAPs.append(mAP)
        cmcs.append(cmc)
    mAP = np.mean(mAPs)
    cmc = np.mean(cmcs, axis=0)
    logger.info(f'mAP: {mAP:.2%}')
    for r in [1, 5, 10]:
        logger.info(f'CMC curve, Rank-{r:<3}:{cmc[(r - 1)]:.2%}')
    return (cmc, mAP) |
def build_loss(opt):
    """Instantiate a loss module from an options dict.

    The 'type' entry selects the class from LOSS_REGISTRY; the remaining
    entries are passed to its constructor. The caller's dict is not mutated.
    """
    options = deepcopy(opt)  # pop() below must not touch the caller's dict
    loss_cls = LOSS_REGISTRY.get(options.pop('type'))
    loss = loss_cls(**options)
    get_root_logger().info(f'Loss [{loss.__class__.__name__}] is created.')
    return loss
def read_hdf5(filepath, key='tensor', efficient=False):
    """Read a dataset from an HDF5 file.

    Args:
        filepath: path to the .h5 file.
        key: dataset name inside the file.
        efficient: when True, return the lazy h5py dataset backed by an open
            file handle (the caller then owns the handle's lifetime) instead
            of materializing the data into memory.

    Returns:
        An h5py.Dataset when efficient, else the fully-read array.
    """
    assert os.path.exists(filepath), 'file %s not found' % filepath
    if efficient:
        # The file handle is intentionally left open so the returned dataset
        # stays readable; it is reclaimed when the dataset is garbage-collected.
        h5f = h5py.File(filepath, 'r')
        # 'key in h5f' replaces the old 'key in [k for k in h5f.keys()]' round-trip.
        assert key in h5f, 'key %s does not exist in %s with keys %s' % (key, filepath, ', '.join(h5f.keys()))
        return h5f[key]
    with h5py.File(filepath, 'r') as h5f:
        assert key in h5f, 'key %s does not exist in %s with keys %s' % (key, filepath, ', '.join(h5f.keys()))
        return h5f[key][()]
class TestCaffe2Backend(unittest.TestCase):
    """End-to-end PyTorch -> Caffe2 translation test for a super-resolution net."""
    # NOTE(review): the bare string below looks like a stripped skip decorator
    # (e.g. @unittest.skip(...)) — confirm against the original test file.
    ('test broken because Lapack was always missing.')
    def test_helper(self):
        """Run the torch model and its Caffe2 translation and compare outputs."""
        class SuperResolutionNet(nn.Module):
            # Standard ESPCN-style super-resolution stack: convs + pixel shuffle.
            def __init__(self, upscale_factor, inplace=False):
                super(SuperResolutionNet, self).__init__()
                self.relu = nn.ReLU(inplace=inplace)
                self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
                self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
                self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
                self.conv4 = nn.Conv2d(32, (upscale_factor ** 2), (3, 3), (1, 1), (1, 1))
                self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
                self._initialize_weights()
            def forward(self, x):
                x = self.relu(self.conv1(x))
                x = self.relu(self.conv2(x))
                x = self.relu(self.conv3(x))
                x = self.pixel_shuffle(self.conv4(x))
                return x
            def _initialize_weights(self):
                # NOTE(review): init.orthogonal is the deprecated spelling of
                # init.orthogonal_ — kept as-is for byte-identical behavior.
                init.orthogonal(self.conv1.weight, init.calculate_gain('relu'))
                init.orthogonal(self.conv2.weight, init.calculate_gain('relu'))
                init.orthogonal(self.conv3.weight, init.calculate_gain('relu'))
                init.orthogonal(self.conv4.weight)
        torch_model = SuperResolutionNet(upscale_factor=3)
        fake_input = torch.randn(1, 1, 224, 224, requires_grad=True)
        helper = ModelHelper(name='test_model')
        # Sandwich the translated module between two sigmoids on the Caffe2 side.
        start = helper.Sigmoid(['the_input'])
        (toutput,) = PyTorchModule(helper, torch_model, (fake_input,), [start])
        output = helper.Sigmoid(toutput)
        workspace.RunNetOnce(helper.InitProto())
        workspace.FeedBlob('the_input', fake_input.data.numpy())
        workspace.RunNetOnce(helper.Proto())
        c2_out = workspace.FetchBlob(str(output))
        # Mirror the same sigmoid sandwich in pure PyTorch for comparison.
        torch_out = torch.sigmoid(torch_model(torch.sigmoid(fake_input)))
        np.testing.assert_almost_equal(torch_out.data.cpu().numpy(), c2_out, decimal=3) |
def main():
    """Profile a toy 25-layer Linear stack on GPU and partition it across 2 GPUs with mpipe."""
    from autopipe.autopipe.api import build_profiled_graph
    import torch
    from torch.nn import Sequential, Linear
    IN_FEATURES = 320
    OUT_FEATURES = 8
    n_encoder_decoder = 12
    # Wide "encoder" layers, then a bottleneck layer, then narrow "decoder" layers.
    l = []
    for i in range(n_encoder_decoder):
        l.append(Linear(IN_FEATURES, IN_FEATURES))
    l.append(Linear(IN_FEATURES, OUT_FEATURES))
    for i in range(n_encoder_decoder):
        l.append(Linear(OUT_FEATURES, OUT_FEATURES))
    model = Sequential(*l)
    inputs = torch.randn(IN_FEATURES, IN_FEATURES)
    # Requires a CUDA device: the profiling run happens on GPU.
    model = model.cuda()
    inputs = inputs.cuda()
    graph = build_profiled_graph(model, model_args=(inputs,), n_iter=50)
    node_weight_function = NodeWeightFunction(bwd_to_fwd_ratio=1, MULT_FACTOR=100000)
    edge_weight_function = EdgeWeightFunction(bw_GBps=12, bwd_to_fwd_ratio=0, MULT_FACTOR=100000, penalty=100000)
    (graph, stage_to_gpu_map) = partition_mpipe(model, graph=graph, num_gpus=2, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_graph=True) |
def hook_layernorm(m, x, y):
    """FLOP-count hook for LayerNorm: 2 ops per output element, doubled when affine."""
    elems = y.numel()
    total = 2 * elems            # normalization cost estimate
    if m.elementwise_affine:
        total += 2 * elems       # gamma * x + beta
    return int(total)
def _wait_n_rounds(collection):
    """Poll *collection* until N_CLIENTS inference documents appear, or give up after RETRIES polls."""
    count = 0
    query = {'type': 'INFERENCE'}
    for _ in range(RETRIES):
        count = collection.count_documents(query)
        if count == N_CLIENTS:
            return count
        _eprint(f'Succeded cleints {count}. Sleeping for {SLEEP}.')
        sleep(SLEEP)
    _eprint(f'Succeded clients: {count}. Giving up.')
    return count
class QResult():
    """Accumulates per-key metric values across repeated runs and summarizes them.

    Bug fixes vs. the previous revision: ``name``/``paths``/``result``/``keys``
    are restored as properties and ``full_str``/``merge_dict`` as staticmethods
    (their decorators had been lost). Without them, ``self.full_str(x, space)``
    raised TypeError, ``key not in self.result`` tested membership on a bound
    method, and ``__repr__`` formatted bound methods instead of values.
    """

    def __init__(self, name):
        # Metric key -> list of observed values.
        self._result = defaultdict(list)
        self._name = name
        self._recorder_paths = []
        self._date2ICs = []

    def append(self, key, value):
        """Record one *value* under metric *key*."""
        self._result[key].append(value)

    def append_path(self, xpath):
        """Remember recorder path *xpath*."""
        self._recorder_paths.append(xpath)

    def append_date2ICs(self, date2IC):
        """Add a date->IC mapping; all mappings must share the same date keys."""
        if self._date2ICs:
            keys = sorted(list(date2IC.keys()))
            pre_keys = sorted(list(self._date2ICs[0].keys()))
            assert (len(keys) == len(pre_keys))
            for (i, (x, y)) in enumerate(zip(keys, pre_keys)):
                assert (x == y), '[{:}] {:} vs {:}'.format(i, x, y)
        self._date2ICs.append(date2IC)

    def find_all_dates(self):
        """Return the sorted dates of the most recently added mapping."""
        dates = self._date2ICs[-1].keys()
        return sorted(list(dates))

    def get_IC_by_date(self, date, scale=1.0):
        """Return (mean, std) of the scaled IC values observed for *date*."""
        values = [date2IC[date] * scale for date2IC in self._date2ICs]
        return (float(np.mean(values)), float(np.std(values)))

    @property
    def name(self):
        return self._name

    @property
    def paths(self):
        return self._recorder_paths

    @property
    def result(self):
        return self._result

    @property
    def keys(self):
        return list(self._result.keys())

    def __len__(self):
        return len(self._result)

    def __repr__(self):
        return '{name}({xname}, {num} metrics)'.format(name=self.__class__.__name__, xname=self.name, num=len(self.result))

    def __getitem__(self, key):
        if (key not in self._result):
            raise ValueError('Invalid key {:}, please use one of {:}'.format(key, self.keys))
        values = self._result[key]
        # Scalar summary: mean of all recorded values for the key.
        return float(np.mean(values))

    def update(self, metrics, filter_keys=None):
        """Append *metrics*; with filter_keys given, keep only (and rename) those keys."""
        for (key, value) in metrics.items():
            if ((filter_keys is not None) and (key in filter_keys)):
                key = filter_keys[key]
            elif (filter_keys is not None):
                continue
            self.append(key, value)

    @staticmethod
    def full_str(xstr, space):
        """Pad str(xstr) into a fixed-width field of *space* characters."""
        xformat = (('{:' + str(space)) + 's}')
        return xformat.format(str(xstr))

    @staticmethod
    def merge_dict(dict_list):
        """Concatenate the per-key value lists of several result dicts."""
        new_dict = dict()
        for xkey in dict_list[0].keys():
            new_dict[xkey] = [x for xdict in dict_list for x in xdict[xkey]]
        return new_dict

    def info(self, keys: List[Text], separate: Text = '& ', space: int = 20, verbose: bool = True, version: str = 'v1'):
        """Render mean±std table rows for *keys* ('IR' metrics shown as percentages)."""
        avaliable_keys = []
        for key in keys:
            if (key not in self.result):
                print('There are invalid key [{:}].'.format(key))
            else:
                avaliable_keys.append(key)
        head_str = separate.join([self.full_str(x, space) for x in avaliable_keys])
        values = []
        for key in avaliable_keys:
            if ('IR' in key):
                current_values = [(x * 100) for x in self._result[key]]
            else:
                current_values = self._result[key]
            mean = np.mean(current_values)
            std = np.std(current_values)
            if (version == 'v0'):
                values.append('{:.2f} $\\pm$ {:.2f}'.format(mean, std))
            elif (version == 'v1'):
                values.append(((('{:.2f}'.format(mean) + ' \\subs{') + '{:.2f}'.format(std)) + '}'))
            else:
                raise ValueError('Unknown version')
        value_str = separate.join([self.full_str(x, space) for x in values])
        if verbose:
            print(head_str)
            print(value_str)
        return (head_str, value_str)
def test_stats(model, X, Y, cutoff=None, fpr=None):
    """Evaluate *model* on numpy arrays (X, Y); prints stats and returns (tpr, auc).

    Relies on module-level ``args`` (batch size) and ``print_stats``; runs on CUDA.
    """
    x_test_tensor = torch.from_numpy(X).float()
    y_test_tensor = torch.from_numpy(Y).float()
    test_data = TensorDataset(x_test_tensor, y_test_tensor)
    test_loader = DataLoader(dataset=test_data, batch_size=args.size, shuffle=True)
    all_preds = []
    all_y = []
    for (x_batch, y_batch) in test_loader:
        x_batch = x_batch.cuda()
        # Keep CPU copies of the labels (in loader order) for metric computation.
        all_y.extend(y_batch.numpy())
        y_batch = y_batch.cuda()
        scores = model(x_batch, y_batch)
        all_preds.extend(scores.cpu().detach().numpy())
    print('CLN performance:')
    # print_stats returns (threshold, tpr, fpr, auc); threshold and fpr are unused here.
    (th, tpr, fpr, auc) = print_stats(all_y, all_preds, cutoff=cutoff, fpr=fpr)
    return (tpr, auc) |
def buildcallback(rout, um):
    """Build the C call-back wrapper for Fortran routine *rout* used in module *um*.

    f2py internals: applies cb_rout_rules/cb_arg_rules to accumulate a rule
    dictionary ``rd``, then renders the callback body, typedefs and docs into
    the ``cfuncs``/``capi_maps`` registries. Returns None; all results are
    recorded in module-level state.
    """
    global cb_map
    from . import capi_maps
    outmess(('\tConstructing call-back function "cb_%s_in_%s"\n' % (rout['name'], um)))
    (args, depargs) = getargs(rout)
    capi_maps.depargs = depargs
    var = rout['vars']
    # Routine-level signature map, then routine-level rules.
    vrd = capi_maps.cb_routsign2map(rout, um)
    rd = dictappend({}, vrd)
    cb_map[um].append([rout['name'], rd['name']])
    for r in cb_rout_rules:
        if ((('_check' in r) and r['_check'](rout)) or ('_check' not in r)):
            ar = applyrules(r, vrd, rout)
            rd = dictappend(rd, ar)
    # Pass 1: non-optional argument rules (per-arg signature maps cached in savevrd).
    savevrd = {}
    for (i, a) in enumerate(args):
        vrd = capi_maps.cb_sign2map(a, var[a], index=i)
        savevrd[a] = vrd
        for r in cb_arg_rules:
            if ('_depend' in r):
                continue
            if (('_optional' in r) and isoptional(var[a])):
                continue
            if ((('_check' in r) and r['_check'](var[a])) or ('_check' not in r)):
                ar = applyrules(r, vrd, var[a])
                rd = dictappend(rd, ar)
                if ('_break' in r):
                    break
    # Pass 2: optional-argument rules.
    for a in args:
        vrd = savevrd[a]
        for r in cb_arg_rules:
            if ('_depend' in r):
                continue
            if (('_optional' not in r) or (('_optional' in r) and isrequired(var[a]))):
                continue
            if ((('_check' in r) and r['_check'](var[a])) or ('_check' not in r)):
                ar = applyrules(r, vrd, var[a])
                rd = dictappend(rd, ar)
                if ('_break' in r):
                    break
    # Pass 3: dependent-argument rules.
    for a in depargs:
        vrd = savevrd[a]
        for r in cb_arg_rules:
            if ('_depend' not in r):
                continue
            if ('_optional' in r):
                continue
            if ((('_check' in r) and r['_check'](var[a])) or ('_check' not in r)):
                ar = applyrules(r, vrd, var[a])
                rd = dictappend(rd, ar)
                if ('_break' in r):
                    break
    if (('args' in rd) and ('optargs' in rd)):
        if isinstance(rd['optargs'], list):
            # Guard the trailing comma for the non-complex return convention.
            rd['optargs'] = (rd['optargs'] + ['\n#ifndef F2PY_CB_RETURNCOMPLEX\n,\n#endif\n'])
            rd['optargs_nm'] = (rd['optargs_nm'] + ['\n#ifndef F2PY_CB_RETURNCOMPLEX\n,\n#endif\n'])
            rd['optargs_td'] = (rd['optargs_td'] + ['\n#ifndef F2PY_CB_RETURNCOMPLEX\n,\n#endif\n'])
    if isinstance(rd['docreturn'], list):
        rd['docreturn'] = stripcomma(replace('#docreturn#', {'docreturn': rd['docreturn']}))
    # Assemble doc signatures (plain and LaTeX variants).
    optargs = stripcomma(replace('#docsignopt#', {'docsignopt': rd['docsignopt']}))
    if (optargs == ''):
        rd['docsignature'] = stripcomma(replace('#docsign#', {'docsign': rd['docsign']}))
    else:
        rd['docsignature'] = replace('#docsign#[#docsignopt#]', {'docsign': rd['docsign'], 'docsignopt': optargs})
    rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_')
    rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ')
    rd['docstrsigns'] = []
    rd['latexdocstrsigns'] = []
    for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
        if ((k in rd) and isinstance(rd[k], list)):
            rd['docstrsigns'] = (rd['docstrsigns'] + rd[k])
        k = ('latex' + k)
        if ((k in rd) and isinstance(rd[k], list)):
            rd['latexdocstrsigns'] = ((((rd['latexdocstrsigns'] + rd[k][0:1]) + ['\\begin{description}']) + rd[k][1:]) + ['\\end{description}'])
    if ('args' not in rd):
        rd['args'] = ''
        rd['args_td'] = ''
        rd['args_nm'] = ''
    if (not (rd.get('args') or rd.get('optargs') or rd.get('strarglens'))):
        rd['noargs'] = 'void'
    # Render the callback and register body, typedefs, needs and doc maps globally.
    ar = applyrules(cb_routine_rules, rd)
    cfuncs.callbacks[rd['name']] = ar['body']
    if isinstance(ar['need'], str):
        ar['need'] = [ar['need']]
    if ('need' in rd):
        for t in cfuncs.typedefs.keys():
            if (t in rd['need']):
                ar['need'].append(t)
    cfuncs.typedefs_generated[(rd['name'] + '_typedef')] = ar['cbtypedefs']
    ar['need'].append((rd['name'] + '_typedef'))
    cfuncs.needs[rd['name']] = ar['need']
    capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], 'nofoptargs': ar['nofoptargs'], 'docstr': ar['docstr'], 'latexdocstr': ar['latexdocstr'], 'argname': rd['argname']}
    outmess(('\t %s\n' % ar['docstrshort']))
    return |
def get_nmnist(data_path, network_config):
    """Create (train, test) DataLoaders for the N-MNIST spiking dataset.

    network_config supplies 'n_steps' (time steps per sample) and 'batch_size'.
    """
    n_steps = network_config['n_steps']
    batch_size = network_config['batch_size']
    print('loading NMNIST')
    if not os.path.exists(data_path):
        os.mkdir(data_path)
    trainset = NMNIST(data_path + '/Train', n_steps)
    testset = NMNIST(data_path + '/Test', n_steps)
    make_loader = torch.utils.data.DataLoader
    trainloader = make_loader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
    testloader = make_loader(testset, batch_size=batch_size, shuffle=False, num_workers=4)
    return (trainloader, testloader)
def sauvola(img, window_size=WS_SAUVOLA):
    """Binarize *img* using Sauvola's local threshold (True where pixel > threshold)."""
    return img > threshold_sauvola(img, window_size)
('detection', 'ets', ETSDetectorParams)
class ETSDetector(AnomalyDetectionAlgo):
    """ETS-based time-series anomaly detector wrapping Merlion's ETS detector.

    NOTE(review): the bare tuple above looks like a truncated registration
    decorator (e.g. @factory.register('detection', 'ets', ETSDetectorParams))
    — confirm against the algorithm registry.
    """

    def __init__(self, params: ETSDetectorParams):
        # Translate framework params into a Merlion ETS configuration.
        ets_config = ETSDetectorConfig(max_forecast_steps=params.max_forecast_steps, target_seq_index=params.target_seq_index, error=params.error, trend=params.trend, damped_trend=params.damped_trend, seasonal=params.seasonal, seasonal_periods=params.seasonal_periods, refit=params.refit, **params.kwargs)
        self.model = MerlionETSDetector(ets_config)

    def fit(self, log_features: pd.DataFrame):
        """Train on the counts time series; returns per-row train scores with trainval=True."""
        self._is_valid_ts_df(log_features)
        index = log_features.index
        time_series = pd_to_timeseries(log_features)
        train_scores = self.model.train(time_series).to_pd()
        train_scores = train_scores.loc[time_series.to_pd().index]
        train_scores[constants.LOG_TIMESTAMPS] = train_scores.index
        train_scores['trainval'] = True
        train_scores.index = index
        return train_scores

    def predict(self, log_features: pd.DataFrame):
        """Score new data; returns per-row anomaly labels with trainval=False."""
        self._is_valid_ts_df(log_features)
        index = log_features.index
        time_series = pd_to_timeseries(log_features)
        test_scores = self.model.get_anomaly_label(time_series).to_pd()
        test_scores = test_scores.loc[time_series.to_pd().index]
        test_scores[constants.LOG_TIMESTAMPS] = test_scores.index
        test_scores['trainval'] = False
        test_scores.index = index
        return test_scores

    @staticmethod
    def _is_valid_ts_df(log_feature):
        """Validate the (timestamp, counts) schema of *log_feature*.

        Bug fix: this was declared without ``self`` yet invoked as
        ``self._is_valid_ts_df(...)``, which raised TypeError on every call;
        restored as a @staticmethod.
        """
        columns = log_feature.columns.values
        for c in columns:
            if (c not in [constants.LOG_TIMESTAMPS, constants.LOG_COUNTS]):
                raise ValueError(("log feature dataframe must only contain two columns ['{}': datetime, '{}': int]".format(constants.LOG_TIMESTAMPS, constants.LOG_COUNTS) + 'Current columns: {}'.format(columns)))
        if (constants.LOG_TIMESTAMPS not in columns):
            raise ValueError('dataframe must contain {} column'.format(constants.LOG_TIMESTAMPS))
        if (constants.LOG_COUNTS not in columns):
            raise ValueError('dataframe must contain {} column'.format(constants.LOG_COUNTS))
        for ts in log_feature[constants.LOG_TIMESTAMPS]:
            if (not isinstance(ts, datetime)):
                raise ValueError('{} must be datetime'.format(constants.LOG_TIMESTAMPS))
def cross_entropy2d(input, target, weight=None, size_average=True):
    """2-D cross-entropy over (n, c, h, w) logits, ignoring target labels < 0."""
    n, c, h, w = input.size()
    # log_softmax gained an explicit dim argument in torch >= 0.3.
    if LooseVersion(torch.__version__) < LooseVersion('0.3'):
        log_p = F.log_softmax(input)
    else:
        log_p = F.log_softmax(input, dim=1)
    # (n, c, h, w) -> (n, h, w, c) so the class axis is trailing.
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
    # Keep only locations whose target label is valid (>= 0).
    log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    log_p = log_p.view(-1, c)
    valid = target >= 0
    loss = F.nll_loss(log_p, target[valid], weight=weight, reduction='sum')
    if size_average:
        # Average over the number of valid target locations.
        loss /= valid.data.sum()
    return loss
def createInstanceImage(annotation, encoding):
    """Render a Cityscapes-style instance-id image from polygon annotations.

    encoding 'ids' uses label ids, 'trainIds' uses training ids. Instances of
    has-instances labels are numbered id*1000 + running per-label index.
    Returns a PIL 'I' (32-bit int) image, or None for an unknown encoding.
    """
    size = (annotation.imgWidth, annotation.imgHeight)
    if (encoding == 'ids'):
        backgroundId = name2label['unlabeled'].id
    elif (encoding == 'trainIds'):
        backgroundId = name2label['unlabeled'].trainId
    else:
        print("Unknown encoding '{}'".format(encoding))
        return None
    instanceImg = Image.new('I', size, backgroundId)
    drawer = ImageDraw.Draw(instanceImg)
    # Running instance count per instance-capable label name.
    nbInstances = {}
    for labelTuple in labels:
        if labelTuple.hasInstances:
            nbInstances[labelTuple.name] = 0
    for obj in annotation.objects:
        label = obj.label
        polygon = obj.polygon
        isGroup = False
        # 'xxxgroup' marks a group region for label 'xxx'.
        if ((not (label in name2label)) and label.endswith('group')):
            label = label[:(- len('group'))]
            isGroup = True
        if (not (label in name2label)):
            printError("Label '{}' not known.".format(label))
        labelTuple = name2label[label]
        if (encoding == 'ids'):
            id = labelTuple.id
        elif (encoding == 'trainIds'):
            id = labelTuple.trainId
        # Individual instances get a unique id; group regions keep the plain id.
        if (labelTuple.hasInstances and (not isGroup)):
            id = ((id * 1000) + nbInstances[label])
            nbInstances[label] += 1
        # Negative ids mean "do not draw".
        if (id < 0):
            continue
        try:
            drawer.polygon(polygon, fill=id)
        except:
            # Report the offending polygon before re-raising.
            print('Failed to draw polygon with label {} and id {}: {}'.format(label, id, polygon))
            raise
    return instanceImg |
def makeStubAsWithHosts(emu: Emulator, base: Base, asn: int, exchange: int, hosts_total: int):
    """Create a stub AS with one router joined to ix<exchange> and *hosts_total* hosts on net0.

    The *emu* argument is currently unused; kept for call-site compatibility.
    """
    net_name = 'net0'
    stub_as = base.createAutonomousSystem(asn)
    stub_as.createNetwork(net_name)
    router = stub_as.createRouter('router0')
    router.joinNetwork(net_name)
    router.joinNetwork('ix{}'.format(exchange))
    for idx in range(hosts_total):
        host = stub_as.createHost('host_{}'.format(idx))
        host.joinNetwork(net_name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.