code stringlengths 101 5.91M |
|---|
def test_rnd_paper_count():
    """The RND paper must contribute exactly 18 entries to the database.

    NOTE(review): the 6 + 6 + 6 grouping is kept from the original sum;
    presumably three result groups of six entries each -- confirm.
    """
    entries = rldb.find_all({'source-title': 'Exploration by Random Network Distillation'})
    assert len(entries) == 6 + 6 + 6
class FreeModuleAltForm(FreeModuleTensor):
    """Alternating form (fully antisymmetric type-(0, p) tensor) on a free module.

    Degree-1 instances are linear forms.  The parent of a degree-p form is the
    p-th dual exterior power of the module.
    """

    def __init__(self, fmodule, degree, name=None, latex_name=None):
        # A degree-p alternating form is a (0, p) tensor antisymmetric over
        # all of its p covariant index positions (antisym=range(degree)).
        FreeModuleTensor.__init__(self, fmodule, (0, degree), name=name, latex_name=latex_name, antisym=range(degree), parent=fmodule.dual_exterior_power(degree))

    def _repr_(self):
        # "Linear form" for degree 1, otherwise "Alternating form ... of
        # degree p"; always followed by the module description.
        if (self._tensor_rank == 1):
            description = 'Linear form '
            if (self._name is not None):
                description += (self._name + ' ')
        else:
            description = 'Alternating form '
            if (self._name is not None):
                description += (self._name + ' ')
            description += 'of degree {} '.format(self._tensor_rank)
        description += 'on the {}'.format(self._fmodule)
        return description

    def _new_instance(self):
        # Unnamed form of the same degree on the same module.
        return self.__class__(self._fmodule, self._tensor_rank)

    def _new_comp(self, basis):
        # Component container for the given basis: plain Components for
        # degree 1 (nothing to antisymmetrize), fully antisymmetric otherwise.
        fmodule = self._fmodule
        if (self._tensor_rank == 1):
            return Components(fmodule._ring, basis, 1, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)
        return CompFullyAntiSym(fmodule._ring, basis, self._tensor_rank, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)

    def degree(self):
        # The degree of an alternating form equals its tensor rank.
        return self._tensor_rank

    def _display_expansion(self, basis=None, format_spec=None):
        """Build the text/LaTeX expansion of ``self`` on the wedge products
        of the given basis's dual cobasis, returning a FormattedExpansion."""
        from sage.misc.latex import latex
        from sage.typeset.unicode_characters import unicode_wedge
        from .format_utilities import is_atomic, FormattedExpansion
        (basis, format_spec) = self._preparse_display(basis=basis, format_spec=format_spec)
        cobasis = basis.dual_basis()
        comp = self.comp(basis)
        terms_txt = []
        terms_latex = []
        # Iterate only over non-redundant (strictly increasing) index tuples,
        # since the components are fully antisymmetric.
        for ind in comp.non_redundant_index_generator():
            ind_arg = (ind + (format_spec,))
            coef = comp[ind_arg]
            # Skip terms whose coefficient is (trivially) zero; fall back to
            # `== 0` when the coefficient type lacks is_trivial_zero().
            if hasattr(coef, 'is_trivial_zero'):
                zero_coef = coef.is_trivial_zero()
            else:
                zero_coef = (coef == 0)
            if (not zero_coef):
                bases_txt = []
                bases_latex = []
                for k in range(self._tensor_rank):
                    bases_txt.append(cobasis[ind[k]]._name)
                    bases_latex.append(latex(cobasis[ind[k]]))
                basis_term_txt = unicode_wedge.join(bases_txt)
                basis_term_latex = '\\wedge '.join(bases_latex)
                coef_txt = repr(coef)
                # Coefficients 1 / -1 are elided; non-atomic coefficients are
                # parenthesized so the term reads unambiguously.
                if (coef_txt == '1'):
                    terms_txt.append(basis_term_txt)
                    terms_latex.append(basis_term_latex)
                elif (coef_txt == '-1'):
                    terms_txt.append(('-' + basis_term_txt))
                    terms_latex.append(('-' + basis_term_latex))
                else:
                    coef_latex = latex(coef)
                    if is_atomic(coef_txt):
                        terms_txt.append(((coef_txt + ' ') + basis_term_txt))
                    else:
                        terms_txt.append(((('(' + coef_txt) + ') ') + basis_term_txt))
                    if is_atomic(coef_latex):
                        terms_latex.append((coef_latex + basis_term_latex))
                    else:
                        terms_latex.append(((('\\left(' + coef_latex) + '\\right)') + basis_term_latex))
        # Join the text terms with ' + ' / ' - ' (absorbing leading minus signs).
        if (not terms_txt):
            expansion_txt = '0'
        else:
            expansion_txt = terms_txt[0]
            for term in terms_txt[1:]:
                if (term[0] == '-'):
                    expansion_txt += (' - ' + term[1:])
                else:
                    expansion_txt += (' + ' + term)
        # LaTeX terms already carry their own '-'; only '+' must be inserted.
        if (not terms_latex):
            expansion_latex = '0'
        else:
            expansion_latex = terms_latex[0]
            for term in terms_latex[1:]:
                if (term[0] == '-'):
                    expansion_latex += term
                else:
                    expansion_latex += ('+' + term)
        return FormattedExpansion(expansion_txt, expansion_latex)

    def display(self, basis=None, format_spec=None):
        """Display ``self`` as '<name> = <expansion>' (text and LaTeX)."""
        from sage.misc.latex import latex
        from sage.tensor.modules.format_utilities import FormattedExpansion
        exp = self._display_expansion(basis=basis, format_spec=format_spec)
        if (self._name is None):
            resu_txt = repr(exp)
        else:
            resu_txt = ((self._name + ' = ') + repr(exp))
        if (self._latex_name is None):
            resu_latex = latex(exp)
        else:
            resu_latex = ((latex(self) + ' = ') + latex(exp))
        return FormattedExpansion(resu_txt, resu_latex)
    # Short alias, following the sage convention.
    disp = display

    def wedge(self, other):
        """Exterior (wedge) product of ``self`` with another alternating form.

        Returns the zero form early when the result's degree exceeds the
        module rank, when either factor is zero, or when wedging an
        odd-degree form with itself.
        """
        from sage.typeset.unicode_characters import unicode_wedge
        from .format_utilities import is_atomic
        if (not isinstance(other, FreeModuleAltForm)):
            raise TypeError(('the second argument for the exterior product ' + 'must be an alternating form'))
        # Degree-0 forms act as scalars.
        if (other._tensor_rank == 0):
            return (other * self)
        if (self._tensor_rank == 0):
            return (self * other)
        fmodule = self._fmodule
        rank_r = (self._tensor_rank + other._tensor_rank)
        if (rank_r > fmodule._rank):
            return fmodule.dual_exterior_power(rank_r).zero()
        if (self._is_zero or other._is_zero):
            return fmodule.dual_exterior_power(rank_r).zero()
        # a ∧ a = 0 for odd-degree a.
        if ((self is other) and ((self._tensor_rank % 2) == 1)):
            return fmodule.dual_exterior_power(rank_r).zero()
        basis = self.common_basis(other)
        if (basis is None):
            raise ValueError('no common basis for the exterior product')
        cmp_s = self._components[basis]
        cmp_o = other._components[basis]
        cmp_r = CompFullyAntiSym(fmodule._ring, basis, rank_r, start_index=fmodule._sindex, output_formatter=fmodule._output_formatter)
        # Accumulate products of stored components; index tuples with a
        # repeated index contribute zero and are skipped.  The [[...]]
        # indexing antisymmetrizes on assignment.
        for (ind_s, val_s) in cmp_s._comp.items():
            for (ind_o, val_o) in cmp_o._comp.items():
                ind_r = (ind_s + ind_o)
                if (len(ind_r) == len(set(ind_r))):
                    cmp_r[[ind_r]] += (val_s * val_o)
        result = fmodule.alternating_form(rank_r)
        result._components[basis] = cmp_r
        # Compose the result's name / LaTeX name, parenthesizing non-atomic
        # factor names.
        if ((self._name is not None) and (other._name is not None)):
            sname = self._name
            oname = other._name
            if (not is_atomic(sname)):
                sname = (('(' + sname) + ')')
            if (not is_atomic(oname)):
                oname = (('(' + oname) + ')')
            result._name = ((sname + unicode_wedge) + oname)
        if ((self._latex_name is not None) and (other._latex_name is not None)):
            slname = self._latex_name
            olname = other._latex_name
            if (not is_atomic(slname)):
                slname = (('(' + slname) + ')')
            if (not is_atomic(olname)):
                olname = (('(' + olname) + ')')
            result._latex_name = ((slname + '\\wedge ') + olname)
        return result

    def interior_product(self, alt_tensor):
        """Interior product of ``self`` (degree p) with an alternating
        contravariant tensor of degree q >= p; the result has degree q - p
        (a scalar when q == p)."""
        from .format_utilities import is_atomic
        from .alternating_contr_tensor import AlternatingContrTensor
        if (not isinstance(alt_tensor, AlternatingContrTensor)):
            raise TypeError(('{} is not an alternating '.format(alt_tensor) + 'contravariant tensor'))
        p_res = (alt_tensor._tensor_rank - self._tensor_rank)
        if (self._tensor_rank == 1):
            # Degree-1 case reduces to an ordinary tensor contraction.
            res = self.contract(alt_tensor)
        else:
            if (alt_tensor._fmodule != self._fmodule):
                raise ValueError(('{} is not defined on '.format(alt_tensor) + 'the same module as the {}'.format(self)))
            if (alt_tensor._tensor_rank < self._tensor_rank):
                raise ValueError(('the degree of the {} '.format(alt_tensor) + 'is lower than that of the {}'.format(self)))
            basis = self.common_basis(alt_tensor)
            if (basis is None):
                raise ValueError('no common basis for the interior product')
            comp = self._components[basis].interior_product(alt_tensor._components[basis])
            if (p_res == 0):
                # Full contraction: the result is a ring element, not a tensor.
                res = comp
            else:
                res = self._fmodule.tensor_from_comp((p_res, 0), comp)
        # Compose 'i_<self> <tensor>' / '\iota_{<self>} <tensor>' names.
        res_name = None
        if ((self._name is not None) and (alt_tensor._name is not None)):
            sname = self._name
            oname = alt_tensor._name
            if (not is_atomic(sname)):
                sname = (('(' + sname) + ')')
            if (not is_atomic(oname)):
                oname = (('(' + oname) + ')')
            res_name = ((('i_' + sname) + ' ') + oname)
        res_latex_name = None
        if ((self._latex_name is not None) and (alt_tensor._latex_name is not None)):
            slname = self._latex_name
            olname = alt_tensor._latex_name
            if (not is_atomic(olname)):
                olname = (('\\left(' + olname) + '\\right)')
            res_latex_name = ((('\\iota_{' + slname) + '} ') + olname)
        if res_name:
            # A scalar result (ring element) may not support set_name.
            try:
                res.set_name(res_name, latex_name=res_latex_name)
            except (AttributeError, TypeError, ValueError):
                pass
        return res
def test_nested_ListArray_NumpyArray():
    """Element access on a ListOffsetArray wrapping a ListArray of floats
    must yield the expected lengths and values for entry 1."""
    offsets = ak.index.Index64(np.array([0, 1, 4], dtype=np.int64))
    starts = ak.index.Index(np.array([999, 4, 100, 1], np.int64))
    stops = ak.index.Index(np.array([999, 7, 100, 3, 200], np.int64))
    content = ak.contents.numpyarray.NumpyArray(np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8]))
    layout = ak.contents.ListOffsetArray(offsets, ak.contents.listarray.ListArray(starts, stops, content))

    def probe(out, array):
        # Record lengths and selected values of array[1] into `out`.
        obj = array[1]
        out[0] = len(obj)
        out[1] = len(obj[0])
        out[2] = obj[0][0]
        out[3] = obj[0][1]
        out[4] = obj[0][2]
        out[5] = len(obj[1])
        out[6] = len(obj[2])
        out[7] = obj[2][0]
        out[8] = obj[2][1]

    out = np.zeros(9, dtype=np.float64)
    probe(out, ak.highlevel.Array(layout))
    assert out.tolist() == [3.0, 3.0, 1.1, 2.2, 3.3, 0.0, 2.0, 4.4, 5.5]
class DBSCAN(ClusterMixin, BaseEstimator):
    """Density-Based Spatial Clustering of Applications with Noise.

    Groups points that have at least ``min_samples`` (weighted) neighbors
    within radius ``eps``; points reachable from no core sample get label -1
    (noise).  With ``metric='precomputed'`` X is a (sparse) distance matrix.
    """

    _parameter_constraints: dict = {'eps': [Interval(Real, 0.0, None, closed='neither')], 'min_samples': [Interval(Integral, 1, None, closed='left')], 'metric': [StrOptions((set(_VALID_METRICS) | {'precomputed'})), callable], 'metric_params': [dict, None], 'algorithm': [StrOptions({'auto', 'ball_tree', 'kd_tree', 'brute'})], 'leaf_size': [Interval(Integral, 1, None, closed='left')], 'p': [Interval(Real, 0.0, None, closed='left'), None], 'n_jobs': [Integral, None]}

    def __init__(self, eps=0.5, *, min_samples=5, metric='euclidean', metric_params=None, algorithm='auto', leaf_size=30, p=None, n_jobs=None):
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs

    # FIX: the original had a bare `_fit_context(...)` call whose returned
    # decorator was discarded, so `fit` ran without parameter validation /
    # fit-context handling.  Restored as the decorator it was meant to be.
    @_fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering; sets labels_, core_sample_indices_,
        components_; returns self."""
        X = self._validate_data(X, accept_sparse='csr')
        if (sample_weight is not None):
            sample_weight = _check_sample_weight(sample_weight, X)
        if ((self.metric == 'precomputed') and sparse.issparse(X)):
            # Materialize the diagonal so each point is its own neighbor even
            # when the sparse matrix stores no explicit zeros there.
            X = X.copy()
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
                X.setdiag(X.diagonal())
        neighbors_model = NearestNeighbors(radius=self.eps, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs)
        neighbors_model.fit(X)
        neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
        # Weighted neighbor counts when sample_weight is given.
        if (sample_weight is None):
            n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
        else:
            n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods])
        # -1 marks noise until the inner loop assigns a cluster.
        labels = np.full(X.shape[0], (- 1), dtype=np.intp)
        core_samples = np.asarray((n_neighbors >= self.min_samples), dtype=np.uint8)
        dbscan_inner(core_samples, neighborhoods, labels)
        self.core_sample_indices_ = np.where(core_samples)[0]
        self.labels_ = labels
        if len(self.core_sample_indices_):
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # No core samples: keep an empty array with the right width.
            self.components_ = np.empty((0, X.shape[1]))
        return self

    def fit_predict(self, X, y=None, sample_weight=None):
        """Fit and return the cluster labels (noise = -1)."""
        self.fit(X, sample_weight=sample_weight)
        return self.labels_

    def _more_tags(self):
        # Pairwise semantics only when X is a precomputed distance matrix.
        return {'pairwise': (self.metric == 'precomputed')}
class Model(nn.Module):
    """Deterministically-initialized MLP whose FFN can be activation-checkpointed
    either via torch.utils.checkpoint (per-call) or fairseq's checkpoint_wrapper
    (per-module)."""

    def __init__(self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False):
        super().__init__()
        # Fixed seed so weight initialization is reproducible across runs.
        torch.manual_seed(0)
        self.use_pytorch_checkpoint = use_pytorch_checkpoint
        ffn = nn.Sequential(
            nn.Linear(32, 128),
            nn.Dropout(p=0.5),
            nn.Linear(128, 32),
        )
        # Fairseq-style checkpointing wraps the module itself.
        self.ffn = checkpoint_wrapper(ffn) if use_fairseq_checkpoint else ffn
        self.out = nn.Linear(32, 1)

    def forward(self, x):
        # PyTorch-style checkpointing wraps the call instead of the module.
        hidden = checkpoint(self.ffn, x) if self.use_pytorch_checkpoint else self.ffn(x)
        return self.out(hidden)
def prepro_for_zhang(dataname, split, seed, args):
    """Convert one split of a Zhang-et-al. text-classification CSV into a
    k-shot TSV ('sentence<TAB>label' rows) under
    args.output_dir/<dataname>/<k>-<seed>/<split>.tsv.

    Labels in the CSV are 1-based and are shifted to 0-based strings here.
    With args.balance, up to k examples are kept *per label*; otherwise k
    examples total (test split is written in full).
    """
    balance = args.balance
    k = args.k
    # Seed numpy so the per-label shuffles below are reproducible.
    np.random.seed(seed)
    data = defaultdict(list)
    label_set = set()
    with open(os.path.join(args.data_dir, 'TextClassificationDatasets', DATA_DICT[dataname], '{}.csv'.format(split)), 'r') as f:
        for dp in csv.reader(f, delimiter=','):
            # Column layout differs per dataset family: yelp = (label, text),
            # yahoo = (label, title, content, answer), others = (label, t1, t2).
            if ('yelp' in dataname):
                assert (len(dp) == 2)
                (label, sent) = (dp[0], dp[1])
            elif ('yahoo' in dataname):
                assert (len(dp) == 4)
                label = dp[0]
                # Strip tabs/escaped newlines so the TSV stays well-formed.
                dp[3] = dp[3].replace('\t', ' ').replace('\\n', ' ')
                sent = ' '.join(dp[1:])
            else:
                assert (len(dp) == 3)
                if ('\t' in dp[2]):
                    dp[2] = dp[2].replace('\t', ' ')
                (label, sent) = (dp[0], ((dp[1] + ' ') + dp[2]))
            # CSV labels are 1-based; emit 0-based string labels.
            label = str((int(label) - 1))
            label_set.add(label)
            # Balanced mode buckets by label; otherwise one common bucket.
            if balance:
                data[label].append((sent, label))
            else:
                data['all'].append((sent, label))
    n_classes = len(label_set)
    save_base_dir = os.path.join(args.output_dir, dataname)
    if (not os.path.exists(save_base_dir)):
        os.mkdir(save_base_dir)
    labels = set(list(data.keys()))
    # Shuffle only train/dev so the k-shot subsample is random; the test
    # split keeps its original order and is written in full.
    if (split != 'test'):
        for label in data:
            np.random.shuffle(data[label])
    save_dir = os.path.join(save_base_dir, '{}-{}'.format(k, seed))
    if (not os.path.exists(save_dir)):
        os.mkdir(save_dir)
    save_path = os.path.join(save_dir, '{}.tsv'.format(split))
    with open(save_path, 'w') as f:
        f.write('sentence\tlabel\n')
        for sents in data.values():
            if (split == 'test'):
                pass
            elif balance:
                # k examples per label bucket.
                sents = sents[:k]
            else:
                # NOTE(review): identical to the balanced branch -- both take
                # sents[:k]; possibly the unbalanced case was meant to take
                # k * n_classes from the single 'all' bucket.  Confirm intent.
                sents = sents[:k]
            for (sent, label) in sents:
                assert ('\t' not in sent), sent
                f.write(('%s\t%s\n' % (sent, label)))
# FIX: the original line was a bare `_model_architecture(...)` call whose
# returned decorator was discarded, so the architecture was never registered.
# Restored as a decorator.  NOTE(review): `_model_architecture` is presumably
# fairseq's `register_model_architecture` under a local alias -- confirm.
@_model_architecture('delight_transformer_lm', 'delight_transformer_lm')
def base_lm_architecture(args):
    """Fill in defaults for every DeLighT transformer-LM hyperparameter that
    the user did not set on ``args`` (mutates ``args`` in place)."""
    args.adaptive_input = getattr(args, 'adaptive_input', False)
    args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', ADAPTIVE_SCALE_FACTOR)
    args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
    args.delight_emb_map_dim = getattr(args, 'delight_emb_map_dim', 64)
    args.delight_emb_out_dim = getattr(args, 'delight_emb_out_dim', 128)
    # Grouped linear transforms require the embedding width to split evenly.
    assert ((args.delight_emb_out_dim % MIN_ELEMENTS_PER_GROUP) == 0), 'remainder({}, {}) should be equal to 0'.format(args.delight_emb_out_dim, MIN_ELEMENTS_PER_GROUP)
    # Largest power-of-two group count compatible with the embedding width.
    max_groups = (2 ** math.ceil(math.log((args.delight_emb_out_dim // MIN_ELEMENTS_PER_GROUP), 2)))
    args.delight_emb_dropout = getattr(args, 'delight_emb_dropout', DEFAULT_DROPOUT)
    args.delight_emb_depth = getattr(args, 'delight_emb_depth', DEFAULT_MIN_DEXTRA_LAYERS)
    args.delight_emb_width_mult = getattr(args, 'delight_emb_width_mult', DEFAULT_WIDTH_MULTIPLIER)
    args.delight_emb_max_groups = getattr(args, 'delight_emb_max_groups', max_groups)
    args.delight_dec_scaling = getattr(args, 'delight_dec_scaling', 'block')
    args.delight_dec_layers = getattr(args, 'delight_dec_layers', DEFAULT_MAX_DEXTRA_LAYERS)
    args.delight_dec_min_depth = getattr(args, 'delight_dec_min_depth', DEFAULT_MIN_DEXTRA_LAYERS)
    args.delight_dec_max_depth = getattr(args, 'delight_dec_max_depth', DEFAULT_MAX_DEXTRA_LAYERS)
    args.delight_dec_width_mult = getattr(args, 'delight_dec_width_mult', DEFAULT_WIDTH_MULTIPLIER)
    args.delight_dec_max_groups = getattr(args, 'delight_dec_max_groups', max_groups)
    args.delight_dec_ffn_red = getattr(args, 'delight_dec_ffn_red', DEFAULT_FFN_RED_FACTOR)
    args.no_glt_shuffle = getattr(args, 'no_glt_shuffle', False)
    args.glt_shuffle = (not args.no_glt_shuffle)
    args.define_iclr = getattr(args, 'define_iclr', False)
    args.delight_dropout = getattr(args, 'delight_dropout', DEFAULT_DROPOUT)
    args.norm_type = getattr(args, 'norm_type', 'ln')
    args.act_type = getattr(args, 'act_type', 'swish')
    args.dropout = getattr(args, 'dropout', DEFAULT_DROPOUT)
    args.attention_dropout = getattr(args, 'attention_dropout', DEFAULT_DROPOUT)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', DEFAULT_DROPOUT)
    args.pe_dropout = getattr(args, 'pe_dropout', DEFAULT_DROPOUT)
    args.ffn_dropout = getattr(args, 'ffn_dropout', DEFAULT_DROPOUT)
    # Backward compatibility with older checkpoints/flag spellings.
    if hasattr(args, 'no_tie_adaptive_proj'):
        args.no_decoder_final_norm = True
        if (args.no_tie_adaptive_proj is False):
            args.tie_adaptive_proj = True
    if hasattr(args, 'decoder_final_norm'):
        args.no_decoder_final_norm = (not args.decoder_final_norm)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', ADAPTIVE_SCALE_FACTOR)
    args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
    args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
    args.print_stats = getattr(args, 'print_stats', False)
    args.tgt_len_ps = getattr(args, 'tgt_len_ps', 20)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
    args.activation_fn = getattr(args, 'activation_fn', 'swish')
    args.add_bos_token = getattr(args, 'add_bos_token', False)
    args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
    # Pre-norm decoder is forced on, regardless of user setting.
    args.decoder_normalize_before = True
    args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', False)
    args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
    args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)
def get_env_info():
    """Collect interpreter, torch, CUDA, and system details into a SystemEnv."""
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)
    return SystemEnv(
        torch_version=torch.__version__,
        is_debug_build=torch.version.debug,
        python_version='{}.{}'.format(sys.version_info[0], sys.version_info[1]),
        is_cuda_available=torch.cuda.is_available(),
        cuda_compiled_version=torch.version.cuda,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=get_conda_packages(run_lambda),
        os=get_os(run_lambda),
        gcc_version=get_gcc_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
    )
class DoubleConv(nn.Module):
    """Two stacked stride-1 Conv3x3BNReLU blocks (in -> out -> out channels)."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = [
            Conv3x3BNReLU(in_channels, out_channels, stride=1),
            Conv3x3BNReLU(out_channels, out_channels, stride=1),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)
class RemBertForMaskedLM(metaclass=DummyObject):
    """Auto-generated import-time placeholder for RemBertForMaskedLM.

    Stands in for the real class when the 'torch' backend is missing;
    instantiating it calls requires_backends, which presumably raises an
    informative ImportError -- see the DummyObject machinery.
    """
    # Backends that requires_backends() checks for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_ast_resolver_wrong_ti():
    """ASTResolver must refuse to resolve 'ti.kernel' to the real
    taichi.kernel when the local name ``ti`` is bound to an impostor."""
    import taichi
    taichi.init()
    FakeTi = namedtuple('FakeTi', ['kernel'])
    # The name `ti` must stay exactly `ti`: the resolver inspects locals().
    ti = FakeTi(kernel='fake')
    expr = ast.parse('ti.kernel', mode='eval').body
    assert not ASTResolver.resolve_to(expr, taichi.kernel, locals())
def _fit_sag(X, y, eta, alpha, loss, max_iter, rng):
if sparse.issparse(X):
X = X.toarray()
(n_samples, n_features) = X.shape
n_vectors = y.shape[1]
g = np.empty((n_samples, n_features))
coef_ = np.zeros((n_vectors, n_features))
for i in range(n_samples):
p = coef_.dot(X[i])
g[i] = ((- loss.get_update(p, y[i])) * X[i])
d = np.sum(g, axis=0)
for _ in range(max_iter):
for _ in range(n_samples):
i = rng.randint((n_samples - 1))
p = coef_.dot(X[i])
gi = ((- loss.get_update(p, y[i])) * X[i])
coef_ -= (eta * ((((gi - g[i]) + d) / n_samples) + (alpha * coef_)))
d += (gi - g[i])
g[i] = gi
return coef_ |
# FIX: the class declared only bare field annotations, so instances could not
# be constructed at all -- evidently a dataclass whose `@dataclass` decorator
# was stripped.  Restored (import kept adjacent so this edit is self-contained;
# move it to the file's import block).
from dataclasses import dataclass


@dataclass
class RelativeRiskResult:
    """Result of a relative-risk (risk-ratio) computation for a 2x2 table of
    exposed/control cases, with a Katz-method confidence interval."""

    relative_risk: float   # point estimate of the risk ratio
    exposed_cases: int     # events in the exposed group
    exposed_total: int     # size of the exposed group
    control_cases: int     # events in the control group
    control_total: int     # size of the control group

    def confidence_interval(self, confidence_level=0.95):
        """Katz confidence interval for the relative risk.

        Degenerate tables (zero cases in either group) return the
        conventional open/NaN bounds.  Raises ValueError when
        confidence_level is outside [0, 1].
        """
        if (not (0 <= confidence_level <= 1)):
            raise ValueError('confidence_level must be in the interval [0, 1].')
        if ((self.exposed_cases == 0) and (self.control_cases == 0)):
            return ConfidenceInterval(low=np.nan, high=np.nan)
        elif (self.exposed_cases == 0):
            return ConfidenceInterval(low=0.0, high=np.nan)
        elif (self.control_cases == 0):
            return ConfidenceInterval(low=np.nan, high=np.inf)
        alpha = (1 - confidence_level)
        # Two-sided normal quantile.
        z = ndtri((1 - (alpha / 2)))
        rr = self.relative_risk
        # Katz log-scale standard error of log(RR).
        se = np.sqrt(((((1 / self.exposed_cases) - (1 / self.exposed_total)) + (1 / self.control_cases)) - (1 / self.control_total)))
        delta = (z * se)
        katz_lo = (rr * np.exp((- delta)))
        katz_hi = (rr * np.exp(delta))
        return ConfidenceInterval(low=katz_lo, high=katz_hi)
def get_document(document_lines, args):
    """Build an OntoNotes document: tokenize each CoNLL row with
    args.tokenizer, optionally injecting speaker markers, then split the
    subtoken stream into segments of at most args.seg_len.

    document_lines is (doc_key, rows); empty rows mark sentence boundaries.
    Returns the finalized document produced by OntoNotesDocumentState.
    """
    document_state = OntoNotesDocumentState(document_lines[0])
    tokenizer = args.tokenizer
    # word_idx counts emitted tokens (including speaker markers);
    # orig_word_idx counts only the original CoNLL words.
    word_idx = (- 1)
    orig_word_idx = (- 1)
    last_speaker = '-'
    for line in document_lines[1]:
        row = line.split()
        sentence_end = (len(row) == 0)
        if (not sentence_end):
            assert (len(row) >= 12)
            if args.add_speaker:
                # Insert [SPK_START] speaker [SPK_END] whenever the speaker
                # changes; these subtokens map to -1 in the original map
                # since they have no source word.
                speaker = row[9]
                if (speaker != last_speaker):
                    word_idx += 1
                    speaker_str = process_speaker(speaker)
                    document_state.tokens.extend([SPEAKER_START, speaker_str, SPEAKER_END])
                    speaker_subtokens = []
                    speaker_subtokens.extend(tokenizer.convert_tokens_to_ids([SPEAKER_START]))
                    # NOTE(review): the trailing comma wraps this call in a
                    # throwaway 1-tuple -- harmless but likely a typo.
                    (speaker_subtokens.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(speaker_str))),)
                    speaker_subtokens.extend(tokenizer.convert_tokens_to_ids([SPEAKER_END]))
                    document_state.token_end += (([False] * (len(speaker_subtokens) - 1)) + [True])
                    for (sidx, subtoken) in enumerate(speaker_subtokens):
                        document_state.subtokens.append(subtoken)
                        document_state.info.append(None)
                        document_state.sentence_end.append(False)
                        document_state.subtoken_map.append(word_idx)
                        document_state.orig_subtoken_map.append((- 1))
                    last_speaker = speaker
            word_idx += 1
            orig_word_idx += 1
            word = normalize_word(row[3])
            subtokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word))
            document_state.tokens.append(word)
            # token_end marks the final subtoken of every word.
            document_state.token_end += (([False] * (len(subtokens) - 1)) + [True])
            for (sidx, subtoken) in enumerate(subtokens):
                document_state.subtokens.append(subtoken)
                # Only the first subtoken carries the CoNLL row (plus the
                # subtoken count); the rest get None.
                info = (None if (sidx != 0) else (row + [len(subtokens)]))
                document_state.info.append(info)
                document_state.sentence_end.append(False)
                document_state.subtoken_map.append(word_idx)
                document_state.orig_subtoken_map.append(orig_word_idx)
        else:
            # Blank row: flag the previous subtoken as a sentence boundary.
            document_state.sentence_end[(- 1)] = True
    split_into_segments(document_state, args.seg_len, document_state.sentence_end, document_state.token_end)
    document = document_state.finalize()
    return document
class genextreme_gen(rv_continuous):
    """Generalized extreme value (GEV) distribution.

    Shape parameter c: c = 0 reduces to the Gumbel distribution; c > 0 has
    upper support bound 1/c, c < 0 lower bound 1/c.  The c == 0 cases are
    handled throughout via _lazywhere limits.
    """

    def _argcheck(self, c):
        # Any finite real c is a valid shape.
        return np.isfinite(c)

    def _shape_info(self):
        return [_ShapeInfo('c', False, ((- np.inf), np.inf), (False, False))]

    def _get_support(self, c):
        # Support is (-inf, 1/c] for c > 0 and [1/c, inf) for c < 0; the
        # max/min with _XMIN guards against division by (near-)zero c.
        _b = np.where((c > 0), (1.0 / np.maximum(c, _XMIN)), np.inf)
        _a = np.where((c < 0), (1.0 / np.minimum(c, (- _XMIN))), (- np.inf))
        return (_a, _b)

    def _loglogcdf(self, x, c):
        # log(-log(cdf)) = log1p(-c*x)/c for c != 0, with Gumbel limit -x.
        # The (x == x) factor keeps NaNs propagating.
        return _lazywhere(((x == x) & (c != 0)), (x, c), (lambda x, c: (sc.log1p(((- c) * x)) / c)), (- x))

    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # logpdf = -exp(loglogcdf) + loglogcdf - log1p(-c*x), computed in
        # log space to stay stable in the tails.
        cx = _lazywhere(((x == x) & (c != 0)), (x, c), (lambda x, c: (c * x)), 0.0)
        logex2 = sc.log1p((- cx))
        logpex2 = self._loglogcdf(x, c)
        pex2 = np.exp(logpex2)
        # Gumbel lower-tail limit: avoid inf - inf at x = -inf when c == 0.
        np.putmask(logpex2, ((c == 0) & (x == (- np.inf))), 0.0)
        logpdf = _lazywhere((~ ((cx == 1) | (cx == (- np.inf)))), (pex2, logpex2, logex2), (lambda pex2, lpex2, lex2: (((- pex2) + lpex2) - lex2)), fillvalue=(- np.inf))
        # Boundary value at the support endpoint for c == 1.
        np.putmask(logpdf, ((c == 1) & (x == 1)), 0.0)
        return logpdf

    def _logcdf(self, x, c):
        return (- np.exp(self._loglogcdf(x, c)))

    def _cdf(self, x, c):
        return np.exp(self._logcdf(x, c))

    def _sf(self, x, c):
        # sf = 1 - cdf = -expm1(logcdf), accurate for cdf near 1.
        return (- sc.expm1(self._logcdf(x, c)))

    def _ppf(self, q, c):
        # Invert the cdf: x = -log(-log(q)) for c == 0, else -expm1(-c*x)/c.
        x = (- np.log((- np.log(q))))
        return _lazywhere(((x == x) & (c != 0)), (x, c), (lambda x, c: ((- sc.expm1(((- c) * x))) / c)), x)

    def _isf(self, q, c):
        # Inverse survival function, using log1p(-q) for accuracy at small q.
        x = (- np.log((- sc.log1p((- q)))))
        return _lazywhere(((x == x) & (c != 0)), (x, c), (lambda x, c: ((- sc.expm1(((- c) * x))) / c)), x)

    def _stats(self, c):
        # Moments via g(n) = Gamma(n*c + 1); each np.where falls back to the
        # series/limit value for |c| below a threshold, and to NaN where the
        # corresponding moment does not exist (c too negative).
        def g(n):
            return sc.gamma(((n * c) + 1))
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        g2mg12 = np.where((abs(c) < 1e-07), (((c * np.pi) ** 2.0) / 6.0), (g2 - (g1 ** 2.0)))
        gam2k = np.where((abs(c) < 1e-07), ((np.pi ** 2.0) / 6.0), (sc.expm1((sc.gammaln(((2.0 * c) + 1.0)) - (2 * sc.gammaln((c + 1.0))))) / (c ** 2.0)))
        eps = 1e-14
        gamk = np.where((abs(c) < eps), (- _EULER), (sc.expm1(sc.gammaln((c + 1))) / c))
        # Mean exists for c > -1, variance for c > -1/2.
        m = np.where((c < (- 1.0)), np.nan, (- gamk))
        v = np.where((c < (- 0.5)), np.nan, ((g1 ** 2.0) * gam2k))
        # Skewness (exists for c > -1/3); Gumbel limit 12*sqrt(6)*zeta(3)/pi^3.
        sk1 = _lazywhere((c >= ((- 1.0) / 3)), (c, g1, g2, g3, g2mg12), (lambda c, g1, g2, g3, g2gm12: ((np.sign(c) * ((- g3) + ((g2 + (2 * g2mg12)) * g1))) / (g2mg12 ** 1.5))), fillvalue=np.nan)
        sk = np.where((abs(c) <= (eps ** 0.29)), (((12 * np.sqrt(6)) * _ZETA3) / (np.pi ** 3)), sk1)
        # Excess kurtosis (exists for c > -1/4); Gumbel limit 12/5.
        ku1 = _lazywhere((c >= ((- 1.0) / 4)), (g1, g2, g3, g4, g2mg12), (lambda g1, g2, g3, g4, g2mg12: ((g4 + ((((- 4) * g3) + ((3 * (g2 + g2mg12)) * g1)) * g1)) / (g2mg12 ** 2))), fillvalue=np.nan)
        ku = np.where((abs(c) <= (eps ** 0.23)), (12.0 / 5.0), (ku1 - 3.0))
        return (m, v, sk, ku)

    def _fitstart(self, data):
        # Pick the starting shape's sign from the sample skewness.
        if isinstance(data, CensoredData):
            data = data._uncensor()
        g = _skew(data)
        if (g < 0):
            a = 0.5
        else:
            a = (- 0.5)
        return super()._fitstart(data, args=(a,))

    def _munp(self, n, c):
        # Raw moments via the binomial expansion of (1 - exp(-c*t))^n / c^n;
        # infinite when c*n <= -1.
        k = np.arange(0, (n + 1))
        vals = ((1.0 / (c ** n)) * np.sum(((sc.comb(n, k) * ((- 1) ** k)) * sc.gamma(((c * k) + 1))), axis=0))
        return np.where(((c * n) > (- 1)), vals, np.inf)

    def _entropy(self, c):
        # Differential entropy: Euler-Mascheroni * (1 - c) + 1.
        return ((_EULER * (1 - c)) + 1)
class ZFilter(Filter):
    """Running z-score filter: y = clip((x - mean) / (std + 1e-8), -clip, clip),
    where mean/std come from a RunningStat updated online."""

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)

    def __call__(self, x, update=True):
        # Fold x into the running statistics before (optionally) normalizing.
        if update:
            self.rs.push(x)
        out = x
        if self.demean:
            out = out - self.rs.mean
        if self.destd:
            out = out / (self.rs.std + 1e-08)
        if self.clip:
            out = np.clip(out, -self.clip, self.clip)
        return out

    def output_shape(self, input_space):
        # Normalization is elementwise: shape passes through unchanged.
        return input_space.shape
# FIX: the original line was a bare `_connect.numpy.implements('prod')` call
# whose returned decorator was discarded, so the np.prod override was never
# registered.  Restored as the decorator it was meant to be.
@_connect.numpy.implements('prod')
def _nep_18_impl_prod(a, axis=None, dtype=UNSUPPORTED, out=UNSUPPORTED, keepdims=False, initial=UNSUPPORTED, where=UNSUPPORTED):
    """NEP 18 (__array_function__) shim for np.prod.

    Only `axis` and `keepdims` are forwarded; the remaining NumPy parameters
    default to the UNSUPPORTED sentinel (presumably rejected elsewhere in the
    dispatch machinery -- confirm).
    """
    return prod(a, axis=axis, keepdims=keepdims)
def result_folder():
    """Return the directory configured for kgof experiment results."""
    import sbibm.third_party.kgof.config as config
    return config.expr_configs['expr_results_path']
def unwrap_model(model_wrapper):
    """Return the underlying model from a DataParallel/DDP-style wrapper.

    Wrappers expose the wrapped network as ``.module``; anything without
    that attribute is assumed to already be the bare model.
    """
    return getattr(model_wrapper, 'module', model_wrapper)
def compute_metrics_on_files(gt, pred, ifhd=True, ifasd=True):
    """Compute per-structure segmentation metrics between label volumes.

    For each cardiac label (500, 600, 200 -- printed as Endo, RV, Myo)
    appends [Dice, Hausdorff distance, average surface distance] to the
    result; distances default to -1 when disabled, and all three become -1
    when either mask is empty.  Prints a formatted row and returns the
    flat 9-element list.
    """
    def metrics(img_gt, img_pred, ifhd=True, ifasd=True):
        if (img_gt.ndim != img_pred.ndim):
            raise ValueError("The arrays 'img_gt' and 'img_pred' should have the same dimension, {} against {}".format(img_gt.ndim, img_pred.ndim))
        res = []
        # Label order fixed to match the Endo/RV/Myo print format below.
        for c in [500, 600, 200]:
            # Binarize each volume to the current label only.
            gt_c_i = np.copy(img_gt)
            gt_c_i[(gt_c_i != c)] = 0
            pred_c_i = np.copy(img_pred)
            pred_c_i[(pred_c_i != c)] = 0
            gt_c_i = np.clip(gt_c_i, 0, 1)
            pred_c_i = np.clip(pred_c_i, 0, 1)
            dice = dc(gt_c_i, pred_c_i)
            # -1 is the sentinel for "not computed / undefined".
            (h_d, a_sd) = ((- 1), (- 1))
            if (ifhd or ifasd):
                # Surface distances are undefined for empty masks; mark all
                # three metrics (including Dice) as invalid in that case.
                if ((np.sum(gt_c_i) == 0) or (np.sum(pred_c_i) == 0)):
                    dice = (- 1)
                    h_d = (- 1)
                    a_sd = (- 1)
                else:
                    h_d = (hd(gt_c_i, pred_c_i) if ifhd else h_d)
                    a_sd = (asd(gt_c_i, pred_c_i) if ifasd else a_sd)
            res += [dice, h_d, a_sd]
        return res
    res = metrics(gt, pred, ifhd=ifhd, ifasd=ifasd)
    res_str = ['{:.3f}'.format(r) for r in res]
    formatting = 'Endo {:>8} , {:>8} , {:>8} , RV {:>8} , {:>8} , {:>8} , Myo {:>8} , {:>8} , {:>8}'
    print(formatting.format(*res_str))
    return res
class ElastodynamicsLinearTSC(ElastodynamicsBasicTSC):
    """Elastodynamics time-step controller with damped increases.

    Extends the basic controller with: an extra reduction factor applied on
    rejected steps (`fred`), a waiting period of `inc_wait` consecutive
    accepted steps before any increase, and a minimum increase ratio
    (`min_finc`) below which the step size is kept unchanged.
    """
    name = 'tsc.ed_linear'
    _parameters = (ElastodynamicsBasicTSC._parameters + [('fred', 'float', 1.0, False, 'Additional step size reduction factor w.r.t. `tsc.ed_basic`.'), ('inc_wait', 'int', 10, False, 'The number of consecutive accepted steps to wait before increasing\n the step size.'), ('min_finc', 'float >= 1', 1.5, False, 'Minimum step size increase factor.')])

    def __init__(self, conf, **kwargs):
        ElastodynamicsBasicTSC.__init__(self, conf=conf, **kwargs)
        if (self.conf.min_finc < 1.0):
            raise ValueError(f'min_finc must be >= 1! (is {conf.min_finc})')
        # Counts consecutive accepted steps since the last reset.
        self.count = 0

    def __call__(self, ts, vec0, vec1, unpack, **kwargs):
        """Return (new_dt, status) given the basic controller's proposal."""
        conf = self.conf
        dt = ts.dt
        (new_dt, status) = ElastodynamicsBasicTSC.__call__(self, ts, vec0, vec1, unpack, **kwargs)
        if (status.result == 'reject'):
            # Rejection: restart the wait counter and shrink further by fred.
            self.count = 0
            new_dt *= conf.fred
        elif (self.count >= conf.inc_wait):
            # Waited long enough: accept the increase only if it is large
            # enough to matter; otherwise keep waiting with dt unchanged.
            if ((new_dt / dt) >= conf.min_finc):
                self.count = 0
            else:
                new_dt = dt
        else:
            # Accepted but still within the wait window: hold dt steady.
            self.count += 1
            new_dt = dt
        return (new_dt, status)
class Evaluater(BaseTrainer):
    """Evaluation driver: runs a model over a data loader and aggregates
    metrics (plain, cv-masked 'mv' variant, and a sample-weighted running
    average), skipping batches that produce NaN metrics.
    """

    def __init__(self, model, loss, metrics, config, data_loader):
        # No optimizer is passed to the base trainer (evaluation only).
        super().__init__(model, loss, metrics, None, config)
        self.config = config
        self.data_loader = data_loader
        # Default log interval ~ sqrt(batch size), as in the trainer.
        self.log_step = config['evaluater'].get('log_step', int(np.sqrt(data_loader.batch_size)))
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.len_data = len(self.data_loader)
        if isinstance(loss, torch.nn.Module):
            self.loss.to(self.device)
            if (len(self.device_ids) > 1):
                self.loss = torch.nn.DataParallel(self.loss, self.device_ids)
        # Optional evaluation controls from the 'evaluater' config section.
        self.roi = config['evaluater'].get('roi', None)
        self.alpha = config['evaluater'].get('alpha', None)
        self.max_distance = config['evaluater'].get('max_distance', None)
        self.correct_length = config['evaluater'].get('correct_length', False)
        self.median_scaling = config['evaluater'].get('median_scaling', False)
        self.eval_mono = config['evaluater'].get('eval_mono', False)

    def _eval_metrics(self, data_dict):
        """Evaluate all metrics on one batch; returns (metrics, valid,
        metrics_mv, valid_mv), zeroing a set and its validity flags when any
        of its values is NaN."""
        acc_metrics = np.zeros(len(self.metrics))
        acc_metrics_mv = np.zeros(len(self.metrics))
        for (i, metric) in enumerate(self.metrics):
            # NOTE(review): median scaling is re-applied inside the metric
            # loop, once per metric -- presumably idempotent; confirm.
            if self.median_scaling:
                data_dict = median_scaling(data_dict)
            acc_metrics[i] += metric(data_dict, self.roi, self.max_distance, eval_mono=self.eval_mono)
            # 'mv' variant: same metric restricted by the cross-view mask.
            acc_metrics_mv[i] += metric(data_dict, self.roi, self.max_distance, use_cvmask=True, eval_mono=self.eval_mono)
        # A NaN anywhere invalidates the whole metric vector for this batch.
        if np.any(np.isnan(acc_metrics)):
            acc_metrics = np.zeros(len(self.metrics))
            valid = np.zeros(len(self.metrics))
        else:
            valid = np.ones(len(self.metrics))
        if np.any(np.isnan(acc_metrics_mv)):
            acc_metrics_mv = np.zeros(len(self.metrics))
            valid_mv = np.zeros(len(self.metrics))
        else:
            valid_mv = np.ones(len(self.metrics))
        return (acc_metrics, valid, acc_metrics_mv, valid_mv)

    def eval(self, model_index):
        """Run a full evaluation pass; returns a dict of averaged losses and
        metrics (per valid batch, mv-masked, and sample-weighted)."""
        self.model.eval()
        total_loss = 0
        total_loss_dict = {}
        total_metrics = np.zeros(len(self.metrics))
        total_metrics_valid = np.zeros(len(self.metrics))
        total_metrics_mv = np.zeros(len(self.metrics))
        total_metrics_valid_mv = np.zeros(len(self.metrics))
        total_metrics_runningavg = np.zeros(len(self.metrics))
        num_samples = 0
        for (batch_idx, (data, target)) in enumerate(self.data_loader):
            (data, target) = (to(data, self.device), to(target, self.device))
            data['target'] = target
            with torch.no_grad():
                data = self.model(data)
                # NOTE(review): loss computation appears disabled -- loss_dict
                # is hard-coded to a zero tensor, so all reported losses are 0.
                loss_dict = {'loss': torch.tensor([0])}
                loss = loss_dict['loss']
                output = data['result']
            total_loss += loss.item()
            total_loss_dict = operator_on_dict(total_loss_dict, loss_dict, operator.add)
            (metrics, valid, metrics_mv, valid_mv) = self._eval_metrics(data)
            total_metrics += metrics
            total_metrics_valid += valid
            total_metrics_mv += metrics_mv
            total_metrics_valid_mv += valid_mv
            batch_size = target.shape[0]
            # Sample-weighted running average, robust to a varying final
            # batch size (unlike the per-batch averages above).
            if (num_samples == 0):
                total_metrics_runningavg += metrics
            else:
                total_metrics_runningavg = ((total_metrics_runningavg * (num_samples / (num_samples + batch_size))) + (metrics * (batch_size / (num_samples + batch_size))))
            num_samples += batch_size
            if ((batch_idx % self.log_step) == 0):
                self.logger.debug(f'Evaluating {self._progress(batch_idx)} Loss: {(loss.item() / (batch_idx + 1)):.6f} Metrics: {list((total_metrics / (batch_idx + 1)))}')
            # NOTE(review): batch_idx ranges over [0, len_data - 1], so this
            # break never fires; the loop always ends naturally.  Confirm.
            if (batch_idx == self.len_data):
                break
        log = {'loss': (total_loss / self.len_data), 'metrics': self.save_digits((total_metrics / total_metrics_valid).tolist()), 'metrics_mv': self.save_digits((total_metrics_mv / total_metrics_valid_mv).tolist()), 'metrics_correct': self.save_digits(total_metrics_runningavg.tolist()), 'valid_batches': total_metrics_valid[0], 'valid_batches_mv': total_metrics_valid_mv[0]}
        for (loss_component, v) in total_loss_dict.items():
            log[f'loss_{loss_component}'] = (v.item() / self.len_data)
        return log

    def save_digits(self, input_list):
        # Round every entry to three decimal places (as floats).
        return [float('{:.3f}'.format(i)) for i in input_list]

    def _progress(self, batch_idx):
        """Format progress as '[current/total (pct%)]', in samples when the
        loader exposes n_samples, otherwise in batches."""
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader, 'n_samples'):
            current = (batch_idx * self.data_loader.batch_size)
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_data
        return base.format(current, total, ((100.0 * current) / total))
def test_ufunc_isnan_c():
    """np.isnan on complex64 must work even when the C compiler uses
    -ffast-math (which assumes no NaNs exist): temporarily swap in
    -fno-finite-math-only, run the program, then restore the flags."""
    # FIX: the original had a bare `_numpy_output(check_dtype=True)` call
    # before the def -- restored as the decorator it was meant to be.
    @_numpy_output(check_dtype=True)
    def ufunc_isnan_c(A: dace.complex64[10]):
        A[0] = np.inf
        # FIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
        A[1] = np.nan
        return np.isnan(A)
    args = dace.Config.get('compiler', 'cpu', 'args')
    print(args)
    if (args.find('-ffast-math') >= 0):
        new_args = args.replace('-ffast-math', '-fno-finite-math-only')
        print(new_args)
        dace.Config.set('compiler', 'cpu', 'args', value=new_args)
    print(dace.Config.get('compiler', 'cpu', 'args'))
    # FIX: restore the original compiler flags even if the program fails,
    # so a failing test cannot poison subsequent tests' configuration.
    try:
        ufunc_isnan_c()
    finally:
        dace.Config.set('compiler', 'cpu', 'args', value=args)
def readJSON(url):
    """Fetch *url* and decode its body as JSON.

    Returns the parsed object.  Propagates urllib errors (URLError,
    HTTPError) and json.JSONDecodeError unchanged.
    """
    # FIX: the original never closed the response, leaking the connection
    # until GC; urlopen's result is a context manager -- use it.
    with request.urlopen(url) as response:
        return json.loads(response.read())
class GatherRecord(ModelLayer):
    """Caffe2 layer that gathers rows of a (possibly nested) schema record by
    the indices in input_record.indices, recursing through Structs and
    handling List length-prefixes via LengthsGather."""

    def __init__(self, model, input_record, name='gather_record', **kwargs):
        super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
        # The layer needs both the indices to gather and the record to
        # gather from.
        assert ('indices' in input_record)
        assert ('record' in input_record)
        self.output_schema = schema.NewRecord(model.net, input_record.record.clone_schema())
        self._indices = self.input_record.indices()

    def _gather_scalar(self, net, record, lengths_blob, output_record):
        # Top-level scalars use plain Gather; scalars nested under a List use
        # LengthsGather so each index pulls its whole variable-length segment.
        if (lengths_blob is None):
            net.Gather([record(), self._indices], output_record())
        else:
            net.LengthsGather([record(), lengths_blob, self._indices], output_record())

    def _gather_struct(self, net, record, lengths_blob, output_record):
        # Recurse field-by-field, preserving the struct layout.
        for (name, field) in record.get_children():
            self._dispatch(net, field, lengths_blob, output_record[name])

    def _gather_list(self, net, record, lengths_blob, output_record):
        # Gather the lengths themselves first, then gather the items with
        # lengths-aware indexing.
        self._gather_scalar(net, record.lengths, lengths_blob, output_record.lengths)
        if (lengths_blob is None):
            lengths_blob = record.lengths()
        else:
            # Nested list: combine the outer segment lengths with this
            # level's lengths.  The float round-trip is needed because
            # LengthsSum does not operate on integer inputs.
            lengths_float = net.Cast(record.lengths(), net.NextScopedBlob((str(record.lengths()) + '_float')), to=core.DataType.FLOAT)
            lengths_blob_float = net.LengthsSum([lengths_float, lengths_blob], net.NextScopedBlob((str(record.lengths()) + '_nested_float')))
            lengths_blob = net.Cast(lengths_blob_float, net.NextScopedBlob((str(record.lengths()) + '_nested')), to=core.DataType.INT32)
        self._dispatch(net, record._items, lengths_blob, output_record._items)

    def _dispatch(self, net, record, lengths_blob, output_record):
        # Route by schema node type; anything else is unsupported.
        if isinstance(record, schema.Scalar):
            self._gather_scalar(net, record, lengths_blob, output_record)
        elif isinstance(record, schema.Struct):
            self._gather_struct(net, record, lengths_blob, output_record)
        elif isinstance(record, schema.List):
            self._gather_list(net, record, lengths_blob, output_record)
        else:
            raise NotImplementedError

    def add_ops(self, net):
        self._dispatch(net, self.input_record.record, None, self.output_schema)
# NOTE(review): the bare parenthesized expression below looks like a stripped
# decorator, presumably @pytest.fixture(scope='function') -- confirm upstream.
(scope='function')
def function_analysis() -> FunctionAnalysisVisitor:
    """Provide a fresh FunctionAnalysisVisitor instance (function-scoped fixture)."""
    return FunctionAnalysisVisitor()
class Detect():
    """YOLO-style detection head built with TF/Keras layers.

    Builds per-scale box (cv2) and class (cv3) convolution branches and
    decodes DFL box-bin distributions into bounding boxes via precomputed
    anchors and strides.

    NOTE(review): assumes a fixed 640x640 input with feature maps 80/40/20
    and strides 8/16/32 -- confirm before reusing at other resolutions.
    """

    def __init__(self, nc: int=80, ch: List[int]=(), name: str=''):
        self.nc = nc                 # number of classes
        self.nl = len(ch)            # number of detection scales
        self.reg_max = 16            # DFL bins per box side
        # outputs per anchor: nc class logits + 4 sides * reg_max DFL bins
        self.no = (nc + (self.reg_max * 4))
        self.feat_sizes = [80, 40, 20]
        self.stride_sizes = [8, 16, 32]
        img_size = 640
        # cumulative anchor counts per scale, used below to split the
        # flattened per-anchor strides back into per-scale chunks
        (nd0, nd1, nd2) = np.cumsum([(sz ** 2) for sz in self.feat_sizes])
        (c2, c3) = (max((16, (ch[0] // 4), (self.reg_max * 4))), max(ch[0], self.nc))
        (a, b) = ([], [])
        for s in self.stride_sizes:
            # bias init: constant 1.0 for the box branch; log-prior for the
            # class branch (scales with expected objects per cell)
            a.append(initializers.Constant(1.0))
            b.append(initializers.Constant(math.log(((5 / self.nc) / ((img_size / s) ** 2)))))
        self.cv2 = [[Conv(x, c2, 3, name=f'{name}.cv2.{i}.0'), Conv(c2, c2, 3, name=f'{name}.cv2.{i}.1'), layers.Conv2D((4 * self.reg_max), 1, bias_initializer=a[i], name=f'{name}.cv2.{i}.2')] for (i, x) in enumerate(ch)]
        self.cv3 = [[Conv(x, c3, 3, name=f'{name}.cv3.{i}.0'), Conv(c3, c3, 3, name=f'{name}.cv3.{i}.1'), layers.Conv2D(self.nc, 1, bias_initializer=b[i], name=f'{name}.cv3.{i}.2')] for (i, x) in enumerate(ch)]
        self.dfl = DFL(self.reg_max, name=name)
        (self.anchors, self.strides) = (x.transpose(0, 1) for x in make_anchors(self.feat_sizes, self.stride_sizes, 0.5))
        # strides are normalized by the input size, then split per scale
        self.strides = (self.strides / img_size)
        self.split_strides = [self.strides[:nd0], self.strides[nd0:nd1], self.strides[nd1:nd2]]
        self.anchors = (self.anchors * self.strides)

    def __call__(self, x):
        """Run the head on per-scale feature maps *x* (list of nl tensors);
        return [decoded boxes, sigmoid class scores]."""
        feat = self.feat_sizes
        (xbox, xcls) = ([0, 0, 0], [0, 0, 0])
        for i in range(self.nl):
            # box branch (cv2) for scale i
            x0 = self.cv2[i][0](x[i])
            x0 = self.cv2[i][1](x0)
            x0 = self.cv2[i][2](x0)
            # class branch (cv3) for scale i
            x1 = self.cv3[i][0](x[i])
            x1 = self.cv3[i][1](x1)
            x1 = self.cv3[i][2](x1)
            (xbox[i], xcls[i]) = (x0, x1)
        # flatten each scale to (batch, anchors, nc) and concatenate
        cls = Concatenate(axis=1)([tf.reshape(xi, ((- 1), (feat[i] ** 2), self.nc)) for (i, xi) in enumerate(xcls)])
        y_cls = tf.math.sigmoid(cls)
        box = [tf.reshape(xi, ((- 1), (feat[i] ** 2), (self.reg_max * 4))) for (i, xi) in enumerate(xbox)]
        # DFL turns bin distributions into distances; scale by per-anchor stride
        dist = Concatenate(axis=1)([tf.math.multiply(self.dfl(b), self.split_strides[i]) for (i, b) in enumerate(box)])
        anchors = tf.expand_dims(self.anchors, 0)
        y_bb = dist2bbox(anchors, dist)
        y_bb = tf.expand_dims(y_bb, 2)
        return [y_bb, y_cls]
class Macaulay2FunctionElement(FunctionElement):
    """A Macaulay2 method bound to an element, with doc/source introspection."""

    def _instancedoc_(self):
        """Return the Macaulay2 help text for the methods applicable to this
        element, with the trailing DIV documentation block trimmed."""
        P = self._obj.parent()
        r = P.eval('help prepend({0}, select(methods {0}, m->instance({1}, m#1)))'.format(self._name, self._obj._name))
        # drop everything from the last '\n\nDIV' marker onward, if present
        end = r.rfind('\n\nDIV')
        if (end != (- 1)):
            r = r[:end]
        return AsciiArtString(('nodetex,noreplace\n' + r))

    def _sage_src_(self):
        """Return the Macaulay2 source code of the applicable methods."""
        return self._obj.parent().eval(('code select(methods %s, m->instance(%s, m#1))' % (self._name, self._obj._name)))
# NOTE(review): the bare call below looks like a stripped decorator,
# presumably @register_kl(Uniform, Exponential) -- confirm against upstream.
# (The function name's 'exponetial' typo is also upstream's; renaming would
# change the registered symbol, so it is left as-is.)
_kl(Uniform, Exponential)
def _kl_uniform_exponetial(p, q):
    # KL(U(low, high) || Exp(rate)) = rate*(high+low)/2 - log((high-low)*rate)
    result = (((q.rate * (p.high + p.low)) / 2) - ((p.high - p.low) * q.rate).log())
    # divergence is +inf wherever the uniform support extends below the
    # exponential's support lower bound
    result[(p.low < q.support.lower_bound)] = inf
    return result
def spawn_3D_maze(map, base_pos=5):
    """Spawn a 3-D maze of blocks from a nested tile map.

    *map* is indexed as map[k][j][i] -> layer k becomes world height
    (base_pos + k), j becomes z, i becomes x.  Each tile value is converted
    to a block type via get_tile().

    Fix: *base_pos* was previously accepted but ignored (the height offset
    was hard-coded to 5, which is also the default); it is now honored.
    All blocks are sent to the client in a single spawnBlocks call.
    """
    blocks = []
    for k in range(len(map)):
        for j in range(len(map[k])):
            for i in range(len(map[k][j])):
                item = get_tile(map[k][j][i])
                # use the caller-supplied base height instead of a literal 5
                blocks.append(Block(position=Point(x=i, y=(k + base_pos), z=j), type=item, orientation=NORTH))
    CLIENT.spawnBlocks(Blocks(blocks=blocks))
    return
class TwitterManagerReplyToTweet(VirtualFunctionTool):
    """Virtual-tool specification: reply to an existing tweet, optionally
    attaching media.

    Purely declarative metadata (parameters, return schema, exceptions)
    consumed by the VirtualFunctionTool framework; no executable logic here.
    """
    name = 'TwitterManagerReplyToTweet'
    summary = 'Reply to a tweet by its ID.'
    # required: tweet_id, content (<=280 chars); optional: media_paths (<=4)
    parameters: List[ArgParameter] = [{'name': 'tweet_id', 'type': 'string', 'description': 'The unique identifier of the tweet to reply to.', 'required': True}, {'name': 'content', 'type': 'string', 'description': 'The content of the reply, max length 280 characters.', 'required': True}, {'name': 'media_paths', 'type': 'array', 'description': 'An optional list of media file paths to attach to the tweet, max 4 items. Each item must be a valid image or video file path in the local file system.', 'required': False}]
    returns: List[ArgReturn] = [{'name': 'reply_tweet_id', 'type': 'string', 'description': 'The unique identifier of the posted reply tweet.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'content' parameter is too long, or the 'media_paths' parameter contains too many items, or at least one of the media files is not a valid image or video file."}, {'name': 'NotFoundException', 'description': "The 'tweet_id' parameter is not found, or at least one of the media files does not exist in the local file system."}]
class WatchdogReloaderLoop(ReloaderLoop):
    """Reloader loop backed by watchdog filesystem observers.

    Instead of polling file mtimes, a watchdog Observer watches every
    observable directory and triggers a reload whenever a Python source file
    (or an explicitly listed extra file) is created, modified, moved or
    deleted.
    """

    def __init__(self, *args, **kwargs):
        ReloaderLoop.__init__(self, *args, **kwargs)
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
        self.observable_paths = set()

        def _check_modification(filename):
            # explicitly registered extra files always trigger a reload
            if (filename in self.extra_files):
                self.trigger_reload(filename)
            dirname = os.path.dirname(filename)
            if dirname.startswith(tuple(self.observable_paths)):
                # only Python sources and their bytecode are interesting
                if filename.endswith(('.pyc', '.pyo', '.py')):
                    self.trigger_reload(filename)

        class _CustomHandler(FileSystemEventHandler):
            # every filesystem event funnels into _check_modification
            def on_created(self, event):
                _check_modification(event.src_path)

            def on_modified(self, event):
                _check_modification(event.src_path)

            def on_moved(self, event):
                # a move touches both the old and the new location
                _check_modification(event.src_path)
                _check_modification(event.dest_path)

            def on_deleted(self, event):
                _check_modification(event.src_path)

        # derive a readable name from the concrete observer class,
        # e.g. 'InotifyObserver' -> 'inotify reloader'
        reloader_name = Observer.__name__.lower()
        if reloader_name.endswith('observer'):
            reloader_name = reloader_name[:(- 8)]
        reloader_name += ' reloader'
        self.name = reloader_name
        self.observer_class = Observer
        self.event_handler = _CustomHandler()
        self.should_reload = False

    def trigger_reload(self, filename):
        # flip the flag checked by run(); the actual restart happens there
        self.should_reload = True
        self.log_reload(filename)

    def run(self):
        """Keep directory watches in sync with the observable paths until a
        reload is triggered, then exit with code 3 so the supervising
        process restarts the application."""
        watches = {}
        observer = self.observer_class()
        observer.start()
        try:
            while (not self.should_reload):
                to_delete = set(watches)
                paths = _find_observable_paths(self.extra_files)
                for path in paths:
                    if (path not in watches):
                        try:
                            watches[path] = observer.schedule(self.event_handler, path, recursive=True)
                        except OSError:
                            # remember unwatchable paths (e.g. vanished dirs)
                            # so we do not retry them every iteration
                            watches[path] = None
                    to_delete.discard(path)
                # unschedule watches for paths that are no longer observable
                for path in to_delete:
                    watch = watches.pop(path, None)
                    if (watch is not None):
                        observer.unschedule(watch)
                self.observable_paths = paths
                self._sleep(self.interval)
        finally:
            observer.stop()
            observer.join()
        sys.exit(3)
# NOTE(review): the two leading lines look like stripped pytest decorators,
# presumably @pytest.mark.parametrize(...) -- confirm against upstream.
.parametrize('inspecs', pairwise_inspecs_params())
.parametrize('op', ['logical_and', 'logical_or', 'logical_xor', 'greater', 'greater_equal', 'less', 'less_equal', 'equal', 'not_equal'])
def test_pairwise_logical(inspecs, op, nnabla_opts):
    """Benchmark one pairwise logical/comparison function from nnabla's
    function module and write the results via the configured writer."""
    func = getattr(F, op)
    fb = FunctionBenchmark(func, inspecs, [], {}, nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
def is_in_span(index, all_entity_spans):
    """Return True if *index* falls inside any half-open [start, end) span.

    Each element of *all_entity_spans* is indexable with span[0] = start
    (inclusive) and span[1] = end (exclusive).
    """
    return any(span[0] <= index < span[1] for span in all_entity_spans)
class OPRAVideoPath(Dataset):
    """Dataset that shards OPRA video paths (and matching save dirs) across GPUs.

    Each dataset index corresponds to one GPU's contiguous shard of videos,
    not to a single video; len(self) equals num_gpus.
    """

    def __init__(self, root, split, save_dir, num_gpus):
        super().__init__()
        videos = get_opra_videos(root, split)
        save_dirs = to_affominer_dirs(videos, save_dir)
        (self.videos, self.save_dirs) = ([], [])
        for (video, save_dir) in zip(videos, save_dirs):
            self.videos.append(video)
            self.save_dirs.append(save_dir)
        print(f'no process video data size: {len(self.videos)}')
        # shard size: floor(total / num_gpus) + 1, so the last shard may be
        # shorter (or empty when total is an exact multiple of num_gpus)
        n = ((len(self.videos) // num_gpus) + 1)
        self.videos = [self.videos[(n * i):(n * (i + 1))] for i in range(num_gpus)]
        self.save_dirs = [self.save_dirs[(n * i):(n * (i + 1))] for i in range(num_gpus)]
        self.num_gpus = num_gpus

    def __getitem__(self, index):
        """Return (video shard, save-dir shard) for GPU *index*."""
        return (self.videos[index], self.save_dirs[index])

    def __len__(self):
        # one item per GPU shard
        return self.num_gpus
class ReverseLSTMLayer(jit.ScriptModule):
    """TorchScript LSTM layer that processes the time dimension in reverse."""

    def __init__(self, cell, *cell_args):
        super(ReverseLSTMLayer, self).__init__()
        # cell is a class; instantiate it with the forwarded arguments
        self.cell = cell(*cell_args)

    # NOTE(review): the bare name below looks like a stripped decorator,
    # presumably @jit.script_method -- confirm against upstream.
    _method
    def forward(self, input, state):
        """Apply the cell over time steps in reverse order; outputs are
        re-reversed so they align with the original time order.
        Returns (stacked outputs, final cell state)."""
        inputs = reverse(input.unbind(0))
        outputs = jit.annotate(List[Tensor], [])
        for i in range(len(inputs)):
            (out, state) = self.cell(inputs[i], state)
            outputs += [out]
        return (torch.stack(reverse(outputs)), state)
def is_model_only_checkpoint(checkpoint):
    """Return True when *checkpoint* holds only model weights.

    PyTorch Lightning trainer checkpoints store weights under 'state_dict';
    other checkpoints store them under 'model'.  A checkpoint missing the
    relevant key is considered model-only.
    """
    weights_key = 'state_dict' if is_pl_trainer_checkpoint(checkpoint) else 'model'
    return weights_key not in checkpoint
# NOTE(review): the bare call below looks like a stripped decorator,
# presumably sklearn's @ignore_warnings(category=ConvergenceWarning)
# -- confirm against upstream.
_warnings(category=ConvergenceWarning)
def process_single_scan(args, dataset, gt_path: Path, cat_id_to_feature, valid_ids):
    """Compute AP results for one scan and pickle them to disk.

    Loads the scan's point cloud and Detic scene graph, optionally attaches
    K-means 'representative' features to each graph node, matches predicted
    instance masks against ground-truth instances, and saves the AP results.

    Returns the AP results, or None when the scene graph file is missing.
    """
    scan = os.path.splitext(gt_path.name)[0]
    print(f'Start processing scan = {scan!r}')
    scan_path = (dataset / scan)
    scene_pcd = o3d.io.read_point_cloud(str((scan_path / f'{scan}_vh_clean_2.ply')))
    scene_graph_path = ((((scan_path / args.detic_output_folder) / args.detic_exp) / 'predictions') / args.prediction_file)
    if (not scene_graph_path.is_file()):
        print('')
        print(f"scene_graph_path = {scene_graph_path!r} doesn't exist!")
        print('')
        return
    output_path = ((((scan_path / args.detic_output_folder) / args.detic_exp) / 'results') / f'{args.feature_name}_{args.prediction_file}')
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(scene_graph_path, 'rb') as fp:
        scene_graph = pickle.load(fp)
    scene_graph.graph['scan'] = scan
    scene_graph.graph['n_pts'] = len(scene_pcd.points)
    # NOTE(review): singular 'representative_feature' here vs plural
    # 'representative_features' in the assert branch below -- verify which
    # spelling args.feature_name actually carries; they look inconsistent.
    if (args.feature_name == 'representative_feature'):
        detection_folder = (((scan_path / args.detic_output_folder) / args.detic_exp) / 'instances')
        frame_to_features = dict()
        # collect per-frame detection features keyed by frame id
        for instance_file in detection_folder.iterdir():
            frame_id = instance_file.stem.split('-')[0]
            with open(instance_file, 'rb') as fp:
                instance = pickle.load(fp)
            features = instance.pred_box_features.numpy()
            frame_to_features[frame_id] = features
        for i in scene_graph.nodes:
            node = scene_graph.nodes[i]
            node['features'] = []
            for detection in node['detections']:
                (frame, mask_idx, _) = detection
                node['features'].append(frame_to_features[frame][mask_idx])
            node['features'] = np.stack(node['features'], axis=0)
            if (node['features'].shape[0] <= args.K):
                # fewer detections than clusters: sample K indices
                # (np.random.choice samples with replacement here)
                rand_indices = np.random.choice(node['features'].shape[0], args.K)
                node['representative_features'] = node['features'][rand_indices]
            else:
                k_means = KMeans(n_clusters=args.K, n_init='auto', random_state=0)
                k_means.fit(node['features'])
                node['representative_features'] = k_means.cluster_centers_
    gt_instance_ids = np.loadtxt(gt_path, dtype=np.int64)
    (gt_cat_ids, gt_masks) = get_gt_instances(gt_instance_ids, valid_ids=valid_ids)
    (pred_features, pred_masks) = get_predicted_instances(scene_graph, feature_name=args.feature_name)
    if (args.feature_name == 'representative_features'):
        # each mask should contribute exactly K feature rows
        assert ((pred_features.shape[0] // args.K) == pred_masks.shape[0]), f'pred_features.shape[0] = {pred_features.shape[0]!r}, pred_masks.shape[0] = {pred_masks.shape[0]!r}'
    ap_results = compute_ap_for_each_scan(pred_features, pred_masks, gt_cat_ids, gt_masks, cat_id_to_feature)
    with open(output_path, 'wb') as fp:
        pickle.dump(ap_results, fp)
    print(f'Processed scan = {scan!r}. Results saved to: {output_path}')
    return ap_results
class GOT10kVideo(Video):
    """A single GOT-10k benchmark video sequence.

    Thin wrapper that forwards all construction arguments unchanged to the
    generic Video base class.
    """

    def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img=False):
        super().__init__(name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img)
def vectorize_sequence(sequence: str, feature_length: int, w2v_model: Word2Vec) -> np.ndarray:
    """Embed a whitespace-tokenized *sequence* into a fixed-size matrix.

    Produces a (feature_length, W2V_VEC_LENGTH) array: row i holds the
    word2vec vector of token i.  Tokens beyond feature_length are dropped;
    out-of-vocabulary tokens leave their row as zeros.
    """
    matrix = np.zeros((feature_length, W2V_VEC_LENGTH))
    for position, token in enumerate(sequence.split()):
        if position >= feature_length:
            break  # truncate overly long sequences
        try:
            matrix[position] = w2v_model.wv[token]
        except KeyError:
            pass  # OOV token: keep the zero row
    return matrix
class Benchmark(srdata.SRData):
    """Super-resolution benchmark dataset rooted at <dir_data>/benchmark/<name>."""

    def __init__(self, args, name='', train=True, benchmark=True):
        # NOTE(review): the `benchmark` parameter is accepted but ignored --
        # True is always forwarded to SRData; confirm that is intentional.
        super(Benchmark, self).__init__(args, name=name, train=train, benchmark=True)

    def _set_filesystem(self, dir_data):
        """Set HR/LR directory paths and the filename extension scheme."""
        self.apath = os.path.join(dir_data, 'benchmark', self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        # input_large selects the pre-upsampled bicubic variant
        if self.input_large:
            self.dir_lr = os.path.join(self.apath, 'LR_bicubicL')
        else:
            self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        # (filename suffix, image extension)
        self.ext = ('', '.png')
# NOTE(review): the bare parenthesized expressions below look like stripped
# dace constructs (@dace.program argument spec, map ranges, tasklet memlets)
# -- confirm against the original dace SpMV sample before relying on this.
(dace.uint32[(H + 1)], dace.uint32[nnz], dace.float32[nnz], dace.float32[W], dace.float32[H])
def spmv(A_row, A_col, A_val, x, b):
    """Sparse matrix-vector product b = A @ x with A stored in CSR form
    (A_row: row offsets, A_col: column indices, A_val: nonzero values)."""
    (_[0:H])
    def compute_row(i):
        (_[A_row[i]:A_row[(i + 1)]])
        def compute(j):
            # read one nonzero and the matching x entry; accumulate (sum) into b[i]
            (a << A_val[j])
            (in_x << x[A_col[j]])
            (out >> b(1, (lambda x, y: (x + y)))[i])
            out = (a * in_x)
def _combine_images_with_annotations(dataset_name: str, image_root: str, img_datas: Iterable[Dict[(str, Any)]], ann_datas: Iterable[Iterable[Dict[(str, Any)]]]):
    """Merge per-image metadata with their annotation lists into dataset dicts.

    *img_datas* and *ann_datas* are iterated in lockstep; each image dict is
    turned into a record with resolved file path, size, category-id lists and
    the converted annotation objects.
    """

    def _file_name(img_root, img_dict):
        # the last two URL components are the split folder and file name;
        # NOTE: img_root is concatenated directly with the split folder
        # (no separator inserted between them)
        (split_folder, file_name) = img_dict['coco_url'].split('/')[(- 2):]
        return os.path.join(img_root + split_folder, file_name)

    dataset_dicts = []
    for (img_dict, ann_dicts) in zip(img_datas, ann_datas):
        record = {
            'file_name': _file_name(image_root, img_dict),
            'height': img_dict['height'],
            'width': img_dict['width'],
            'not_exhaustive_category_ids': img_dict.get('not_exhaustive_category_ids', []),
            'neg_category_ids': img_dict.get('neg_category_ids', []),
            'image_id': img_dict['id'],
            'dataset': dataset_name,
        }
        objs = []
        for ann_dict in ann_dicts:
            # every annotation must belong to the image being processed
            assert ann_dict['image_id'] == record['image_id']
            obj = {}
            _maybe_add_bbox(obj, ann_dict)
            obj['iscrowd'] = ann_dict.get('iscrowd', 0)
            obj['category_id'] = ann_dict['category_id']
            _maybe_add_segm(obj, ann_dict)
            _maybe_add_keypoints(obj, ann_dict)
            _maybe_add_densepose(obj, ann_dict)
            objs.append(obj)
        record['annotations'] = objs
        dataset_dicts.append(record)
    return dataset_dicts
def bmprofile_analyze(input_dir: str, output_dir: str, out_format: str='html', options=None):
    """Parse BM profile data from *input_dir* and generate a report.

    Fix: the original used a mutable default argument (``options={}``),
    which is shared across calls; ``None`` now stands in for "no options"
    and is replaced with a fresh empty dict per call, so callers see the
    same behavior without the shared-state hazard.

    out_format selects the report format (default 'html'); *options* is
    forwarded to the generator.
    """
    options = {} if options is None else options
    parser = BMProfileParser()
    parsed_data = parser.parse(input_dir)
    generator = BMProfileGenerator()
    generator.generate(parsed_data, output_dir, out_format, options)
def _forward(config):
    """Run the trained model forward over the forward split and dump outputs.

    Requires config.load (a trained checkpoint must be restorable).
    Optionally dumps answers and evaluation results to the configured paths.
    """
    assert config.load
    test_data = read_data(config, config.forward_name, True)
    update_config(config, [test_data])
    _config_debug(config)
    if config.use_glove_for_unk:
        # rebuild the embedding matrix for newly indexed words from GloVe
        word2vec_dict = (test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec'])
        new_word2idx_dict = test_data.shared['new_word2idx']
        idx2vec_dict = {idx: word2vec_dict[word] for (word, idx) in new_word2idx_dict.items()}
        new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
        config.new_emb_mat = new_emb_mat
    pprint(config.__flags, indent=2)
    models = get_multi_gpu_models(config)
    model = models[0]
    evaluator = ForwardEvaluator(config, model)
    graph_handler = GraphHandler(config, model)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    graph_handler.initialize(sess)
    # optionally cap the number of evaluated batches
    num_batches = math.ceil((test_data.num_examples / config.batch_size))
    if (0 < config.test_num_batches < num_batches):
        num_batches = config.test_num_batches
    e = evaluator.get_evaluation_from_batches(sess, tqdm(test_data.get_batches(config.batch_size, num_batches=num_batches), total=num_batches))
    print(e)
    if config.dump_answer:
        print('dumping answer ...')
        graph_handler.dump_answer(e, path=config.answer_path)
    if config.dump_eval:
        print('dumping eval ...')
        graph_handler.dump_eval(e, path=config.eval_path)
def parse_videos(html):
    """Extract video metadata from a YouTube channel-videos HTML page.

    Parses the embedded ``ytInitialData`` JSON blob and walks the tab
    structure to locate the grid of video renderers.

    Fix: the original used bare ``except:`` clauses, which also swallow
    KeyboardInterrupt/SystemExit; they are narrowed to ``except Exception``
    while keeping the same best-effort fallback behavior.

    Returns a list of (video_id, views, age_text, title) tuples; views and
    age_text are '' when missing.  Returns [] when the page cannot be parsed.
    """
    try:
        page_info_str = [l for l in html.split('\n') if ('ytInitialData = ' in l)][0]
        page_info_str = page_info_str.split('ytInitialData = ')[1].split('</script>')[0].rstrip(';')
        page_info_d = json.loads(page_info_str)
        vid_l = []
        # the video grid may live under any tab; take the first that matches
        for tab_d in page_info_d['contents']['twoColumnBrowseResultsRenderer']['tabs']:
            try:
                vid_l = tab_d['tabRenderer']['content']['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents'][0]['gridRenderer']['items']
                break
            except Exception:
                continue
    except Exception:
        print('VID PARSE ERROR.')
        return []
    vid_out_l = []
    for vid_d in vid_l:
        try:
            vid_id = vid_d['gridVideoRenderer']['videoId']
            # title is either a simpleText or a runs[] structure
            if ('simpleText' in vid_d['gridVideoRenderer']['title']):
                title = vid_d['gridVideoRenderer']['title']['simpleText']
            else:
                title = vid_d['gridVideoRenderer']['title']['runs'][0]['text']
        except Exception:
            # no id/title -> not a video entry; skip it
            continue
        try:
            views_raw = vid_d['gridVideoRenderer']['viewCountText']['simpleText']
            views = views_raw.replace(' views', '').replace(',', '')
        except Exception:
            views = ''
        try:
            vid_age_raw = vid_d['gridVideoRenderer']['publishedTimeText']['simpleText']
        except Exception:
            vid_age_raw = ''
        vid_out_l.append((vid_id, views, vid_age_raw, title))
    return vid_out_l
class MIRNet_v2(nn.Module):
    """MIRNet-v2 image restoration network.

    A shallow input conv, four RRG stages with fixed group counts
    (1, 2, 4, 4 -- the n_RRG argument is not consulted), and an output conv.
    For the 'defocus_deblurring' task the shallow features are added before
    the output conv; otherwise a global residual adds the input image to
    the output.
    """

    def __init__(self, inp_channels=3, out_channels=3, n_feat=80, chan_factor=1.5, n_RRG=4, n_MRB=2, height=3, width=2, scale=1, bias=False, task=None):
        super(MIRNet_v2, self).__init__()
        self.task = task
        self.conv_in = nn.Conv2d(inp_channels, n_feat, kernel_size=3, padding=1, bias=bias)
        # four recursive residual groups with increasing channel grouping
        stages = [RRG(n_feat, n_MRB, height, width, chan_factor, bias, groups=g) for g in (1, 2, 4, 4)]
        self.body = nn.Sequential(*stages)
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=3, padding=1, bias=bias)

    def forward(self, inp_img):
        shallow_feats = self.conv_in(inp_img)
        deep_feats = self.body(shallow_feats)
        if self.task == 'defocus_deblurring':
            # residual over features rather than over the input image
            out_img = self.conv_out(deep_feats + shallow_feats)
        else:
            # global residual: predict the correction to the input image
            out_img = self.conv_out(deep_feats) + inp_img
        return out_img
def main():
    """Print the precomputed Stirling and Taylor series coefficients
    (highest-order term first) in 20-digit scientific notation."""
    print(__doc__)
    print()

    def fmt(value):
        # 20 significant digits, always scientific notation
        return mpmath.nstr(value, 20, min_fixed=0, max_fixed=0)

    stirling_coeffs = [fmt(c) for c in reversed(stirling_series(8))]
    taylor_coeffs = [fmt(c) for c in reversed(taylor_series_at_1(23))]
    for header, coeffs in (('Stirling series coefficients', stirling_coeffs),
                           ('Taylor series coefficients', taylor_coeffs)):
        print(header)
        print('')
        print('\n'.join(coeffs))
        print()
def test_jagged_axis1():
    """Regression tests for min/argmin with axis=1 over doubly jagged arrays.

    Each case places an empty inner list at a different position within one
    of the two outer records and checks that reduction skips empties while
    argmin indices still refer to positions in the ORIGINAL (unfiltered)
    sublist order.
    """
    # baseline: no empty sublists
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [4, 3, 2]])
    # empties at various positions in the SECOND record only
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [5, 4, 3]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[], [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [6, 5, 4]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [5, 4, 3]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [5, 4, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [5, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0], []]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [4, 3, 2]])
    # second record has wider sublists (3 and 4 columns)
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [4, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1, 999, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3, 999]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[4, 3, 2], [4, 3, 2, 0]])
    # same scenarios, but with a leading empty sublist in the FIRST record
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [4, 3, 2]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [5, 4, 3]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[], [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [6, 5, 4]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [5, 4, 3]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [5, 4, 2]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [5, 3, 2]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0], []]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [4, 3, 2]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [4, 3, 2]])
    array = ak.highlevel.Array([[[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]], [[1.1, 999, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3, 999]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 3], [4, 3, 2, 0]])
    # same scenarios, but with a mid-record empty sublist in the FIRST record
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [4, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [5, 4, 3]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[], [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [6, 5, 4]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [5, 4, 3]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [5, 4, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [5, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [999, 2.0], [1.0], []]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [4, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [4, 3, 2]])
    array = ak.highlevel.Array([[[1.1], [1.1, 2.2], [1.1, 2.2, 3.3], [], [999, 2.0], [1.0]], [[1.1, 999, 999, 999], [1.1, 2.2, 999], [1.1, 2.2, 3.3], [999, 2.0], [1.0]]])
    assert (ak.operations.min(array, axis=1).to_list() == [[1, 2, 3.3], [1, 2, 3.3, 999]])
    assert (ak.operations.argmin(array, axis=1).to_list() == [[5, 4, 2], [4, 3, 2, 0]])
def generate_y_true_calibrated(y_prob: NDArray, random_state: int=1) -> NDArray:
    """Sample calibrated binary labels: y_true[i] ~ Bernoulli(y_prob[i]).

    A uniform draw per element is compared against the given probabilities,
    so the fraction of positives matches y_prob in expectation.  The result
    is a float array of 0.0/1.0 values, reproducible via *random_state*.
    """
    rng = check_random_state(random_state)
    draws = rng.uniform(size=len(y_prob))
    return (draws <= y_prob).astype(float)
# NOTE(review): the bare parenthesized expressions below look like stripped
# click decorators (@click.command() and @click.option(...)) -- confirm
# against the upstream StyleGAN script.
()
('--network', 'network_pkl', help='Network pickle filename', required=True)
('--rows', 'row_seeds', type=legacy.num_range, help='Random seeds to use for image rows', required=True)
('--cols', 'col_seeds', type=legacy.num_range, help='Random seeds to use for image columns', required=True)
('--styles', 'col_styles', type=legacy.num_range, help='Style layer range', default='0-6', show_default=True)
('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.8, show_default=True)
('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
('--outdir', type=str, required=True, default='outputs/stylemixing')
def generate_style_mix(network_pkl: str, row_seeds: List[int], col_seeds: List[int], col_styles: List[int], truncation_psi: float, noise_mode: str, outdir: str):
    """Render a StyleGAN style-mixing grid.

    Row seeds provide the base W vectors; for each (row, col) cell the
    layers in *col_styles* are replaced by the column seed's W.  The grid
    (with per-seed reference images in the first row/column) is written to
    <outdir>/grid.png.  Requires a CUDA device.
    """
    print(('Loading networks from "%s"...' % network_pkl))
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device)
    os.makedirs(outdir, exist_ok=True)
    print('Generating W vectors...')
    all_seeds = list(set((row_seeds + col_seeds)))
    all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])
    all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
    w_avg = G.mapping.w_avg
    # truncation trick: pull each W toward the average W
    all_w = (w_avg + ((all_w - w_avg) * truncation_psi))
    w_dict = {seed: w for (seed, w) in zip(all_seeds, list(all_w))}
    print('Generating images...')
    all_images = G.synthesis(all_w, noise_mode=noise_mode)
    # [-1, 1] float -> uint8 image layout (N, H, W, C)
    all_images = ((all_images.permute(0, 2, 3, 1) * 127.5) + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
    image_dict = {(seed, seed): image for (seed, image) in zip(all_seeds, list(all_images))}
    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            # copy the row W and splice in the column W at the chosen layers
            w = w_dict[row_seed].clone()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
            image = ((image.permute(0, 2, 3, 1) * 127.5) + 128).clamp(0, 255).to(torch.uint8)
            image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()
    os.makedirs(outdir, exist_ok=True)
    print('Saving image grid...')
    # NOTE(review): cell width is half the model resolution while height is
    # full -- verify this 1:2 aspect ratio is intentional for this model.
    W = (G.img_resolution // 2)
    H = G.img_resolution
    canvas = PIL.Image.new('RGB', ((W * (len(col_seeds) + 1)), (H * (len(row_seeds) + 1))), 'black')
    for (row_idx, row_seed) in enumerate(([0] + row_seeds)):
        for (col_idx, col_seed) in enumerate(([0] + col_seeds)):
            if ((row_idx == 0) and (col_idx == 0)):
                # top-left corner cell stays black
                continue
            key = (row_seed, col_seed)
            if (row_idx == 0):
                # header row: reference image for the column seed
                key = (col_seed, col_seed)
            if (col_idx == 0):
                # header column: reference image for the row seed
                key = (row_seed, row_seed)
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), ((W * col_idx), (H * row_idx)))
    canvas.save(f'{outdir}/grid.png')
def NodesGTEDegree_PUndirNet(Graph, Threshold=0):
    """Thin wrapper delegating to the SNAP C extension for undirected networks.

    Forwards *Graph* and *Threshold* unchanged; see
    _snap.NodesGTEDegree_PUndirNet for the exact semantics and return value.
    """
    return _snap.NodesGTEDegree_PUndirNet(Graph, Threshold)
class TemplateModel(BaseModel):
    """Template model: learns a direct mapping data_A -> data_B with a
    weighted L1 regression loss, using the 'aligned' paired dataset mode."""

    # NOTE(review): this hook is conventionally a @staticmethod in this
    # framework; the decorator may have been stripped -- confirm upstream.
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific CLI options; returns the modified parser."""
        parser.set_defaults(dataset_mode='aligned')
        if is_train:
            parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')
        return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        # names consumed by the framework for logging, display and saving
        self.loss_names = ['loss_G']
        self.visual_names = ['data_A', 'data_B', 'output']
        self.model_names = ['G']
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
        if self.isTrain:
            # training-only objects: loss criterion and optimizer
            self.criterionLoss = torch.nn.L1Loss()
            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer]

    def set_input(self, input):
        """Unpack a dataloader batch; opt.direction decides which side is input."""
        AtoB = (self.opt.direction == 'AtoB')
        self.data_A = input[('A' if AtoB else 'B')].to(self.device)
        self.data_B = input[('B' if AtoB else 'A')].to(self.device)
        self.image_paths = input[('A_paths' if AtoB else 'B_paths')]

    def forward(self):
        self.output = self.netG(self.data_A)

    def backward(self):
        # weighted L1 between prediction and target, then backprop
        self.loss_G = (self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression)
        self.loss_G.backward()

    def optimize_parameters(self):
        """One optimization step: forward, zero grads, backward, update."""
        self.forward()
        self.optimizer.zero_grad()
        self.backward()
        self.optimizer.step()
class Tfidf(TransformBase):
    """TF-IDF text transform wrapping sklearn's TfidfVectorizer.

    Constructor kwargs are forwarded verbatim to TfidfVectorizer.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.tfidf = sklearn.feature_extraction.text.TfidfVectorizer(**kwargs)

    def fit(self, x: Text, **kwargs):
        """Fit the vectorizer on x.values; returns self for chaining."""
        self.tfidf.fit(x.values)
        return self

    def transform(self, x: Text):
        """Transform x.values into a sparse TF-IDF matrix."""
        assert (self.tfidf is not None), 'The TFIDF model is not trained.'
        return self.tfidf.transform(x.values)

    def invert(self, x):
        """TF-IDF is lossy; inversion is unsupported."""
        raise RuntimeError("The TFIDF transformer doesn't support `invert`.")

    def get_feature_names(self):
        """Return the learned vocabulary terms.

        Fix: TfidfVectorizer.get_feature_names was deprecated in sklearn 1.0
        and removed in 1.2 in favor of get_feature_names_out; prefer the new
        API when available and fall back to the legacy one on old versions.
        """
        if hasattr(self.tfidf, 'get_feature_names_out'):
            return self.tfidf.get_feature_names_out()
        return self.tfidf.get_feature_names()
def test_float_input_holes():
    """remove_small_holes must reject float-valued images with a TypeError."""
    float_image = np.random.rand(5, 5)
    with testing.raises(TypeError):
        remove_small_holes(float_image)
def main():
    """Detect objects in a video with Scanner and write an annotated MP4.

    Usage: ``script path/to/video.mp4``. Runs an SSD-MobileNet detector over
    the video, post-processes the boxes (NMS + temporal smoothing), draws
    them on the frames, and saves ``<movie_name>_obj_detect.mp4``.
    """
    if (len(sys.argv) <= 1):
        print('Usage: {:s} path/to/your/video/file.mp4'.format(sys.argv[0]))
        sys.exit(1)
    movie_path = sys.argv[1]
    print('Detecting objects in movie {}'.format(movie_path))
    movie_name = os.path.splitext(os.path.basename(movie_path))[0]
    sc = sp.Client()
    # Process every frame; raise to subsample for speed.
    stride = 1
    input_stream = sp.NamedVideoStream(sc, movie_name, path=movie_path)
    frame = sc.io.Input([input_stream])
    strided_frame = sc.streams.Stride(frame, [stride])
    model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
    model_url = MODEL_TEMPLATE_URL.format(model_name)
    # First pass: run the detector (GPU when available) over the frames.
    objdet_frame = sc.ops.ObjDetect(frame=strided_frame, dnn_url=model_url, device=(sp.DeviceType.GPU if sc.has_gpu() else sp.DeviceType.CPU), batch=2)
    detect_stream = sp.NamedVideoStream(sc, (movie_name + '_detect'))
    output_op = sc.io.Output(objdet_frame, [detect_stream])
    sc.run(output_op, sp.PerfParams.estimate(), cache_mode=sp.CacheMode.Overwrite)
    print('Extracting data from Scanner output...')
    bundled_data_list = list(tqdm(detect_stream.load()))
    print('Successfully extracted data from Scanner output!')
    # Post-process detections: non-maximum suppression then temporal smoothing.
    bundled_np_list = kernels.nms_bulk(bundled_data_list)
    bundled_np_list = kernels.smooth_box(bundled_np_list, min_score_thresh=0.5)
    print('Writing frames to {:s}_obj_detect.mp4'.format(movie_name))
    # Second pass: draw the (smoothed) boxes back onto the frames.
    frame = sc.io.Input([input_stream])
    bundled_data = sc.io.Input([PythonStream(bundled_np_list)])
    strided_frame = sc.streams.Stride(frame, [stride])
    drawn_frame = sc.ops.TFDrawBoxes(frame=strided_frame, bundled_data=bundled_data, min_score_thresh=0.5)
    drawn_stream = sp.NamedVideoStream(sc, (movie_name + '_drawn_frames'))
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op, sp.PerfParams.estimate(), cache_mode=sp.CacheMode.Overwrite)
    drawn_stream.save_mp4((movie_name + '_obj_detect'))
    # Clean up the intermediate named streams stored by Scanner.
    input_stream.delete(sc)
    detect_stream.delete(sc)
    drawn_stream.delete(sc)
    print('Successfully generated {:s}_obj_detect.mp4'.format(movie_name))
def run_demo(cfg, frame_provider):
    """Drive the action-recognition demo, yielding finished visualization tasks.

    Feeds frame tasks from *frame_provider* into a synchronous or async
    predictor (depending on ``cfg.NUM_GPUS``) and yields completed tasks as
    they become available, draining any backlog at the end of the stream.

    Args:
        cfg: SlowFast-style config node.
        frame_provider: iterable yielding ``(able_to_read, task)`` pairs.

    Yields:
        Completed prediction/visualization task objects.
    """
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    logging.setup_logging(cfg.OUTPUT_DIR)
    logger.info('Run demo with config:')
    logger.info(cfg)
    common_classes = (cfg.DEMO.COMMON_CLASS_NAMES if (len(cfg.DEMO.LABEL_FILE_PATH) != 0) else None)
    video_vis = VideoVisualizer(num_classes=cfg.MODEL.NUM_CLASSES, class_names_path=cfg.DEMO.LABEL_FILE_PATH, top_k=cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS, thres=cfg.DEMO.COMMON_CLASS_THRES, lower_thres=cfg.DEMO.UNCOMMON_CLASS_THRES, common_class_names=common_classes, colormap=cfg.TENSORBOARD.MODEL_VIS.COLORMAP, mode=cfg.DEMO.VIS_MODE)
    async_vis = AsyncVis(video_vis, n_workers=cfg.DEMO.NUM_VIS_INSTANCES)
    if (cfg.NUM_GPUS <= 1):
        model = ActionPredictor(cfg=cfg, async_vis=async_vis)
    else:
        model = AsyncDemo(cfg=cfg, async_vis=async_vis)
    seq_len = (cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE)
    assert (cfg.DEMO.BUFFER_SIZE <= (seq_len // 2)), 'Buffer size cannot be greater than half of sequence length.'
    # num_task tracks tasks submitted but not yet retrieved.
    num_task = 0
    frame_provider.start()
    for (able_to_read, task) in frame_provider:
        if (not able_to_read):
            break
        if (task is None):
            # Provider has no full task buffered yet; back off briefly.
            time.sleep(0.02)
            continue
        num_task += 1
        model.put(task)
        try:
            # IndexError signals "no finished task available yet".
            task = model.get()
            num_task -= 1
            (yield task)
        except IndexError:
            continue
    # Drain remaining in-flight tasks after the input stream ends.
    while (num_task != 0):
        try:
            task = model.get()
            num_task -= 1
            (yield task)
        except IndexError:
            continue
class TreeVisitor(object):
    """Base class for visitors over the Cython AST.

    Handlers are resolved as ``visit_<ClassName>`` along the node type's MRO
    and cached per concrete type in ``dispatch_table``. ``access_path`` keeps
    a stack of (parent, attribute, index) entries so that a crash can report
    exactly where in the tree it happened.
    """

    def __init__(self):
        super(TreeVisitor, self).__init__()
        # type -> bound handler method; filled lazily by _visit().
        self.dispatch_table = {}
        # Stack of (parent_node, attr_name, index) for error reporting.
        self.access_path = []

    def dump_node(self, node):
        """Return a one-node debug dump: class name, position, printable attrs."""
        # Child attributes and known noise fields are excluded from the dump.
        ignored = (list((node.child_attrs or [])) + [u'child_attrs', u'pos', u'gil_message', u'cpp_message', u'subexprs'])
        values = []
        pos = getattr(node, 'pos', None)
        if pos:
            source = pos[0]
            if source:
                import os.path
                source = os.path.basename(source.get_description())
            values.append((u'%s:%s:%s' % (source, pos[1], pos[2])))
        attribute_names = dir(node)
        for attr in attribute_names:
            if (attr in ignored):
                continue
            if (attr.startswith('_') or attr.endswith('_')):
                # Skip internal/dunder-ish attributes.
                continue
            try:
                value = getattr(node, attr)
            except AttributeError:
                continue
            if ((value is None) or (value == 0)):
                continue
            elif isinstance(value, list):
                # Summarize lists by length only.
                value = (u'[...]/%d' % len(value))
            elif (not isinstance(value, _PRINTABLE)):
                continue
            else:
                value = repr(value)
            values.append((u'%s = %s' % (attr, value)))
        return (u'%s(%s)' % (node.__class__.__name__, u',\n '.join(values)))

    def _find_node_path(self, stacktrace):
        """Walk a traceback collecting (node, method, position) for visitor frames."""
        import os.path
        last_traceback = stacktrace
        nodes = []
        while hasattr(stacktrace, 'tb_frame'):
            frame = stacktrace.tb_frame
            node = frame.f_locals.get(u'self')
            if isinstance(node, Nodes.Node):
                code = frame.f_code
                method_name = code.co_name
                pos = (os.path.basename(code.co_filename), frame.f_lineno)
                nodes.append((node, method_name, pos))
                last_traceback = stacktrace
            stacktrace = stacktrace.tb_next
        return (last_traceback, nodes)

    def _raise_compiler_error(self, child, e):
        """Wrap exception *e* in a CompilerCrash annotated with the access path."""
        trace = ['']
        for (parent, attribute, index) in self.access_path:
            node = getattr(parent, attribute)
            if (index is None):
                index = ''
            else:
                node = node[index]
                index = (u'[%d]' % index)
            trace.append((u'%s.%s%s = %s' % (parent.__class__.__name__, attribute, index, self.dump_node(node))))
        (stacktrace, called_nodes) = self._find_node_path(sys.exc_info()[2])
        last_node = child
        for (node, method_name, pos) in called_nodes:
            last_node = node
            trace.append((u"File '%s', line %d, in %s: %s" % (pos[0], pos[1], method_name, self.dump_node(node))))
        raise Errors.CompilerCrash(getattr(last_node, 'pos', None), self.__class__.__name__, u'\n'.join(trace), e, stacktrace)

    def find_handler(self, obj):
        """Resolve the visit_<ClassName> handler for *obj* along its MRO."""
        cls = type(obj)
        pattern = 'visit_%s'
        mro = inspect.getmro(cls)
        for mro_cls in mro:
            handler_method = getattr(self, (pattern % mro_cls.__name__), None)
            if (handler_method is not None):
                return handler_method
        # No handler anywhere in the MRO: dump context before failing hard.
        print(type(self), cls)
        if self.access_path:
            print(self.access_path)
            print(self.access_path[(- 1)][0].pos)
            print(self.access_path[(- 1)][0].__dict__)
        raise RuntimeError(('Visitor %r does not accept object: %s' % (self, obj)))

    def visit(self, obj):
        """Public entry point: visit a single node."""
        return self._visit(obj)

    def _visit(self, obj):
        """Dispatch *obj* to its handler, converting crashes to CompilerCrash."""
        try:
            try:
                # Fast path: cached handler for this concrete node type.
                handler_method = self.dispatch_table[type(obj)]
            except KeyError:
                handler_method = self.find_handler(obj)
                self.dispatch_table[type(obj)] = handler_method
            return handler_method(obj)
        except Errors.CompileError:
            raise
        except Errors.AbortError:
            raise
        except Exception as e:
            if DebugFlags.debug_no_exception_intercept:
                raise
            self._raise_compiler_error(obj, e)

    def _visitchild(self, child, parent, attrname, idx):
        # Record where we are in the tree while visiting, for crash reports.
        self.access_path.append((parent, attrname, idx))
        result = self._visit(child)
        self.access_path.pop()
        return result

    def visitchildren(self, parent, attrs=None):
        """Visit all (or only the named) child attributes of *parent*."""
        return self._visitchildren(parent, attrs)

    # NOTE(review): the line below appears to be a mangled
    # ``@cython.locals(idx=cython.Py_ssize_t)`` decorator from the original
    # Cython source — as written it is not valid Python; confirm upstream.
    (idx=cython.Py_ssize_t)
    def _visitchildren(self, parent, attrs):
        """Visit children of *parent*; return {attr: visit result}."""
        if (parent is None):
            return None
        result = {}
        for attr in parent.child_attrs:
            if ((attrs is not None) and (attr not in attrs)):
                continue
            child = getattr(parent, attr)
            if (child is not None):
                if (type(child) is list):
                    childretval = [self._visitchild(x, parent, attr, idx) for (idx, x) in enumerate(child)]
                else:
                    childretval = self._visitchild(child, parent, attr, None)
                    assert (not isinstance(childretval, list)), ('Cannot insert list here: %s in %r' % (attr, parent))
                result[attr] = childretval
        return result
_context()
# NOTE(review): the line above looks like a mangled decorator (caffe2 uses a
# ``@context.define_context(...)`` style decorator on Task) — confirm upstream.
class Task(object):
    """A unit of execution: a step run on a node, optionally within a TaskGroup."""

    # Attribute keys recognized on execution steps for setup/report nets.
    TASK_SETUP = 'task_setup'
    TASK_INSTANCE_SETUP = 'task_instance_setup'
    REPORT_STEP = 'report_step'
    # Names already used by tasks created outside of any TaskGroup.
    _global_names_used = set()

    # NOTE(review): defined without `self`; it is only ever invoked as
    # ``Task._get_next_name(...)`` so it behaves like a static method — the
    # @staticmethod decorator may have been stripped from this extract.
    def _get_next_name(node, group, name):
        """Return '<node>/<name>', made unique with a ':N' suffix if needed."""
        basename = ((str(node) + '/') + str(name))
        names_used = (Task._global_names_used if (group is None) else set((t.name for t in group._tasks_to_add)))
        cur_name = basename
        i = 0
        while (cur_name in names_used):
            i += 1
            cur_name = ('%s:%d' % (basename, i))
        return cur_name

    def __init__(self, step=None, outputs=None, workspace_type=None, group=None, node=None, name=None, num_instances=None):
        """Create a Task and register it with the active TaskGroup/Node context.

        Args:
            step: core.ExecutionStep (or convertible) the task will run.
            outputs: blob(s) to fetch once the task finishes.
            workspace_type: workspace policy reported via workspace_type().
            group: explicit TaskGroup; defaults to the current context.
            node: node name to run on; defaults to the current Node context.
            name: task name; derived from the step, else 'task'.
            num_instances: run this many concurrent copies of the step.
        """
        if ((not name) and isinstance(step, core.ExecutionStep)):
            name = step.Proto().name
        if (not name):
            name = 'task'
        self.node = str(Node.current((None if (node is None) else Node(node))))
        self.group = TaskGroup.current(group, required=False)
        self.name = Task._get_next_name(self.node, self.group, name)
        if (self.group is not None):
            self.group._tasks_to_add.append(self)
        self._already_used = False
        self._step = None
        self._step_with_setup = None
        self._outputs = []
        if (step is not None):
            self.set_step(step)
        if (outputs is not None):
            self.add_outputs(outputs)
        self._pipeline = None
        self._is_pipeline_context = False
        self._workspace_type = workspace_type
        self._report_net = None
        self._num_instances = num_instances

    def __enter__(self):
        """Begin defining the task's step inline through a NetBuilder."""
        # Detach from the group while building; re-added on successful exit.
        if (self.group is not None):
            self.group._tasks_to_add.remove(self)
        self._assert_not_used()
        assert (self._step is None), 'This Task already has an execution step.'
        from caffe2.python import net_builder
        self._net_builder = net_builder.NetBuilder(_fullname=self.name)
        self._net_builder.__enter__()
        return self

    def __exit__(self, type, value, traceback):
        self._net_builder.__exit__(type, value, traceback)
        if (type is None):
            # Only adopt the built step when no exception escaped the block.
            self.set_step(self._net_builder)
            if (self.group is not None):
                self.group._tasks_to_add.append(self)
        self._net_builder = None

    def workspace_type(self):
        return self._workspace_type

    def _assert_not_used(self):
        assert (not self._already_used), 'Cannot modify task since it is already been used.'

    def add_output(self, output):
        """Register one blob (wrapped as TaskOutput) to fetch after the run."""
        self._assert_not_used()
        output = (output if isinstance(output, TaskOutput) else TaskOutput(output))
        self._outputs.append(output)
        return output

    def add_outputs(self, outputs):
        """Register a single blob or a list/tuple of blobs as task outputs."""
        self._assert_not_used()
        if (type(outputs) not in (list, tuple)):
            return self.add_output(outputs)
        else:
            return [self.add_output(output) for output in outputs]

    def set_step(self, step):
        self._assert_not_used()
        self._step = core.to_execution_step(step)

    def get_step(self):
        """Return the task's step wrapped with setup/exit/report nets (cached)."""
        if (self._step_with_setup is not None):
            return self._step_with_setup
        if (self._step is None):
            # Task without a step: produce an empty placeholder step.
            self._step_with_setup = core.execution_step(self.name, [])
            return self._step_with_setup
        # Report steps are run periodically alongside the body (default 1s).
        report_steps = [s for s in self._step.get_all_attributes(Task.REPORT_STEP) if (not hasattr(s, '_report_step_used'))]
        for step in report_steps:
            step._report_step_used = True
            if (not step.Proto().run_every_ms):
                step.RunEveryMillis(1000)
        (task_init_nets, task_exit_nets) = get_setup_nets(Task.TASK_SETUP, ([self._step] + report_steps), self)
        (instance_init_nets, instance_exit_nets) = get_setup_nets(Task.TASK_INSTANCE_SETUP, ([self._step] + report_steps), self)
        if (len(self._outputs) == 0):
            # Every task must expose at least one output; emit a dummy int.
            output_net = core.Net(('%s:output' % self.name))
            self.add_output(output_net.ConstantFill([], 1, dtype=core.DataType.INT32, value=0))
            task_exit_nets.append(output_net)
        body = (self._step if (not report_steps) else core.execution_step(('%s:body' % self.name), (report_steps + [self._step])))
        step_with_instance_setup = add_setup_steps(body, instance_init_nets, instance_exit_nets, (self.name + ':instance'))
        if (self._num_instances and (self._num_instances > 1)):
            step_with_instance_setup.SetCreateWorkspace(True)
            # NOTE(review): '%s:parallel' contains an unfilled %s placeholder —
            # likely missing ``% self.name``; confirm against upstream caffe2.
            step_with_instance_setup = core.execution_step('%s:parallel', [step_with_instance_setup], num_concurrent_instances=self._num_instances)
        self._step_with_setup = add_setup_steps(step_with_instance_setup, task_init_nets, task_exit_nets, self.name)
        return self._step_with_setup

    def output_list(self):
        """Return the outputs wrapped in a TaskOutputList."""
        return TaskOutputList(self._outputs)

    def outputs(self):
        return self._outputs

    def _notify_used(self):
        # Building the final step freezes the task against further edits.
        self.get_step()
        self._already_used = True

    def __repr__(self):
        return 'Task(name={}, node={}, outputs={})'.format(self.name, self.node, self.outputs())
def log_likelihood(covariance, precision):
    """Return the (scaled) Gaussian log-likelihood of *precision* given *covariance*.

    Computes (logdet(P) - tr(C @ P) - d*log(2*pi)) / 2, where C is the sample
    covariance and P the estimated precision matrix; both must share a shape.
    """
    assert covariance.shape == precision.shape
    dim, _ = precision.shape
    # tr(C @ P) equals the elementwise-product sum for symmetric matrices.
    trace_term = np.sum(covariance * precision)
    value = -trace_term + fast_logdet(precision) - dim * np.log(2 * np.pi)
    return value / 2.0
def _get_bases_name(m: nn.Module) -> List[str]:
return [b.__name__ for b in m.__class__.__bases__] |
class TestResult(SnipsTest):
    """Tests for the NLU parsing-result dict builders."""

    def test_should_serialize_results(self):
        """parsing_result output must be JSON-serializable and match the schema."""
        input_ = 'hello world'
        intent = intent_classification_result('world', 0.5)
        # One slot covering characters [3, 5) of the input.
        slots = [unresolved_slot([3, 5], 'slot_value', 'slot_entity', 'slot_name')]
        result = parsing_result(input=input_, intent=intent, slots=slots)
        msg = 'Result dict should be json serializable'
        with self.fail_if_exception(msg):
            json.dumps(result)
        expected_result = {RES_INTENT: {RES_INTENT_NAME: 'world', RES_PROBA: 0.5}, RES_SLOTS: [{RES_MATCH_RANGE: {'start': 3, 'end': 5}, RES_ENTITY: 'slot_entity', RES_SLOT_NAME: 'slot_name', RES_VALUE: 'slot_value'}], RES_INPUT: input_}
        self.assertDictEqual(expected_result, result)
def unpack(path, dest='.'):
    """Unpack a wheel file into ``dest/<name>-<version>/``.

    Args:
        path: Path to the ``.whl`` file.
        dest: Directory in which to create the unpacked tree (default: cwd).
    """
    with WheelFile(path) as wf:
        namever = wf.parsed_filename.group('namever')
        destination = os.path.join(dest, namever)
        # flush=True replaces the separate sys.stdout.flush() call so the
        # progress message is visible before the (possibly slow) extraction.
        print('Unpacking to: {}...'.format(destination), end='', flush=True)
        wf.extractall(destination)
        print('OK')
class NoneOf(object):
    """WTForms-style validator: the field value must NOT be one of *values*.

    Attributes:
        values: Collection of forbidden values.
        message: Optional error template; may contain ``%(values)s``.
        values_formatter: Callable rendering *values* for the message.
    """

    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if (values_formatter is None):
            values_formatter = self.default_values_formatter
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        """Raise ValidationError when the field's data is in self.values."""
        if (field.data in self.values):
            message = self.message
            if (message is None):
                # Translated via the field's gettext hook.
                message = field.gettext("Invalid value, can't be any of: %(values)s.")
            raise ValidationError((message % dict(values=self.values_formatter(self.values))))

    # NOTE(review): upstream WTForms declares this as a @staticmethod; without
    # the decorator, ``self.default_values_formatter`` binds the instance to
    # `v`, so calling it with an argument raises TypeError. The decorator was
    # probably stripped from this extract — confirm against the original file.
    def default_values_formatter(v):
        return ', '.join((text_type(x) for x in v))
(frozen=True)
# NOTE(review): the line above is almost certainly a mangled
# ``@dataclasses.dataclass(frozen=True)`` decorator, and ``from_batch`` below
# is missing its @classmethod decorator — both look stripped from this
# extract; confirm against the original d3rlpy source.
class TorchMiniBatch():
    """Immutable container holding one transition mini-batch as torch tensors."""

    observations: TorchObservation
    actions: torch.Tensor
    rewards: torch.Tensor
    next_observations: TorchObservation
    returns_to_go: torch.Tensor
    terminals: torch.Tensor
    intervals: torch.Tensor
    device: str
    # Original numpy batch retained for reference/debugging.
    numpy_batch: Optional[TransitionMiniBatch] = None

    def from_batch(cls, batch: TransitionMiniBatch, device: str, observation_scaler: Optional[ObservationScaler]=None, action_scaler: Optional[ActionScaler]=None, reward_scaler: Optional[RewardScaler]=None) -> 'TorchMiniBatch':
        """Convert a numpy TransitionMiniBatch to tensors on *device*, applying optional scalers."""
        observations = convert_to_torch_recursively(batch.observations, device)
        actions = convert_to_torch(batch.actions, device)
        rewards = convert_to_torch(batch.rewards, device)
        next_observations = convert_to_torch_recursively(batch.next_observations, device)
        returns_to_go = convert_to_torch(batch.returns_to_go, device)
        terminals = convert_to_torch(batch.terminals, device)
        intervals = convert_to_torch(batch.intervals, device)
        # Scalers are applied consistently to current and next quantities.
        if observation_scaler:
            observations = observation_scaler.transform(observations)
            next_observations = observation_scaler.transform(next_observations)
        if action_scaler:
            actions = action_scaler.transform(actions)
        if reward_scaler:
            rewards = reward_scaler.transform(rewards)
            returns_to_go = reward_scaler.transform(returns_to_go)
        return TorchMiniBatch(observations=observations, actions=actions, rewards=rewards, next_observations=next_observations, returns_to_go=returns_to_go, terminals=terminals, intervals=intervals, device=device, numpy_batch=batch)
def conv_nd(dims, *args, **kwargs):
    """Build an ``nn.Conv{1,2,3}d`` module for the given dimensionality.

    Positional and keyword arguments are forwarded to the conv constructor.

    Raises:
        ValueError: if *dims* is not 1, 2 or 3.
    """
    for ndim, conv_cls in ((1, nn.Conv1d), (2, nn.Conv2d), (3, nn.Conv3d)):
        if dims == ndim:
            return conv_cls(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')
def imageList(path, multiDir=False, imageExtension=('*.jpg', '*.png', '*.jpeg', '*.tif', '*.bmp')):
    """Collect image file paths under *path* matching the given glob patterns.

    Args:
        path: Directory prefix to search; expected to end with a path separator.
        multiDir: When True, search one level of subdirectories (``path*/``)
            instead of *path* itself.
        imageExtension: Glob patterns for the image extensions to match.

    Returns:
        List of matching file paths, grouped per extension in pattern order.

    Fixes over the original: the mutable default list is now an immutable
    tuple, a no-op ``imageList`` expression statement was removed, and the
    local result no longer shadows the function name.
    """
    found_images = []
    for ext in imageExtension:
        pattern = (path + '*/' + ext) if multiDir else (path + ext)
        found_images.extend(glob.glob(pattern))
    return found_images
def find_parent_directory_containing_directory(base: Path, target: str) -> Optional[Path]:
    """Walk upward from *base*, returning the first ancestor that contains a directory named *target* (None if absent)."""
    def directory_exists(candidate: pathlib.Path) -> bool:
        # Accept only candidates that exist and are directories.
        return candidate.is_dir()
    return _find_parent_directory_containing(base, target, predicate=directory_exists)
class InternalError(ExecutionEvent):
    """Terminal event describing an unexpected error inside the test engine.

    NOTE(review): ``from_schema_error``/``from_exc``/``with_exception`` are
    alternate constructors; their @classmethod decorators (and most likely a
    @dataclass decorator on the class) appear stripped from this extract.
    """

    # Once this event is emitted, the run stops.
    is_terminal = True
    # Broad category of the failure.
    type: InternalErrorType
    # Narrower schema-error category, when applicable.
    subtype: (SchemaErrorType | None)
    title: str
    message: str
    extras: list[str]
    # Fully-qualified exception class name plus formatted renderings.
    exception_type: str
    exception: str
    exception_with_traceback: str
    # Captured at construction time so events can be attributed to a worker.
    thread_id: int = field(default_factory=threading.get_ident)

    def from_schema_error(cls, error: SchemaError) -> InternalError:
        """Build an InternalError from a schema-loading failure."""
        return cls.with_exception(error, type_=InternalErrorType.SCHEMA, subtype=error.type, title='Schema Loading Error', message=error.message, extras=error.extras)

    def from_exc(cls, exc: Exception) -> InternalError:
        """Build an InternalError from an arbitrary exception during the run."""
        return cls.with_exception(exc, type_=InternalErrorType.OTHER, subtype=None, title='Test Execution Error', message='An internal error occurred during the test run', extras=[])

    def with_exception(cls, exc: Exception, type_: InternalErrorType, subtype: (SchemaErrorType | None), title: str, message: str, extras: list[str]) -> InternalError:
        """Shared constructor: capture the exception type, message and traceback."""
        exception_type = f'{exc.__class__.__module__}.{exc.__class__.__qualname__}'
        exception = format_exception(exc)
        exception_with_traceback = format_exception(exc, include_traceback=True)
        return cls(type=type_, subtype=subtype, title=title, message=message, extras=extras, exception_type=exception_type, exception=exception, exception_with_traceback=exception_with_traceback)
def hook_adapavgpool1d(m, x, y):
    """Estimate FLOPs for one nn.AdaptiveAvgPool1d forward pass.

    Args:
        m: the pooling module (provides ``output_size``).
        x: tuple of module inputs; x[0] is the input tensor.
        y: the output tensor.

    Returns:
        int: approximate FLOP count (kernel width per output element).
    """
    inp = x[0]
    target_len = m.output_size
    # Effective kernel width of the adaptive pooling window.
    kernel = math.ceil(inp.size(2) / target_len)
    total = kernel * y.numel()
    return int(total)
class HeckeModule_free_module(HeckeModule_generic):
    """Base class for Hecke modules that are free modules of finite rank.

    Provides decomposition into (anemic-)Hecke-stable factors, dual
    eigenvector and Hecke-eigenvalue computation, Atkin-Lehner and
    diamond-bracket operators, plus assorted cached accessors.
    """

    def __init__(self, base_ring, level, weight, category=None):
        """Create a free Hecke module of the given base ring, level and weight."""
        HeckeModule_generic.__init__(self, base_ring, level, category=category)
        self.__weight = weight

    def _repr_(self):
        # Generic fallback; concrete subclasses override with something useful.
        return repr(type(self))

    def __getitem__(self, n):
        """Return the n-th factor of the decomposition of self."""
        n = int(n)
        D = self.decomposition()
        if ((n < 0) or (n >= len(D))):
            raise IndexError(('index (=%s) must be between 0 and %s' % (n, (len(D) - 1))))
        return D[n]

    def __hash__(self):
        return hash((self.__weight, self.level(), self.base_ring()))

    def __len__(self):
        """Number of factors in the decomposition of self."""
        return len(self.decomposition())

    def _eigen_nonzero(self):
        """Return (cached) the smallest index i where some dual basis vector is nonzero."""
        try:
            return self.__eigen_nonzero
        except AttributeError:
            pass
        V = self.dual_free_module()
        B = V.basis()
        for i in range(V.degree()):
            for b in B:
                if (b[i] != 0):
                    self.__eigen_nonzero = i
                    return i
        # A basis of a nonzero module must have a nonzero coordinate somewhere.
        assert False, 'bug in _eigen_nonzero'

    def _eigen_nonzero_element(self, n=1):
        """Return T_n applied to the _eigen_nonzero()-th ambient basis vector."""
        if (self.rank() == 0):
            raise ArithmeticError('the rank of self must be positive')
        A = self.ambient_hecke_module()
        i = self._eigen_nonzero()
        return A._hecke_image_of_ith_basis_vector(n, i)

    def _hecke_image_of_ith_basis_vector(self, n, i):
        """Return T_n(gen(i)) using sparse application of the Hecke operator."""
        T = self.hecke_operator(n)
        return T.apply_sparse(self.gen(i))

    def _element_eigenvalue(self, x, name='alpha'):
        """Return the eigenvalue pairing <dual eigenvector, x> for x in the ambient module."""
        if (not element.is_HeckeModuleElement(x)):
            raise TypeError('x must be a Hecke module element.')
        if (x not in self.ambient_hecke_module()):
            raise ArithmeticError('x must be in the ambient Hecke module.')
        v = self.dual_eigenvector(names=name)
        return v.dot_product(x.element())

    def _is_hecke_equivariant_free_module(self, submodule):
        """Check whether *submodule* is stable under T_p for all primes p up to the Hecke bound."""
        verbose('Determining if free module is Hecke equivariant.')
        bound = self.hecke_bound()
        for p in primes((bound + 1)):
            try:
                # restrict(..., check=True) raises if T_p does not preserve it.
                self.T(p).matrix().restrict(submodule, check=True)
            except ArithmeticError:
                return False
        return True

    def _set_factor_number(self, i):
        # Records this module's index within its parent's decomposition.
        self.__factor_number = i

    def ambient(self):
        """Alias for ambient_hecke_module()."""
        return self.ambient_hecke_module()

    def ambient_module(self):
        """Alias for ambient_hecke_module()."""
        return self.ambient_hecke_module()

    def ambient_hecke_module(self):
        # Concrete subclasses must provide the ambient module.
        raise NotImplementedError

    def atkin_lehner_operator(self, d=None):
        """Return the Atkin-Lehner operator W_d (d defaults to the level, and is
        normalized so that ord_p(d) matches ord_p(N) for each p | d); cached."""
        if (d is None):
            d = self.level()
        d = int(d)
        if (self.level() % d):
            raise ArithmeticError(('d (=%s) must be a divisor of the level (=%s)' % (d, self.level())))
        N = self.level()
        for (p, e) in factor(d):
            v = valuation(N, p)
            if (e < v):
                # Bump d so its p-part equals the full p-part of the level.
                d *= (p ** (v - e))
        d = int(d)
        try:
            return self.__atkin_lehner_operator[d]
        except AttributeError:
            self.__atkin_lehner_operator = {}
        except KeyError:
            pass
        Wmat = self._compute_atkin_lehner_matrix(d)
        H = self.endomorphism_ring()
        W = H(Wmat, ('Atkin-Lehner operator W_%s' % d))
        self.__atkin_lehner_operator[d] = W
        return W

    def basis(self):
        """Return (cached) the tuple of generators as a basis."""
        try:
            return self.__basis
        except AttributeError:
            self.__basis = self.gens()
        return self.__basis

    def basis_matrix(self):
        """Return the basis matrix of the underlying free module."""
        return self.free_module().basis_matrix()

    def coordinate_vector(self, x):
        """Coordinates of x with respect to the basis of the free module."""
        return self.free_module().coordinate_vector(x.element())

    def decomposition(self, bound=None, anemic=True, height_guess=1, sort_by_basis=False, proof=None):
        """Decompose self into Hecke-stable factors using T_p for primes p <= bound.

        Results are cached per (bound, anemic) key. With ``anemic=True`` only
        primes coprime to the level are used. Factors flagged irreducible by
        ``decomposition_of_subspace`` are emitted directly; the remainder is
        split further with the next prime, and anything left at the end is
        appended as-is.
        """
        if (not isinstance(anemic, bool)):
            raise TypeError('anemic must be of type bool.')
        key = (bound, anemic)
        try:
            if (self.__decomposition[key] is not None):
                return self.__decomposition[key]
        except AttributeError:
            self.__decomposition = {}
        except KeyError:
            pass
        if (self.rank() == 0):
            self.__decomposition[key] = Sequence([], immutable=True, cr=True)
            return self.__decomposition[key]
        is_rational = (self.base_ring() == QQ)
        time = verbose(('Decomposing %s' % self))
        T = self.ambient_hecke_module().hecke_algebra()
        if (bound is None):
            bound = self.ambient_hecke_module().hecke_bound()
        D = Sequence([], cr=True)
        # U holds the not-yet-irreducible pieces still being split.
        U = [self.free_module()]
        p = 2
        while (U and (p <= bound)):
            verbose(mesg=('p=%s' % p), t=time)
            if anemic:
                # Skip primes dividing the level in the anemic algebra.
                while (GCD(p, self.level()) != 1):
                    p = next_prime(p)
            verbose(('Decomposition using p=%s' % p))
            t = T.hecke_operator(p).matrix()
            Uprime = []
            for i in range(len(U)):
                # T_p is diagonalizable in characteristic 0 when p does not
                # divide the level.
                is_diagonalizable = ((not self.base_ring().characteristic()) and (self.level() % p))
                if is_rational:
                    X = t.decomposition_of_subspace(U[i], check_restrict=False, algorithm='multimodular', height_guess=height_guess, proof=proof)
                else:
                    X = t.decomposition_of_subspace(U[i], check_restrict=False, is_diagonalizable=is_diagonalizable)
                for Xi in X:
                    (W, is_irred) = Xi
                    if is_irred:
                        A = self.submodule(W, check=False)
                        D.append(A)
                    else:
                        Uprime.append(W)
            p = next_prime(p)
            U = Uprime
        # Whatever could not be split within the bound is kept as a factor.
        for i in range(len(U)):
            A = self.submodule(U[i], check=False)
            D.append(A)
        for A in D:
            if anemic:
                A.__is_splittable_anemic = False
                A.__is_splittable = False
            else:
                A.__is_splittable = False
        self.__is_splittable = (len(D) > 1)
        if anemic:
            self.__is_splittable_anemic = (len(D) > 1)
        from sage.modules.free_module import EchelonMatrixKey
        D.sort(key=(None if (not sort_by_basis) else (lambda ss: EchelonMatrixKey(ss.free_module()))))
        D.set_immutable()
        self.__decomposition[key] = D
        for i in range(len(D)):
            self.__decomposition[key][i]._set_factor_number(i)
        return self.__decomposition[key]

    def degree(self):
        """Degree of the underlying free module (ambient dimension)."""
        return self.free_module().degree()

    def dual_eigenvector(self, names='alpha', lift=True, nz=None):
        """Return a dual eigenvector for the Hecke action on this simple module.

        Builds a generator of the 1-dimensional dual eigenspace over the field
        obtained by adjoining a root of an irreducible Hecke charpoly, then
        normalizes it so the pairing with a distinguished nonzero element is 1.
        With ``lift=True`` (default) the vector is returned in the ambient dual;
        otherwise in charpoly-power coordinates. Cached per (names, nz).
        """
        try:
            (w, w_lift) = self.__dual_eigenvector[(names, nz)]
            if lift:
                return w_lift
            else:
                return w
        except KeyError:
            pass
        except AttributeError:
            self.__dual_eigenvector = {}
        if (not self.is_simple()):
            raise ArithmeticError('self must be simple')
        p = 2
        t = self.dual_hecke_matrix(p)
        while True:
            f = t.charpoly('x')
            if f.is_irreducible():
                break
            # Mix in random multiples of further dual Hecke matrices until the
            # characteristic polynomial becomes irreducible.
            p = next_prime(p)
            t += (random.choice([(- 2), (- 1), 1, 2]) * self.dual_hecke_matrix(p))
        n = f.degree()
        if (n > 1):
            R = f.parent()
            K = R.base_ring().extension(f, names=names)
            alpha = K.gen()
            beta = (~ alpha)
            # c holds the coordinates of the eigenvector in the power basis,
            # derived from the charpoly coefficients.
            c = [((- f[0]) * beta)]
            for i in range(1, (n - 1)):
                c.append(((c[(i - 1)] - f[i]) * beta))
            c.append(K.one())
        else:
            K = self.base_ring()
            c = [1]
        V = FreeModule(K, n)
        t = t.change_ring(K)
        # Find a starting vector whose iterates give a nonzero combination.
        for j in range(n):
            v = V.gen(j)
            I = t.iterates(v, n)
            w = V(0)
            for i in range(n):
                w += (c[i] * V(I.row(i).list()))
            if (w != 0):
                break
        Vdual = self.dual_free_module().change_ring(K)
        w_lift = Vdual.linear_combination_of_basis(w)
        if (nz is not None):
            x = self.ambient().gen(nz)
        else:
            x = self._eigen_nonzero_element()
        # Normalize so that <w_lift, x> = 1.
        alpha = w_lift.dot_product(x.element())
        beta = (~ alpha)
        w_lift = (w_lift * beta)
        w = (w * beta)
        self.__dual_eigenvector[(names, nz)] = (w, w_lift)
        if lift:
            return w_lift
        else:
            return w

    def dual_hecke_matrix(self, n):
        """Return (cached) the matrix of T_n acting on the dual module."""
        n = int(n)
        try:
            self._dual_hecke_matrices
        except AttributeError:
            self._dual_hecke_matrices = {}
        if (n not in self._dual_hecke_matrices):
            T = self._compute_dual_hecke_matrix(n)
            self._dual_hecke_matrices[n] = T
        return self._dual_hecke_matrices[n]

    def eigenvalue(self, n, name='alpha'):
        """Return the T_n eigenvalue of this simple module (cached per (n, name)).

        For n = 1 or prime, computed directly; composite n is assembled
        multiplicatively from prime powers, using the standard Hecke recursion
        a_{p^r} = a_p * a_{p^{r-1}} - eps(p) p^{k-1} a_{p^{r-2}} when a
        character is available.
        """
        if (not self.is_simple()):
            raise ArithmeticError('self must be simple')
        n = int(n)
        try:
            return self.__eigenvalues[n][name]
        except AttributeError:
            self.__eigenvalues = {}
        except KeyError:
            pass
        if (n <= 0):
            raise IndexError('n must be a positive integer')
        ev = self.__eigenvalues
        if ((n == 1) or is_prime(n)):
            Tn_e = self._eigen_nonzero_element(n)
            an = self._element_eigenvalue(Tn_e, name=name)
            _dict_set(ev, n, name, an)
            return an
        F = factor(n)
        prod = None
        for (p, r) in F:
            (p, r) = (int(p), int(r))
            pow = (p ** r)
            if (not ((pow in ev) and (name in ev[pow]))):
                eps = self.character()
                if (eps is None):
                    # No character: compute the prime-power eigenvalue directly.
                    Tn_e = self._eigen_nonzero_element(pow)
                    _dict_set(ev, pow, name, self._element_eigenvalue(Tn_e, name=name))
                else:
                    ap = self.eigenvalue(p, name=name)
                    if (r == 1):
                        apow = ap
                    else:
                        apr1 = self.eigenvalue((pow // p), name=name)
                        k = self.weight()
                        apr2 = self.eigenvalue((pow // (p * p)), name=name)
                        apow = ((ap * apr1) - ((eps(p) * (p ** (k - 1))) * apr2))
                    _dict_set(ev, pow, name, apow)
            if (prod is None):
                prod = ev[pow][name]
            else:
                prod *= ev[pow][name]
        _dict_set(ev, n, name, prod)
        return prod

    def factor_number(self):
        """Index of self inside its parent's decomposition, or -1 if not a factor."""
        try:
            return self.__factor_number
        except AttributeError:
            return (- 1)

    def gens(self):
        """Return the generators: images of the free module's generators in self."""
        return tuple((self(x) for x in self.free_module().gens()))

    def gen(self, n):
        """Return the n-th generator of self."""
        return self(self.free_module().gen(n))

    def hecke_matrix(self, n):
        """Return (cached, immutable) the matrix of the Hecke operator T_n."""
        n = int(n)
        if (n <= 0):
            raise IndexError('n must be positive.')
        if (n not in self._hecke_matrices):
            T = self._compute_hecke_matrix(n)
            T.set_immutable()
            self._hecke_matrices[n] = T
        return self._hecke_matrices[n]

    def hecke_operator(self, n):
        """Return the Hecke operator T_n as an element of the Hecke algebra."""
        return self.hecke_algebra().hecke_operator(n)

    def diamond_bracket_matrix(self, d):
        """Return (cached) the matrix of the diamond bracket operator <d>."""
        d = (int(d) % self.level())
        if (d not in self._diamond_matrices):
            if (self.character() is not None):
                # With a character, <d> acts as the scalar eps(d).
                D = MatrixSpace(self.base_ring(), self.rank())(self.character()(d))
            else:
                D = self._compute_diamond_matrix(d)
            D.set_immutable()
            self._diamond_matrices[d] = D
        return self._diamond_matrices[d]

    def diamond_bracket_operator(self, d):
        """Return the diamond bracket operator <d>."""
        return self.hecke_algebra().diamond_bracket_operator(d)

    def T(self, n):
        """Shorthand for hecke_operator(n)."""
        return self.hecke_operator(n)

    def hecke_polynomial(self, n, var='x'):
        """Characteristic polynomial of T_n in the given variable."""
        return self.hecke_operator(n).charpoly(var)

    def is_simple(self):
        # Concrete subclasses must decide simplicity.
        raise NotImplementedError

    def is_splittable(self):
        """True if self decomposes into more than one factor under the full Hecke algebra."""
        # NOTE(review): hasattr(self, '__is_splittable') checks the UNMANGLED
        # name while the attribute is stored name-mangled, so this test is
        # always False and decomposition() is re-invoked (it is cached
        # internally, so this is a wart, not a crash) — confirm upstream.
        if (not hasattr(self, '__is_splittable')):
            self.decomposition(anemic=False)
        return self.__is_splittable

    def is_submodule(self, other):
        """True if self and other share an ambient free module and self's free module is contained in other's."""
        if (not isinstance(other, HeckeModule_free_module)):
            return False
        return ((self.ambient_free_module() == other.ambient_free_module()) and self.free_module().is_submodule(other.free_module()))

    def is_splittable_anemic(self):
        """True if self splits under the anemic Hecke algebra (T_n with gcd(n, level)=1)."""
        # NOTE(review): same unmangled-hasattr issue as in is_splittable().
        if (not hasattr(self, '__is_splittable_anemic')):
            self.decomposition(anemic=True)
        return self.__is_splittable_anemic

    def ngens(self):
        """Number of generators, i.e. the rank."""
        return self.rank()

    def projection(self):
        """Return (cached) the projection of the ambient module onto this factor."""
        try:
            return self.__projection
        except AttributeError:
            i = self.factor_number()
            if (i == (- 1)):
                raise NotImplementedError('Computation of projection only implemented for decomposition factors.')
            A = self.ambient_hecke_module()
            B = A.decomposition_matrix_inverse()
            i = A.decomposition().index(self)
            # Columns corresponding to this factor start after the ranks of
            # all earlier factors.
            n = sum([A[j].rank() for j in range(i)])
            C = B.matrix_from_columns(range(n, (n + self.rank())))
            H = A.Hom(self)
            # NOTE(review): ``'Projection' % self`` has no %s placeholder and
            # raises TypeError at runtime — likely meant
            # ``'Projection onto %s' % self``; confirm against upstream Sage.
            pi = H(C, ('Projection' % self))
            self.__projection = pi
        return self.__projection

    def system_of_eigenvalues(self, n, name='alpha'):
        """Return [a_1, ..., a_n], the Hecke eigenvalues up to n."""
        return [self.eigenvalue(m, name=name) for m in range(1, (n + 1))]

    def weight(self):
        """Return the weight of this Hecke module."""
        return self.__weight

    def zero_submodule(self):
        """Return the zero submodule of self."""
        return self.submodule(self.free_module().zero_submodule(), check=False)
def query_on_triplane(query, feature, min_, max_, use_ste=False, boundary_check=False, ctx=None):
    """Sample triplane *feature* at *query* locations via Lanczos interpolation.

    Constructs a fresh LanczosQueryOnTriplane functor for the given bounds and
    options, then applies it to (query, feature).
    """
    sampler = LanczosQueryOnTriplane(ctx, min_, max_, use_ste, boundary_check)
    return sampler(query, feature)
def delete_Image(i):
    """Delete the i-th captured image and its keypoint file (1-based index).

    A missing file or an out-of-range index is reported and ignored so the
    caller's cleanup flow can continue.

    Fix over the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit and programming errors) is narrowed to the
    failures this function actually expects, and the redundant ``pass`` after
    the print was removed.
    """
    global remfileNames
    print(remfileNames)
    try:
        os.remove('Keypoints\\' + remfileNames[i - 1])
        os.remove('gui\\captured_images\\' + str(i) + '.jpg')
    except (OSError, IndexError):
        print('file not found')
def LoadData(training_data_path, file_list, split=0.15, workers=4, batch_size=1, transforms=None):
    """Split *file_list* into train/validation sets and wrap each in a DataLoader.

    Args:
        training_data_path: directory holding the training samples.
        file_list: sample identifiers to split.
        split: validation fraction passed to train_test_split.
        workers: DataLoader worker processes per loader.
        batch_size: batch size for both loaders.
        transforms: augmentation applied to training samples only.

    Returns:
        (training_loader, validation_loader) tuple.

    Raises:
        OSError: when *training_data_path* does not exist.
    """
    if not os.path.exists(training_data_path):
        raise OSError('Folder ' + os.path.abspath(training_data_path) + ' does not exist.')
    # Fixed random_state keeps the split reproducible across runs.
    train_files, val_files = train_test_split(file_list, test_size=split, random_state=393939)
    train_set = FaultPrep(training_data_path, train_files, transforms=transforms)
    # Validation data is never augmented.
    val_set = FaultPrep(training_data_path, val_files, transforms=None)
    train_loader = data.DataLoader(train_set, batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True)
    val_loader = data.DataLoader(val_set, batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True)
    return (train_loader, val_loader)
def load_language_model(path: str, device: torch.device):
    """Deserialize a language model from *path* and move it onto *device*.

    DataParallel wrappers are stripped so the bare module is returned, and
    the target device is recorded on the model's ``device`` attribute.
    """
    # map_location keeps tensors on their saved storage until the explicit .to().
    loaded = torch.load(path, map_location=lambda storage, loc: storage)
    model = loaded.to(device)
    if isinstance(model, nn.DataParallel):
        model = model.module
    model.device = device
    return model
_toolkit()
# NOTE(review): the line above looks like a mangled toolkit-registration
# decorator (e.g. ``@register_toolkit()``) from the original source — confirm.
class EpicFHIR(FunctionToolkit):
    """Toolkit bundling Epic FHIR patient-data tools for the agent framework."""

    name_for_human = 'Epic FHIR'
    description_for_human = 'Toolkit for managing and sharing patient data in healthcare organizations.'
    name_for_model = 'EpicFHIR'
    description_for_model = 'The EpicFHIR toolkit provides a comprehensive set of tools for healthcare organizations to manage and share patient data, including demographics, clinical data, appointments, clinical documents, patient records, and diagnostic reports.'
    # Concrete tool implementations exposed by this toolkit.
    tool_classes = [EpicFHIRSearchPatients, EpicFHIRGetPatientDetails, EpicFHIRSearchDoctors, EpicFHIRManageClinicalDocuments, EpicFHIRManageAppointments, EpicFHIRManagePatientRecords, EpicFHIRManageDiagnosticReports, EpicFHIRDownloadFiles]
.parametrize('ti_func,np_func', unary_func_table)
# NOTE(review): the line above is a mangled ``@pytest.mark.parametrize`` (the
# decorator target was stripped from this extract) — confirm upstream.
def test_python_scope_vector_unary(ti_func, np_func):
    """Unary taichi ops on a python-scope ti.Vector must match their numpy twins."""
    ti.init()
    # Integer components for bitwise/logical ops, floats otherwise.
    x = ti.Vector(([2, 3] if (ti_func in [ops.invert, ti.lang.ops.logical_not]) else [0.2, 0.3]))
    result = ti_func(x).to_numpy()
    if (ti_func in [ti.lang.ops.logical_not]):
        # logical_not yields ints in taichi; cast for comparison with numpy.
        result = result.astype(bool)
    expected = np_func(x.to_numpy())
    assert test_utils.allclose(result, expected)
class TestSingleProcessFileTensorStorage(unittest.TestCase):
    """Round-trip tests for SingleProcessFileTensorStorage (write then read back)."""

    def test_read_write_1(self):
        """Write three records, reopen the file for reading, verify exact round-trip."""
        schema = {'tf': SizeData(dtype='float32', shape=(112, 112)), 'ti': SizeData(dtype='int32', shape=(4, 64, 64))}
        data_elts = []
        # Fixed seed keeps the generated tensors deterministic.
        torch.manual_seed(23)
        for _i in range(3):
            data_elt = {'tf': torch.rand((112, 112), dtype=torch.float32), 'ti': (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32)}
            data_elts.append(data_elt)
        # NOTE(review): re-opening a NamedTemporaryFile by name while the
        # handle is open does not work on Windows — this test assumes POSIX.
        with tempfile.NamedTemporaryFile() as hFile:
            storage = SingleProcessFileTensorStorage(schema, hFile.name, 'wb')
            for i in range(3):
                record_id = storage.put(data_elts[i])
                # Records are assigned sequential ids starting at 0.
                self.assertEqual(record_id, i)
            hFile.seek(0)
            storage = SingleProcessFileTensorStorage(schema, hFile.name, 'rb')
            for i in range(3):
                record = storage.get(i)
                self.assertEqual(len(record), len(schema))
                for field_name in schema:
                    self.assertTrue((field_name in record))
                    self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape)
                    self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype)
                    self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name]))
class DistModule(Module):
    """Wrap *module* for synchronous distributed training.

    Parameters are broadcast from rank 0 at construction so every worker
    starts from identical weights; forward calls delegate to the wrapped
    module unchanged.
    """

    def __init__(self, module):
        super(DistModule, self).__init__()
        self.module = module
        # Synchronize initial parameters across all distributed workers.
        broadcast_params(self.module)

    def forward(self, *inputs, **kwargs):
        """Delegate the forward pass to the wrapped module."""
        return self.module(*inputs, **kwargs)

    def train(self, mode=True):
        """Set training mode on the wrapper and the wrapped module.

        Returns self (the original returned None), matching the
        ``nn.Module.train`` convention so calls can be chained.
        """
        super(DistModule, self).train(mode)
        self.module.train(mode)
        return self
def get_cat_id(label_list, cat):
    """Return the index of the first entry in *label_list* whose first element equals *cat*.

    Args:
        label_list: sequence of (name, ...) entries.
        cat: category name to look up.

    Returns:
        The matching index, or None when no entry matches (the original fell
        off the end and returned None implicitly; here it is explicit).
    """
    # enumerate replaces the range(len(...)) index loop.
    for idx, entry in enumerate(label_list):
        if entry[0] == cat:
            return idx
    return None
class JamendoJsonifier(DatasetJsonifier):
    """Converts MTG-Jamendo autotagging annotations into the common JSON layout."""

    def load_raw_data(self):
        """Read autotagging.tsv and populate self.data with per-track tag dicts."""
        assert self.split, 'is split implemented for this dataset?'
        # Only these annotation categories are carried over.
        fields_to_use = ('genre', 'instrument', 'mood/theme')
        data = []
        tsv_file = os.path.join(self.input_dir, 'autotagging.tsv')
        (tracks, tags, extra) = mtg_jamendo_read_file(tsv_file)
        for (track_id, track_annotations) in tqdm(tracks.items(), total=len(tracks)):
            # Tag sets from the reader become lists for JSON serialization.
            track_data = {k: list(track_annotations[k]) for k in fields_to_use}
            track_data['id'] = str(track_id)
            data.append(track_data)
        self.data = data
        print(f'[INFO] loaded {len(data)} tracks')
        return
def postprocess_pose(pose_folder, scene_id):
    """Apply the per-scene benchmark alignment transform to every
    relocalization pose file (and, when present, the ICP pose file) under
    ``pose_folder``. Frame ids are scanned from 0 until the first missing
    reloc pose file; scene ids are 1-based into the module-level ``trans``."""
    print('postprocessing pose...')
    for fid in range(999999):
        path_reloc = '{}/{}'.format(pose_folder, reloc_mask).format(fid)
        path_reloc_bm = '{}/{}'.format(pose_folder, reloc_bm_mask).format(fid)
        # A missing reloc pose marks the end of the frame sequence.
        if not os.path.exists(path_reloc):
            break
        np.savetxt(path_reloc_bm, np.dot(trans[scene_id - 1], np.loadtxt(path_reloc)))
        path_icp = '{}/{}'.format(pose_folder, icp_mask).format(fid)
        path_icp_bm = '{}/{}'.format(pose_folder, icp_bm_mask).format(fid)
        # ICP poses are optional per frame; skip silently when absent.
        if not os.path.exists(path_icp):
            continue
        np.savetxt(path_icp_bm, np.dot(trans[scene_id - 1], np.loadtxt(path_icp)))
    print('done.')
class simulator():
    """Top-level simulation driver.

    Runs one ``layer_sim`` per topology layer and then writes three CSV
    reports (compute, bandwidth, detailed access) under
    ``<top_path>/<run_name>``.
    """

    def __init__(self):
        # Placeholder defaults; real values are installed via set_params().
        self.conf = cfg()
        self.topo = topo()
        self.top_path = './'
        self.verbose = True
        self.save_trace = True
        self.num_layers = 0
        self.single_layer_sim_object_list = []
        self.params_set_flag = False
        self.all_layer_run_done = False

    def set_params(self, config_obj=None, topo_obj=None, top_path='./', verbosity=True, save_trace=True):
        """Install the run configuration.

        ``config_obj`` / ``topo_obj`` default to fresh ``cfg()`` / ``topo()``
        instances. (The originals used ``cfg()`` / ``topo()`` as default
        arguments, which are evaluated once at class-definition time and then
        shared across every call — the ``None`` sentinel avoids that.)
        """
        self.conf = cfg() if config_obj is None else config_obj
        self.topo = topo() if topo_obj is None else topo_obj
        self.top_path = top_path
        self.verbose = verbosity
        self.save_trace = save_trace
        self.num_layers = self.topo.get_num_layers()
        self.params_set_flag = True

    def run(self):
        """Run every layer simulation, optionally print per-layer summaries
        and save traces, then generate the CSV reports."""
        assert self.params_set_flag, 'Simulator parameters are not set'
        # Build one layer_sim object per layer up front.
        for i in range(self.num_layers):
            this_layer_sim = layer_sim()
            this_layer_sim.set_params(layer_id=i, config_obj=self.conf, topology_obj=self.topo, verbose=self.verbose)
            self.single_layer_sim_object_list.append(this_layer_sim)
        # Reports go into <top_path>/<run_name>/ (created if needed).
        if not os.path.isdir(self.top_path):
            os.mkdir(self.top_path)
        report_path = self.top_path + '/' + self.conf.get_run_name()
        if not os.path.isdir(report_path):
            os.mkdir(report_path)
        self.top_path = report_path
        for single_layer_obj in self.single_layer_sim_object_list:
            if self.verbose:
                layer_id = single_layer_obj.get_layer_id()
                print('\nRunning Layer ' + str(layer_id))
            single_layer_obj.run()
            if self.verbose:
                # Per-layer compute summary.
                comp_items = single_layer_obj.get_compute_report_items()
                print('Compute cycles: ' + str(comp_items[0]))
                print('Stall cycles: ' + str(comp_items[1]))
                print('Overall utilization: ' + '{:.2f}'.format(comp_items[2]) + '%')
                print('Mapping efficiency: ' + '{:.2f}'.format(comp_items[3]) + '%')
                # Per-layer average DRAM bandwidth summary (items 3..5).
                avg_bw_items = single_layer_obj.get_bandwidth_report_items()
                print('Average IFMAP DRAM BW: ' + '{:.3f}'.format(avg_bw_items[3]) + ' words/cycle')
                print('Average Filter DRAM BW: ' + '{:.3f}'.format(avg_bw_items[4]) + ' words/cycle')
                print('Average OFMAP DRAM BW: ' + '{:.3f}'.format(avg_bw_items[5]) + ' words/cycle')
            if self.save_trace:
                if self.verbose:
                    print('Saving traces: ', end='')
                single_layer_obj.save_traces(self.top_path)
                if self.verbose:
                    print('Done!')
        self.all_layer_run_done = True
        self.generate_reports()

    @staticmethod
    def _write_report_row(report_file, lid, items):
        # One CSV row: layer id followed by the report items, trailing comma
        # preserved to match the header format.
        report_file.write(str(lid) + ', ' + ', '.join(str(x) for x in items) + ',\n')

    def generate_reports(self):
        """Write COMPUTE / BANDWIDTH / DETAILED_ACCESS CSV reports.

        Files are opened with context managers so they are closed even when a
        layer object raises mid-report (the original leaked open handles).
        """
        assert self.all_layer_run_done, 'Layer runs are not done yet'
        compute_header = 'LayerID, Total Cycles, Stall Cycles, Overall Util %, Mapping Efficiency %, Compute Util %,\n'
        bandwidth_header = 'LayerID, Avg IFMAP SRAM BW, Avg FILTER SRAM BW, Avg OFMAP SRAM BW, '
        bandwidth_header += 'Avg IFMAP DRAM BW, Avg FILTER DRAM BW, Avg OFMAP DRAM BW,\n'
        detail_header = 'LayerID, '
        detail_header += 'SRAM IFMAP Start Cycle, SRAM IFMAP Stop Cycle, SRAM IFMAP Reads, '
        detail_header += 'SRAM Filter Start Cycle, SRAM Filter Stop Cycle, SRAM Filter Reads, '
        detail_header += 'SRAM OFMAP Start Cycle, SRAM OFMAP Stop Cycle, SRAM OFMAP Writes, '
        detail_header += 'DRAM IFMAP Start Cycle, DRAM IFMAP Stop Cycle, DRAM IFMAP Reads, '
        detail_header += 'DRAM Filter Start Cycle, DRAM Filter Stop Cycle, DRAM Filter Reads, '
        detail_header += 'DRAM OFMAP Start Cycle, DRAM OFMAP Stop Cycle, DRAM OFMAP Writes,\n'
        with open(self.top_path + '/COMPUTE_REPORT.csv', 'w') as compute_report, \
             open(self.top_path + '/BANDWIDTH_REPORT.csv', 'w') as bandwidth_report, \
             open(self.top_path + '/DETAILED_ACCESS_REPORT.csv', 'w') as detail_report:
            compute_report.write(compute_header)
            bandwidth_report.write(bandwidth_header)
            detail_report.write(detail_header)
            for lid, single_layer_obj in enumerate(self.single_layer_sim_object_list):
                self._write_report_row(compute_report, lid, single_layer_obj.get_compute_report_items())
                self._write_report_row(bandwidth_report, lid, single_layer_obj.get_bandwidth_report_items())
                self._write_report_row(detail_report, lid, single_layer_obj.get_detail_report_items())

    def get_total_cycles(self):
        """Return the sum of per-layer total compute cycles (item 0 of each
        layer's compute report)."""
        assert self.all_layer_run_done, 'Layer runs are not done yet'
        total_cycles = 0
        for layer_obj in self.single_layer_sim_object_list:
            # BUG FIX: get_compute_report_items is a method and must be
            # called; the original subscripted the bound method object,
            # which raises TypeError at runtime.
            total_cycles += int(layer_obj.get_compute_report_items()[0])
        return total_cycles
class ELU(Module):
    """Applies the Exponential Linear Unit element-wise:
    ELU(x) = x for x > 0, alpha * (exp(x) - 1) otherwise."""
    __constants__ = ['alpha', 'inplace']
    alpha: float
    inplace: bool

    def __init__(self, alpha: float = 1.0, inplace: bool = False) -> None:
        super().__init__()
        self.alpha = alpha
        self.inplace = inplace

    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the functional implementation.
        return F.elu(input, self.alpha, self.inplace)

    def extra_repr(self) -> str:
        # Same string as the original: "alpha=<a>" plus ", inplace=True"
        # only when in-place mode is enabled.
        if self.inplace:
            return 'alpha={}, inplace=True'.format(self.alpha)
        return 'alpha={}'.format(self.alpha)
def Trainer(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, train_dl, valid_dl, test_dl, device, logger, config, experiment_log_dir, training_mode):
    """Full training loop.

    Trains for ``config.num_epoch`` epochs, validating after each epoch,
    saves a final checkpoint of both models, and — outside self-supervised
    mode — evaluates on the test set.
    """
    logger.debug('Training started ....')
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')
    for epoch in range(1, config.num_epoch + 1):
        train_loss, train_acc = model_train(model, temporal_contr_model, model_optimizer, temp_cont_optimizer, criterion, train_dl, config, device, training_mode)
        valid_loss, valid_acc, _, _ = model_evaluate(model, temporal_contr_model, valid_dl, device, training_mode)
        # LR scheduling only makes sense when a supervised objective is trained.
        if training_mode != 'self_supervised':
            scheduler.step(valid_loss)
        logger.debug(f'''
Epoch : {epoch}
Train Loss : {train_loss:.4f} | Train Accuracy : {train_acc:2.4f}
Valid Loss : {valid_loss:.4f} | Valid Accuracy : {valid_acc:2.4f}
''')
    # Persist the final state of both models.
    os.makedirs(os.path.join(experiment_log_dir, 'saved_models'), exist_ok=True)
    chkpoint = {'model_state_dict': model.state_dict(), 'temporal_contr_model_state_dict': temporal_contr_model.state_dict()}
    torch.save(chkpoint, os.path.join(experiment_log_dir, 'saved_models', 'ckp_last.pt'))
    if training_mode != 'self_supervised':
        logger.debug('\nEvaluate on the Test set:')
        test_loss, test_acc, _, _ = model_evaluate(model, temporal_contr_model, test_dl, device, training_mode)
        logger.debug(f'Test loss :{test_loss:0.4f} | Test Accuracy : {test_acc:0.4f}')
    logger.debug('\n Training is Done! ')
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError as e:
if (msg is None):
msg = 'Expected an int, got %s'
raise ValueError((msg % value)) from e |
def advance_past_unaries(gold_sequence, cur_index):
    """Skip consecutive unary transitions in a gold transition sequence.

    A unary transition is an OpenConstituent immediately followed by a
    CloseConstituent; pairs are consumed starting at ``cur_index`` as long
    as at least one more element follows the pair. Returns the first index
    not part of such a pair.
    """
    while cur_index + 2 < len(gold_sequence):
        if not isinstance(gold_sequence[cur_index], OpenConstituent):
            break
        if not isinstance(gold_sequence[cur_index + 1], CloseConstituent):
            break
        cur_index += 2
    return cur_index
def recorder(out, pred_list):
    """Append one binary prediction per line of ``out`` to ``pred_list``:
    0 when the line contains a negation cue (a NEG_WORDS token or an "n't"
    contraction), 1 otherwise. Returns the mutated ``pred_list``."""
    NEG_WORDS = ['No', 'not', 'no', 'NO']
    for line in out:
        # Strip sentence punctuation so tokens match the cue list exactly.
        cleaned = line.replace('.', '').replace(',', '')
        tokens = cleaned.split(' ')
        negated = any(tok in NEG_WORDS for tok in tokens) or any(tok.endswith("n't") for tok in tokens)
        pred_list.append(0 if negated else 1)
    return pred_list
def main():
    """Evaluate a PLUS model (TFM / RNN / P-ELMo) on the protein solubility
    test set.

    Builds data/model/run configs from CLI args, loads the test split,
    restores pre-trained weights, and logs cls (and, unless a P-ELMo LM is
    used, lm) evaluation metrics.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    # --- configuration objects -------------------------------------------
    cfgs = []
    data_cfg = config.DataConfig(args['data_config'])
    cfgs.append(data_cfg)
    if (args['lm_model_config'] is None):
        # Binary classification head (soluble / insoluble -> num_classes=2).
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=2)
        cfgs += [model_cfg]
    else:
        # With an LM config the main model also consumes LM embeddings of
        # size num_layers * hidden_dim * 2 (presumably bidirectional — TODO confirm).
        lm_model_cfg = config.ModelConfig(args['lm_model_config'], idx='lm_model_config', input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), lm_dim=((lm_model_cfg.num_layers * lm_model_cfg.hidden_dim) * 2), num_classes=2)
        cfgs += [model_cfg, lm_model_cfg]
    if (model_cfg.model_type == 'RNN'):
        # RNN models use a separate MLP prediction head on top of the encoder.
        pr_model_cfg = config.ModelConfig(args['pr_model_config'], idx='pr_model_config', model_type='MLP', num_classes=2)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim((model_cfg.hidden_dim * 2))
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args['run_config'], sanity_check=args['sanity_check'])
    cfgs.append(run_cfg)
    (output, save_prefix) = set_output(args, 'eval_solubility_log', test=True)
    # Empty CUDA_VISIBLE_DEVICES forces CPU when no device arg was given.
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_lm_model = (args['lm_model_config'] is not None)
    # --- test dataset -----------------------------------------------------
    start = Print(' '.join(['start loading a test dataset', data_cfg.path['test']]), output)
    dataset_test = solubility.load_solubility(data_cfg, 'test', alphabet, args['sanity_check'])
    dataset_test = dataset.Seq_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    # RNN batches are variable-length and need the custom collate function.
    collate_fn = (dataset.collate_sequences if flag_rnn else None)
    iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(' '.join(['loaded', str(len(dataset_test)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- model construction ----------------------------------------------
    start = Print('start initializing a model', output)
    # Entries are [model, idx, flag, flag, flag]; exact flag semantics are
    # defined by load_models/Trainer — verify against those helpers.
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
    elif (not flag_lm_model):
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, '', flag_lm_model, flag_rnn, False])
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, 'lm', True, False, False])
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg, per_seq=True)
        models_list.append([pr_model, 'pr', False, True, False])
    # Collect trainable parameters, keeping the prediction head separate.
    (params, pr_params) = ([], [])
    for (model, idx, frz, _, _) in models_list:
        if frz:
            continue
        elif (idx != 'pr'):
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- trainer setup ----------------------------------------------------
    start = Print('start setting trainer configurations', output)
    tasks_list = []
    # Task entries: [name, losses, metrics].
    tasks_list.append(['cls', [], ['acc', 'mcc']])
    if (not flag_lm_model):
        tasks_list.append(['lm', [], ['acc']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = False
    if flag_rnn:
        trainer_args['projection'] = pr_model_cfg.projection
    if flag_rnn:
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_cls_protein
    else:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_cls_protein
    end = Print('end setting trainer configurations', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- evaluation -------------------------------------------------------
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)
    # cls pass: no augmentation, only the cls task enabled.
    dataset_test.set_augment(False)
    trainer.set_exec_flags(['cls', 'lm'], [True, False])
    for (b, batch) in enumerate(iterator_test):
        batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
        trainer.evaluate(batch, trainer_args)
        if ((b % 10) == 0):
            print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
    # Clear the progress line.
    print((' ' * 150), end='\r', file=sys.stderr)
    if (not flag_lm_model):
        # lm pass: augmentation on, only the lm task enabled.
        dataset_test.set_augment(True)
        trainer.set_exec_flags(['cls', 'lm'], [False, True])
        for (b, batch) in enumerate(iterator_test):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.evaluate(batch, trainer_args)
            if ((b % 10) == 0):
                print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
    Print(trainer.get_log(test_idx='Solubility', args=trainer_args), output)
    trainer.reset()
    end = Print('end evaluating a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    output.close()
# NOTE(review): the next line is a bare no-op expression; it looks like a
# decorator (e.g. "@<registry>(goos.CylinderFlow)") whose "@"/call was lost
# during extraction — confirm against the original source before relying on
# this class being registered anywhere.
(goos.CylinderFlow)
class CylinderFlowImpl(GeometryImpl):
    """Renders a cylinder-shaped region onto a permittivity grid."""

    def eval(self, grid: gridlock.Grid, params: RenderParams):
        radius = self.shape.radius.item()
        # Boundary point count scales with arclength (pts_per_arclen * 2*pi*r).
        num_points = int(np.ceil((((params.pts_per_arclen * 2) * np.pi) * radius)))
        grid.draw_cylinder(self.shape.pos, radius, self.shape.height.item(), num_points, self.shape.material.permittivity(params.wlen))
def write_with_generator_and_metadata(datasource, table, gen, metadata):
    """Stream rows produced by ``gen()`` into ``table`` on ``datasource``,
    writing the encoded metadata record first."""
    with connect_with_data_source(datasource) as conn, SQLFSWriter(conn, table) as writer:
        # Metadata always precedes the data rows.
        writer.write(_encode_metadata(metadata))
        for row in gen():
            writer.write(row)
def main():
    """Subprocess entry point for the hook runner: reads kwargs from
    input.json in the control directory, invokes the named hook, and
    records the outcome (or failure kind) in output.json."""
    if len(sys.argv) < 3:
        sys.exit('Needs args: hook_name, control_dir')
    hook_name, control_dir = sys.argv[1], sys.argv[2]
    if hook_name not in HOOK_NAMES:
        sys.exit('Unknown hook: %s' % hook_name)
    hook = globals()[hook_name]
    hook_input = read_json(pjoin(control_dir, 'input.json'))
    json_out = {'unsupported': False, 'return_val': None}
    try:
        json_out['return_val'] = hook(**hook_input['kwargs'])
    except BackendUnavailable as e:
        # The build backend could not be imported.
        json_out.update(no_backend=True, traceback=e.traceback)
    except BackendInvalid as e:
        # The backend object exists but is malformed.
        json_out.update(backend_invalid=True, backend_error=e.message)
    except GotUnsupportedOperation as e:
        json_out.update(unsupported=True, traceback=e.traceback)
    except HookMissing:
        json_out['hook_missing'] = True
    write_json(json_out, pjoin(control_dir, 'output.json'), indent=2)
def _densenet(arch: str, growth_rate: int, block_config: Tuple[(int, int, int, int)], num_init_features: int, pretrained: bool, progress: bool, **kwargs: Any) -> DenseNet:
    """Construct a DenseNet variant; when ``pretrained`` is set, load the
    weights registered for ``arch`` in ``model_urls``."""
    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if not pretrained:
        return model
    _load_state_dict(model, model_urls[arch], progress)
    return model
class ATR(Dataset):
    """ATR human-parsing dataset: 18 semantic classes over the
    ``humanparsing/JPEGImages`` folder, split by fixed index ranges
    (16000 train / 700 val / 1000 test)."""
    CLASSES = ['background', 'hat', 'hair', 'sunglass', 'upper-clothes', 'skirt', 'pants', 'dress', 'belt', 'left-shoe', 'right-shoe', 'face', 'left-leg', 'right-leg', 'left-arm', 'right-arm', 'bag', 'scarf']
    PALETTE = torch.tensor([[0, 0, 0], [127, 0, 0], [254, 0, 0], [0, 84, 0], [169, 0, 50], [254, 84, 0], [255, 0, 84], [0, 118, 220], [84, 84, 0], [0, 84, 84], [84, 50, 0], [51, 85, 127], [0, 127, 0], [0, 0, 254], [50, 169, 220], [0, 254, 254], [84, 254, 169], [169, 254, 84]])

    def __init__(self, root: str, split: str = 'train', transform=None) -> None:
        super().__init__()
        assert split in ['train', 'val', 'test']
        self.transform = transform
        self.n_classes = len(self.CLASSES)
        self.ignore_label = 255
        img_path = Path(root) / 'humanparsing' / 'JPEGImages'
        self.files = list(img_path.glob('*.jpg'))
        # Fixed split boundaries over the sorted glob result.
        if split == 'train':
            self.files = self.files[:16000]
        elif split == 'val':
            self.files = self.files[16000:16700]
        else:
            self.files = self.files[16700:17700]
        if not self.files:
            raise Exception(f'No images found in {img_path}')
        print(f'Found {len(self.files)} {split} images.')

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, index: int) -> Tuple[(Tensor, Tensor)]:
        img_path = str(self.files[index])
        # Labels mirror the image tree under a different folder / extension.
        lbl_path = img_path.replace('JPEGImages', 'SegmentationClassAug').replace('.jpg', '.png')
        image = io.read_image(img_path)
        label = io.read_image(lbl_path)
        if self.transform:
            image, label = self.transform(image, label)
        return image, label.squeeze().long()
def LF_pseudo_negation_exclusion(span):
    """Labeling function: spans whose nearby context reads like
    "cannot exclude X" are NON_NEGATED — the negation targets the act of
    excluding, not the finding itself. Triggers must be within 3 tokens."""
    left_rgx = "(inadequate\\s+to|does\\s+not|cannot|can't)\\s+exclude"
    right_rgx = "(cannot\\s+be|not\\s+be|doesn't|not|to)\\s+exclude[d]*"
    # Left context first; right context only if the left did not fire.
    trigger = match_regex(left_rgx, get_left_span(span))
    if trigger and token_distance(trigger, span) <= 3:
        return NON_NEGATED
    trigger = match_regex(right_rgx, get_right_span(span))
    if trigger and token_distance(trigger, span) <= 3:
        return NON_NEGATED
    return ABSTAIN
def visualize(settings):
    """Plot 16 randomly chosen light-curves from the preprocessed HDF5 file."""
    settings.check_data_exists()
    # Unseeded RNG: each invocation shows a different random sample.
    np.random.seed()
    with h5py.File(settings.hdf5_file_name, 'r') as hf:
        SNID_idxs = np.random.permutation(hf['SNID'].shape[0])[:16]
        selected = hf['SNID'][:][SNID_idxs]
        SNIDs = list(np.array(list(selected)).astype(str))
    plot_random_preprocessed_lightcurves(settings, SNIDs)
    plot_lightcurves_from_hdf5(settings, SNID_idxs)
class VAEBaselineView(nn.Module):
    """Minimal MNIST VAE (784 -> 400 -> 20-dim latent -> 400 -> 784).

    ``encode`` returns (mu, logvar); ``reparameterize`` samples with the
    reparameterization trick while training and returns mu at eval time;
    ``decode`` maps latents back to pixel space through a sigmoid.
    """

    def __init__(self):
        # BUG FIX: the original called super(VAEBaseline, self).__init__(),
        # naming a different class — a NameError unless VAEBaseline happens
        # to exist, and wrong even then. Use the zero-argument form.
        super().__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)  # mu head
        self.fc22 = nn.Linear(400, 20)  # logvar head
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)
        print('Total model parameters {}'.format(self.count_parameters()))

    def encode(self, x):
        """Return (mu, logvar), each of shape (N, 20), for x of shape (N, 784)."""
        h1 = F.relu(self.fc1(x))
        return (self.fc21(h1), self.fc22(h1))

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) during training; return mu in eval mode."""
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        return mu

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        # torch.sigmoid replaces the deprecated F.sigmoid.
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        (mu, logvar) = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        # NOTE(review): the latent is 20-dim but mu/logvar are reshaped to
        # (-1, 10, 1, 1), which doubles the leading dimension relative to the
        # batch — preserved as-is, but confirm downstream consumers expect it.
        return (self.decode(z).view(-1, 1, 28, 28), mu.view(-1, 10, 1, 1), logvar.view(-1, 10, 1, 1))

    def count_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
class SummarizationMetric(Metric):
    """Metrics for summarization scenarios.

    Always computes ROUGE-1/2/L and data statistics (coverage, density,
    compression). When constructed with a non-CPU device, additionally
    computes BERTScore and SummaC faithfulness. Precomputed human-eval and
    QAFactEval scores are merged in lazily from the eval cache when present.
    """

    def __init__(self, task: str, device: str='cpu'):
        self.rouge_fns = {'rouge_1': get_rouge_function('rouge1'), 'rouge_2': get_rouge_function('rouge2'), 'rouge_l': get_rouge_function('rougeL')}
        # The data-stats metric below needs this spaCy model installed.
        if (not spacy.util.is_package('en_core_web_sm')):
            spacy.cli.download('en_core_web_sm')
        try:
            from summ_eval.data_stats_metric import DataStatsMetric
        except ModuleNotFoundError as e:
            handle_module_not_found_error(e, ['metrics'])
        self.data_stats_metric = DataStatsMetric()
        self.task: str = task
        # Lazily loaded caches (see _load_qafacteval / _load_humaneval).
        self.qa_fact_eval: Optional[Dict] = None
        self.humaneval: Optional[Dict] = None
        if (device == 'cpu'):
            # Model-based metrics are skipped on CPU (presumably for speed —
            # TODO confirm).
            self.compute_faithfulness = False
            self.compute_bertscore = False
        else:
            self.compute_bertscore = True
            self.bert_scorer = BERTScorer(model_type='microsoft/deberta-large-mnli', lang='en', rescale_with_baseline=True, device=device)
            self.compute_faithfulness = True
            self.summac = SummaCZS(granularity='sentence', model_name='vitc', imager_load_cache=False, device=device)

    def _load_qafacteval(self, eval_cache_path: str):
        """Download (if needed) and load cached QAFactEval scores for this task."""
        target_path: str = os.path.join(eval_cache_path, 'qafacteval.pk')
        ensure_file_downloaded(source_url=QAFACTEVAL_URL, target_path=target_path)
        with open(target_path, 'rb') as fin:
            qafacteval_scores = pickle.load(fin)
        self.qa_fact_eval = qafacteval_scores[self.task]

    def _load_humaneval(self, eval_cache_path: str) -> Dict:
        """Load cached human-eval scores keyed by
        (metric_name, model_name, input_id, output_text)."""
        if ('cnndm' in self.task):
            dataset = 'cnndm'
        elif ('xsum' in self.task):
            dataset = 'xsum'
        else:
            raise ValueError
        all_humaneval_scores = dict()
        # Scores exist for both 0-shot and 5-shot runs.
        for shots in [0, 5]:
            score_analyzer = SummarizationHumanEvalAnalyzer(dataset, eval_cache_path, shots=shots)
            # Float output_text indicates a NaN placeholder; normalize to ''.
            for ((model_name, input_id, output_text), score) in score_analyzer.faithfulness_full.items():
                if isinstance(output_text, float):
                    output_text = ''
                all_humaneval_scores[('faithfulness', model_name, input_id, output_text)] = score
            for ((model_name, input_id, output_text), score) in score_analyzer.relevance_full.items():
                if isinstance(output_text, float):
                    output_text = ''
                all_humaneval_scores[('relevance', model_name, input_id, output_text)] = score
            for ((model_name, input_id, output_text), score) in score_analyzer.coherence_full.items():
                if isinstance(output_text, float):
                    output_text = ''
                all_humaneval_scores[('coherence', model_name, input_id, output_text)] = score
        return all_humaneval_scores

    def evaluate(self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int) -> MetricResult:
        """Delegate to the base evaluate, forcing parallelism=1 when
        faithfulness is computed (it errors under parallel evaluation)."""
        if self.compute_faithfulness:
            hlog(f'Setting parallelism from {parallelism} to 1, since evaluating faithfulness with parallelism > 1 errors.')
            parallelism = 1
        return super().evaluate(scenario_state, metric_service, eval_cache_path, parallelism=parallelism)

    def _compute_rouge(self, refs: List[str], pred: str) -> Dict[(str, float)]:
        """Best (max) ROUGE score of ``pred`` over all references."""
        metrics: Dict[(str, float)] = {}
        for (metric, metric_fn) in self.rouge_fns.items():
            metrics[metric] = np.max([metric_fn(ref, pred) for ref in refs])
        return metrics

    def _compute_data_stats(self, inp: str, pred: str) -> Dict[(str, float)]:
        """Extractiveness statistics of ``pred`` relative to the source ``inp``."""
        stats = self.data_stats_metric.evaluate_example(pred, inp)
        return {'summarization_coverage': stats['coverage'], 'summarization_density': stats['density'], 'summarization_compression': stats['compression']}

    def _compute_faithfulness_scores(self, inp: str, pred: str) -> Dict[(str, float)]:
        """SummaC faithfulness score of ``pred`` with respect to ``inp``."""
        return {'summac': self.summac.score_one(inp, pred)['score']}

    def _compute_bert_score(self, refs: List[str], pred: str) -> Dict[(str, float)]:
        """BERTScore precision/recall/F1 of ``pred`` against the references."""
        (p, r, f) = self.bert_scorer.score([pred], [refs])
        return {'BERTScore-P': p[0].item(), 'BERTScore-R': r[0].item(), 'BERTScore-F': f[0].item()}

    def _remove_braces(self, text: str) -> str:
        # Strip a single leading '{' and/or trailing '}' if present.
        if text.startswith('{'):
            text = text[1:]
        if text.endswith('}'):
            text = text[:(- 1)]
        return text

    def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService, eval_cache_path: str) -> List[Stat]:
        """Compute all configured stats for one generated summary.

        Cached human-eval / QAFactEval lookups are best-effort: missing keys
        or malformed caches (KeyError/ValueError) are silently skipped so the
        always-available metrics are still reported.
        """
        refs: List[str] = [self._remove_braces(ref.output.text) for ref in request_state.instance.references]
        inp: str = self._remove_braces(request_state.instance.input.text)
        assert (request_state.result is not None)
        pred: str = self._remove_braces(request_state.result.completions[0].text.strip())
        result: List[Stat] = []
        try:
            # Human-eval scores, loaded lazily on first use.
            if (self.humaneval is None):
                self.humaneval = self._load_humaneval(eval_cache_path)
            # Cache keys use '_'-separated deployment names.
            deployment = adapter_spec.model_deployment.replace('/', '_')
            for metric_name in ['faithfulness', 'relevance', 'coherence']:
                val = self.humaneval[(metric_name, deployment, request_state.instance.id, pred)]
                result.append(Stat(MetricName(f'HumanEval-{metric_name}')).add(float(val)))
        except KeyError:
            pass
        except ValueError:
            pass
        try:
            # QAFactEval scores, loaded lazily on first use.
            if (self.qa_fact_eval is None):
                self._load_qafacteval(eval_cache_path)
            assert (self.qa_fact_eval is not None)
            deployment = adapter_spec.model_deployment.replace('/', '_')
            val = self.qa_fact_eval[deployment][(request_state.instance.id, pred)]
            result.append(Stat(MetricName('QAFactEval')).add(float(val)))
        except KeyError:
            pass
        except ValueError:
            pass
        result.extend([Stat(MetricName(name)).add(float(val)) for (name, val) in self._compute_rouge(refs, pred).items()])
        result.extend([Stat(MetricName(name)).add(float(val)) for (name, val) in self._compute_data_stats(inp, pred).items()])
        if self.compute_faithfulness:
            result.extend([Stat(MetricName(name)).add(float(val)) for (name, val) in self._compute_faithfulness_scores(inp, pred).items()])
        if self.compute_bertscore:
            result.extend([Stat(MetricName(name)).add(float(val)) for (name, val) in self._compute_bert_score(refs, pred).items()])
        return result
class GModel(nn.Module):
    """Small MLP generator mapping 2-d inputs to 2-d outputs
    (2 -> 32 -> 64 -> 32 -> 2 with ReLU activations)."""

    def __init__(self, opt):
        super(GModel, self).__init__()
        self.opt = opt
        layers = [
            nn.Linear(2, 32), nn.ReLU(),
            nn.Linear(32, 64), nn.ReLU(),
            nn.Linear(64, 32), nn.ReLU(),
            nn.Linear(32, 2),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, data):
        # Plain feed-forward pass through the stack.
        return self.fc(data)
def _wrap_result(result, is_complex, shape=None):
if is_complex:
z = _real2complex(result)
else:
z = result
if (shape is not None):
z = z.reshape(shape)
return z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.