code stringlengths 101 5.91M |
|---|
def _process_file(wav_dir, txt_dir, base_filename, root_dir):
    """Convert one .flac recording to 16-bit mono .wav and write its transcript.

    Relies on the module-level ``args`` namespace (``sample_rate``) and on
    ``_preprocess_transcript`` for transcript normalization.
    """
    full_recording_path = os.path.join(root_dir, base_filename)
    assert (os.path.exists(full_recording_path) and os.path.exists(root_dir))
    wav_recording_path = os.path.join(wav_dir, base_filename.replace('.flac', '.wav'))
    # Pass argv as a list with shell=False: immune to shell injection and to
    # whitespace in paths (previously the paths were interpolated into a
    # single shell string with shell=True).
    subprocess.call(['sox', full_recording_path, '-r', str(args.sample_rate),
                     '-b', '16', '-c', '1', wav_recording_path])
    txt_transcript_path = os.path.join(txt_dir, base_filename.replace('.flac', '.txt'))
    # LibriSpeech layout: "<spk>-<chap>-<utt>.flac" share one "<spk>-<chap>.trans.txt".
    transcript_file = os.path.join(root_dir, ('-'.join(base_filename.split('-')[:(- 1)]) + '.trans.txt'))
    assert os.path.exists(transcript_file), 'Transcript file {} does not exist.'.format(transcript_file)
    with open(transcript_file) as tf:  # close the handle (was left to the GC)
        transcriptions = tf.read().strip().split('\n')
    # Map the utterance number (last '-' component) to its transcript text.
    transcriptions = {t.split()[0].split('-')[(- 1)]: ' '.join(t.split()[1:]) for t in transcriptions}
    with open(txt_transcript_path, 'w') as f:
        key = base_filename.replace('.flac', '').split('-')[(- 1)]
        assert (key in transcriptions), '{} is not in the transcriptions'.format(key)
        f.write(_preprocess_transcript(transcriptions[key]))
        f.flush()
def init_array(imgIn, imgOut):
    """Fill imgIn with a deterministic pseudo-random pattern in [0, 1].

    Reads the module-level W/H size variables; imgOut is accepted for
    interface symmetry but is not written here.
    """
    width = W.get()
    height = H.get()
    for col in range(width):
        for row in range(height):
            raw = ((313 * col) + (991 * row)) % 65536
            imgIn[(col, row)] = datatype(raw) / 65535.0
def get_axis_size(array: Union[NDArray, Sequence[NDArray]], axis: int) -> int:
    """Return the size of *array* along *axis*.

    Accepts either a single ndarray or a list/tuple of ndarrays that must all
    agree along *axis* (the common size is returned).

    Raises:
        ValueError: if *array* is neither an ndarray nor a list/tuple.
        AssertionError: if the sequence members disagree along *axis*.
    """
    if isinstance(array, np.ndarray):
        return int(array.shape[axis])
    elif isinstance(array, (list, tuple)):
        sizes = [v.shape[axis] for v in array]
        # BUG FIX: the common size is sizes[0]; the old code indexed `sizes`
        # by *axis*, which is wrong (and raises IndexError whenever
        # axis >= len(array)).
        size = sizes[0]
        assert np.all(np.array(sizes) == size)
        return int(size)
    else:
        raise ValueError(f'invalid array type: {type(array)}')
def ep_req_func1(protocols, args: Arguments) -> 'BBPSSW':
    # Pair two BBPSSW purification protocols whose kept memories live on
    # remote0/remote1 and merge them: the remote1 protocol is dismantled and
    # its kept memory becomes the measurement memory of the remote0 protocol.
    # Returns None unless exactly one protocol matched each remote name.
    remote0 = args['remote0']
    remote1 = args['remote1']
    _protocols = []
    for protocol in protocols:
        if (not isinstance(protocol, BBPSSW)):
            continue
        # Ordering trick: the remote0 match is inserted at index 0 and the
        # remote1 match at index 1. list.insert clamps out-of-range indices,
        # so regardless of encounter order the result is
        # [remote0-protocol, remote1-protocol].
        if (protocol.kept_memo.name == remote0):
            _protocols.insert(0, protocol)
        if (protocol.kept_memo.name == remote1):
            _protocols.insert(1, protocol)
    if (len(_protocols) != 2):
        return None
    # Tear down the second protocol: remove it from the candidate list, its
    # rule, and its memory ...
    protocols.remove(_protocols[1])
    _protocols[1].rule.protocols.remove(_protocols[1])
    _protocols[1].kept_memo.detach(_protocols[1])
    # ... then hand its kept memory to the surviving protocol as meas_memo.
    _protocols[0].meas_memo = _protocols[1].kept_memo
    _protocols[0].memories = [_protocols[0].kept_memo, _protocols[0].meas_memo]
    _protocols[0].name = ((_protocols[0].name + '.') + _protocols[0].meas_memo.name)
    _protocols[0].meas_memo.attach(_protocols[0])
    return _protocols[0]
class MaskLoss():
    """Cross-entropy loss on coarse segmentation masks (DensePose-style)."""

    def __call__(self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any) -> torch.Tensor:
        """Return the mask loss, or a zero-valued fake loss when no GT is available."""
        if len(proposals_with_gt) == 0:
            return self.fake_value(densepose_predictor_outputs)
        # Ground-truth extraction carries no gradient.
        with torch.no_grad():
            mask_loss_data = extract_data_for_mask_loss_from_matches(
                proposals_with_gt, densepose_predictor_outputs.coarse_segm)
        missing_gt = mask_loss_data.masks_gt is None
        missing_est = mask_loss_data.masks_est is None
        if missing_gt or missing_est:
            return self.fake_value(densepose_predictor_outputs)
        return F.cross_entropy(mask_loss_data.masks_est, mask_loss_data.masks_gt.long())

    def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
        """Zero loss that still references coarse_segm, keeping the autograd graph connected."""
        return densepose_predictor_outputs.coarse_segm.sum() * 0
class LinearAnnealedWeight(Decay):
    """Weight annealed linearly from init_val down to end_val over max_epochs.

    The first call returns init_val unchanged; every later call steps the
    weight down by (init_val - end_val) / max_epochs, clamped at end_val.
    """

    def __init__(self, init_val, end_val, max_epochs, sigma):
        super(LinearAnnealedWeight, self).__init__(init_val, end_val, max_epochs, sigma)
        self._anneal_start = init_val
        self._anneal_end = end_val
        msg = "'init_val' must be >= 'end_val'"
        assert (init_val >= end_val), msg
        self._max_epochs = max_epochs
        # constant per-call decrement
        self.anneal_rate = ((init_val - end_val) / float(max_epochs))
        self.weight = init_val
        # number of calls so far (was redundantly initialized twice, first as
        # 0.0 then as 0; a single int init is kept)
        self._count = 0

    def __call__(self):
        """Return the current weight, annealing on every call after the first."""
        if (self._count == 0):
            self._count += 1
            return self.weight
        else:
            return self.get_current_weight()

    def get_current_weight(self):
        """Step the weight down once and return it, clamped at the end value."""
        self.weight = (self.weight - self.anneal_rate)
        return max(self._anneal_end, self.weight)

    def __repr__(self):
        return '{}(init_val={}, end_val={}, max_epochs={})'.format(self.__class__.__name__, self._anneal_start, self._anneal_end, self._max_epochs)
def main(unused_argv=None):
    # Evaluate the Inception model on the flowers dataset: wipe any previous
    # eval directory, recreate it, and run the evaluation loop.
    dataset = FlowersData(subset=FLAGS.subset)
    assert dataset.data_files()
    # Start from an empty eval directory.
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    inception_eval.evaluate(dataset)
def cache_glove(glove_prefix):
    """Parse '<glove_prefix>.txt' GloVe vectors and cache them to '<glove_prefix>.pt'.

    The cached dict holds 'stoi' (token -> index), 'itos' (index -> token)
    and 'vectors' (FloatTensor of shape [vocab, dim]). Tokens that are not
    valid UTF-8 are reported and skipped.
    """
    stoi = {}
    itos = []
    vectors = []
    fname = (glove_prefix + '.txt')
    with open(fname, 'rb') as f:
        for l in f:
            l = l.strip().split(b' ')
            (word, vector) = (l[0], l[1:])
            try:
                word = word.decode()
            except UnicodeDecodeError:
                # narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit
                print('non-UTF8 token', repr(word), 'ignored')
                continue
            stoi[word] = len(itos)
            itos.append(word)
            vectors.append([float(x) for x in vector])
    d = {'stoi': stoi, 'itos': itos, 'vectors': torch.FloatTensor(vectors)}
    torch.save(d, (glove_prefix + '.pt'))
def register_Ns3FfMacCschedSapUserCschedCellConfigCnfParameters_methods(root_module, cls):
    # PyBindGen-generated registration for the ns-3 Python bindings: declares
    # the default/copy constructors and the public instance attributes of
    # ns3::FfMacCschedSapUser::CschedCellConfigCnfParameters.
    # Generated code — do not hand-edit.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::FfMacCschedSapUser::CschedCellConfigCnfParameters const &', 'arg0')])
    cls.add_instance_attribute('m_result', 'ns3::Result_e', is_const=False)
    cls.add_instance_attribute('m_vendorSpecificList', 'std::vector< ns3::VendorSpecificListElement_s >', is_const=False)
    return
class DocstringSignatureMixin():
    """Mixin for autodoc Documenters that can read the object's signature from
    the first line(s) of its docstring (common for C/extension functions where
    introspection is unavailable)."""
    # docstrings with the signature line(s) stripped; None until computed
    _new_docstrings: List[List[str]] = None
    # additional (overloaded) signatures found after the first match
    _signatures: List[str] = None

    def _find_signature(self) -> Tuple[(str, str)]:
        # Names a docstring signature line may legitimately start with: the
        # documented attribute itself, plus '__init__' and base-class names
        # when documenting a class.
        valid_names = [self.objpath[(- 1)]]
        if isinstance(self, ClassDocumenter):
            valid_names.append('__init__')
            if hasattr(self.object, '__mro__'):
                valid_names.extend((cls.__name__ for cls in self.object.__mro__))
        docstrings = self.get_doc()
        if (docstrings is None):
            return (None, None)
        self._new_docstrings = docstrings[:]
        self._signatures = []
        result = None
        for (i, doclines) in enumerate(docstrings):
            for (j, line) in enumerate(doclines):
                if (not line):
                    # stop at the first blank line: signatures only appear at
                    # the very top of the docstring
                    break
                if line.endswith('\\'):
                    # backslash continuation: strip it before matching
                    line = line.rstrip('\\').rstrip()
                # match the line against the extension-signature regex
                match = py_ext_sig_re.match(line)
                if (not match):
                    break
                (exmod, path, base, args, retann) = match.groups()
                if (base not in valid_names):
                    break
                # re-prepare the docstring with the signature line(s) removed
                tab_width = self.directive.state.document.settings.tab_width
                self._new_docstrings[i] = prepare_docstring('\n'.join(doclines[(j + 1):]), tab_width)
                if (result is None):
                    # the first signature found becomes the primary result
                    result = (args, retann)
                else:
                    # later matches are recorded as overloads
                    self._signatures.append(('(%s) -> %s' % (args, retann)))
            if result:
                break
        return result

    def get_doc(self) -> List[List[str]]:
        # Prefer the signature-stripped docstrings once _find_signature ran.
        if (self._new_docstrings is not None):
            return self._new_docstrings
        return super().get_doc()

    def format_signature(self, **kwargs: Any) -> str:
        # Only fall back to docstring signatures when introspection found none
        # and the feature is enabled in the config.
        if ((self.args is None) and self.config.autodoc_docstring_signature):
            result = self._find_signature()
            if (result is not None):
                (self.args, self.retann) = result
        sig = super().format_signature(**kwargs)
        if self._signatures:
            # append overloaded signatures, one per line
            return '\n'.join(([sig] + self._signatures))
        else:
            return sig
class QDExperiment(object):
    """Quality-Diversity experiment driver (qdpy-based): builds containers and
    algorithms from a YAML configuration, runs the optimisation and plots the
    resulting performance/activity grids."""

    def __init__(self, config_filename, parallelism_type='concurrent', seed=None, base_config=None):
        # Load the YAML config (optionally overlaid with base_config), seed the
        # RNGs, then build algorithm/container/logger objects via reinit().
        self._loadConfig(config_filename)
        if (base_config is not None):
            self.config = {**self.config, **base_config}
        self.parallelism_type = parallelism_type
        self.config['parallelism_type'] = parallelism_type
        self._init_seed(seed)
        self.reinit()

    def __getstate__(self):
        # Exclude the algorithm and container from pickling; they are rebuilt
        # by reinit().
        odict = self.__dict__.copy()
        del odict['algo']
        del odict['container']
        return odict

    def _loadConfig(self, config_filename):
        # Read the YAML configuration file into self.config.
        self.config_filename = config_filename
        self.config_name = os.path.splitext(os.path.basename(config_filename))[0]
        self.config = yaml.safe_load(open(config_filename))

    def _get_features_list(self):
        # Return (features_list, fitness_type) as declared in the config.
        features_list = self.config['features_list']
        fitness_type = self.config['fitness_type']
        return (features_list, fitness_type)

    def _define_domains(self):
        # Derive features_domain / fitness_domain from the '<name>Domain'
        # entries of the config.
        (self.features_list, self.fitness_type) = self._get_features_list()
        self.config['features_domain'] = []
        for feature_name in self.features_list:
            val = self.config[('%s%s' % (feature_name, 'Domain'))]
            self.config['features_domain'] += [tuple(val)]
        self.config['fitness_domain'] = (tuple(self.config[('%s%s' % (self.fitness_type, 'Domain'))]),)

    def _init_seed(self, rnd_seed=None):
        # Seed numpy and random: explicit argument wins over the config entry,
        # falling back to a random seed. The chosen seed is printed so the run
        # can be reproduced.
        if (rnd_seed is not None):
            seed = rnd_seed
        elif ('seed' in self.config):
            seed = self.config['seed']
        else:
            seed = np.random.randint(1000000)
        np.random.seed(seed)
        random.seed(seed)
        print(('Seed: %i' % seed))

    def reinit(self):
        # (Re)create the data directory, domains, containers, algorithms and
        # the logger for a fresh run.
        self.instance_name = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        # Default dataDir = <resultsBaseDir>/<config file basename>.
        if (not self.config.get('dataDir')):
            resultsBaseDir = (self.config.get('resultsBaseDir') or './results/')
            dataDir = os.path.join(os.path.expanduser(resultsBaseDir), os.path.splitext(os.path.basename(self.config_filename))[0])
            self.config['dataDir'] = dataDir
        pathlib.Path(self.config['dataDir']).mkdir(parents=True, exist_ok=True)
        self._define_domains()
        default_config = {}
        default_config['fitness_domain'] = self.config['fitness_domain']
        default_config['features_domain'] = self.config['features_domain']
        # Containers are built first so algorithms can reference them by name.
        factory = Factory()
        assert ('containers' in self.config), f"Please specify configuration entry 'containers' containing the description of all containers."
        factory.build(self.config['containers'], default_config)
        assert ('algorithms' in self.config), f"Please specify configuration entry 'algorithms' containing the description of all algorithms."
        factory.build(self.config['algorithms'])
        assert ('main_algorithm_name' in self.config), f"Please specify configuration entry 'main_algorithm' containing the name of the main algorithm."
        self.algo = factory[self.config['main_algorithm_name']]
        self.container = self.algo.container
        self.batch_mode = self.config.get('batch_mode', False)
        self.log_base_path = self.config['dataDir']
        # Logger writes per-iteration and final pickles under dataDir.
        self.iteration_filenames = os.path.join(self.log_base_path, (('iteration-%i_' + self.instance_name) + '.p'))
        self.final_filename = os.path.join(self.log_base_path, (('final_' + self.instance_name) + '.p'))
        self.save_period = self.config.get('save_period', 0)
        self.logger = TQDMAlgorithmLogger(self.algo, iteration_filenames=self.iteration_filenames, final_filename=self.final_filename, save_period=self.save_period)

    def run(self):
        # Run the optimisation, then summarise and plot the performance and
        # activity grids (rasterising the container into a Grid if necessary).
        with ParallelismManager(self.parallelism_type) as pMgr:
            best = self.algo.optimise(self.eval_fn, executor=pMgr.executor, batch_mode=self.batch_mode)
        print('\n\n')
        print(self.algo.summary())
        if isinstance(self.container, Grid):
            grid = self.container
        else:
            # the plotting helpers below need a Grid
            print('\n{:70s}'.format('Transforming the container into a grid, for visualisation...'), end='', flush=True)
            grid = Grid(self.container, shape=(10, 10), max_items_per_bin=1, fitness_domain=self.container.fitness_domain, features_domain=self.container.features_domain, storage_type=list)
            print('\tDone !')
        print(grid.summary())
        plot_path = os.path.join(self.log_base_path, f'performancesGrid-{self.instance_name}.pdf')
        # select the first fitness objective at every bin of the (possibly N-D) grid
        quality = grid.quality_array[(((slice(None),) * (len(grid.quality_array.shape) - 1)) + (0,))]
        plotGridSubplots(quality, plot_path, plt.get_cmap('nipy_spectral'), grid.features_domain, grid.fitness_domain[0], nbTicks=None)
        print(("\nA plot of the performance grid was saved in '%s'." % os.path.abspath(plot_path)))
        plot_path = os.path.join(self.log_base_path, f'activityGrid-{self.instance_name}.pdf')
        plotGridSubplots(grid.activity_per_bin, plot_path, plt.get_cmap('nipy_spectral'), grid.features_domain, [0, np.max(grid.activity_per_bin)], nbTicks=None)
        print(("\nA plot of the activity grid was saved in '%s'." % os.path.abspath(plot_path)))
        print(("All results are available in the '%s' pickle file." % self.logger.final_filename))

    def _removeTmpFiles(self, fileList):
        # Best-effort removal of temporary files/directories unless the config
        # asks to keep them.
        keepTemporaryFiles = self.config.get('keepTemporaryFiles')
        if (not keepTemporaryFiles):
            for f in fileList:
                try:
                    if os.path.isfile(f):
                        os.remove(f)
                    else:
                        shutil.rmtree(f)
                except:  # deliberate best-effort cleanup: ignore all errors
                    pass

    def eval_fn(self, ind):
        # Placeholder evaluation: draw fitness/features uniformly from their
        # domains and attach them to the individual. Subclasses override this.
        fitness = [np.random.uniform(x[0], x[1]) for x in self.config['fitness_domain']]
        features = [np.random.uniform(x[0], x[1]) for x in self.config['features_domain']]
        ind.fitness.values = fitness
        ind.features = features
        return ind
class BinaryQuintic(AlgebraicForm):
    """A binary quintic: a degree-5 form in two variables.

    Provides the classical covariants (H, i, T, j, tau, theta, alpha, beta,
    gamma, delta), the Clebsch invariants A, B, C, R, the arithmetic
    invariants I4, I8, I12, I18, and reconstruction of a canonical form.

    NOTE(review): the corrupted bare `_method` tokens in the original source
    were restored to `@cached_method` decorators, and the missing
    `@classmethod` was restored on `from_invariants` (it takes `cls` and calls
    `cls(...)`) — this matches the upstream SageMath implementation; confirm
    against the file's imports.
    """

    def __init__(self, n, d, polynomial, *args):
        """Initialize; only n == 2 variables and degree d == 5 are valid."""
        assert ((n == 2) and (d == 5))
        super().__init__(2, 5, polynomial, *args)
        self._x = self._variables[0]
        self._y = self._variables[1]

    @classmethod
    def from_invariants(cls, invariants, x, z, *args, **kwargs):
        """Reconstruct a binary quintic with the given invariants, in x and z."""
        coeffs = reconstruction.binary_quintic_coefficients_from_invariants(invariants, *args, **kwargs)
        polynomial = sum([((coeffs[i] * (x ** i)) * (z ** (5 - i))) for i in range(6)])
        return cls(2, 5, polynomial, *args)

    @cached_method
    def monomials(self):
        """Return the degree-5 basis monomials (homogeneous or affine)."""
        x0 = self._x
        x1 = self._y
        if self._homogeneous:
            return ((x1 ** 5), ((x1 ** 4) * x0), ((x1 ** 3) * (x0 ** 2)), ((x1 ** 2) * (x0 ** 3)), (x1 * (x0 ** 4)), (x0 ** 5))
        else:
            return (self._ring.one(), x0, (x0 ** 2), (x0 ** 3), (x0 ** 4), (x0 ** 5))

    @cached_method
    def coeffs(self):
        """Coefficients of the quintic with respect to monomials()."""
        return self._extract_coefficients(self.monomials())

    def scaled_coeffs(self):
        """Coefficients divided by the binomial factors 1, 5, 10, 10, 5, 1."""
        coeff = self.coeffs()
        return (coeff[0], (coeff[1] / 5), (coeff[2] / 10), (coeff[3] / 10), (coeff[4] / 5), coeff[5])

    def _cov_result(self, cov, as_form):
        """Return the covariant as a form or as its polynomial (shared helper)."""
        if as_form:
            return cov
        else:
            return cov.polynomial()

    def _in_base_ring(self, value):
        """Coerce an invariant into the base ring when possible (shared helper)."""
        try:
            K = self._ring.base_ring()
            return K(value)
        except TypeError:
            return value

    @cached_method
    def H_covariant(self, as_form=False):
        """Hessian covariant H = (f, f)_2."""
        return self._cov_result(transvectant(self, self, 2), as_form)

    @cached_method
    def i_covariant(self, as_form=False):
        """Covariant i = (f, f)_4."""
        return self._cov_result(transvectant(self, self, 4), as_form)

    @cached_method
    def T_covariant(self, as_form=False):
        """Covariant T = (H, f)_1."""
        H = self.H_covariant(as_form=True)
        return self._cov_result(transvectant(H, self, 1), as_form)

    @cached_method
    def j_covariant(self, as_form=False):
        """Covariant j = (-i, f)_2."""
        x0 = self._x
        x1 = self._y
        i = self.i_covariant()
        minusi = AlgebraicForm(2, 2, (- i), x0, x1)
        return self._cov_result(transvectant(minusi, self, 2), as_form)

    @cached_method
    def tau_covariant(self, as_form=False):
        """Covariant tau = (j, j)_2."""
        j = self.j_covariant(as_form=True)
        return self._cov_result(transvectant(j, j, 2), as_form)

    @cached_method
    def theta_covariant(self, as_form=False):
        """Covariant theta = (i, tau)_1."""
        i = self.i_covariant(as_form=True)
        tau = self.tau_covariant(as_form=True)
        return self._cov_result(transvectant(i, tau, 1), as_form)

    @cached_method
    def alpha_covariant(self, as_form=False):
        """Covariant alpha = (i^2, f)_4."""
        i = self.i_covariant()
        x0 = self._x
        x1 = self._y
        i2 = AlgebraicForm(2, 4, (i ** 2), x0, x1)
        return self._cov_result(transvectant(i2, self, 4), as_form)

    @cached_method
    def beta_covariant(self, as_form=False):
        """Covariant beta = (i, alpha)_1."""
        i = self.i_covariant(as_form=True)
        alpha = self.alpha_covariant(as_form=True)
        return self._cov_result(transvectant(i, alpha, 1), as_form)

    @cached_method
    def gamma_covariant(self, as_form=False):
        """Covariant gamma = (tau, alpha)_1."""
        alpha = self.alpha_covariant(as_form=True)
        tau = self.tau_covariant(as_form=True)
        return self._cov_result(transvectant(tau, alpha, 1), as_form)

    @cached_method
    def delta_covariant(self, as_form=False):
        """Covariant delta = (theta, alpha)_1."""
        alpha = self.alpha_covariant(as_form=True)
        theta = self.theta_covariant(as_form=True)
        return self._cov_result(transvectant(theta, alpha, 1), as_form)

    @cached_method
    def A_invariant(self):
        """Clebsch invariant A = (i, i)_2, coerced into the base ring when possible."""
        i = self.i_covariant(as_form=True)
        A = transvectant(i, i, 2).polynomial()
        return self._in_base_ring(A)

    @cached_method
    def B_invariant(self):
        """Clebsch invariant B = (i, tau)_2, coerced into the base ring when possible."""
        i = self.i_covariant(as_form=True)
        tau = self.tau_covariant(as_form=True)
        B = transvectant(i, tau, 2).polynomial()
        return self._in_base_ring(B)

    @cached_method
    def C_invariant(self):
        """Clebsch invariant C = (tau, tau)_2, coerced into the base ring when possible."""
        tau = self.tau_covariant(as_form=True)
        C = transvectant(tau, tau, 2).polynomial()
        return self._in_base_ring(C)

    @cached_method
    def R_invariant(self):
        """Clebsch invariant R = (beta, gamma)_1, coerced into the base ring when possible."""
        beta = self.beta_covariant(as_form=True)
        gamma = self.gamma_covariant(as_form=True)
        R = transvectant(beta, gamma, 1).polynomial()
        return self._in_base_ring(R)

    @cached_method
    def invariants(self, type='clebsch'):
        """Return the invariants as a tuple; type is 'clebsch' or 'arithmetic'."""
        if (type == 'clebsch'):
            return self.clebsch_invariants(as_tuple=True)
        elif (type == 'arithmetic'):
            return self.arithmetic_invariants(as_tuple=True)
        else:
            raise ValueError('unknown type of invariants {} for a binary quintic'.format(type))

    @cached_method
    def clebsch_invariants(self, as_tuple=False):
        """Return the Clebsch invariants A, B, C, R as a dict (or tuple).

        Raises NotImplementedError over fields of characteristic 2, 3 or 5.
        """
        if (self._ring.characteristic() in [2, 3, 5]):
            raise NotImplementedError('no invariants implemented for fields of characteristic 2, 3 or 5')
        else:
            invariants = {}
            invariants['A'] = self.A_invariant()
            invariants['B'] = self.B_invariant()
            invariants['C'] = self.C_invariant()
            invariants['R'] = self.R_invariant()
        if as_tuple:
            return (invariants['A'], invariants['B'], invariants['C'], invariants['R'])
        else:
            return invariants

    @cached_method
    def arithmetic_invariants(self, as_tuple=False):
        """Return the arithmetic invariants I4, I8, I12, I18 as a dict (or tuple).

        BUG FIX: `invariants('arithmetic')` calls this with `as_tuple=True`,
        which previously raised TypeError because the parameter was missing;
        the default `as_tuple=False` keeps the historical dict return.
        """
        R = self._ring
        clebsch = self.clebsch_invariants()
        invariants = {}
        invariants['I4'] = (((R(2) ** (- 1)) * (5 ** 4)) * clebsch['A'])
        invariants['I8'] = ((5 ** 5) * ((((R(2) ** (- 1)) * 47) * (clebsch['A'] ** 2)) - ((2 ** 2) * clebsch['B'])))
        invariants['I12'] = ((5 ** 10) * ((((R(2) ** (- 1)) * 3) * (clebsch['A'] ** 3)) - (((2 ** 5) * (R(3) ** (- 1))) * clebsch['C'])))
        invariants['I18'] = ((((2 ** 8) * (R(3) ** (- 1))) * (5 ** 15)) * clebsch['R'])
        if as_tuple:
            return (invariants['I4'], invariants['I8'], invariants['I12'], invariants['I18'])
        return invariants

    @cached_method
    def canonical_form(self, reduce_gcd=False):
        """Reconstruct a canonical quintic with the same Clebsch invariants."""
        clebsch = self.clebsch_invariants(as_tuple=True)
        if reduce_gcd:
            return invariant_theory.binary_form_from_invariants(5, clebsch, variables=self.variables(), scaling='coprime')
        else:
            return invariant_theory.binary_form_from_invariants(5, clebsch, variables=self.variables(), scaling='normalized')
def init_live_plot(dpi: int=400, figsize: Optional[tuple[(int, int)]]=None, xlabel: Optional[str]=None, ylabel: Optional[str]=None, title: Optional[str]=None, **kwargs):
    """Set up a figure with a single animated line for live (notebook) plotting.

    Returns a dict with 'ax', 'line' (the animated Line2D) and 'display_id'
    (IPython display handle used for in-place updates).
    """
    color = kwargs.pop('color', '#0096FF')
    # NOTE(review): xlabel is normalized here but never applied to the axis in
    # this block — presumably consumed by the caller; confirm.
    xlabel = ('Step' if (xlabel is None) else xlabel)
    # BUG FIX: the figsize/dpi fallback used to run *after* plt.subplots() and
    # therefore had no effect; apply the defaults before creating the figure.
    if (figsize is None):
        dpi = 125
        figsize = (9, 3)
    (fig, ax) = plt.subplots(nrows=1, ncols=1, dpi=dpi, figsize=figsize, constrained_layout=True)
    assert isinstance(ax, plt.Axes)
    (line,) = ax.plot([0], [0], c=color, animated=True, **kwargs)
    if ((title is not None) and (len(title) > 0)):
        # a list title becomes a multi-line suptitle
        if isinstance(title, list):
            fig.suptitle('\n'.join(title))
        else:
            fig.suptitle(title)
    if (ylabel is not None):
        ax.set_ylabel(ylabel, color=color)
    ax.tick_params(axis='y', labelcolor=color)
    ax.autoscale(True, axis='y')
    display_id = display(fig, display_id=True)
    return {'ax': ax, 'line': line, 'display_id': display_id}
def return_dataset_laion_all(img_path, config):
    """Build a shuffling DataLoader over the capfilt LAION dataset at img_path."""
    preprocess = transforms.Compose([
        transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
        transforms.ToTensor(),
    ])
    dataset = capfilt_dataset(img_path, preprocess)
    print('%d sample in this dataset' % len(dataset))
    loader_params = config.data.params
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=loader_params.batch_size,
        num_workers=loader_params.num_workers,
        shuffle=True,
        drop_last=False,
        pin_memory=False,
    )
    return data_loader
class JitDistAutogradTest(RpcAgentTestFixture):
    """TorchScript interop tests for distributed autograd (dist_autograd).

    NOTE(review): the corrupted decorator residues `_init` and `.script` in
    the original (which made the class a SyntaxError) were restored to
    `@dist_init` and `@torch.jit.script`, matching the upstream PyTorch test
    suite — confirm against this file's imports.
    """

    @dist_init
    def test_get_gradients(self):
        """get_gradients must be scriptable and return grads for both leaves."""
        dst_rank = self.rank

        @torch.jit.script
        def dist_get_gradients(context_id: int) -> Dict[Tensor, Tensor]:
            return dist_autograd.get_gradients(context_id)

        # the scripted graph must actually contain the get_gradients op
        FileCheck().check('get_gradients').run(str(dist_get_gradients.graph))
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            t3 = torch.add(t1, t2)
            dist_autograd.backward(context_id, [t3.sum()])
            grads = dist_get_gradients(context_id)
            self.assertEqual(2, len(grads))
            self.assertIn(t1, grads)
            self.assertIn(t2, grads)
            # d(sum(t1+t2))/dt1 == d(sum(t1+t2))/dt2 == ones
            self.assertEqual(torch.ones(3, 3), grads[t1])
            self.assertEqual(torch.ones(3, 3), grads[t2])

    @dist_init
    def test_dist_backward(self):
        """dist_autograd.backward must be scriptable and runnable (rank 0 only)."""
        if (self.rank != 0):
            return

        @torch.jit.script
        def dist_backward_script(context_id: int, loss: torch.Tensor):
            dist_autograd.backward(context_id, [loss])

        FileCheck().check('dist_backward').run(str(dist_backward_script.graph))
        with dist_autograd.context() as context_id:
            t1 = torch.rand(3, 3, requires_grad=True)
            t2 = torch.rand(3, 3, requires_grad=True)
            dst_worker_name = worker_name(((self.rank + 1) % self.world_size))
            loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
            dist_backward_script(context_id, loss)

    @dist_init
    def test_jit_fork_within_context(self):
        """torch.jit.fork inside a dist_autograd context still yields gradients."""
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            dst_worker_name = worker_name(((self.rank + 1) % self.world_size))
            res = fork_add(t1, t2, dst_worker_name)
            loss = res.sum()
            dist_autograd.backward(context_id, [loss])
            grads = dist_autograd.get_gradients(context_id)
            self.assertEqual(2, len(grads))
            self.assertIn(t1, grads)
            self.assertIn(t2, grads)

    @dist_init
    def test_restore_context_after_swtich_to_jit_thread(self):
        """The dist_autograd context must survive JIT thread switches at rpc_async.wait()."""
        if (self.rank != 0):
            return

        @torch.jit.script
        def forward_script(context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor) -> Tuple[Tensor, Tensor]:
            res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
            res1 = res1_fut.wait()  # after this wait the script may resume on a new JIT thread
            loss1 = res1.sum()
            res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
            res2 = res2_fut.wait()
            loss2 = res2.sum()
            return (loss1, loss2)

        with dist_autograd.context() as context_id:
            t1 = torch.ones((2, 3), requires_grad=True)
            t2 = torch.ones((2, 3), requires_grad=True)
            dst_worker_name = worker_name(((self.rank + 1) % self.world_size))
            (loss0, loss1) = forward_script(context_id, dst_worker_name, t1, t2)
            dist_autograd.backward(context_id, [loss0, loss1])
            (grad0, grad1) = dist_autograd.get_gradients(context_id)
            self.assertEqual(grad0, grad1)
def exp_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward rule for exp: d/dx exp(x) = exp(x), so grad = dy * y0.

    The five-argument signature (including the unused shape/input arguments)
    follows the op-backward callback convention and is kept unchanged; the
    unused local binding of inputs[0] was removed.
    """
    dy = grad_inputs[0]
    y0 = outputs[0]  # forward output exp(x); the raw input is not needed
    return (dy * y0)
def test_docstrings(doc):
    # Verify pybind11-generated docstrings, __name__/__module__ metadata and
    # rendered signatures for UserType and m.NoConstructor.
    assert (doc(UserType) == 'A `py::class_` type for testing')
    assert (UserType.__name__ == 'UserType')
    assert (UserType.__module__ == 'pybind11_tests')
    assert (UserType.get_value.__name__ == 'get_value')
    assert (UserType.get_value.__module__ == 'pybind11_tests')
    # method docstring embeds the full "name(self: type) -> ret" signature line
    assert (doc(UserType.get_value) == '\n    get_value(self: m.UserType) -> int\n\n    Get value using a method\n    ')
    assert (doc(UserType.value) == 'Get/set value using a property')
    assert (doc(m.NoConstructor.new_instance) == '\n    new_instance() -> m.class_.NoConstructor\n\n    Return an instance\n    ')
def get_tokenizer(flags):
    """Return the tokenizer function selected by flags.tokenizer.

    Supported values (case-insensitive): 'bpe', 'char', 'word'.

    Raises:
        ValueError: for any other tokenizer name. (The original executed a
        bare `raise` with no active exception — crashing with RuntimeError —
        and ended with an unreachable `return tokenizer` referencing an
        undefined name; both are fixed here.)
    """
    name = flags.tokenizer.lower()  # hoisted: was recomputed in every branch
    if (name == 'bpe'):
        return nlc_data.bpe_tokenizer
    elif (name == 'char'):
        return nlc_data.char_tokenizer
    elif (name == 'word'):
        return nlc_data.basic_tokenizer
    raise ValueError('unknown tokenizer: {}'.format(flags.tokenizer))
def export_mol_highlight(mol, name, hatoms, hbonds, width=100, height=100, color=(0.925, 0.688, 0.355)):
    """Render *mol* as an SVG with the given atoms/bonds highlighted, then save it as a PDF."""
    from rdkit.Chem.Draw import rdMolDraw2D
    import cairosvg
    import io
    # same highlight colour for every selected atom and bond
    atom_colors = {idx: color for idx in hatoms}
    bond_colors = {idx: color for idx in hbonds}
    drawer = rdMolDraw2D.MolDraw2DSVG(width, height)
    rdMolDraw2D.PrepareAndDrawMolecule(
        drawer, mol,
        highlightAtoms=hatoms,
        highlightBonds=hbonds,
        highlightBondColors=bond_colors,
        highlightAtomColors=atom_colors,
    )
    drawer.FinishDrawing()
    # convert the generated SVG text straight to a PDF on disk
    cairosvg.svg2pdf(bytestring=drawer.GetDrawingText().encode(), write_to=str(name))
def test_fix_proj_example():
    """The non-projective NONPROJ_EXAMPLE tree is projectivized by check_words."""
    with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as workdir:
        xml_path = os.path.join(workdir, 'fix.xml')
        with open(xml_path, 'w', encoding='utf-8') as handle:
            handle.write(NONPROJ_EXAMPLE)
        parsed = convert_arboretum.read_xml_file(xml_path)
        assert len(parsed) == 1
        tree, words = convert_arboretum.process_tree(parsed[0])
        assert not convert_arboretum.word_sequence_missing_words(tree)
        expected_orig = '(s (fcl (advp (adv s9_1) (adv s9_2)) (vp (v-fin s9_3) (v-pcp2 s9_6)) (prop s9_4) (adv s9_5) (pp (prp s9_7) (np (num s9_8) (n s9_9))) (pu s9_10)))'
        expected_proj = '(s (fcl (advp (adv s9_1) (adv s9_2)) (vp (v-fin s9_3) (prop s9_4) (adv s9_5) (v-pcp2 s9_6)) (pp (prp s9_7) (np (num s9_8) (n s9_9))) (pu s9_10)))'
        assert str(tree) == expected_orig
        with tsurgeon.Tsurgeon() as tsurgeon_processor:
            assert str(convert_arboretum.check_words(tree, tsurgeon_processor)) == expected_proj
def simGetJointTargetPosition(jointHandle):
    """Query the simulator for the target position of the given joint handle."""
    # single-float output parameter for the C call
    out_buf = ffi.new('float *')
    lib.simGetJointTargetPosition(jointHandle, out_buf)
    return out_buf[0]
def _get_column_names(output_format: str, split: bool) -> List[str]:
if (not split):
return [name.strip() for name in output_format.split('\t')]
output_tokens = output_format.split()
headers = []
for output_part in output_tokens:
for attr in KEYWORDS:
if (attr in output_part):
headers.append(attr)
break
return headers |
def make_plots(statistics_file):
    """Render bar plots and a LaTeX table comparing SSIM/LPIPS statistics.

    Reads the JSON statistics file produced by a previous run, writes
    'screenVsWorld-SSIM.tex' and 'ScreenVsWorld-SSIM.<FILETYPE>' next to it,
    prints the table, and shows the matplotlib figure. Relies on the
    module-level STEPSIZES and CONFIG_FILES globals.
    """
    print('\n Make Plots')
    with open(statistics_file, 'r') as f:
        stats = json.load(f)
    output_folder = os.path.split(statistics_file)[0]
    FILETYPE = 'eps'
    latex = io.StringIO()  # LaTeX table is assembled in memory first
    LATEX_SHOW_STD = False  # include (+- std) columns in the table?
    numStepsizes = len(STEPSIZES)
    numTFs = len(CONFIG_FILES)
    numClasses = 5
    classNames = ['world', 'pos', 'dirP', 'dirS', 'dirF']

    def classTags(tf_idx):
        # JSON tag patterns per class; '%d' is filled with 1000*stepsize below
        return ['world_%d', f'run_screen_{tf_idx}_%d_plain', f'run_screen_{tf_idx}_%d_dirD', f'run_screen_{tf_idx}_%d_dirS', f'run_screen_{tf_idx}_%d_dirF']
    statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
    statTags = ['ssim', 'lpips']
    # --- table header: one multi-column group per stepsize ---
    latex.write(('\\begin{tabular}{{}rr%{}}\n' % ('c' * (len(statNames) * numStepsizes))))
    latex.write('\\toprule\n')
    latex.write('&')
    for (j, s) in enumerate(STEPSIZES):
        latex.write((' & \\multicolumn{%d}{c}{Stepsize %s}' % (len(statNames), ('%.0e' % s))))
    latex.write('\\\\\n')
    for j in range(len(STEPSIZES)):
        latex.write(('\\cmidrule(r){%d-%d}' % ((3 + (len(statNames) * j)), (2 + (len(statNames) * (j + 1))))))
    latex.write('\n TF & Input ')
    for j in range(len(STEPSIZES)):
        for s in statNames:
            latex.write((' & %s' % s))
    latex.write('\\\\\n')
    # --- figure: one subplot row (SSIM, LPIPS) per transfer function ---
    (fig, axs) = plt.subplots(numTFs, 2, squeeze=False, sharex=True, figsize=(6.4, (1 + (2 * numTFs))))
    x_offset = np.linspace((- 0.3), (+ 0.3), numClasses, True)  # bar offsets within a stepsize group
    width = (x_offset[1] - x_offset[0])
    handles = []
    handle_names = []
    for row in range(numTFs):
        local_stat = stats[row]
        axs[(row, 0)].set_ylabel(('TF %d' % (row + 1)))
        for (k, (ax, stat, label)) in enumerate(zip(axs[row], statTags, statNames)):
            for (i, (cls, tag)) in enumerate(zip(classNames, classTags(row))):
                X = []
                Y = []
                err = []
                for (j, s) in enumerate(STEPSIZES):
                    X.append((j + x_offset[i]))
                    (y, e) = local_stat[(tag % (1000 * s))][stat]
                    Y.append(y)
                    err.append(e)
                h = ax.bar(X, Y, width=width, yerr=err)
                if (stat == 'ssim'):
                    # collect legend handles only once (from the SSIM column)
                    handles.append(h)
                    handle_names.append(cls)
            Xlabel = [('%.0e' % s) for s in STEPSIZES]
            ax.set_title(label)
            ax.set_xticks(np.arange(numStepsizes))
            ax.set_xticklabels(Xlabel)
            ax.set_xlabel('Stepsize')
        # --- table body for this transfer function ---
        latex.write('\\cmidrule(r){1-2}')
        for j in range(len(STEPSIZES)):
            latex.write(('\\cmidrule(r){%d-%d}' % ((3 + (len(statNames) * j)), (2 + (len(statNames) * (j + 1))))))
        latex.write(('\\multirow{%d}{*}{TF %d}' % (numClasses, (row + 1))))
        tags = classTags(row)
        # best value per stepsize (max SSIM, min LPIPS) drives bold highlighting
        best_ssim = dict()
        best_lpips = dict()
        for (j, s) in enumerate(STEPSIZES):
            best_ssim[j] = max([local_stat[(tags[i] % (1000 * s))]['ssim'][0] for i in range(len(classNames))])
            best_lpips[j] = min([local_stat[(tags[i] % (1000 * s))]['lpips'][0] for i in range(len(classNames))])
        best_stats = {'ssim': best_ssim, 'lpips': best_lpips}
        for i in range(len(classNames)):
            tag = tags[i]
            latex.write((' & %s' % classNames[i]))
            for (j, s) in enumerate(STEPSIZES):
                for (k, stat) in enumerate(statTags):
                    (y, e) = local_stat[(tag % (1000 * s))][stat]
                    is_best = (y == best_stats[stat][j])
                    if is_best:
                        if LATEX_SHOW_STD:
                            latex.write((' & $\\bm{%.2f}$ ($\\pm %.2f$)' % (y, e)))
                        else:
                            latex.write((' & $\\bm{%.4f}$' % y))
                    elif LATEX_SHOW_STD:
                        latex.write((' & $%.2f$ ($\\pm %.2f$)' % (y, e)))
                    else:
                        latex.write((' & $%.4f$' % y))
            latex.write(' \\\\\n')
    lgd = fig.legend(handles, handle_names, bbox_to_anchor=(0.65, 0.7), loc='lower center', borderaxespad=0.0)
    fig.savefig(os.path.join(output_folder, ('ScreenVsWorld-SSIM.%s' % FILETYPE)), bbox_inches='tight', bbox_extra_artists=(lgd,))
    latex.write('\\bottomrule\n')
    latex.write('\\end{tabular}\n')
    latex = latex.getvalue()
    with open(os.path.join(output_folder, 'screenVsWorld-SSIM.tex'), 'w') as f:
        f.write(latex)
    print(latex)
    print('Done')
    plt.show()
def main(parsed_args, **unused_kwargs):
    """Evaluate language-model perplexity over a dataset (fairseq eval_lm style).

    Loads the checkpoint ensemble from --path, scores every sample of the
    chosen subset, optionally logs per-word probabilities/statistics, and
    reports the aggregate loss (base 2) and perplexity.
    """
    assert (parsed_args.path is not None), '--path required for evaluation!'
    if (torch.cuda.is_available() and (not parsed_args.cpu)):
        torch.cuda.set_device(parsed_args.device_id)
    utils.import_user_module(parsed_args)
    logger.info(parsed_args)
    use_cuda = (torch.cuda.is_available() and (not parsed_args.cpu))
    task = tasks.setup_task(parsed_args)
    logger.info('loading model(s) from {}'.format(parsed_args.path))
    # NOTE(review): eval() on --model-overrides executes arbitrary code from
    # the command line; acceptable for a trusted CLI, never for untrusted input.
    (models, args) = checkpoint_utils.load_model_ensemble(parsed_args.path.split(os.pathsep), arg_overrides=eval(parsed_args.model_overrides), task=task, suffix=getattr(parsed_args, 'checkpoint_suffix', ''))
    # Copy CLI args onto the checkpoint args, except settings that must come
    # from the trained model itself.
    for arg in vars(parsed_args).keys():
        if (arg not in {'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token'}):
            setattr(args, arg, getattr(parsed_args, arg))
    # reduce tokens per sample by the amount of context window used
    args.tokens_per_sample -= args.context_window
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    if (args.context_window > 0):
        dataset = LMContextWindowDataset(dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad())
    logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
    # Prepare the ensemble for inference (precision / device placement).
    for model in models:
        model.prepare_for_inference_(args)
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    assert (len(models) > 0)
    logger.info('num. model params: {}'.format(sum((p.numel() for p in models[0].parameters()))))
    itr = task.get_batch_iterator(dataset=dataset, max_tokens=(args.max_tokens or 36000), max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(itr, log_format=args.log_format, log_interval=args.log_interval, default_log_format=('tqdm' if (not args.no_progress_bar) else 'none'))
    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
    score_sum = 0.0
    count = 0
    # Determine which dictionary tokens are BPE continuations so that whole-word
    # scores can be accumulated across sub-word pieces.
    if (args.remove_bpe is not None):
        if (args.remove_bpe == 'sentencepiece'):
            raise NotImplementedError
        else:
            bpe_cont = args.remove_bpe.rstrip()
            bpe_toks = {i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont)}
            bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0
    word_stats = dict()
    wps_meter = TimeMeter()
    for sample in progress:
        if ('net_input' not in sample):
            continue
        sample = (utils.move_to_cuda(sample) if use_cuda else sample)
        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample['ntokens'])
        for (i, hypos_i) in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample['id'][i]
            tokens = hypo['tokens']
            tgt_len = tokens.numel()
            pos_scores = hypo['positional_scores'].float()
            if getattr(args, 'add_bos_token', False):
                # skip the leading BOS token when scoring
                assert (hypo['tokens'][0].item() == task.target_dictionary.bos())
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]
            skipped_toks = 0
            if (bpe_toks is not None):
                # Fold each BPE-continuation score into the following piece.
                # NOTE(review): this inner loop reuses `i`, shadowing the
                # enumerate index above (harmless since sample_id was already
                # taken, but fragile).
                for i in range((tgt_len - 1)):
                    if (tokens[i].item() in bpe_toks):
                        skipped_toks += 1
                        pos_scores[(i + 1)] += pos_scores[i]
                        pos_scores[i] = 0
            inf_scores = (pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf')))
            if inf_scores.any():
                # NOTE(review): print-style logging call — `logging` expects a
                # single %-format string, so the second argument is not rendered.
                logger.info('skipping tokens with inf scores:', task.target_dictionary.string(tokens[inf_scores.nonzero()]))
                pos_scores = pos_scores[(~ inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += (pos_scores.numel() - skipped_toks)
            if (args.output_word_probs or args.output_word_stats):
                # Reassemble whole words from BPE pieces and record per-word scores.
                w = ''
                word_prob = []
                is_bpe = False
                for i in range(len(tokens)):
                    w_ind = tokens[i].item()
                    w += task.source_dictionary[w_ind]
                    if ((bpe_toks is not None) and (w_ind in bpe_toks)):
                        w = w[:(- bpe_len)]  # strip the continuation marker
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[i].item()))
                        # find the score of the next non-folded token, if any
                        next_prob = None
                        ind = (i + 1)
                        while (ind < len(tokens)):
                            if (pos_scores[ind].item() != 0):
                                next_prob = pos_scores[ind]
                                break
                            ind += 1
                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
                        is_bpe = False
                        w = ''
                if args.output_word_probs:
                    logger.info(((('S-' + str(int(sample_id))) + '\t') + ' '.join(('{}'.format(x[0]) for x in word_prob))))
                    logger.info(((('P-' + str(int(sample_id))) + '\t') + ' '.join(('{:.2f}'.format(x[1]) for x in word_prob))))
        wps_meter.update(sample['ntokens'])
        progress.log({'wps': round(wps_meter.avg)})
    # convert the summed log-probs (nats) into a base-2 per-token loss
    avg_nll_loss = (((- score_sum) / count) / math.log(2))
    logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, (1.0 / gen_timer.avg)))
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, (2 ** avg_nll_loss)))
    if args.output_word_stats:
        for ws in sorted(word_stats.values(), key=(lambda x: x.count), reverse=True):
            logger.info(ws)
class AssertionGenerator(str, enum.Enum):
    """Strategy for generating assertions.

    str-valued enum so members compare equal to their plain string names
    (convenient for CLI arguments / config files).
    """
    MUTATION_ANALYSIS = 'MUTATION_ANALYSIS'
    CHECKED_MINIMIZING = 'CHECKED_MINIMIZING'
    SIMPLE = 'SIMPLE'
    NONE = 'NONE'
def collect_params(model):
    """Gather the affine parameters (weight/bias) of every BatchNorm2d in ``model``.

    Returns:
        Tuple ``(params, names)`` where ``names`` are dotted
        ``'<module>.<param>'`` strings matching ``model.named_modules()``.
    """
    params = []
    names = []
    for module_name, module in model.named_modules():
        if not isinstance(module, nn.BatchNorm2d):
            continue
        # Only the learnable affine parameters of the norm layer.
        for param_name, param in module.named_parameters():
            if param_name in ['weight', 'bias']:
                params.append(param)
                names.append(f'{module_name}.{param_name}')
    return (params, names)
def _dml_disambiguate_direction_dependent_views(sdfg: dace.SDFG):
    """Make the access direction of View access nodes explicit.

    For every View access node with exactly one producer edge and one
    consumer edge, where both neighbors are AccessNodes and both memlets
    refer to the outer arrays, retarget the incoming memlet to the view
    itself (swapping subset/other_subset) so the view's direction is no
    longer ambiguous.
    """
    for (n, state) in sdfg.all_nodes_recursive():
        # Exact type check: subclasses of View are intentionally excluded.
        if (isinstance(n, nd.AccessNode) and (type(n.desc(sdfg)) is dt.View)):
            in_edges = state.in_edges(n)
            out_edges = state.out_edges(n)
            # Only the unambiguous single-producer/single-consumer pattern.
            if ((len(in_edges) == 1) and (len(out_edges) == 1)):
                A = in_edges[0].src
                y = in_edges[0].data
                C = out_edges[0].dst
                x = out_edges[0].data
                # Pattern A --y--> view --x--> C with memlets naming the
                # outer arrays: rewrite y to reference the view instead.
                if (isinstance(A, nd.AccessNode) and isinstance(C, nd.AccessNode) and (y.data == A.data) and (x.data == C.data)):
                    (y.subset, y.other_subset) = (y.other_subset, y.subset)
                    y.data = n.data
                    y.try_initialize(sdfg, state, in_edges[0])
def find_next_word(index, text, word, output):
    """Advance through ``text`` from ``index`` until ``word`` is consumed.

    Walks the raw text character by character, matching it against the
    tokenized ``word`` while tolerating interleaved whitespace.  Two
    consecutive newlines are treated as a paragraph break and written to
    ``output``.

    Args:
        index: Position in ``text`` to start scanning from.
        text: Raw source text.
        word: The next token expected to occur in ``text``.
        output: File-like object receiving paragraph breaks ('\\n\\n').

    Returns:
        Tuple ``(index, word_sofar)``: the position just past the match and
        the raw characters consumed (including any leading whitespace).

    Raises:
        AssertionError: If the raw text and the token stream disagree.
    """
    # Note: the unused local ``yeah`` from the original was removed.
    idx = 0
    word_sofar = ''
    while ((index < len(text)) and (idx < len(word))):
        if ((text[index] == '\n') and ((index + 1) < len(text)) and (text[(index + 1)] == '\n')):
            # Paragraph boundary: anything accumulated so far must be pure
            # whitespace; emit the break and skip both newlines (the second
            # increment happens at the bottom of the loop).
            if (len(word_sofar) > 0):
                assert re.match('^\\s+$', word_sofar), "Found non-empty string at the end of a paragraph that doesn't match any token: |{}|".format(word_sofar)
                word_sofar = ''
            output.write('\n\n')
            index += 1
        elif (re.match('^\\s$', text[index]) and (not re.match('^\\s$', word[idx]))):
            # Whitespace in the raw text that is not part of the token:
            # swallow it without advancing the token position.
            word_sofar += text[index]
        else:
            word_sofar += text[index]
            # Newlines inside a token are normalized to spaces for comparison.
            assert (text[index].replace('\n', ' ') == word[idx]), ('character mismatch: raw text contains |%s| but the next word is |%s|.' % (word_sofar, word))
            idx += 1
        index += 1
    return (index, word_sofar)
def count_model_size(model):
    """Return the total number of parameters of ``model``, in millions.

    Uses the builtin ``sum`` over a generator: ``np.sum(generator)`` is
    deprecated in NumPy and merely falls back to the builtin ``sum`` with a
    DeprecationWarning, so this is behaviorally identical and warning-free.
    """
    return sum(np.prod(v.size()) for (name, v) in model.named_parameters()) / 1000000.0
# NOTE(review): the line below looks like the remains of a stripped
# decorator (e.g. ``@unittest.skipIf(not have_sympy, 'SymPy not installed')``);
# as written it is a bare tuple expression — confirm against the original.
((not have_sympy), 'SymPy not installed')
def test_conv2():
    """Products of symbols must convert to SymPy via ``_sympy_``."""
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    e = (x * y)
    assert (e._sympy_() == (sympy.Symbol('x') * sympy.Symbol('y')))
    # Nested product: conversion must preserve the full expression tree.
    e = ((x * y) * z)
    assert (e._sympy_() == ((sympy.Symbol('x') * sympy.Symbol('y')) * sympy.Symbol('z')))
class InttoptrInst(ConversionInst):
    """LLVM ``inttoptr`` conversion: reinterpret an integer as a pointer."""
    code = 'inttoptr'

    def type_constraints(self, tcs):
        """Register typing constraints with ``tcs``: the argument is an
        integer of the declared source type, the result is a pointer of the
        declared destination type."""
        tcs.integer(self.arg)
        tcs.pointer(self)
        tcs.specific(self.arg, self.src_ty)
        tcs.specific(self, self.ty)
def load_question_cache():
    """Return the cached question dict loaded from ``CACHE_FILE``.

    Returns an empty dict when the cache file does not exist yet.
    """
    if not os.path.exists(CACHE_FILE):
        return {}
    with open(CACHE_FILE, 'r') as f:
        return json.load(f)
def get_cfg():
    """Build the experiment configuration from defaults + CLI arguments.

    Clones the default config ``_C``, overlays an optional YAML file and
    free-form ``KEY VALUE`` overrides, then resolves dataset/output/log/
    checkpoint paths and the GPU count.

    Returns:
        The validated config, as produced by ``_assert_and_infer_cfg``.
    """
    config = _C.clone()
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--dist_init_method', type=str, default=None)
    parser.add_argument('--dataset_root', type=str, default=None)
    parser.add_argument('--output_root', type=str, default=None)
    parser.add_argument('--configuration', type=str, default=None)
    parser.add_argument('--cfg_file', type=str, default=None)
    parser.add_argument('--pretrain_checkpoint_path', type=str, default=None)
    parser.add_argument('--train_checkpoint_path', type=str, default=None)
    parser.add_argument('--test_checkpoint_path', type=str, default=None)
    # Remaining positional args are "KEY VALUE" pairs merged into the config.
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # Precedence: YAML file first, then explicit KEY VALUE overrides.
    if (args.cfg_file is not None):
        config.merge_from_file(args.cfg_file)
    if (args.opts is not None):
        config.merge_from_list(args.opts)
    if (args.dist_init_method is not None):
        config.DIST_INIT_METHOD = args.dist_init_method
    # Dataset root: CLI flag > config value > module-level default.
    if (args.dataset_root is not None):
        config.DATASET_ROOT = args.dataset_root
    elif (not config.DATASET_ROOT):
        config.DATASET_ROOT = dataset_root
    # Classification tasks resolve the concrete dataset directory.
    if (config.MODEL.TASK in ['VisualClassify', 'AudioClassify', 'MultimodalClassify']):
        if (config.TRAIN.DATASET == 'UCF101'):
            config.DATASET_DIR = os.path.join(config.DATASET_ROOT, 'ucf101')
        elif (config.TRAIN.DATASET == 'ESC50'):
            config.DATASET_DIR = os.path.join(config.DATASET_ROOT, 'esc50')
        elif (config.TRAIN.DATASET == 'KineticsSounds'):
            config.DATASET_DIR = os.path.join(config.DATASET_ROOT, 'kinetics-sounds')
    # Output root: CLI flag > config value > module-level default.
    if (args.output_root is not None):
        config.OUTPUT_ROOT = args.output_root
    elif (not config.OUTPUT_ROOT):
        config.OUTPUT_ROOT = output_root
    # Run name: explicit --configuration or a timestamp.
    if (args.configuration is not None):
        configuration = args.configuration
    else:
        configuration = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    config.SAVE_DIR = os.path.join(config.OUTPUT_ROOT, configuration, 'checkpoints')
    # Only create output directories for training runs, not test runs.
    if (not args.test):
        Path(config.SAVE_DIR).mkdir(parents=True, exist_ok=True)
    config.LOG_DIR = os.path.join(config.OUTPUT_ROOT, configuration, 'logs')
    if (not args.test):
        Path(config.LOG_DIR).mkdir(parents=True, exist_ok=True)
    # NUM_GPUS == -1 means "use all visible GPUs".
    if (config.NUM_GPUS == (- 1)):
        config.NUM_GPUS = torch.cuda.device_count()
    if (args.pretrain_checkpoint_path is not None):
        config.PRETRAIN.CHECKPOINT_FILE_PATH = args.pretrain_checkpoint_path
    if (args.train_checkpoint_path is not None):
        config.TRAIN.CHECKPOINT_FILE_PATH = args.train_checkpoint_path
    if (args.test_checkpoint_path is not None):
        config.TEST.CHECKPOINT_FILE_PATH = args.test_checkpoint_path
    return _assert_and_infer_cfg(config)
def settings(*args, **kwargs):
    """Build ``hypothesis.settings`` while smoothing over keyword arguments
    renamed or removed across hypothesis versions:

    - ``min_satisfying_examples`` was removed as of 3.56.
    - ``deadline`` does not exist before 4.44.
    - ``timeout`` (seconds) became ``deadline`` (milliseconds) in 4.44.
    """
    version = hypothesis.version.__version_info__
    if ('min_satisfying_examples' in kwargs) and (version >= (3, 56, 0)):
        del kwargs['min_satisfying_examples']
    if ('deadline' in kwargs) and (version < (4, 44, 0)):
        del kwargs['deadline']
    if ('timeout' in kwargs) and (version >= (4, 44, 0)):
        # Translate seconds -> milliseconds unless deadline is already set.
        kwargs.setdefault('deadline', kwargs['timeout'] * 1000.0)
        del kwargs['timeout']
    return hypothesis.settings(*args, **kwargs)
# NOTE(review): stripped decorator remnant — presumably ``@attr.s(eq=False)``
# or ``@dataclass(eq=False)``; as written this is a syntax fragment. Confirm
# against the original source. The abstract methods below also read like
# stripped ``@property`` accessors.
(eq=False)
class Parameter():
    """Abstract description of a single API operation parameter.

    Subclasses wrap a raw ``definition`` and expose its location, name,
    requiredness, example value, and serialized form.
    """
    # Raw parameter definition as parsed from the API schema.
    definition: Any

    def location(self) -> str:
        """Where the parameter lives (e.g. query, path, header, body)."""
        raise NotImplementedError

    def name(self) -> str:
        """The parameter's name."""
        raise NotImplementedError

    def is_required(self) -> bool:
        """Whether the parameter must be supplied."""
        raise NotImplementedError

    def example(self) -> Any:
        """An example value for the parameter, if the schema provides one."""
        raise NotImplementedError

    def serialize(self, operation: APIOperation) -> str:
        """Serialize the parameter in the context of ``operation``."""
        raise NotImplementedError
def _att_dropout_broadcast_default() -> bool:
    """Default for whether attention dropout broadcasts over dims.

    Resolution order: config option ``rf_att_dropout_broadcast``, then the
    generic ``rf_dropout_broadcast``, then the behavior version — versions
    <= 18 keep the old broadcasting behavior (True) with a one-time warning.
    """
    from returnn.config import get_global_config
    from returnn.util.basic import BehaviorVersion
    config = get_global_config(raise_exception=False)
    if config:
        opt = config.bool('rf_att_dropout_broadcast', None)
        if (opt is not None):
            return opt
        opts = config.bool('rf_dropout_broadcast', None)
        if (opts is not None):
            return opts
    if (BehaviorVersion.get() <= 18):
        # Old behavior: broadcast, but warn once per process via this
        # module-level flag.
        global _att_dropout_broadcast_shown_warning
        if (not _att_dropout_broadcast_shown_warning):
            _att_dropout_broadcast_shown_warning = True
            logging.getLogger('returnn.frontend').warning("Attention dropout uses broadcasting. This is old behavior and likely not what you want. Set config option 'rf_att_dropout_broadcast' to False to disable this, or switch to a new behavior version >= 19. (This warning is only printed once.)")
        return True
    return False
def add_argument(group):
    """Register dataset-related CLI options on ``group``.

    Options are organized into four sub-groups: general (paths and dataset
    selection), property (scale/degradation settings), preprocessing
    (patching/augmentation), and mixed (extra dataset toggles).
    """
    with subgroup.SubGroup(group, 'general') as s:
        s.add('--dpath', type=str, default=path.join('..', 'dataset'))
        s.add('--dpath_test', type=str)
        s.add('--dtrain', nargs='+', type=str, default=['sr.div2k.base'])
        s.add('--dtest', nargs='+', type=str, default=['sr.div2k.base'])
        s.add('--train_range', type=str, default='0-100')
        s.add('--val_range', type=str, default='1-10')
        s.add('--raw', action='store_true')
        s.add('--force_ram', action='store_true')
        s.add('--sampler', type=str, default='fixed')
        s.add('--use_patch', action='store_true')
        s.add('--data_path_train', type=str)
        s.add('--data_path_test', type=str)
        s.add('--bin_path', type=str)
    with subgroup.SubGroup(group, 'property') as s:
        s.add('-s', '--scale', type=float, default=4)
        s.add('--degradation', type=str, default='bicubic')
        s.add('--degradation_test', type=str)
        s.add('--camera', type=str, default='Canon')
        s.add('--noise', type=str)
        s.add('--n_colors', type=int, default=3)
    with subgroup.SubGroup(group, 'preprocessing') as s:
        s.add('-p', '--patch', type=int, default=48)
        s.add('--augmentation', type=str, default='hvr')
        s.add('--compression', nargs='+', type=str)
    with subgroup.SubGroup(group, 'mixed') as s:
        s.add('--use_div2k', action='store_true')
        s.add('--use_ost', action='store_true')
        s.add('--use_imagenet', action='store_true')
        s.add('--use_flickr', action='store_true')
        s.add('--no_mask', action='store_true')
# NOTE(review): stripped decorator remnant — presumably
# ``@pytest.mark.parametrize('num_inducing_points', [-1, 0])``.
.parametrize('num_inducing_points', [(- 1), 0])
def test_build_sgpr_raises_for_invalid_num_inducing_points(num_inducing_points: int) -> None:
    """build_sgpr must reject non-positive inducing-point counts."""
    (qp, obs) = mock_data()
    data = mk_dataset(qp, obs)
    # Unit box search space matching the query-point dimensionality.
    search_space = (Box([0.0], [1.0]) ** qp.shape[(- 1)])
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        build_sgpr(data, search_space, num_inducing_points=num_inducing_points)
def spin_rec(t, nexts, current, part, weight, length):
    """Recursion step for a spin-polynomial computation in ``t``.

    NOTE(review): semantics inferred from structure only — each entry of
    ``current`` carries a permutation in ``val[1]``; the exponents collected
    in ``tmp`` combine a conjugate-partition statistic with the
    permutation's inversion number. Confirm against the caller's docs.
    """
    if (not current):
        # Base case: nothing to expand, contribute the zero element.
        return [parent(t).zero()]
    tmp = []
    # Conjugate of the first component of the partition tuple.
    partp = part[0].conjugate()
    ell = len(partp)
    for val in current:
        perms = val[1]
        perm = [(((partp[i] + ell) - (i + 1)) - perms[i]) for i in reversed(range(ell))]
        perm = to_standard(perm)
        tmp.append(((weight[(- 1)] * (length - 1)) - perm.number_of_inversions()))
    if nexts:
        # Fold previously computed branch values into the new exponents.
        return [sum((sum((((t ** tval) * nval) for nval in nexts[i])) for (i, tval) in enumerate(tmp)))]
    return [sum(((t ** val) for val in tmp))]
class MetricTestCase(unittest.TestCase):
    """Smoke tests for the registered image/face similarity metrics."""

    # NOTE(review): ``setUpClass`` is normally a @classmethod — the
    # decorator appears to have been stripped from this source; confirm.
    def setUpClass(cls) -> None:
        cls.paired_metric_dict = register_metrics(types=('ssim', 'psnr', 'lps'), device=DEVICE)
        cls.unpaired_metric_dict = register_metrics(types=('is', 'fid', 'SSPE', 'OS-CS-reid', 'OS-freid'), device=DEVICE)
        cls.face_metric_dict = register_metrics(types=('face-CS',), device=DEVICE)

    def test_03_face_metric_all_have_face(self):
        """face-CS should yield a score when every image contains a face
        (predictions and references use the same image here)."""
        pred_img_list = ['./data/pred_.jpg', './data/pred_.jpg', './data/pred_.jpg', './data/pred_.jpg']
        ref_img_list = ['./data/pred_.jpg', './data/pred_.jpg', './data/pred_.jpg', './data/pred_.jpg']
        pred_imgs = []
        for img_path in pred_img_list:
            img = load_image(img_path)
            pred_imgs.append(img)
        pred_imgs = torch.stack(pred_imgs)
        ref_imgs = []
        for img_path in ref_img_list:
            img = load_image(img_path)
            ref_imgs.append(img)
        ref_imgs = torch.stack(ref_imgs)
        face_cs = self.face_metric_dict['face-CS'].calculate_score(pred_imgs, ref_imgs)
        print('face-cs', face_cs)
class sage__rings__real_double(PythonModule):
    """Feature describing the standard ``sage.rings.real_double`` module."""

    def __init__(self):
        PythonModule.__init__(self, 'sage.rings.real_double', type='standard')
class Sampler_uni(torch.utils.data.sampler.Sampler):
    """Sampler that interleaves three sub-dataset index ranges (sizes
    ``num1``/``num2``/``num3``) into batches, optionally rebalancing the
    other two against the one selected by ``balance_id``."""

    def __init__(self, num1, num2, num3, batchsize, balance_id=None):
        # balance_id: index of the sub-dataset to balance against;
        # None means plain interleaved shuffling.
        self.num1 = num1
        self.num2 = num2
        self.num3 = num3
        self.batchsize = batchsize
        self.balance_id = balance_id

    def __iter__(self):
        """Yield a freshly shuffled index order each epoch."""
        if (self.balance_id is not None):
            rlist = shuffle_cus_balance(self.num1, self.num2, self.num3, self.batchsize, balance_index=self.balance_id)
        else:
            rlist = shuffle_cus(self.num1, self.num2, self.num3, self.batchsize)
        return iter(rlist)

    def __len__(self):
        # Balanced mode samples each sub-dataset to num1's size.
        if (self.balance_id is not None):
            return (self.num1 * 3)
        return ((self.num1 + self.num2) + self.num3)
class VNPRModel(keras.Model):
    """Visual Neural Personalized Ranking (VNPR) recommender.

    Pairwise ranking model: each item has two MF embeddings feeding two MLP
    towers that also consume user x visual-feature interactions; training
    maximizes the score margin between a positive and a negative item
    (BPR-style softplus loss) plus L2 regularization.
    """

    def __init__(self, num_users, num_items, embed_mf_size, l_w, l_v, mlp_hidden_size, dropout, learning_rate=0.01, num_image_feature=128, random_seed=42, name='VNPR', **kwargs):
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(random_seed)
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.num_image_feature = num_image_feature
        # L2 regularization weights: l_w for MF embeddings, l_v for the
        # visual embedding and MLP weights.
        self.l_w = l_w
        self.l_v = l_v
        self.mlp_hidden_size = mlp_hidden_size
        self.dropout = dropout
        self.initializer = tf.initializers.GlorotUniform()
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_MF', dtype=tf.float32)
        self.item_mf_embedding_1 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='I_MF_1', dtype=tf.float32)
        self.item_mf_embedding_2 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='I_MF_2', dtype=tf.float32)
        self.user_v_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.num_image_feature, embeddings_initializer=self.initializer, name='U_V', dtype=tf.float32)
        # Two independent MLP towers, one per item embedding.
        self.mlp_layers_1 = keras.Sequential()
        for units in mlp_hidden_size:
            self.mlp_layers_1.add(keras.layers.Dropout(dropout))
            self.mlp_layers_1.add(keras.layers.Dense(units, activation='relu'))
        self.mlp_layers_2 = keras.Sequential()
        for units in mlp_hidden_size:
            self.mlp_layers_2.add(keras.layers.Dropout(dropout))
            self.mlp_layers_2.add(keras.layers.Dense(units, activation='relu'))
        self.optimizer = tf.optimizers.Adam(learning_rate)

    def call(self, inputs, training=None, mask=None):
        """Score a (user, item1, features1, item2, features2) tuple; returns
        both tower outputs plus the embeddings needed for regularization."""
        (user, item1, feature_e_1, item2, feature_e_2) = inputs
        user_mf_e = self.user_mf_embedding(user)
        user_v_e = self.user_v_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item1)
        item_mf_e_2 = self.item_mf_embedding_2(item2)
        embedding_input_1 = tf.concat([(user_mf_e * item_mf_e_1), (user_v_e * feature_e_1)], axis=1)
        mlp_output_1 = self.mlp_layers_1(embedding_input_1, training)
        embedding_input_2 = tf.concat([(user_mf_e * item_mf_e_2), (user_v_e * feature_e_2)], axis=1)
        mlp_output_2 = self.mlp_layers_2(embedding_input_2, training)
        return (tf.squeeze(mlp_output_1), tf.squeeze(mlp_output_2), user_mf_e, user_v_e, item_mf_e_1, item_mf_e_2)

    def train_step(self, batch):
        """One BPR-style update on a (user, pos, feat_pos, neg, feat_neg) batch."""
        with tf.GradientTape() as tape:
            (user, pos, feat_pos, neg, feat_neg) = batch
            (mlp_output_1, mlp_output_2, user_mf_e, user_v_e, item_mf_e_1, item_mf_e_2) = self.call(inputs=(user, pos, feat_pos, neg, feat_neg), training=True)
            # Clip the (pos - neg) margin for numerical stability of softplus.
            difference = tf.clip_by_value((mlp_output_1 - mlp_output_2), (- 80.0), .0)
            loss = tf.reduce_sum(tf.nn.softplus((- difference)))
            reg_loss = ((self.l_w * tf.reduce_sum([tf.nn.l2_loss(user_mf_e), tf.nn.l2_loss(item_mf_e_1), tf.nn.l2_loss(item_mf_e_2)])) + (self.l_v * tf.reduce_sum([tf.nn.l2_loss(user_v_e), *[tf.nn.l2_loss(w1) for w1 in self.mlp_layers_1.trainable_variables], *[tf.nn.l2_loss(w2) for w2 in self.mlp_layers_2.trainable_variables]])))
            loss += reg_loss
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return loss

    def predict(self, inputs, training=False, **kwargs):
        # NOTE(review): ``call`` returns 6 values but only 5 are unpacked
        # here, and it is invoked with a 3-tuple while ``call`` unpacks 5 —
        # this method raises as written; confirm intended behavior upstream.
        (u, i) = inputs
        (output_1, output_2, _, _, _) = self.call(inputs=(u, i, i), training=training)
        return ((output_1 + output_2) * 0.5)

    def get_recs(self, inputs, training=False, **kwargs):
        """Score all given (user, item) pairs by averaging both towers.

        NOTE(review): ``self.F`` (the item visual-feature matrix) is not
        assigned anywhere in this visible code — presumably set externally;
        confirm before use.
        """
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item)
        item_mf_e_2 = self.item_mf_embedding_2(item)
        feature_e = tf.nn.embedding_lookup(self.F, item)
        mf_output_1 = tf.concat([(user_mf_e * item_mf_e_1), feature_e], axis=2)
        mf_output_2 = tf.concat([(user_mf_e * item_mf_e_2), feature_e], axis=2)
        mlp_output_1 = self.mlp_layers_1(mf_output_1)
        mlp_output_2 = self.mlp_layers_2(mf_output_2)
        return tf.squeeze(((mlp_output_1 + mlp_output_2) / 2))

    def predict_item_batch(self, start, stop, item_mf_e_1, item_mf_e_2, feat):
        """Score users in [start, stop) against precomputed item embeddings."""
        user_mf_e = self.user_mf_embedding(tf.range(start, stop))
        user_v_e = self.user_v_embedding(tf.range(start, stop))
        mf_output_1 = tf.concat([(tf.expand_dims(user_mf_e, axis=1) * tf.expand_dims(item_mf_e_1, axis=0)), (tf.expand_dims(user_v_e, axis=1) * tf.expand_dims(feat, axis=0))], axis=2)
        mf_output_2 = tf.concat([(tf.expand_dims(user_mf_e, axis=1) * tf.expand_dims(item_mf_e_2, axis=0)), (tf.expand_dims(user_v_e, axis=1) * tf.expand_dims(feat, axis=0))], axis=2)
        mlp_output_1 = self.mlp_layers_1(mf_output_1, training=False)
        mlp_output_2 = self.mlp_layers_2(mf_output_2, training=False)
        return tf.squeeze(((mlp_output_1 + mlp_output_2) / 2), axis=2)

    def get_top_k(self, preds, train_mask, k=100):
        """Top-k predictions with already-seen items masked to -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)
class Graph():
    """Plain container for a scene graph: the image plus its objects,
    relationships, and attributes (stored verbatim, no processing)."""

    def __init__(self, image, objects, relationships, attributes):
        self.image = image
        self.objects = objects
        self.relationships = relationships
        self.attributes = attributes
def get_loss(pred, label):
    """Total training loss: mean softmax cross-entropy plus the summed
    weight-decay terms collected under the 'losses' collection.

    Also emits scalar summaries for both components.
    """
    decay_terms = tf.get_collection('losses')
    weight_decay_loss = tf.reduce_sum(decay_terms)
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(per_example)
    tf.summary.scalar('classify loss', classify_loss)
    tf.summary.scalar('weight_decay_loss', weight_decay_loss)
    return classify_loss + weight_decay_loss
def concat(x, axis):
    """Version-agnostic ``tf.concat``: TF 0.x took (axis, values) while
    later versions take (values, axis)."""
    legacy = tf.__version__.startswith('0')
    return tf.concat(axis, x) if legacy else tf.concat(x, axis=axis)
def load_datasets(data_dir: str) -> Tuple[List[Annotation], List[Annotation], List[Annotation]]:
    """Load the train/val/test annotation splits from ``data_dir``.

    Expects ``train.jsonl``, ``val.jsonl``, and ``test.jsonl`` inside
    ``data_dir`` and returns the three lists in that order.
    """
    splits = ('train.jsonl', 'val.jsonl', 'test.jsonl')
    train_data, val_data, test_data = (annotations_from_jsonl(os.path.join(data_dir, name)) for name in splits)
    return (train_data, val_data, test_data)
def structure_loss(pred, mask):
    """Weighted BCE + weighted IoU loss ("structure loss", as in F3Net/PraNet).

    Pixels where a 31x31 local average of the mask differs from the pixel
    value (i.e. near boundaries) receive up to 6x weight in both terms.

    Args:
        pred: Raw logits, shape (B, 1, H, W).
        mask: Binary ground-truth mask, same shape as ``pred``.

    Returns:
        Scalar tensor: batch mean of (weighted BCE + weighted IoU).
    """
    # Boundary-emphasis weights: 1 + 5 * |local mean - pixel value|.
    weit = 1 + 5 * torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
    # Per-pixel BCE. BUGFIX: the original passed the deprecated boolean
    # argument ``reduce='none'``; a truthy string means reduce=True, i.e. a
    # scalar mean, which silently defeated the per-pixel weighting below.
    # ``reduction='none'`` is the intended per-element form.
    wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
    wbce = (weit * wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))
    pred = torch.sigmoid(pred)
    inter = ((pred * mask) * weit).sum(dim=(2, 3))
    union = ((pred + mask) * weit).sum(dim=(2, 3))
    # Weighted IoU with +1 smoothing in numerator and denominator.
    wiou = 1 - (inter + 1) / (union - inter + 1)
    return (wbce + wiou).mean()
class UNetModule(nn.Module):
    """One U-Net stage: ``nblock`` down-convolutions followed by ``nblock``
    up-convolutions with skip connections.

    NOTE(review): the weight-tying below references ``self.conv_1`` and
    ``self.deconv_0``, which are not assigned in this visible code —
    presumably registered elsewhere (e.g. by add_module aliases); confirm
    before modifying.
    """

    def __init__(self, in_planes, nblock, filter_size, dprob, in_dim, index, max_planes, atrous=0):
        super(UNetModule, self).__init__()
        self.nblock = nblock
        # Spatial size tracked as float so halving/ceil works per level.
        self.in_dim = np.array(in_dim, dtype=float)
        self.down = nn.ModuleList([])
        self.up = nn.ModuleList([])
        self.upsample = None
        # If the input width differs from max_planes, normalize it and add a
        # 1x1 projection used by the outermost residual connection.
        if (in_planes != max_planes):
            self.bn = nn.Sequential(OrderedDict([('bn0', nn.BatchNorm2d(in_planes)), ('relu0', nn.ReLU(inplace=True))]))
            self.upsample = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(in_planes, max_planes, kernel_size=1, stride=1, bias=True))]))
        for i in range(nblock):
            if (i == 0):
                in_ = in_planes
            else:
                in_ = filter_size
            # Dilation doubles with depth when atrous is enabled.
            self.down.append(UNetConv(in_, filter_size, dprob, (index and (i == 0)), (in_planes == max_planes), ((2 ** i) * atrous)))
            if (i > 1):
                # Share convolution weights across depths (tied to conv_1).
                self.down[(- 1)].conv.weight = self.conv_1.conv.weight
                self.down[(- 1)].conv1.weight = self.conv_1.conv1.weight
            if (i == (nblock - 1)):
                out_ = filter_size
            else:
                out_ = (2 * filter_size)
            # output_padding compensates odd spatial sizes when upsampling.
            self.up.append(UNetDeConv(out_, filter_size, dprob, (index and (i == 0)), max_planes, ((2 ** i) * atrous), output_padding=(1 - int(np.mod(self.in_dim, 2)))))
            if ((i > 0) and (i < (nblock - 1))):
                self.up[(- 1)].deconv.weight = self.deconv_0.deconv.weight
                self.up[(- 1)].deconv1.weight = self.deconv_0.deconv1.weight
            # Track the spatial size after each downsampling step.
            self.in_dim = np.ceil((self.in_dim / 2))

    def forward(self, x):
        xs = []
        if (self.upsample is not None):
            x = self.bn(x)
        xs.append(x)
        # Encoder path: keep every intermediate tensor for skip connections.
        for (i, down) in enumerate(self.down):
            xout = down(xs[(- 1)])
            xs.append(xout)
        out = xs[(- 1)]
        # Decoder path: concat skips, except at the outermost level where the
        # (possibly projected) input is added residually instead.
        for (i, (x_skip, up)) in reversed(list(enumerate(zip(xs[:(- 1)], self.up)))):
            out = up(out)
            if i:
                out = torch.cat([out, x_skip], 1)
            else:
                if (self.upsample is not None):
                    x_skip = self.upsample(x_skip)
                out += x_skip
        return out
class Partition0(nn.Module):
    """First pipeline-parallel partition of a T5ForConditionalGeneration
    model: encoder embedding + dropout + encoder blocks 0-6, plus the
    decoder embedding + dropout. Auto-generated partitioning code —
    regenerate rather than hand-edit where possible.
    """
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]']
    TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]']

    def __init__(self, layers, tensors, device='cuda:0'):
        super().__init__()
        # Register the partition's layers under stable names l_0..l_10.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Shared tensors become parameters (p_i) or buffers (b_i).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure used to unflatten the positional inputs in forward().
        self.input_structure = [1, 1, 1]
        # Maps generated names back to the original module paths.
        self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.0', 'l_3': 'encoder.1', 'l_4': 'encoder.2', 'l_5': 'encoder.3', 'l_6': 'encoder.4', 'l_7': 'encoder.5', 'l_8': 'encoder.6', 'l_9': 'decoder.embed_tokens', 'l_10': 'decoder.dropout', 'p_0': 'shared_embed_weight'}
        self.to(self.device)

    def forward(self, *args):
        # Inputs arrive flattened; restore (attention_mask, decoder_input_ids,
        # input_ids).
        (attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure)
        # Encoder: embed tokens with the shared weight p_0, then dropout.
        t_0 = input_ids.size()
        t_0 = t_0[(- 1)]
        t_0 = input_ids.view((- 1), t_0)
        t_0 = self.l_0(self.p_0, t_0)
        t_0 = self.l_1(t_0)
        t_0 = self.l_2(t_0, attention_mask=attention_mask, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        # Block 0 returns (hidden_states, position_bias); the bias is reused
        # by all subsequent encoder blocks.
        t_1 = t_0[0]
        t_0 = t_0[1]
        t_1 = self.l_3(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_1 = self.l_4(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_1 = self.l_5(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_1 = self.l_6(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_1 = self.l_7(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_1 = self.l_8(t_1, attention_mask=attention_mask, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        # Decoder: embed target tokens with the same shared weight.
        t_2 = decoder_input_ids.size()
        t_2 = t_2[(- 1)]
        t_2 = decoder_input_ids.view((- 1), t_2)
        t_2 = self.l_9(self.p_0, t_2)
        t_2 = self.l_10(t_2)
        # Flatten (position_bias, encoder_hidden, decoder_embeds) for the
        # next pipeline stage.
        return list(flatten((t_0, t_1, t_2)))

    # The following delegate to pipeline-framework helpers so the partition
    # behaves like the original (unpartitioned) module for checkpointing.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def get_action(action_type, gt_graph, env, reward_config, strict):
    """Instantiate the action class named ``<action_type>Action``.

    The class is looked up in this module's globals and constructed with the
    matching entry of ``reward_config``.

    Raises:
        Exception: If no class with that name exists in this module.
    """
    action_type_str = action_type + 'Action'
    action_cls = globals().get(action_type_str)
    if action_cls is None:
        raise Exception('Invalid action_type %s' % action_type_str)
    return action_cls(gt_graph, env, reward_config[action_type_str], strict)
def test_subscriptionWithWrongPayload():
    """Posting a malformed subscription payload must yield HTTP 500.

    Live integration test: requires the broker at ``brokerIp`` to be up.
    (The attribute name ``subscriptionWrongPaylaod`` matches the fixture
    module as-is, typo included.)
    """
    url = (brokerIp + '/v2/subscriptions')
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data.subscriptionWrongPaylaod), headers=headers)
    assert (r.status_code == 500)
def clean_padding_(tensor, length, len_dim=1, mask_value=0.0):
    """In-place: overwrite padded positions of ``tensor`` with ``mask_value``.

    Args:
        tensor: Batch-first tensor whose dim ``len_dim`` is the length axis.
        length: Per-example lengths; the ``length * max_len`` scaling
            suggests these are relative lengths in [0, 1] — TODO confirm
            against ``length_to_mask``'s contract.
        len_dim: Which dimension of ``tensor`` holds the sequence length.
        mask_value: Value written into padded positions.
    """
    max_len = tensor.size(len_dim)
    # Boolean validity mask, shape (batch, max_len).
    mask = length_to_mask((length * max_len), max_len).bool()
    # Append trailing singleton dims so the mask can broadcast over the
    # remaining feature dimensions of ``tensor``.
    mask_unsq = mask[((...,) + ((None,) * (tensor.dim() - 2)))]
    # Move the length axis into position ``len_dim`` and expand to full shape.
    mask_t = mask_unsq.transpose(1, len_dim).expand_as(tensor)
    tensor[(~ mask_t)] = mask_value
# NOTE(review): stripped decorator remnants — presumably
# ``@pytest.mark.skip(...)`` and ``@pytest.mark.parametrize('is_eval', [True])``.
.skip(reason='This needs actual Atari 2600 environments.')
.parametrize('is_eval', [True])
def test_atari(is_eval: bool) -> None:
    """The Atari wrapper must emit 1x84x84 observations from reset and step."""
    env = Atari(gym.make('BreakoutNoFrameskip-v4'), is_eval)
    assert (env.observation_space.shape == (1, 84, 84))
    (observation, _) = env.reset()
    assert (observation.shape == (1, 84, 84))
    (observation, _, _, _, _) = env.step(env.action_space.sample())
    assert (observation.shape == (1, 84, 84))
# NOTE(review): stripped decorator remnant — presumably ``@pytest.mark.gpu``.
.gpu
def test_gpu_vec():
    """GPUTransformMap + Vectorization must both apply to the SDFG; on the
    CUDA backend the generated code must actually be vectorized."""
    sdfg: dace.SDFG = cudahello.to_sdfg()
    sdfg.name = 'cuda_grid_gpu_vec'
    # Both transformations must report success (2 applications total).
    assert (sdfg.apply_transformations([GPUTransformMap, Vectorization]) == 2)
    _test(sdfg)
    if (common.get_gpu_backend() == 'cuda'):
        assert was_vectorized(sdfg)
class Wikiextractor(PipelineJob):
    """Pipeline job that runs WikiExtractor over every downloaded wiki dump
    file, producing JSON output under ``wikiextractor_out``."""

    def __init__(self, preprocess_jobs: Dict[(str, PipelineJob)], opts):
        super().__init__(requires=[f'data/versions/{opts.data_version_name}/downloads/{opts.wiki_lang_version}/'], provides=[f'data/versions/{opts.data_version_name}/wikiextractor_out/{opts.wiki_lang_version}/'], preprocess_jobs=preprocess_jobs, opts=opts)

    def _run(self):
        self.log('Run WikiExtractor')
        for input_file in glob.glob(f'data/versions/{self.opts.data_version_name}/downloads/{self.opts.wiki_lang_version}/*'):
            self.log(input_file)
            # WikiExtractor's main() reads sys.argv, so fake a command line
            # per input file, writing into a temporary output directory.
            sys.argv = ['', '--json', '--filter_disambig_pages', '--collect_links', '--processes', str(self.opts.wikiextractor_num_workers), input_file, '-o', f'data/versions/{self.opts.data_version_name}/wikiextractor_out/tmp/{os.path.basename(input_file)}']
            wiki_extractor_main()
        # Promote the tmp output dir to its final (provided) name only after
        # every input file succeeded.
        os.rename(f'data/versions/{self.opts.data_version_name}/wikiextractor_out/tmp/', f'data/versions/{self.opts.data_version_name}/wikiextractor_out/{self.opts.wiki_lang_version}/')
        self.log('WikiExtractor finished')
class AudioCapsQADataset(AudioCapsDataset):
    """AudioCaps question-answering dataset; can synthesize yes/no
    ("binary") questions from captions on the fly."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # When enabled, roughly 30% of samples become yes/no questions.
        self.add_binary = kwargs.get('add_binary', False)
        self.binary_templates = ['do you hear {}?', 'is this {}?', 'does the audio contain {}?']

    def __getitem__(self, index):
        ann = copy.deepcopy(self.annotation[index])
        for modality in self.modalities:
            if ((modality == 'audio') and self.cached):
                # Cached audio is stored as a serialized tensor on disk.
                ann[f'{modality}_path'] = getattr(self, f'get_cached_{modality}_path')(ann)
                ann['audio'] = torch.load(ann[f'{modality}_path'])
            else:
                ann[f'{modality}_path'] = getattr(self, f'get_{modality}_path')(ann)
                if isinstance(ann[f'{modality}_path'], list):
                    ann[f'{modality}_path'] = random.choice(ann[f'{modality}_path'])
                # Image-like modalities all route through the 'vis' processor
                # and are stored under the 'image' key.
                ann[(modality if ('image' not in modality) else 'image')] = getattr(self, f"{('vis' if ('image' in modality) else modality)}_processor")(ann[f'{modality}_path'])
        # All-zero audio indicates a corrupt/missing clip; returning None
        # lets the collate function drop this sample.
        if (ann['audio'].sum() == 0):
            return None
        if (self.add_binary and (random.randint(0, 10) < 3)):
            # Build a yes/no question; for "no", pair the audio with a
            # caption from a different sample.
            yes_answer = (random.randint(0, 10) < 5)
            if (not yes_answer):
                caption_index = random.choice(list(set(range(len(self.annotation))).difference(set([index]))))
                caption = self.annotation[caption_index]['caption']
            else:
                caption = ann['caption']
            question = random.choice(self.binary_templates).format(caption)
            answer = ('yes' if yes_answer else 'no')
            return {'text_input': self.text_processor(question), 'instance_id': ann['instance_id'], 'text_output': answer, 'answer': answer, 'caption': ann['caption'], 'audio': ann['audio'], 'audio_id': ann['youtube_id'], 'question_id': ann['youtube_id']}
        return {'text_input': self.text_processor(ann['question']), 'instance_id': ann['instance_id'], 'text_output': ann['answer'], 'answer': ann['answer'], 'caption': ann['caption'], 'audio': ann['audio'], 'audio_id': ann['youtube_id'], 'question_id': ann['youtube_id']}
class TrainState(flax.struct.PyTreeNode):
    """Bundles a flax module, its parameters, and optimizer state.

    NOTE(review): ``create`` takes ``cls`` and is presumably a stripped
    ``@classmethod``; confirm against the original source.
    """
    step: int
    # Bound apply function / module definition are excluded from the pytree.
    apply_fn: Callable[(..., Any)] = nonpytree_field()
    model_def: Any = nonpytree_field()
    params: Params
    tx: Optional[optax.GradientTransformation] = nonpytree_field()
    opt_state: Optional[optax.OptState] = None

    def create(cls, model_def: nn.Module, params: Params, tx: Optional[optax.GradientTransformation]=None, **kwargs) -> 'TrainState':
        """Construct a TrainState, initializing opt_state when ``tx`` is given."""
        if (tx is not None):
            opt_state = tx.init(params)
        else:
            opt_state = None
        return cls(step=1, apply_fn=model_def.apply, model_def=model_def, params=params, tx=tx, opt_state=opt_state, **kwargs)

    def __call__(self, *args, params=None, extra_variables: dict=None, method: ModuleMethod=None, **kwargs):
        """Apply the module. Defaults to the stored params; ``method`` may be
        a name (looked up on the module) or a callable."""
        if (params is None):
            params = self.params
        variables = {'params': params}
        if (extra_variables is not None):
            variables = {**variables, **extra_variables}
        if isinstance(method, str):
            method = getattr(self.model_def, method)
        return self.apply_fn(variables, *args, method=method, **kwargs)

    def apply_gradients(self, *, grads, **kwargs):
        """Apply one optimizer update and bump the step counter."""
        (updates, new_opt_state) = self.tx.update(grads, self.opt_state, self.params)
        new_params = optax.apply_updates(self.params, updates)
        return self.replace(step=(self.step + 1), params=new_params, opt_state=new_opt_state, **kwargs)

    def apply_loss_fn(self, *, loss_fn, pmap_axis=None, has_aux=False):
        """Differentiate ``loss_fn`` w.r.t. params (pmean-ing grads and aux
        across ``pmap_axis`` when given) and apply the gradients."""
        if has_aux:
            (grads, info) = jax.grad(loss_fn, has_aux=has_aux)(self.params)
            if (pmap_axis is not None):
                grads = jax.lax.pmean(grads, axis_name=pmap_axis)
                info = jax.lax.pmean(info, axis_name=pmap_axis)
            return (self.apply_gradients(grads=grads), info)
        else:
            grads = jax.grad(loss_fn, has_aux=has_aux)(self.params)
            if (pmap_axis is not None):
                grads = jax.lax.pmean(grads, axis_name=pmap_axis)
            return self.apply_gradients(grads=grads)
# NOTE(review): stripped decorator remnant — presumably
# ``@test_utils.test(arch=[ti.vulkan])``.
_utils.test(arch=[ti.vulkan])
def test_devcap():
    """AOT module saving must record exactly the requested Vulkan caps."""
    module = ti.aot.Module(ti.vulkan, caps=[ti.DeviceCapability.spirv_has_float16, ti.DeviceCapability.spirv_has_atomic_float16_minmax])
    with tempfile.TemporaryDirectory() as tmpdir:
        module.save(tmpdir)
        with open((tmpdir + '/metadata.json')) as f:
            j = json.load(f)
        caps = j['required_caps']
        for cap in caps:
            key = cap['key']
            if (key == 'spirv_version'):
                # 66304 == 0x10300, i.e. SPIR-V 1.3.
                assert (cap['value'] == 66304)
            elif (key == 'spirv_has_float16'):
                assert (cap['value'] == 1)
            elif (key == 'spirv_has_atomic_float16_minmax'):
                assert (cap['value'] == 1)
            else:
                # No other capability should ever appear in the metadata.
                assert False
class Logger(object):
    """Minimal TensorBoard logger built on the TF1 summary API."""

    def __init__(self, log_dir):
        """Create a summary writer logging into ``log_dir``."""
        self.writer = tf.summary.FileWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        """Log a single scalar ``value`` under ``tag`` at ``step``."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)
        self.writer.flush()

    def image_summary(self, tag, images, step):
        """Log a sequence of images under ``tag`` at ``step``.

        BUGFIX: the original wrapped ``s = StringIO()`` in try/except with a
        BytesIO fallback, but StringIO *construction* never fails — the
        failure happened later inside save(), uncaught, because PNG encoding
        writes bytes. Use a BytesIO buffer directly.
        NOTE(review): ``scipy.misc.toimage`` was removed in SciPy >= 1.2;
        this code assumes an old SciPy — confirm the pinned version.
        """
        img_summaries = []
        for (i, img) in enumerate(images):
            s = BytesIO()
            scipy.misc.toimage(img).save(s, format='png')
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])
            img_summaries.append(tf.Summary.Value(tag=('%s/%d' % (tag, i)), image=img_sum))
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)
        self.writer.flush()

    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of ``values`` under ``tag`` at ``step``."""
        (counts, bin_edges) = np.histogram(values, bins=bins)
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum((values ** 2)))
        # TF expects the right edge of each bucket; drop the leftmost edge.
        bin_edges = bin_edges[1:]
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
class TestHyp1f1():
    """Tests for scipy.special.hyp1f1 (confluent hypergeometric 1F1).

    NOTE(review): the bare ``.parametrize(...)`` lines below are stripped
    ``@pytest.mark.parametrize`` decorators; confirm against the original.
    """
    .parametrize('a, b, x', [(np.nan, 1, 1), (1, np.nan, 1), (1, 1, np.nan)])
    def test_nan_inputs(self, a, b, x):
        # NaN in any argument must propagate to the result.
        assert np.isnan(sc.hyp1f1(a, b, x))

    def test_poles(self):
        # b at a non-positive integer is a pole of 1F1.
        assert_equal(sc.hyp1f1(1, [0, (- 1), (- 2), (- 3), (- 4)], 0.5), np.inf)

    .parametrize('a, b, x, result', [((- 1), 1, 0.5, 0.5), (1, 1, 0.5, 1.), (2, 1, 0.5, 2.), (1, 2, 0.5, 1.), ((- 10), 1, 0.5, (- 0.))])
    def test_special_cases(self, a, b, x, result):
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    .parametrize('a, b, x, result', [(1, 1, 0.44, 1.), ((- 1), 1, 0.44, 0.56), (100, 100, 0.89, 2.), ((- 100), 100, 0.89, 0.), (1.5, 100, 59.99, 3.), ((- 1.5), 100, 59.99, 0.)])
    def test_geometric_convergence(self, a, b, x, result):
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    .parametrize('a, b, x, result', [((- 1), 1, 1.5, (- 0.5)), ((- 10), 1, 1.5, 0.), ((- 25), 1, 1.5, 0.), ((- 50), 1, 1.5, (- 0.)), ((- 80), 1, 1.5, (- 0.)), ((- 150), 1, 1.5, (- 0.))])
    def test_a_negative_integer(self, a, b, x, result):
        # Negative-integer a makes 1F1 a polynomial in x.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=2e-14)

    .parametrize('a, b, x, expected', [(0.01, 150, (- 4), 0.), (1, 5, 0.01, 1.), (50, 100, 0.01, 1.), (1, 0.3, (- 1000.0), (- 0.)), (1, 0.3, (- 10000.0), (- 7.e-05)), (9, 8.5, (- 350), (- 5.e-20)), (9, 8.5, (- 355), (- 4.e-20)), (75, (- 123.5), 15, 3425753.)])
    def test_assorted_cases(self, a, b, x, expected):
        assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14)

    def test_a_neg_int_and_b_equal_x(self):
        # Regression for the branch where b == x and a is a negative integer.
        a = (- 10.0)
        b = 2.5
        x = 2.5
        expected = 0.
        computed = sc.hyp1f1(a, b, x)
        assert_allclose(computed, expected, atol=0, rtol=1e-13)

    .parametrize('a, b, x, desired', [((- 1), (- 2), 2, 2), ((- 1), (- 4), 10, 3.5), ((- 2), (- 2), 1, 2.5)])
    def test_gh_11099(self, a, b, x, desired):
        assert (sc.hyp1f1(a, b, x) == desired)

    .parametrize('a', [(- 3), (- 2)])
    def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a):
        assert (sc.hyp1f1(a, (- 3), 0) == 1)

    # Legacy behavior kept for backwards compatibility.
    .parametrize('b', [0, (- 1), (- 5)])
    def test_legacy_case1(self, b):
        assert_equal(sc.hyp1f1(0, b, [(- 1.5), 0, 1.5]), [np.inf, np.inf, np.inf])

    def test_legacy_case2(self):
        assert (sc.hyp1f1((- 4), (- 3), 0) == np.inf)
def get_windows_version(run_lambda):
    """Return the Windows OS caption via WMIC (header row filtered out)."""
    cmd = 'wmic os get Caption | findstr /v Caption'
    return run_and_read_all(run_lambda, cmd)
def register_types(module):
    """Register all C++ types of the ns-3 UAN bindings with PyBindGen.

    Auto-generated binding code: every `add_class`/`add_enum` call mirrors
    one ns-3 C++ declaration. `import_from_module` points at the Python
    module that owns the primary binding; classes without it are defined by
    this (UAN) module. Statement order matters — later registrations refer
    to earlier ones through `root_module[...]` lookups.
    """
    root_module = module.get_root()
    # --- Value/helper types from ns.network and ns.core --------------------
    module.add_class('Address', import_from_module='ns.network')
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    module.add_class('Buffer', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    module.add_class('ByteTagList', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    module.add_class('CallbackBase', import_from_module='ns.core')
    # --- DefaultDeleter template instantiations ----------------------------
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    # --- Helpers, addresses and containers ---------------------------------
    module.add_class('DeviceEnergyModelContainer', import_from_module='ns.energy')
    module.add_class('DeviceEnergyModelHelper', allow_subclassing=True, import_from_module='ns.energy')
    module.add_class('EnergySourceHelper', allow_subclassing=True, import_from_module='ns.energy')
    module.add_class('EventId', import_from_module='ns.core')
    module.add_class('Hasher', import_from_module='ns.core')
    module.add_class('Ipv4Address', import_from_module='ns.network')
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    module.add_class('Ipv6Address', import_from_module='ns.network')
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    module.add_class('Mac48Address', import_from_module='ns.network')
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('NetDeviceContainer', import_from_module='ns.network')
    module.add_class('NodeContainer', import_from_module='ns.network')
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('ObjectFactory', import_from_module='ns.core')
    module.add_class('PacketMetadata', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    module.add_class('PacketTagList', import_from_module='ns.network')
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    # Reservation is declared by this (UAN) module itself.
    module.add_class('Reservation')
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('TagBuffer', import_from_module='ns.network')
    module.add_class('Tap')
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    # --- UAN-module-specific value types -----------------------------------
    module.add_class('UanAddress')
    root_module['ns3::UanAddress'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('UanHelper')
    module.add_class('UanModesList')
    module.add_class('UanPacketArrival')
    module.add_class('UanPdp')
    module.add_class('UanPhyListener', allow_subclassing=True)
    module.add_class('UanTxMode')
    module.add_enum('ModulationType', ['PSK', 'QAM', 'FSK', 'OTHER'], outer_class=root_module['ns3::UanTxMode'])
    module.add_class('UanTxModeFactory')
    module.add_class('Vector2D', import_from_module='ns.core')
    module.add_class('Vector3D', import_from_module='ns.core')
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    # --- ns3::Object hierarchy and SimpleRefCount instantiations -----------
    module.add_class('AcousticModemEnergyModelHelper', parent=root_module['ns3::DeviceEnergyModelHelper'])
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # --- UAN headers, MACs, PHYs, propagation and transducers --------------
    module.add_class('UanHeaderCommon', parent=root_module['ns3::Header'])
    module.add_class('UanHeaderRcAck', parent=root_module['ns3::Header'])
    module.add_class('UanHeaderRcCts', parent=root_module['ns3::Header'])
    module.add_class('UanHeaderRcCtsGlobal', parent=root_module['ns3::Header'])
    module.add_class('UanHeaderRcData', parent=root_module['ns3::Header'])
    module.add_class('UanHeaderRcRts', parent=root_module['ns3::Header'])
    module.add_class('UanMac', parent=root_module['ns3::Object'])
    module.add_class('UanMacAloha', parent=root_module['ns3::UanMac'])
    module.add_class('UanMacCw', parent=[root_module['ns3::UanMac'], root_module['ns3::UanPhyListener']])
    module.add_class('UanMacRc', parent=root_module['ns3::UanMac'])
    module.add_enum('', ['TYPE_DATA', 'TYPE_GWPING', 'TYPE_RTS', 'TYPE_CTS', 'TYPE_ACK'], outer_class=root_module['ns3::UanMacRc'])
    module.add_class('UanMacRcGw', parent=root_module['ns3::UanMac'])
    module.add_class('UanNoiseModel', parent=root_module['ns3::Object'])
    module.add_class('UanNoiseModelDefault', parent=root_module['ns3::UanNoiseModel'])
    module.add_class('UanPhy', parent=root_module['ns3::Object'])
    module.add_enum('State', ['IDLE', 'CCABUSY', 'RX', 'TX', 'SLEEP', 'DISABLED'], outer_class=root_module['ns3::UanPhy'])
    module.add_class('UanPhyCalcSinr', parent=root_module['ns3::Object'])
    module.add_class('UanPhyCalcSinrDefault', parent=root_module['ns3::UanPhyCalcSinr'])
    module.add_class('UanPhyCalcSinrDual', parent=root_module['ns3::UanPhyCalcSinr'])
    module.add_class('UanPhyCalcSinrFhFsk', parent=root_module['ns3::UanPhyCalcSinr'])
    module.add_class('UanPhyDual', parent=root_module['ns3::UanPhy'])
    module.add_class('UanPhyGen', parent=root_module['ns3::UanPhy'])
    module.add_class('UanPhyPer', parent=root_module['ns3::Object'])
    module.add_class('UanPhyPerCommonModes', parent=root_module['ns3::UanPhyPer'])
    module.add_class('UanPhyPerGenDefault', parent=root_module['ns3::UanPhyPer'])
    module.add_class('UanPhyPerUmodem', parent=root_module['ns3::UanPhyPer'])
    module.add_class('UanPropModel', parent=root_module['ns3::Object'])
    module.add_class('UanPropModelIdeal', parent=root_module['ns3::UanPropModel'])
    module.add_class('UanPropModelThorp', parent=root_module['ns3::UanPropModel'])
    module.add_class('UanTransducer', parent=root_module['ns3::Object'])
    module.add_enum('State', ['TX', 'RX'], outer_class=root_module['ns3::UanTransducer'])
    module.add_class('UanTransducerHd', parent=root_module['ns3::UanTransducer'])
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # --- Attribute accessors, checkers and values --------------------------
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('DeviceEnergyModel', import_from_module='ns.energy', parent=root_module['ns3::Object'])
    module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('EnergyHarvester', import_from_module='ns.energy', parent=root_module['ns3::Object'])
    module.add_class('EnergySource', import_from_module='ns.energy', parent=root_module['ns3::Object'])
    module.add_class('EnergySourceContainer', import_from_module='ns.energy', parent=root_module['ns3::Object'])
    module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('PointerChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('PointerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('UanChannel', parent=root_module['ns3::Channel'])
    module.add_class('UanModesListChecker', parent=root_module['ns3::AttributeChecker'])
    module.add_class('UanModesListValue', parent=root_module['ns3::AttributeValue'])
    module.add_class('UanNetDevice', parent=root_module['ns3::NetDevice'])
    module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('AcousticModemEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # --- CallbackImpl template instantiations (one per callback signature) -
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'double', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'double', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::UanAddress', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Packet>', 'const ns3::UanAddress &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', template_parameters=['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Time', 'ns3::Time', 'unsigned int', 'unsigned int', 'double', 'unsigned int', 'double', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    # --- STL container instantiations --------------------------------------
    module.add_container('std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > >', 'std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress >', container_type=u'list')
    module.add_container('std::vector< ns3::Tap >', 'ns3::Tap', container_type=u'vector')
    module.add_container('std::vector< std::complex< double > >', 'std::complex< double >', container_type=u'vector')
    module.add_container('std::vector< double >', 'double', container_type=u'vector')
    module.add_container('std::set< unsigned char >', 'unsigned char', container_type=u'set')
    module.add_container('std::list< ns3::UanPacketArrival >', 'ns3::UanPacketArrival', container_type=u'list')
    module.add_container('std::list< ns3::Ptr< ns3::UanPhy > >', 'ns3::Ptr< ns3::UanPhy >', container_type=u'list')
    module.add_container('std::vector< std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > > >', 'std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > >', container_type=u'vector')
    module.add_container('std::list< ns3::Ptr< ns3::UanTransducer > >', 'ns3::Ptr< ns3::UanTransducer >', container_type=u'list')
    # --- C++ typedefs / aliases (Vector == Vector3D etc.) ------------------
    typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
    typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
    typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
    typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    # --- Nested C++ namespaces, each registered by its own helper ----------
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)
def get_plot(q):
    """Build (x, y) coordinates for a step-style empirical CDF of quantiles q.

    Each quantile contributes a vertical jump of 1/len(q); short horizontal
    segments (eps wide) keep the steps visually sharp, and flat tails extend
    20% beyond the first and last quantile values.
    """
    eps = 1e-08
    n = len(q)
    # Start slightly left of the first quantile at probability 0.
    x = [q[0] - np.abs(q[0] * 0.2)]
    y = [0]
    p = 0
    for i in range(n):
        x.extend([q[i] - eps, q[i]])
        y.extend([p, p + (1 / n)])
        p += (1 / n)
    # Flat tail past the last quantile at probability 1.
    x.extend([q[i] + eps, q[i] + np.abs(q[i] * 0.2)])
    y.extend([1.0, 1.0])
    return (x, y)
def main():
    """Generate tool thoughts — brainstormed or from a fixed name list —
    and stream the results into a JSONL output file via `runner`."""
    # Collect all previously generated toolkits so duplicates can be avoided.
    toolkits = []
    for path in args.toolkits_paths:
        toolkits.extend(read_file(path))
    print(f'Loaded {len(toolkits)} toolkits')
    existing_tool_names = {t['toolkit'] for t in toolkits}

    # Resolve the output file: 'new' mode gets a timestamp suffix,
    # 'overwrite' mode deletes any stale file first.
    os.makedirs(args.dump_dir, exist_ok=True)
    suffix = '_risky' if generator.gen_risky_tool else '_std'
    base_name = args.gen_filename + suffix
    if args.output_mode == 'new':
        base_name += f'_{NOW}'
    output_file = osp.join(args.dump_dir, f'{base_name}.jsonl')
    if os.path.exists(output_file) and args.output_mode == 'overwrite':
        os.remove(output_file)

    # Fixed tool names are optional here; their absence is only an error
    # later if the generator is not in brainstorm mode.
    try:
        tool_names = list(DataLoader.from_args(args, item_name='toolkit name'))
    except Exception as e:
        print(e)
        tool_names = None

    if generator.brainstorm:
        if runner._target_num is None:
            raise ValueError('Please specify --target-num when brainstorming new tools')
        dataset = None
    else:
        if tool_names is None:
            raise ValueError('Please specify tool names with --input-path when using fixed tool names for generation')
        dataset = list(range(len(tool_names)))

    def build_inputs(index: int):
        # Brainstorm mode starts from an empty toolkit spec; otherwise the
        # fixed name at this index is used.
        toolkit = {} if generator.brainstorm else tool_names[index]
        return dict(existing_tools=existing_tool_names, toolkit=toolkit, domain_blacklist=None)

    # Debugging hook: show the intermediate result and stop early.
    if generator._stop_at in ['preprocess', 'prompt']:
        result = generator(build_inputs(0))
        print_intermediate_result_and_stop(result, generator._stop_at)

    def generate_tool_thoughts(index: int):
        # Returns (None, output) on success, (index, None) on failure so the
        # runner can record which items failed.
        inputs = build_inputs(index)
        try:
            return (None, generator(inputs))
        except Exception as e:
            print(e)
            return (index, None)

    runner.run(generate_tool_thoughts, output_file, dataset=dataset)
def sinc_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward function for sinc: d/dx [sin(x)/x] = cos(x)/x - sin(x)/x^2.

    The derivative is forced to 0 where x == 0 (the analytic limit) via a
    mask, avoiding the division by zero in the general formula.
    """
    grad_out = grad_inputs[0]
    x = inputs[0]
    # Mask of non-zero inputs; detached so it does not join the graph.
    nonzero_mask = no_grad(F.not_equal_scalar(x, 0))
    y0 = outputs[0]  # (unused; kept to mirror the forward outputs)
    # General-case derivative: grad_out * (cos(x) - sin(x)/x) / x.
    grad_in = ((grad_out * (F.cos(x) - (F.sin(x) / x))) / x)
    zeros = F.constant(0, x.shape)
    # Where x == 0 the derivative of sinc is exactly 0.
    return F.where(nonzero_mask, grad_in, zeros)
class OUStrategy(ExplorationStrategy):
    """Ornstein-Uhlenbeck exploration: adds temporally correlated noise to a
    policy's actions, clipped to the action space bounds.

    The OU process follows dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1).

    Args:
        env_spec: environment spec providing `action_space` (with `flat_dim`,
            `low`, `high`).
        mu: long-run mean of the process.
        sigma: noise scale.
        theta: mean-reversion rate.
        dt: integration time step.
        x0: optional explicit initial state; defaults to the process mean.
    """

    def __init__(self, env_spec, mu=0, sigma=0.3, theta=0.15, dt=0.01, x0=None):
        self._env_spec = env_spec
        self._action_space = env_spec.action_space
        self._action_dim = self._action_space.flat_dim
        self._mu = mu
        self._sigma = sigma
        self._theta = theta
        self._dt = dt
        # FIX: was `self._mu * np.zeros(...)`, which is always the zero vector
        # regardless of mu. The process should start at its mean mu.
        self._x0 = (x0 if (x0 is not None) else (self._mu * np.ones(self._action_dim)))
        self._state = self._x0

    def _simulate(self):
        """Advance the OU process one step and return the new state."""
        x = self._state
        dx = (((self._theta * (self._mu - x)) * self._dt) + ((self._sigma * np.sqrt(self._dt)) * np.random.normal(size=len(x))))
        self._state = (x + dx)
        return self._state

    def reset(self):
        """Reset the noise process back to its initial state."""
        self._state = self._x0

    def get_action(self, t, observation, policy, **kwargs):
        """Return the policy action for `observation` with OU noise added."""
        del t
        del kwargs
        (action, agent_infos) = policy.get_action(observation)
        ou_state = self._simulate()
        return (np.clip((action + ou_state), self._action_space.low, self._action_space.high), agent_infos)

    def get_actions(self, t, observations, policy, **kwargs):
        """Batched version of `get_action`.

        NOTE(review): a single OU sample is broadcast over the whole batch
        (kept from the original) — confirm per-observation noise is not needed.
        """
        del t
        del kwargs
        (actions, agent_infos) = policy.get_actions(observations)
        ou_state = self._simulate()
        return (np.clip((actions + ou_state), self._action_space.low, self._action_space.high), agent_infos)
# NOTE(review): the two lines below look like remnants of stripped decorators
# (plausibly Auto-GPT's `@command(...)` registration and a `@validate_url`
# style decorator) — confirm against the original source. As written they are
# a discarded tuple expression and a bare name that would raise NameError.
('clone_repository', 'Clone Repository', '"url": "<repository_url>", "clone_path": "<clone_path>"', (lambda config: (config.github_username and config.github_api_key)), 'Configure github_username and github_api_key.')
_url
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
    """Clone a git repository to `clone_path`, injecting GitHub credentials
    from `agent.config` into the URL for authentication.

    Returns a human-readable success or "Error: ..." message string.
    """
    # Rebuild the URL as scheme://user:key@host/... by splitting on '//'.
    split_url = url.split('//')
    auth_repo_url = f'//{agent.config.github_username}:{agent.config.github_api_key}'.join(split_url)
    try:
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
        return f'Cloned {url} to {clone_path}'
    except Exception as e:
        # Best-effort: report any failure as a string instead of raising.
        return f'Error: {str(e)}'
class DumpUnpickler(pickle._Unpickler):
    """Unpickler for *inspecting* pickle streams without importing or
    constructing real objects: classes become `FakeClass` stand-ins and
    persistent ids become `FakeObject` markers.
    """

    def __init__(self, file, *, catch_invalid_utf8=False, **kwargs):
        super().__init__(file, **kwargs)
        # When True, undecodable BINUNICODE payloads are wrapped in a
        # FakeObject marker instead of raising UnicodeDecodeError.
        self.catch_invalid_utf8 = catch_invalid_utf8

    def find_class(self, module, name):
        # Never import the real class; return an inert stand-in.
        return FakeClass(module, name)

    def persistent_load(self, pid):
        return FakeObject('pers', 'obj', (pid,))

    # Copy the dispatch table so we can override opcodes for this class only.
    dispatch = dict(pickle._Unpickler.dispatch)

    def load_binunicode(self):
        """Override of the BINUNICODE opcode that can tolerate invalid UTF-8."""
        (strlen,) = struct.unpack('<I', self.read(4))
        if (strlen > sys.maxsize):
            raise Exception('String too long.')
        str_bytes = self.read(strlen)
        obj: Any
        try:
            obj = str(str_bytes, 'utf-8', 'surrogatepass')
        except UnicodeDecodeError as exn:
            if (not self.catch_invalid_utf8):
                raise
            obj = FakeObject('builtin', 'UnicodeDecodeError', (str(exn),))
        self.append(obj)
    dispatch[pickle.BINUNICODE[0]] = load_binunicode

    # FIX: restored the @classmethod decorator — the body calls `cls(...)`,
    # so without it `DumpUnpickler.dump(in, out)` would bind `in_stream` to
    # `cls` and break.
    @classmethod
    def dump(cls, in_stream, out_stream):
        """Unpickle `in_stream`, pretty-print the result to `out_stream`,
        and return the unpickled value."""
        value = cls(in_stream).load()
        pprint.pprint(value, stream=out_stream)
        return value
class GradientDescentL2():
    """Plain gradient descent (with optional momentum) for an L2Problem.

    Minimizes the squared L2 norm of the problem residuals f(x) by iterating
    x <- x - step_length * (grad + momentum * prev_dir).

    Args:
        problem: L2Problem providing __call__ (residuals) and ip_output
            (inner product of outputs).
        variable: TensorList of optimization variables (updated in place).
        step_length: gradient step size.
        momentum: momentum factor for the search direction.
        debug: record per-iteration loss / gradient magnitude.
        plotting: plot loss and gradient magnitude curves (implies debug).
        fig_num: figure ids used when plotting.
    """

    def __init__(self, problem: L2Problem, variable: TensorList, step_length: float, momentum: float=0.0, debug=False, plotting=False, fig_num=(10, 11)):
        self.problem = problem
        self.x = variable
        # NOTE: keeps the original attribute spelling ("legnth") since
        # external code may read it.
        self.step_legnth = step_length
        self.momentum = momentum
        self.debug = (debug or plotting)
        self.plotting = plotting
        self.fig_num = fig_num
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)
        self.residuals = None
        self.clear_temp()

    def clear_temp(self):
        """Drop per-run temporaries (residuals and momentum direction)."""
        self.f0 = None
        self.dir = None

    def run(self, num_iter, dummy=None):
        """Run `num_iter` gradient-descent iterations, updating self.x in place."""
        if (num_iter == 0):
            return
        lossvec = None
        if self.debug:
            lossvec = torch.zeros((num_iter + 1))
            grad_mags = torch.zeros((num_iter + 1))
        for i in range(num_iter):
            self.x.requires_grad_(True)
            self.f0 = self.problem(self.x)
            loss = self.problem.ip_output(self.f0, self.f0)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            if (self.dir is None):
                self.dir = grad
            else:
                self.dir = (grad + (self.momentum * self.dir))
            self.x.detach_()
            self.x -= (self.step_legnth * self.dir)
            if self.debug:
                lossvec[i] = loss.item()
                # FIX: the `@` (dot-product) operator between the flattened
                # gradients was missing (syntax error in the original line).
                grad_mags[i] = sum((grad.view((- 1)) @ grad.view((- 1)))).sqrt().item()
        if self.debug:
            # One extra evaluation to record the final loss/gradient.
            self.x.requires_grad_(True)
            self.f0 = self.problem(self.x)
            loss = self.problem.ip_output(self.f0, self.f0)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            lossvec[(- 1)] = self.problem.ip_output(self.f0, self.f0).item()
            # FIX: same missing `@` operator as above.
            grad_mags[(- 1)] = sum((grad.view((- 1)) @ grad.view((- 1)))).cpu().sqrt().item()
            self.losses = torch.cat((self.losses, lossvec))
            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))
            if self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')
        self.x.detach_()
        self.clear_temp()
def frobenius_expansion_by_series(Q, p, M):
    """Compute a truncated series expansion of the Frobenius action in the
    special cubic quotient ring of Q.

    Presumably part of a Kedlaya-style point-counting algorithm for the
    hyperelliptic curve y^2 = Q(x) (Monsky-Washnitzer cohomology) — confirm
    against the surrounding module.

    Args:
        Q: cubic polynomial (coefficients indexed as Q[0], Q[1]).
        p: the prime.
        M: p-adic truncation level of the series.

    Returns:
        Tuple (F0, F0 * x^p, offset) where offset is the power of the
        denominator absorbed into the shifted representation.
    """
    S = SpecialCubicQuotientRing(Q)
    (x, _) = S.gens()
    base_ring = S.base_ring()
    # Powers of x^p used repeatedly below; built by successive products.
    x_to_p_less_1 = (x ** (p - 1))
    x_to_p = (x_to_p_less_1 * x)
    x_to_p_squared = (x_to_p * x_to_p)
    x_to_p_cubed = (x_to_p_squared * x_to_p)
    # Q evaluated at x^p (Q is monic cubic with zero x^2 term here).
    frobQ = ((x_to_p_cubed + (Q[1] * x_to_p)) + (Q[0] * S.one()))
    # E = Q(x^p) - x^p: the "small" perturbation the series expands in.
    E = (frobQ - S.one().shift(p))
    offset = int((((((2 * M) - 3) * p) - 1) / 2))
    term = (p * x_to_p_less_1)
    F0 = term.shift(((M - 2) * p))
    # Binomial series in E with exponent -1/2, truncated at order M-1.
    for k in range(1, int((M - 1))):
        term = (term * E)
        c = base_ring(binomial(QQ(((- 1), 2)), k))
        F0 += (term * c).shift((((M - k) - 2) * p))
    return (F0, (F0 * x_to_p), offset)
class EmitTrmmUniversalInstance():
    """Emits C++ source instantiating a CUTLASS device-level TRMM operation.

    Holds two string templates (real and complex variants) and substitutes
    an operation description into the appropriate one.
    """

    def __init__(self):
        # Template for real-valued TRMM instantiations.
        self.trmm_template = '\n// Trmm operator ${operation_name}\nusing Operation_${operation_name} = \n  typename cutlass::gemm::device::Trmm<\n    ${element_a}, ${layout_a},\n    ${side_mode}, ${fill_mode}, ${diag_type}, \n    ${element_b}, ${layout_b}, \n    ${element_c}, ${layout_c},\n    ${element_accumulator},\n    ${opcode_class},\n    ${arch},\n    cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n    cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,\n    cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,\n    ${epilogue_functor}<\n      ${element_c},\n      ${epilogue_vector_length},\n      ${element_accumulator},\n      ${element_epilogue},\n      cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling\n    >,\n    ${swizzling_functor},\n    ${stages},\n    ${align_a},\n    ${align_b},\n    ${split_k_serial},\n    ${math_operation}\n>;\n'
        # Template for complex-valued TRMM; additionally carries transform_a.
        self.trmm_complex_template = '\n// Trmm operator ${operation_name}\nusing Operation_${operation_name} = \n  typename cutlass::gemm::device::Trmm<\n    ${element_a}, ${layout_a}, \n    ${side_mode}, ${fill_mode}, ${diag_type}, \n    ${element_b}, ${layout_b}, \n    ${element_c}, ${layout_c},\n    ${element_accumulator},\n    ${opcode_class},\n    ${arch},\n    cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n    cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,\n    cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,\n    ${epilogue_functor}<\n      ${element_c},\n      ${epilogue_vector_length},\n      ${element_accumulator},\n      ${element_epilogue},\n      cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling\n    >,\n    ${swizzling_functor},\n    ${stages},\n    ${align_a},\n    ${align_b},\n    ${split_k_serial},\n    ${math_operation},\n    ${transform_a} \n>;\n'

    def emit(self, operation):
        """Return the instantiation source for `operation` by filling the template."""
        threadblock_shape = operation.tile_description.threadblock_shape
        warp_count = operation.tile_description.warp_count
        # Per-warp tile is the threadblock tile divided by the warp layout.
        warp_shape = [(threadblock_shape[idx] // warp_count[idx]) for idx in range(3)]
        # Vector length capped so one access is at most 128 bits.
        epilogue_vector_length = int((min((operation.C.alignment * DataTypeSize[operation.C.element]), 128) / DataTypeSize[operation.C.element]))
        values = {'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'side_mode': SideModeTag[operation.A.side_mode], 'fill_mode': FillModeTag[operation.A.fill_mode], 'diag_type': DiagTypeTag[operation.A.diag_type], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': ('cutlass::arch::Sm%d' % operation.arch), 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(1), 'align_b': str(operation.B.alignment), 'split_k_serial': 'false', 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'transform_a': ComplexTransformTag[operation.A.complex_transform]}
        # Complex operations need the transform_a-aware template.
        trmm_template = (self.trmm_complex_template if operation.is_complex() else self.trmm_template)
        return SubstituteTemplate(trmm_template, values)
def set_cpus(local_rank, world_size):
    """Pin the current process to a rank-specific subset of CPUs.

    Splits the process's CPU affinity set across up to 8 local workers so
    that co-located ranks do not contend for the same cores.

    Args:
        local_rank: rank of this process on the local machine.
        world_size: total number of processes in the job.

    Relies on module-level globals ``BPS_BENCHMARK`` and ``logger``.
    """
    local_size = min(world_size, 8)
    curr_process = psutil.Process()
    total_cpus = curr_process.cpu_affinity()
    total_cpu_count = len(total_cpus)
    # When we see more CPUs than an even per-rank share of the machine,
    # interleave the two halves of the affinity list — presumably to pair
    # hyperthread siblings together; TODO confirm the intended topology.
    if (total_cpu_count > (multiprocessing.cpu_count() / world_size)):
        orig_cpus = total_cpus
        total_cpus = []
        for i in range((total_cpu_count // 2)):
            total_cpus.append(orig_cpus[i])
            total_cpus.append(orig_cpus[(i + (total_cpu_count // 2))])
    ptr = 0
    local_cpu_count = 0
    local_cpus = []
    # Core-grouping factor for sharing slices between neighbouring ranks.
    # NOTE(review): the computed value is immediately overridden to 1 below,
    # so the grouping path is effectively disabled (kept as-is).
    CORE_GROUPING = min(local_size, (4 if ((total_cpu_count / 2) >= 20) else (2 if ((total_cpu_count / 2) >= 10) else 1)))
    CORE_GROUPING = 1
    core_dist_size = max((local_size // CORE_GROUPING), 1)
    core_dist_rank = (local_rank // CORE_GROUPING)
    # Walk slice sizes up to this rank's group; earlier groups absorb the
    # remainder CPUs one at a time.
    for r in range((core_dist_rank + 1)):
        ptr += local_cpu_count
        local_cpu_count = ((total_cpu_count // core_dist_size) + (1 if (r < (total_cpu_count % core_dist_size)) else 0))
    local_cpus += total_cpus[ptr:(ptr + local_cpu_count)]
    # With CORE_GROUPING > 1 the slice would be shared within the group;
    # drop entries belonging to the other group members (no-op when 1).
    pop_inds = [(((local_rank + offset) + 1) % CORE_GROUPING) for offset in range((CORE_GROUPING - 1))]
    for ind in sorted(pop_inds, reverse=True):
        local_cpus.pop(ind)
    if (BPS_BENCHMARK and (world_size == 1)):
        # Benchmark mode on a single rank: use a fixed block of 12 CPUs.
        local_cpus = total_cpus[0:12]
    curr_process.cpu_affinity(local_cpus)
    logger.info('Rank {} uses cpus {}'.format(local_rank, sorted(curr_process.cpu_affinity())))
# FIX: restored the stripped `@` — `_ARCH_REGISTRY.register()` returns a class
# decorator; as a bare statement its result was discarded and the class was
# never registered.
@_ARCH_REGISTRY.register()
class PanopticFPN(nn.Module):
    """Panoptic FPN meta-architecture: FPN backbone + RPN + ROI heads for
    instances, plus a semantic segmentation head, optionally combined into a
    panoptic output at inference time.
    """

    def __init__(self, cfg):
        super().__init__()
        self.instance_loss_weight = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT
        # Options for combining semantic + instance results into panoptic output.
        self.combine_on = cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED
        self.combine_overlap_threshold = cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH
        self.combine_stuff_area_limit = cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT
        self.combine_instances_confidence_threshold = cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
        self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape())
        # Buffers (not parameters) so they move with .to(device) but are not trained.
        self.register_buffer('pixel_mean', torch.Tensor(cfg.MODEL.PIXEL_MEAN).view((- 1), 1, 1))
        self.register_buffer('pixel_std', torch.Tensor(cfg.MODEL.PIXEL_STD).view((- 1), 1, 1))

    # FIX: restored the stripped `@property` — `forward` reads `self.device`
    # as an attribute, which only works if this is a property.
    @property
    def device(self):
        """Device the model lives on (tracked via the pixel_mean buffer)."""
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """Run the model on a batch of detectron2-style input dicts.

        Returns a loss dict in training mode, otherwise a list of per-image
        result dicts with 'sem_seg', 'instances' and optionally 'panoptic_seg'.
        """
        images = [x['image'].to(self.device) for x in batched_inputs]
        images = [((x - self.pixel_mean) / self.pixel_std) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        features = self.backbone(images.tensor)
        if ('proposals' in batched_inputs[0]):
            # NOTE(review): precomputed proposals are overwritten below when a
            # proposal generator exists — confirm this is intended.
            proposals = [x['proposals'].to(self.device) for x in batched_inputs]
            proposal_losses = {}
        if ('sem_seg' in batched_inputs[0]):
            gt_sem_seg = [x['sem_seg'].to(self.device) for x in batched_inputs]
            gt_sem_seg = ImageList.from_tensors(gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value).tensor
        else:
            gt_sem_seg = None
        (sem_seg_results, sem_seg_losses) = self.sem_seg_head(features, gt_sem_seg)
        if ('instances' in batched_inputs[0]):
            gt_instances = [x['instances'].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None
        if self.proposal_generator:
            (proposals, proposal_losses) = self.proposal_generator(images, features, gt_instances)
        (detector_results, detector_losses) = self.roi_heads(images, features, proposals, gt_instances)
        if self.training:
            losses = {}
            losses.update(sem_seg_losses)
            # Instance losses are reweighted relative to the semantic losses.
            losses.update({k: (v * self.instance_loss_weight) for (k, v) in detector_losses.items()})
            losses.update(proposal_losses)
            return losses
        # Inference: rescale each result back to the original image size.
        processed_results = []
        for (sem_seg_result, detector_result, input_per_image, image_size) in zip(sem_seg_results, detector_results, batched_inputs, images.image_sizes):
            height = input_per_image.get('height', image_size[0])
            width = input_per_image.get('width', image_size[1])
            sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
            detector_r = detector_postprocess(detector_result, height, width)
            processed_results.append({'sem_seg': sem_seg_r, 'instances': detector_r})
            if self.combine_on:
                panoptic_r = combine_semantic_and_instance_outputs(detector_r, sem_seg_r.argmax(dim=0), self.combine_overlap_threshold, self.combine_stuff_area_limit, self.combine_instances_confidence_threshold)
                processed_results[(- 1)]['panoptic_seg'] = panoptic_r
        return processed_results
def _first_line_re():
    """Return `first_line_re` as a text-mode (str) compiled regex.

    The module-level `first_line_re` may have been compiled from bytes;
    in that case an equivalent str-pattern regex is built on the fly.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # Bytes pattern: rebuild an equivalent text-mode regex.
    return re.compile(pattern.decode())
class CocoClipDatasetMapper():
    """Dataset mapper that turns a single COCO image into a pseudo video clip
    by repeating the image `sampling_frame_num` times, each with its own
    augmentation, producing per-frame images and Instances.
    """

    def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, sampling_frame_num: int=2):
        self.is_train = is_train
        self.augmentations = T.AugmentationList(augmentations)
        self.image_format = image_format
        self.use_instance_mask = use_instance_mask
        # Number of pseudo frames generated per source image.
        self.sampling_frame_num = sampling_frame_num
        logger = logging.getLogger(__name__)
        mode = ('training' if is_train else 'inference')
        logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}')

    # NOTE(review): takes `cls` and builds ctor kwargs — this looks like a
    # detectron2 `@classmethod` used with `@configurable`; the decorators
    # appear to have been stripped. Confirm against the original source.
    def from_config(cls, cfg, is_train: bool=True):
        """Build constructor kwargs from a detectron2 config."""
        augs = build_augmentation(cfg, is_train)
        sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM
        ret = {'is_train': is_train, 'augmentations': augs, 'image_format': cfg.INPUT.FORMAT, 'use_instance_mask': cfg.MODEL.MASK_ON, 'sampling_frame_num': sampling_frame_num}
        return ret

    def __call__(self, dataset_dict):
        """Map one dataset dict to a clip dict with per-frame image/instances lists."""
        dataset_dict = copy.deepcopy(dataset_dict)
        img_annos = dataset_dict.pop('annotations', None)
        file_name = dataset_dict.pop('file_name', None)
        original_image = utils.read_image(file_name, format=self.image_format)
        dataset_dict['image'] = []
        dataset_dict['instances'] = []
        # Every pseudo frame comes from the same source file.
        dataset_dict['file_names'] = ([file_name] * self.sampling_frame_num)
        for _ in range(self.sampling_frame_num):
            utils.check_image_size(dataset_dict, original_image)
            # Each frame gets an independently sampled augmentation.
            aug_input = T.AugInput(original_image)
            transforms = self.augmentations(aug_input)
            image = aug_input.image
            image_shape = image.shape[:2]
            # HWC -> CHW tensor.
            dataset_dict['image'].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))
            if ((img_annos is None) or (not self.is_train)):
                continue
            # Deep-copy annotations so per-frame transforms don't leak between frames.
            _img_annos = []
            for anno in img_annos:
                _anno = {}
                for (k, v) in anno.items():
                    _anno[k] = copy.deepcopy(v)
                _img_annos.append(_anno)
            annos = [utils.transform_instance_annotations(obj, transforms, image_shape) for obj in _img_annos if (obj.get('iscrowd', 0) == 0)]
            _gt_ids = list(range(len(annos)))
            for idx in range(len(annos)):
                # Guard against empty polygons: substitute a degenerate triangle.
                if (len(annos[idx]['segmentation']) == 0):
                    annos[idx]['segmentation'] = [np.array(([0.0] * 6))]
            instances = utils.annotations_to_instances(annos, image_shape, mask_format='bitmask')
            instances.gt_ids = torch.tensor(_gt_ids)
            if instances.has('gt_masks'):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
                instances = filter_empty_instances(instances)
            else:
                instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))
            dataset_dict['instances'].append(instances)
        return dataset_dict
def get_comparison_dtype(a, b):
    """Pick a dtype suitable for comparing tensors `a` and `b`.

    bfloat16 operands are treated as float32, the two dtypes are promoted,
    and float16 is widened to float32 unless both tensors live on the same
    CUDA device (where fp16 comparison is reliable).
    """
    def _effective(t):
        return torch.float32 if t.dtype is torch.bfloat16 else t.dtype

    result = torch.promote_types(_effective(a), _effective(b))
    # fp16 comparisons are only kept when both tensors share one CUDA device.
    same_cuda_device = (
        (a.device == b.device)
        and (a.device.type == 'cuda')
        and (b.device.type == 'cuda')
    )
    if (result is torch.float16) and (not same_cuda_device):
        result = torch.float32
    return result
def expid2model(expr_dir):
    """Locate the newest trained snapshot and settings for an experiment.

    Args:
        expr_dir: experiment directory containing a `snapshots/*.pt` model
            and a `*.ini` settings file.

    Returns:
        Tuple (ps, best_model_fname) where `ps` is the loaded Configer.

    Raises:
        ValueError: if the directory, a snapshot, or the settings file is missing.
    """
    if (not os.path.exists(expr_dir)):
        raise ValueError(('Could not find the experiment directory: %s' % expr_dir))
    # FIX: the original indexed glob results with [0]/[-1] before checking,
    # raising IndexError on missing files and leaving the existence check dead.
    snapshots = sorted(glob.glob(os.path.join(expr_dir, 'snapshots', '*.pt')), key=os.path.getmtime)
    if (not snapshots):
        raise ValueError(('Could not find any model snapshot (*.pt) under: %s' % os.path.join(expr_dir, 'snapshots')))
    best_model_fname = snapshots[(- 1)]
    try_num = os.path.basename(best_model_fname).split('_')[0]  # kept: parsed but unused (original behavior)
    print(('Found Trained Model: %s' % best_model_fname))
    settings_files = glob.glob(os.path.join(expr_dir, '*.ini'))
    if (not settings_files):
        raise ValueError(('Could not find the appropriate vposer_settings (*.ini) under: %s' % expr_dir))
    default_ps_fname = settings_files[0]
    # Deferred import so path-validation errors surface before the heavier
    # configer dependency is loaded.
    from configer import Configer
    ps = Configer(default_ps_fname=default_ps_fname, work_dir=expr_dir, best_model_fname=best_model_fname)
    return (ps, best_model_fname)
def dict_to_str(d: dict, grab: Optional[bool]=None) -> str:
    """Render a dict as one "key: value" entry per line.

    When `grab` is truthy, each value is instead shown with its shape/dtype
    (if any) followed by the output of the module-level `grab_tensor` helper.
    """
    if grab:
        # Verbose tensor-aware rendering (delegates to grab_tensor).
        return '\n'.join([f'''{k}: {getattr(v, 'shape', None)} {getattr(v, 'dtype', None)}
{grab_tensor(v)}''' for (k, v) in d.items()])
    # Plain rendering: one line per key/value pair.
    lines = []
    for (key, value) in d.items():
        lines.append(f'{key}: {value}')
    return '\n'.join(lines)
class TFAutoModelForMultipleChoice():
    """Factory class mapping a PretrainedConfig type to the corresponding TF
    multiple-choice model class. Not instantiable directly.
    """

    def __init__(self):
        raise EnvironmentError('TFAutoModelForMultipleChoice is designed to be instantiated using the `TFAutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForMultipleChoice.from_config(config)` methods.')

    # NOTE(review): the bare call below (and the ones before from_pretrained)
    # look like stripped decorators — plausibly transformers' docstring
    # decorators plus a `@classmethod` on each factory method (both take
    # `cls`). Confirm against the original transformers source.
    _list_option_in_docstrings(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, use_model_types=False)
    def from_config(cls, config):
        """Instantiate the mapped model class from a config (no weights loaded)."""
        if (type(config) in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()):
            return TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()))))

    _list_option_in_docstrings(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a multiple choice classification head---from a pretrained model.', TF_AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate the mapped model class and load pretrained weights."""
        config = kwargs.pop('config', None)
        # Resolve the config first if the caller did not supply one.
        if (not isinstance(config, PretrainedConfig)):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()):
            return TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()))))
def decision_list(n_leaves):
    """Build a right-nested pair chain over leaf indices 0..n_leaves-1.

    E.g. decision_list(4) -> (0, (1, (2, 3))). Requires n_leaves >= 2
    (fewer leaves raise IndexError, matching the recursive formulation).
    """
    idx = np.arange(n_leaves)
    # Start from the innermost pair and fold leftwards.
    chain = (idx[(- 2)], idx[(- 1)])
    for k in range((n_leaves - 3), (- 1), (- 1)):
        chain = (idx[k], chain)
    return chain
class PositionalEncoding(nn.Module):
    """Precomputed sinusoidal positional-encoding table of size (max_pos, dim).

    NOTE(review): the frequencies follow 10000**(i/dim) for i < dim//2 rather
    than the usual 10000**(2i/dim) — confirm this variant is intended.
    """

    def __init__(self, dim, max_pos=512):
        super().__init__()
        positions = torch.arange(max_pos)
        inv_freq = (torch.arange((dim // 2)) / dim)
        inv_freq = (inv_freq * torch.tensor(10000).log()).exp()
        # angle[p, i] = p / freq_i; add trailing axes for the sin/cos pair.
        angles = (rearrange(positions, 'L -> L 1') / inv_freq)
        angles = rearrange(angles, 'L d -> L d 1')
        table = torch.cat((angles.sin(), angles.cos()), dim=(- 1))
        # Interleave sin/cos pairs into the last dimension: (L, dim).
        self.pe = rearrange(table, 'L d sc -> L (d sc)')
        # Dummy parameter whose only role is exposing the module's device.
        self.dummy = nn.Parameter(torch.rand(1))

    def forward(self, length):
        """Return the first `length` encoding rows on the module's device."""
        return self.pe[:length].to(self.dummy.device)
def create_collaborator(col, workspace_root, data_path, archive_name, fed_workspace):
    """Set up and certify an OpenFL collaborator named `col` via the fx CLI.

    Creates a fresh directory for the collaborator, imports the workspace
    archive, generates a certificate request, has the aggregator sign it,
    and imports the signed certificate back.
    """
    col_path = (workspace_root / col)
    # Start from a clean collaborator directory.
    shutil.rmtree(col_path, ignore_errors=True)
    col_path.mkdir()
    workspace_dir = (col_path / fed_workspace)
    check_call(['fx', 'workspace', 'import', '--archive', (workspace_root / archive_name)], cwd=col_path)
    check_call(['fx', 'collaborator', 'create', '-d', data_path, '-n', col, '--silent'], cwd=workspace_dir)
    check_call(['fx', 'collaborator', 'generate-cert-request', '-n', col, '--silent'], cwd=workspace_dir)
    # The aggregator signs the collaborator's certificate request...
    request_pkg = (workspace_dir / f'col_{col}_to_agg_cert_request.zip')
    check_call(['fx', 'collaborator', 'certify', '--request-pkg', str(request_pkg), '--silent'], cwd=workspace_root)
    # ...and the collaborator imports the signed certificate back.
    import_path = (workspace_root / f'agg_to_col_{col}_signed_cert.zip')
    check_call(['fx', 'collaborator', 'certify', '--import', import_path], cwd=workspace_dir)
class AdminLanguage():
    """Holds a fixed list of administrative boilerplate phrases (clinical-note
    headers, provider titles, dosage fragments, etc.) to be removed from
    text — presumably during de-identification/cleaning of MIMIC-style
    clinical notes; confirm usage at the call sites.
    """

    def __init__(self):
        # Exact phrases stripped verbatim from documents.
        self.explicit_removal = ['Admission Date', 'Discharge Date', 'Date of Birth', 'Phone', 'Date/Time', 'ID', 'Completed by', 'Dictated By', 'Attending', 'Provider: ', 'Provider', 'Primary', 'Secondary', ' MD Phone', ' M.D. Phone', ' MD', ' PHD', ' X', ' IV', ' VI', ' III', ' II', ' VIII', 'JOB#', 'JOB#: cc', '# Code', 'Metoprolol Tartrate 25 mg Tablet Sig', ')', '000 unit/mL Suspension Sig', '0.5 % Drops ', ' Status: Inpatient DOB', 'Levothyroxine 50 mcg Tablet Sig', '0.5 % Drops Sig', 'Lidocaine 5 %(700 mg/patch) Adhesive Patch', 'Clopidogrel Bisulfate 75 mg Tablet Sig', 'Levofloxacin 500 mg Tablet Sig', 'Albuterol 90 mcg/Actuation Aerosol ', 'None Tech Quality: Adequate Tape #', '000 unit/mL Solution Sig', ' x', ' am', ' pm']
class DataStore(object):
    """Central registry for benchmark persistence backends, run ids, and
    benchmark configurations (ReBench).

    Caches one persistence object per output filename and deduplicates
    RunId / BenchmarkConfig instances so identical runs share state.
    """

    def __init__(self, ui):
        self._files = {}       # filename -> persistence backend
        self._run_ids = {}     # RunId -> canonical RunId (deduplication)
        self._bench_cfgs = {}  # str-list key -> registered BenchmarkConfig
        self.ui = ui

    def load_data(self, runs, discard_run_data):
        """Ask every known persistence backend to load prior run data."""
        for persistence in list(self._files.values()):
            persistence.load_data(runs, discard_run_data)

    def get(self, filename, configurator, action):
        """Return (creating if needed) the persistence backend for `filename`.

        The backend is a file persistence (profile- or measurement-flavored
        depending on `action`), optionally composed with a ReBenchDB reporter
        when the configurator enables it.
        """
        if (filename not in self._files):
            source = determine_source_details(configurator)
            # ReBenchDB reporting requires a resolvable commit id.
            if (configurator.use_rebench_db and (source['commitId'] is None)):
                raise UIError(((('Reporting to ReBenchDB is enabled, ' + 'but failed to obtain source details. ') + 'If ReBench is run outside of the relevant repo ') + 'set the path with --git-repo'), None)
            if (configurator.use_rebench_db and ('repo_url' in configurator.rebench_db)):
                source['repoURL'] = configurator.rebench_db['repo_url']
            if (configurator.options and configurator.options.branch):
                source['branchOrTag'] = configurator.options.branch
            if (action == 'profile'):
                p = _ProfileFilePersistence(filename, self, configurator, self.ui)
            else:
                p = _FilePersistence(filename, self, configurator, self.ui)
            self.ui.debug_output_info('ReBenchDB enabled: {e}\n', e=configurator.use_rebench_db)
            if configurator.use_rebench_db:
                if (action == 'profile'):
                    db = _ProfileReBenchDB(configurator, self, self.ui)
                else:
                    db = _ReBenchDB(configurator, self, self.ui)
                # Wrap file + DB reporting behind one composite interface.
                p = _CompositePersistence(p, db)
            self._files[filename] = p
        return self._files[filename]

    def create_run_id(self, benchmark, cores, input_size, var_value, machine):
        """Create (or return the canonical, deduplicated) RunId."""
        # Normalize inputs: numeric strings become ints, empty strings None.
        if (isinstance(cores, str) and cores.isdigit()):
            cores = int(cores)
        if (input_size == ''):
            input_size = None
        if (var_value == ''):
            var_value = None
        run = RunId(benchmark, cores, input_size, var_value, machine)
        if (run in self._run_ids):
            return self._run_ids[run]
        else:
            self._run_ids[run] = run
            return run

    def get_config(self, name, executor_name, suite_name, extra_args):
        """Look up a previously registered BenchmarkConfig; raise if unknown."""
        key = (name, executor_name, suite_name, ('' if (extra_args is None) else str(extra_args)))
        if (key not in self._bench_cfgs):
            raise ValueError(('Requested configuration is not available: ' + str(key)))
        return self._bench_cfgs[key]

    def register_config(self, cfg):
        """Register a BenchmarkConfig; duplicates are considered an error."""
        key = tuple(cfg.as_str_list())
        if (key in self._bench_cfgs):
            raise ValueError((('Two identical BenchmarkConfig tried to ' + 'register. This seems to be wrong: ') + str(key)))
        self._bench_cfgs[key] = cfg
        return cfg
def parseArgs():
    """Parse test options and attach the module-level image constants.

    Returns the parsed options object with output_channels / img_width /
    img_height populated from the globals.
    """
    options = TestOptions().parse()
    options.output_channels = OUTPUT_CHANNELS
    options.img_width = IMG_WIDTH
    options.img_height = IMG_HEIGHT
    return options
class LitePose(nn.Module):
    """Backbone + head model with a per-pixel cross-entropy objective.

    NOTE(review): despite the name, the forward pass (interpolation to input
    size, argmax over channels, CrossEntropyLoss2d) is structured like
    semantic segmentation — confirm the intended task.
    """

    def __init__(self, dictionary=None, model_cfg=None):
        super().__init__()
        self.dictionary = dictionary
        self.model_cfg = model_cfg
        self.input_size = [1024, 2048]
        # Dummy input used e.g. for tracing/summary at the fixed input size.
        self.dummy_input = torch.zeros(1, 3, self.input_size[0], self.input_size[1])
        # `dictionary` is a list of single-key dicts: {class_name: weight}.
        self.num_classes = len(self.dictionary)
        self.category = [v for d in self.dictionary for v in d.keys()]
        self.weight = [d[v] for d in self.dictionary for v in d.keys() if (v in self.category)]
        self.setup_extra_params()
        self.backbone = build_backbone(self.model_cfg.BACKBONE)
        self.head = build_head(self.model_cfg.HEAD)
        # Class-weighted 2D cross-entropy on CUDA.
        self.criterion = CrossEntropyLoss2d(weight=torch.from_numpy(np.array(self.weight)).float()).cuda()
        set_bn_momentum(self.backbone, momentum=0.01)

    def setup_extra_params(self):
        """Propagate the class count into the head config before building it."""
        self.model_cfg.HEAD.__setitem__('num_classes', self.num_classes)

    def _init_weight(self, *stages):
        """Kaiming-init conv layers; constant-init norm/linear layers."""
        for m in chain(*stages):
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, imgs, targets=None, mode='infer', **kwargs):
        """Run the model.

        mode='infer' returns per-pixel class predictions; mode='val' returns
        (losses, predictions); any other mode returns the loss dict only.
        """
        (batch_size, ch, _, _) = imgs.shape
        (low_level_feat, x) = self.backbone(imgs)
        x = self.head(x, low_level_feat)
        # Upsample logits back to the input resolution.
        outputs = F.interpolate(x, size=imgs.size()[2:], mode='bilinear', align_corners=False)
        if (mode == 'infer'):
            return torch.argmax(outputs, dim=1)
        else:
            losses = {}
            losses['ce_loss'] = self.criterion(outputs, targets)
            losses['loss'] = losses['ce_loss']
            if (mode == 'val'):
                return (losses, torch.argmax(outputs, dim=1))
            else:
                return losses
def searchForAnswer(answer, table, passages, mapping_entity):
    """Find occurrences of `answer` in the table and in linked passages.

    First scans the table (via `loop_through_table`, which fills `results`
    and `matched_cells` in place), then checks each passage whose key maps
    to table locations for a whole-word, case-insensitive match. Passage
    matches are only added for locations not already matched in the table.

    Returns a list of (title, location, key, 'passage'/...) tuples.
    """
    results = []
    matched_cells = []
    loop_through_table(answer, table, results, matched_cells)
    # Pad with spaces so only whole-word matches count.
    needle = ((' ' + answer.lower()) + ' ')
    for (link, text) in passages.items():
        if (link not in mapping_entity):
            continue
        if (needle in ((' ' + text.lower()) + ' ')):
            for loc in mapping_entity[link]:
                if (loc not in matched_cells):
                    results.append((link.replace('/wiki/', '').replace('_', ' '), loc, link, 'passage'))
    return results
# NOTE(review): the `.parametrize(...)` lines below appear to be stripped
# `@pytest.mark.parametrize` decorators (the `@pytest.mark` prefix was lost)
# — confirm against the original test file; as written they are syntax errors.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('prob', [0.7, 1.0])
.parametrize('area_ratios', [(0.02, 0.04)])
.parametrize('aspect_ratios', [(0.3, 3.3333)])
.parametrize('replacements', [(2.0, 2.0), (3.0, 4.0)])
.parametrize('n', [1, 3])
.parametrize('share', [True, False])
.parametrize('inplace', [False])
.parametrize('base_axis', [1, (- 3)])
.parametrize('func_seed', [412, (- 1)])
.parametrize('channel_last', [False, True])
.parametrize('ste_fine_grained', [True, False])
def test_random_erase_backward(ctx, func_name, seed, prob, area_ratios, aspect_ratios, replacements, n, share, inplace, base_axis, func_seed, channel_last, ste_fine_grained):
    """Check gradients of nnabla's F.random_erase.

    Inputs are drawn in [1, 2) and replacements are >= 2, so erased output
    values are >= the smallest replacement (lb) and untouched values are < lb;
    this lets the test identify erased positions from the forward output.
    """
    if (channel_last and (func_name == 'RandomErase')):
        pytest.skip('RandomErase with channel_last is only supported in CUDA.')
    lb = replacements[0]
    rng = np.random.RandomState(seed)
    (b, c, h, w) = (4, 3, 32, 32)
    ishape = [b, c, h, w]
    x = nn.Variable.from_numpy_array((rng.rand(*ishape) + 1.0)).apply(need_grad=True)
    # Pre-seed the gradient buffer; backward accumulates into it.
    xg = rng.randn(*x.shape)
    x.g = xg
    with nn.context_scope(ctx):
        y = F.random_erase(x, prob=prob, area_ratios=area_ratios, aspect_ratios=aspect_ratios, replacements=replacements, n=n, share=share, inplace=inplace, base_axis=base_axis, seed=func_seed, channel_last=channel_last, ste_fine_grained=ste_fine_grained)
    y.forward()
    y.backward(clear_buffer=True)
    if ste_fine_grained:
        # Fine-grained STE: only non-erased positions receive gradient 1.
        xg[np.where((y.d < lb))] += 1.0
        assert_allclose(x.g, xg)
    else:
        # Plain STE: every position receives gradient 1.
        assert_allclose(x.g, (xg + 1.0))
    # Second pass through an identity to inspect the intermediate gradient.
    x = nn.Variable.from_numpy_array((rng.rand(*ishape) + 1.0)).apply(need_grad=True)
    y = F.identity(x)
    with nn.context_scope(ctx):
        z = F.random_erase(y, prob=prob, area_ratios=area_ratios, aspect_ratios=aspect_ratios, replacements=replacements, n=n, share=share, inplace=inplace, base_axis=base_axis, seed=func_seed, channel_last=channel_last, ste_fine_grained=ste_fine_grained)
    z.forward()
    z.backward(clear_buffer=False)
    if ste_fine_grained:
        # Erased positions (z.d >= lb) get zero gradient, the rest get one.
        assert_allclose(y.g[np.where((z.d >= lb))], 0.0)
        assert_allclose(y.g[np.where((z.d < lb))], 1.0)
    else:
        assert_allclose(y.g, 1.0)
def dataio_prepare(hparams, tokenizer):
    """Build SpeechBrain train/valid/test DynamicItemDatasets from CSVs.

    Applies duration-based sorting/filtering per `hparams['sorting']`, adds an
    audio pipeline (load + resample) and a text pipeline (tokenize, add
    BOS/EOS), and sets the output keys consumed by the training loop.
    """
    data_folder = hparams['data_folder']
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['train_csv'], replacements={'data_root': data_folder})
    if (hparams['sorting'] == 'ascending'):
        # Length-sorted data: disable shuffling so the order is preserved.
        train_data = train_data.filtered_sorted(sort_key='duration', key_max_value={'duration': hparams['avoid_if_longer_than']})
        hparams['train_dataloader_opts']['shuffle'] = False
    elif (hparams['sorting'] == 'descending'):
        train_data = train_data.filtered_sorted(sort_key='duration', reverse=True, key_max_value={'duration': hparams['avoid_if_longer_than']})
        hparams['train_dataloader_opts']['shuffle'] = False
    elif (hparams['sorting'] == 'random'):
        pass
    else:
        raise NotImplementedError('sorting must be random, ascending or descending')
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['valid_csv'], replacements={'data_root': data_folder})
    valid_data = valid_data.filtered_sorted(sort_key='duration')
    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['test_csv'], replacements={'data_root': data_folder})
    test_data = test_data.filtered_sorted(sort_key='duration')
    datasets = [train_data, valid_data, test_data]
    # NOTE(review): the two `.data_pipeline...` lines below look like stripped
    # `@sb.utils.data_pipeline.takes` / `.provides` decorators — confirm
    # against the original SpeechBrain recipe; as written they are syntax errors.
    .data_pipeline.takes('wav')
    .data_pipeline.provides('sig')
    def audio_pipeline(wav):
        # Load the waveform and resample it to the configured rate.
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(info.sample_rate, hparams['sample_rate'])(sig)
        return resampled
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # Same decorator-stripping applies to the text pipeline below.
    .data_pipeline.takes('wrd')
    .data_pipeline.provides('wrd', 'tokens_list', 'tokens_bos', 'tokens_eos', 'tokens')
    def text_pipeline(wrd):
        # Generator pipeline: yields each provided item in declaration order.
        (yield wrd)
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        (yield tokens_list)
        tokens_bos = torch.LongTensor(([hparams['bos_index']] + tokens_list))
        (yield tokens_bos)
        tokens_eos = torch.LongTensor((tokens_list + [hparams['eos_index']]))
        (yield tokens_eos)
        tokens = torch.LongTensor(tokens_list)
        (yield tokens)
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    sb.dataio.dataset.set_output_keys(datasets, ['id', 'sig', 'tokens_bos', 'tokens_eos', 'tokens'])
    return (train_data, valid_data, test_data)
def put_acquire_arg_buffer(entry, code, pos):
    """Emit C code that acquires the Python buffer for a buffer-typed
    function argument (Cython code generation).

    Args:
        entry: symbol-table entry of the buffer argument.
        code: code writer receiving the generated C lines.
        pos: source position used for error reporting.
    """
    buffer_aux = entry.buffer_aux
    getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
    # Open a C block so the format-stack array is scoped locally.
    code.putln('{')
    code.putln(('__Pyx_BufFmt_StackElem __pyx_stack[%d];' % entry.type.dtype.struct_nesting_depth()))
    # The getbuffer call returns -1 on failure: jump to the error label.
    code.putln(code.error_goto_if(('%s == -1' % getbuffer), pos))
    code.putln('}')
    # Copy the acquired buffer's metadata into local scope variables.
    put_unpack_buffer_aux_into_scope(entry, code)
def uncertainty_sampling(model_instance, pool, size):
    """Active-learning query: pick the `size` most uncertain pool sentences.

    Uncertainty of a sentence is the sum over its (non-truncated) tokens of
    1 - max class probability.

    Returns:
        Tuple (query_index, selected_examples).
    """
    loader = get_tr_set(train_examples=pool, batch_size=1, args=args)
    (raw_prediction, turncate_list) = active_eval(loader, model_instance)
    # Per-token confidence: the max probability over the label axis.
    word_prob = np.max(raw_prediction, axis=2)
    scores = [
        np.sum((1 - probs[:turncate_list[idx]]))
        for (idx, probs) in enumerate(word_prob)
    ]
    query_index = multi_argmax(np.array(scores), size)
    return (query_index, pool[query_index])
class ModelEMA():
    """Exponential moving average of model weights.

    Keeps a frozen copy of the model whose floating-point parameters are
    blended toward the live model after each update, with a decay that ramps
    from ~0 up to `decay` over roughly 2000 updates.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        base = (model.module if is_parallel(model) else model)
        self.ema = deepcopy(base).eval()
        self.updates = updates
        # Ramped decay: small early on so EMA tracks the young model quickly.
        self.decay = (lambda x: (decay * (1 - math.exp(((- x) / 2000)))))
        for param in self.ema.parameters():
            param.requires_grad_(False)

    def update(self, model):
        """Blend the EMA weights toward the current model weights."""
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            source = (model.module.state_dict() if is_parallel(model) else model.state_dict())
            for (key, val) in self.ema.state_dict().items():
                # Only float tensors are averaged; int buffers are left alone.
                if val.dtype.is_floating_point:
                    val *= d
                    val += ((1.0 - d) * source[key].detach())

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        """Copy selected non-tensor attributes from the model onto the EMA."""
        copy_attr(self.ema, model, include, exclude)
def mean(aList):
    """Return the arithmetic mean of aList, or 0 when it is empty.

    Single pass, so it works on any iterable (no len() required).
    """
    total, n = 0, 0
    for value in aList:
        total += value
        n += 1
    return total / n if n else 0
class CategoricalCrossEntropy(Layer):
    """Layer computing categorical cross-entropy over a (pred, true) pair.

    `call` expects x to be a pair whose first element is passed as the
    backend's `output` argument and whose second is passed as `target`.
    """

    def __init__(self, from_logits=False, **kwargs):
        # Whether the first input holds raw logits rather than probabilities.
        self._from_logits = from_logits
        super().__init__(**kwargs)

    def call(self, x):
        output, target = x[0], x[1]
        return K.categorical_crossentropy(target, output, from_logits=self._from_logits)
class EmbeddingWriterConfig(argparse.ArgumentParser):
    """Argument parser for the flashlight embedding pre-computation script.

    Required: input/output directories, model checkpoint, dataset splits.
    Optional: audio extension, label-copy suppression, feature-vector
    choice, and GPU index.
    """

    def __init__(self):
        # BUG FIX: the string was previously passed positionally, which sets
        # ArgumentParser's `prog` (the program name in usage lines), not the
        # parser description it was clearly meant to be.
        super().__init__(description='Pre-compute embeddings for flashlight datasets')
        # Shared keyword set for the required string-valued options.
        required_str = {'action': 'store', 'type': str, 'required': True}
        self.add_argument('--input', '-i', help='Input Directory', **required_str)
        self.add_argument('--output', '-o', help='Output Directory', **required_str)
        self.add_argument('--model', help='Path to model checkpoint', **required_str)
        self.add_argument('--split', help='Dataset Splits', nargs='+', **required_str)
        self.add_argument('--ext', default='wav', required=False, help='Audio file extension')
        self.add_argument('--no-copy-labels', action='store_true', help='Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.')
        self.add_argument('--use-feat', action='store_true', help="Use the feature vector ('z') instead of context vector ('c') for features")
        self.add_argument('--gpu', help='GPU to use', default=0, type=int)
def analyze_predictions(model, dataset, class_to_idx, pad_idx, device, args, out_file=None, visualize_output=True, tokenizer=None):
    """Evaluate `model` on `dataset` across several seeds and summarize accuracy.

    Accuracy is split by hardness (easy: stimulus hardness <= 2) and by
    view-dependence of the referring expression, then aggregated as
    mean/std across seeds. When `out_file` is given, also writes a per-seed
    accuracy CSV, a LaTeX summary table, and (if `visualize_output`) a
    pickle of the raw per-seed statistics.

    Returns:
        DataFrame with one row per metric and 'mean'/'std' columns.
    """
    references = dataset.references
    # Hardness is the third field decoded from the stimulus id string.
    hardness = references.stimulus_id.apply(lambda x: decode_stimulus_string(x)[2])
    view_dep_mask = is_explicitly_view_dependent(references)
    easy_context_mask = hardness <= 2
    test_seeds = [args.random_seed, 1, 10, 20, 100]
    net_stats_all_seed = []
    for seed in test_seeds:
        d_loader = dataset_to_dataloader(dataset, 'test', args.batch_size, n_workers=5, seed=seed)
        assert d_loader.dataset.references is references
        net_stats = detailed_predictions_on_dataset(model, d_loader, args=args, device=device, FOR_VISUALIZATION=True, tokenizer=tokenizer)
        net_stats_all_seed.append(net_stats)
    # BUG FIX: out_file defaults to None but was previously sliced
    # (out_file[:-4]) and passed to to_csv before the `out_file is not None`
    # check, crashing on the default call. All file writes are now guarded.
    if visualize_output and out_file is not None:
        from referit3d.utils import pickle_data
        pickle_data(out_file[:-4] + 'all_vis.pkl', net_stats_all_seed)
    all_accuracy = []
    view_dep_acc = []
    view_indep_acc = []
    easy_acc = []
    hard_acc = []
    among_true_acc = []
    for stats in net_stats_all_seed:
        got_it_right = stats['guessed_correctly']
        all_accuracy.append(got_it_right.mean() * 100)
        view_dep_acc.append(got_it_right[view_dep_mask].mean() * 100)
        view_indep_acc.append(got_it_right[~view_dep_mask].mean() * 100)
        easy_acc.append(got_it_right[easy_context_mask].mean() * 100)
        hard_acc.append(got_it_right[~easy_context_mask].mean() * 100)
        got_it_right = stats['guessed_correctly_among_true_class']
        among_true_acc.append(got_it_right.mean() * 100)
    acc_df = pd.DataFrame({'hard': hard_acc, 'easy': easy_acc, 'v-dep': view_dep_acc, 'v-indep': view_indep_acc, 'all': all_accuracy, 'among-true': among_true_acc})
    pd.options.display.float_format = '{:,.1f}'.format
    descriptive = acc_df.describe().loc[['mean', 'std']].T
    if out_file is not None:
        acc_df.to_csv(out_file[:-4] + '.csv', index=False)
        with open(out_file, 'w') as f_out:
            f_out.write(descriptive.to_latex())
    return descriptive
class RandomCrop_city_gnet(object):
    """Random crop for (image, mask) pairs after resizing both to 600x300.

    The pair is optionally zero-padded, asserted to share a size, resized
    to a fixed 600x300 canvas (bilinear for the image, nearest-neighbor for
    the mask), and then the same random `size` window is cropped from both.
    """

    def __init__(self, size, padding=0):
        self.size = tuple(size)  # (th, tw): crop height and width
        self.padding = padding

    def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)
        assert img.size == mask.size
        th, tw = self.size
        # Fixed working resolution; crop offsets are drawn within it.
        img = img.resize((600, 300), Image.BILINEAR)
        mask = mask.resize((600, 300), Image.NEAREST)
        left = random.randint(0, 600 - tw)
        top = random.randint(0, 300 - th)
        box = (left, top, left + tw, top + th)
        return (img.crop(box), mask.crop(box))
def compute_score(hist, correct, labeled):
    """Derive segmentation metrics from a confusion matrix.

    Args:
        hist: (C, C) confusion matrix, rows = ground truth, cols = prediction.
        correct: count of correctly classified pixels.
        labeled: count of labeled (non-ignored) pixels.

    Returns:
        (iu, mean_IU, mean_IU_no_back, mean_pixel_acc), where `iu` is the
        per-class intersection-over-union vector.
    """
    tp = np.diag(hist)
    # IoU = TP / (GT + Pred - TP); absent classes give NaN, skipped by nanmean.
    iu = tp / (hist.sum(1) + hist.sum(0) - tp)
    mean_IU = np.nanmean(iu)
    mean_IU_no_back = np.nanmean(iu[1:])  # exclude class 0 (background)
    # NOTE(review): a frequency-weighted IoU (freq_IU) used to be computed
    # here but was never used or returned; removed as dead code.
    mean_pixel_acc = correct / labeled
    return (iu, mean_IU, mean_IU_no_back, mean_pixel_acc)
def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for the `_lsq` subpackage.

    Registers the C extension implementing Givens-rotation elimination.
    """
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('_lsq', parent_package, top_path)
    cfg.add_extension('givens_elimination', sources=['givens_elimination.c'])
    return cfg
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.