class PropDualVars():
def __init__(self, lambdas, mus):
self.lambdas = lambdas
self.mus = mus
    @staticmethod
    def get_duals_from(weights, additional_coeffs, lower_bounds, upper_bounds, init_type='crown', alphas=None):
(mus, mu, lay_idx) = handle_propagation_add_coeff(weights, additional_coeffs, lower_bounds)
do_kw = ((init_type == 'KW') and (alphas is None))
do_crown = ((init_type == 'crown') and (alphas is None))
lbdas = []
while (lay_idx > 0):
lay = weights[lay_idx]
lbda = lay.backward(mu)
lbdas.append(lbda)
lbs = lower_bounds[lay_idx].unsqueeze(1)
ubs = upper_bounds[lay_idx].unsqueeze(1)
ub_slope = (ubs / (ubs - lbs))
ub_slope.masked_fill_((lbs >= 0), 1)
ub_slope.masked_fill_((ubs <= 0), 0)
if do_crown:
lb_slope = (ubs >= torch.abs(lbs)).type(lbs.dtype)
lb_slope.masked_fill_((lbs >= 0), 1)
lb_slope.masked_fill_((ubs <= 0), 0)
elif (alphas is None):
lb_slope = ub_slope
else:
lb_slope = torch.where(((lbs < 0) & (ubs > 0)), alphas[(lay_idx - 1)], ub_slope)
if (not do_kw):
mu = (torch.where((lbda >= 0), ub_slope, lb_slope) * lbda)
else:
mu = (lbda * ub_slope)
mus.append(mu)
lay_idx -= 1
mus.reverse()
lbdas.reverse()
        return PropDualVars(lbdas, mus)
def convert_func_to_numpy(func, shape, device, dtype):
def np_func(t, y):
t = torch.tensor(t).to(device, dtype)
y = torch.reshape(torch.tensor(y).to(device, dtype), shape)
with torch.no_grad():
f = func(t, y)
return f.detach().cpu().numpy().reshape((- 1))
    return np_func
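# Hedged usage sketch: wrapping a torch right-hand side (dy/dt = -y, chosen
# purely for illustration) so scipy.integrate.solve_ivp can drive it.
import numpy as np
import torch
from scipy.integrate import solve_ivp

rhs = convert_func_to_numpy(lambda t, y: -y, shape=(1,), device='cpu', dtype=torch.float64)
sol = solve_ivp(rhs, (0.0, 1.0), np.array([1.0]))
print(sol.y[0, -1])  # ~ exp(-1) = 0.3678...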
class TestDocsLinks(unittest.TestCase):
    def check_link(self, _url):
try:
response = requests.get(_url)
if (response.status_code == 200):
return True
except Exception as e:
print(f"Error checking link '{_url}': {e}")
return False
def test_readme_and_rst_files(self):
mct_folder = getcwd()
print('MCT folder:', mct_folder)
for (filepath, _, filenames) in walk(mct_folder):
for filename in filenames:
if (filename.endswith('.md') or filename.endswith('.ipynb')):
with open(join(filepath, filename), 'r') as fh:
lines = fh.readlines()
for (i, l) in enumerate(lines):
_strs = re.findall('\\[.[^]]*\\]\\(.[^)]*\\)', l)
for link_str in _strs:
_link = link_str.split(']')[(- 1)][1:(- 1)]
_link = _link.replace('://colab.research.google.com/github/', '://github.com/')
if (_link[0] == '#'):
pass
                                elif (('http://' in _link) or ('https://' in _link)):  # original string literals lost; external-URL check assumed
self.assertTrue(self.check_link(_link), msg=f'Broken link: {_link} in {join(filepath, filename)}')
print('Link ok:', _link)
else:
_link = _link.split('#')[0]
self.assertTrue((isdir(join(filepath, _link)) or isfile(join(filepath, _link))), msg=f'Broken link: {_link} in {join(filepath, filename)}')
print('Link ok:', _link)
elif filename.endswith('.rst'):
with open(join(filepath, filename), 'r') as fh:
lines = fh.readlines()
for (i, l) in enumerate(lines):
_strs = re.findall('<([^<>]+)>', l)
for _link in _strs:
if _link.startswith('ug-'):
pass
                                elif (('http://' in _link) or ('https://' in _link)):  # original string literals lost; external-URL check assumed
self.assertTrue(self.check_link(_link), msg=f'Broken link: {_link} in {join(filepath, filename)}')
print('Link ok:', _link)
else:
self.assertTrue(isfile(join(filepath, (_link.replace('../', '') + '.rst'))), msg=f'Broken link: {_link} in {join(filepath, filename)}')
                                    print('Link ok:', _link)
class ExperimentManager(object):
def __init__(self, experiment_dir, model=None, optimizer=None):
self.logger = logging.getLogger(type(self).__name__)
self.experiment_dir = experiment_dir
self.model = model
self.optimizer = optimizer
self.model_dir = os.path.join(self.experiment_dir, 'state', 'model')
self.optim_dir = os.path.join(self.experiment_dir, 'state', 'optimizer')
self.log_dir = os.path.join(self.experiment_dir, 'log')
self.dirs = (self.experiment_dir, self.model_dir, self.log_dir, self.optim_dir)
def make_dirs(self):
for d in self.dirs:
if (not os.path.exists(d)):
os.makedirs(d)
assert self.all_dirs_exists()
def delete_dirs(self):
for d in self.dirs:
if os.path.exists(d):
shutil.rmtree(d)
assert (not self.any_dir_exists())
def any_dir_exists(self):
return any([os.path.exists(d) for d in self.dirs])
def all_dirs_exists(self):
return all([os.path.exists(d) for d in self.dirs])
def save_model_state(self, epoch):
model_fname = os.path.join(self.model_dir, '{}.pt'.format(epoch))
self.logger.info('Saving model state to: {}'.format(model_fname))
torch.save(self.model.state_dict(), model_fname)
def load_model_state(self, epoch):
model_fname = os.path.join(self.model_dir, '{}.pt'.format(epoch))
self.logger.info('Loading model state from: {}'.format(model_fname))
self.model.load_state_dict(torch.load(model_fname))
def save_optimizer_state(self, epoch):
optim_fname = os.path.join(self.optim_dir, '{}.pt'.format(epoch))
self.logger.info('Saving optimizer state to: {}'.format(optim_fname))
torch.save(self.optimizer.state_dict(), optim_fname)
def load_optimizer_state(self, epoch):
optim_fname = os.path.join(self.optim_dir, '{}.pt'.format(epoch))
self.logger.info('Loading optimizer state from {}'.format(optim_fname))
self.optimizer.load_state_dict(torch.load(optim_fname))
def save_train_state(self, epoch):
self.save_model_state(epoch)
self.save_optimizer_state(epoch)
def load_train_state(self, epoch):
self.load_model_state(epoch)
self.load_optimizer_state(epoch)
def get_last_model_iteration(self):
return np.array(([0] + [int(os.path.basename(e).split('.')[0]) for e in glob.glob(os.path.join(self.model_dir, '*.pt'))])).max()
def load_last_train_state(self):
        self.load_train_state(self.get_last_model_iteration())
def calculate_iou_simple(pred_arr1, pred_arr2):
diff = (pred_arr1.shape[0] - (pred_arr1 - pred_arr2).count_nonzero())
iou = (diff / pred_arr1.shape[0])
    return iou.cpu()
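# Minimal usage sketch (assumes torch): despite its name, the function returns
# the fraction of positions where the two 1-D prediction tensors agree.
import torch

pred_a = torch.tensor([1, 2, 3, 4])
pred_b = torch.tensor([1, 2, 0, 4])
print(calculate_iou_simple(pred_a, pred_b))  # tensor(0.7500)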
def visualfrontend_checker():
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = VisualFrontend().to(device)
model.eval()
(T, N, C, H, W) = (10, args['BATCH_SIZE'], 1, args['ROI_SIZE'], args['ROI_SIZE'])
inputBatch = torch.rand(T, N, C, H, W).to(device)
with torch.no_grad():
outputBatch = model(inputBatch)
print(outputBatch.shape)
    return
class TestGammaincc(object):
    @pytest.mark.parametrize('a, x', INVALID_POINTS)
def test_domain(self, a, x):
assert np.isnan(sc.gammaincc(a, x))
def test_a_eq_0_x_gt_0(self):
assert (sc.gammaincc(0, 1) == 0)
    @pytest.mark.parametrize('a, x, desired', [(np.inf, 1, 1), (np.inf, 0, 1), (np.inf, np.inf, np.nan), (1, np.inf, 0)])
def test_infinite_arguments(self, a, x, desired):
result = sc.gammaincc(a, x)
if np.isnan(desired):
assert np.isnan(result)
else:
assert (result == desired)
def test_infinite_limits(self):
assert (sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100))
assert_allclose(sc.gammaincc(100, 1000), sc.gammaincc(100, np.inf), atol=1e-200, rtol=0)
def test_limit_check(self):
result = sc.gammaincc(1e-10, 1)
limit = sc.gammaincc(0, 1)
assert np.isclose(result, limit)
def test_x_zero(self):
a = np.arange(1, 10)
assert_array_equal(sc.gammaincc(a, 0), 1)
def test_roundtrip(self):
a = np.logspace((- 5), 10, 100)
x = np.logspace((- 5), 10, 100)
y = sc.gammainccinv(a, sc.gammaincc(a, x))
        assert_allclose(x, y, rtol=1e-14)
class Net():
def __init__(self, config, mode):
self.config = config
self.mode = mode
self.w_init = tf.keras.initializers.RandomNormal()
self.b_init = tf.keras.initializers.RandomUniform(*self.config.net.b_init)
self.build_net()
def build_net(self):
num_items = self.config.num_items
num_hidden_units = self.config.net.num_hidden_units
wd = (None if ('wd' not in self.config.train) else self.config.train.wd)
with tf.variable_scope('utility'):
self.alpha = create_var('alpha', [num_items, num_hidden_units], initializer=self.w_init, wd=wd)
self.bias = create_var('bias', [num_hidden_units], initializer=self.b_init)
def inference(self, x):
padding_w = tf.constant([[0, 0], [0, 1]])
padding_b = tf.constant([[0, 1]])
w = tf.pad(tf.nn.sigmoid(self.alpha), padding_w, 'CONSTANT')
b = tf.pad(self.bias, padding_b, 'CONSTANT')
utility = (tf.matmul(x, w) + b)
U = tf.nn.softmax((utility * self.config.net.eps), (- 1))
        if (self.mode == 'train'):
a = tf.matmul(U, tf.transpose(w))
else:
a = tf.matmul(tf.one_hot(tf.argmax(utility, (- 1)), (self.config.net.num_hidden_units + 1)), tf.transpose(w))
p = (tf.reduce_sum(tf.multiply(a, x), (- 1)) - tf.reduce_max(utility, (- 1)))
        return (a, p)
def _invert_perm(perm):
perm_inv = ([0] * len(perm))
for (i, j) in enumerate(perm):
perm_inv[j] = i
    return tuple(perm_inv)
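# Quick illustration: composing a permutation with its inverse gives the identity.
perm = (2, 0, 1)
inv = _invert_perm(perm)                      # (1, 2, 0)
assert tuple(perm[i] for i in inv) == (0, 1, 2)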
class Tacotron2Brain(sb.Brain):
def on_fit_start(self):
self.hparams.progress_sample_logger.reset()
self.last_epoch = 0
self.last_batch = None
self.last_preds = None
if self.hparams.log_audio_samples:
self.vocoder = HIFIGAN.from_hparams(source=self.hparams.vocoder, savedir=self.hparams.vocoder_savedir, run_opts={'device': self.device}, freeze_params=True)
self.last_loss_stats = {}
return super().on_fit_start()
def compute_forward(self, batch, stage):
effective_batch = self.batch_to_device(batch)
(inputs, y, num_items, _, _, spk_embs, spk_ids) = effective_batch
(_, input_lengths, _, _, _) = inputs
max_input_length = input_lengths.max().item()
return self.modules.model(inputs, spk_embs, alignments_dim=max_input_length)
def fit_batch(self, batch):
result = super().fit_batch(batch)
self.hparams.lr_annealing(self.optimizer)
return result
def compute_objectives(self, predictions, batch, stage):
effective_batch = self.batch_to_device(batch)
self.last_batch = effective_batch
self.last_preds = predictions
self._remember_sample(effective_batch, predictions)
loss = self._compute_loss(predictions, effective_batch, stage)
return loss
def _compute_loss(self, predictions, batch, stage):
(inputs, targets, num_items, labels, wavs, spk_embs, spk_ids) = batch
(text_padded, input_lengths, _, max_len, output_lengths) = inputs
spk_emb_input = None
loss_stats = self.hparams.criterion(predictions, targets, input_lengths, output_lengths, spk_emb_input, self.last_epoch)
self.last_loss_stats[stage] = scalarize(loss_stats)
return loss_stats.loss
def _remember_sample(self, batch, predictions):
(inputs, targets, num_items, labels, wavs, spk_embs, spk_ids) = batch
(text_padded, input_lengths, _, max_len, output_lengths) = inputs
(mel_target, _) = targets
(mel_out, mel_out_postnet, gate_out, alignments, pred_mel_lengths) = predictions
alignments_max = alignments[0].max(dim=(- 1)).values.max(dim=(- 1)).values.unsqueeze((- 1)).unsqueeze((- 1))
alignments_output = (alignments[0].T.flip(dims=(1,)) / alignments_max)
self.hparams.progress_sample_logger.remember(target=self._get_spectrogram_sample(mel_target), output=self._get_spectrogram_sample(mel_out), output_postnet=self._get_spectrogram_sample(mel_out_postnet), alignments=alignments_output, raw_batch=self.hparams.progress_sample_logger.get_batch_sample({'text_padded': text_padded, 'input_lengths': input_lengths, 'mel_target': mel_target, 'mel_out': mel_out, 'mel_out_postnet': mel_out_postnet, 'max_len': max_len, 'output_lengths': output_lengths, 'gate_out': gate_out, 'alignments': alignments, 'labels': labels, 'wavs': wavs, 'spk_embs': spk_embs, 'spk_ids': spk_ids}))
def batch_to_device(self, batch):
(text_padded, input_lengths, mel_padded, gate_padded, output_lengths, len_x, labels, wavs, spk_embs, spk_ids) = batch
text_padded = text_padded.to(self.device, non_blocking=True).long()
input_lengths = input_lengths.to(self.device, non_blocking=True).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = mel_padded.to(self.device, non_blocking=True).float()
gate_padded = gate_padded.to(self.device, non_blocking=True).float()
output_lengths = output_lengths.to(self.device, non_blocking=True).long()
x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)
y = (mel_padded, gate_padded)
len_x = torch.sum(output_lengths)
spk_embs = spk_embs.to(self.device, non_blocking=True).float()
return (x, y, len_x, labels, wavs, spk_embs, spk_ids)
def _get_spectrogram_sample(self, raw):
sample = raw[0]
return torch.sqrt(torch.exp(sample))
def on_stage_end(self, stage, stage_loss, epoch):
if ((stage == sb.Stage.TRAIN) and ((self.hparams.epoch_counter.current % 10) == 0)):
if (self.last_batch is None):
return
train_sample_path = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current))
if (not os.path.exists(train_sample_path)):
os.makedirs(train_sample_path)
(_, targets, _, labels, wavs, spk_embs, spk_ids) = self.last_batch
train_sample_text = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'train_input_text.txt')
with open(train_sample_text, 'w') as f:
f.write(labels[0])
train_input_audio = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'train_input_audio.wav')
torchaudio.save(train_input_audio, sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), self.hparams.sample_rate)
(_, mel_out_postnet, _, _, pred_mel_lengths) = self.last_preds
if self.hparams.log_audio_samples:
waveform_ss = self.vocoder.decode_batch(mel_out_postnet[0])
train_sample_audio = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'train_output_audio.wav')
torchaudio.save(train_sample_audio, waveform_ss.squeeze(1).cpu(), self.hparams.sample_rate)
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_audio(f'{stage}/train_audio_target', sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), self.hparams.sample_rate)
if self.hparams.log_audio_samples:
self.tensorboard_logger.log_audio(f'{stage}/train_audio_pred', waveform_ss.squeeze(1), self.hparams.sample_rate)
try:
self.tensorboard_logger.log_figure(f'{stage}/train_mel_target', targets[0][0])
self.tensorboard_logger.log_figure(f'{stage}/train_mel_pred', mel_out_postnet[0])
except Exception:
pass
if (stage == sb.Stage.VALID):
lr = self.optimizer.param_groups[(- 1)]['lr']
self.last_epoch = epoch
self.hparams.train_logger.log_stats(stats_meta={'Epoch': epoch, 'lr': lr}, train_stats=self.last_loss_stats[sb.Stage.TRAIN], valid_stats=self.last_loss_stats[sb.Stage.VALID])
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_stats(stats_meta={'Epoch': epoch, 'lr': lr}, train_stats=self.last_loss_stats[sb.Stage.TRAIN], valid_stats=self.last_loss_stats[sb.Stage.VALID])
epoch_metadata = {**{'epoch': epoch}, **self.last_loss_stats[sb.Stage.VALID]}
self.checkpointer.save_and_keep_only(meta=epoch_metadata, min_keys=['loss'], ckpt_predicate=((lambda ckpt: ((ckpt.meta['epoch'] % self.hparams.keep_checkpoint_interval) != 0)) if (self.hparams.keep_checkpoint_interval is not None) else None))
output_progress_sample = (self.hparams.progress_samples and ((epoch % self.hparams.progress_samples_interval) == 0))
if output_progress_sample:
self.run_inference_sample(sb.Stage.VALID)
self.hparams.progress_sample_logger.save(epoch)
if (stage == sb.Stage.TEST):
self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=self.last_loss_stats[sb.Stage.TEST])
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=self.last_loss_stats[sb.Stage.TEST])
if self.hparams.progress_samples:
self.run_inference_sample(sb.Stage.TEST)
self.hparams.progress_sample_logger.save('test')
def run_inference_sample(self, stage):
if (self.last_batch is None):
return
(inputs, targets, _, labels, wavs, spk_embs, spk_ids) = self.last_batch
(text_padded, input_lengths, _, _, _) = inputs
(mel_out, _, _) = self.hparams.model.infer(text_padded[:1], spk_embs[:1], input_lengths[:1])
self.hparams.progress_sample_logger.remember(inference_mel_out=self._get_spectrogram_sample(mel_out))
if (stage == sb.Stage.VALID):
inf_sample_path = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current))
if (not os.path.exists(inf_sample_path)):
os.makedirs(inf_sample_path)
inf_sample_text = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'inf_input_text.txt')
with open(inf_sample_text, 'w') as f:
f.write(labels[0])
inf_input_audio = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'inf_input_audio.wav')
torchaudio.save(inf_input_audio, sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), self.hparams.sample_rate)
if self.hparams.log_audio_samples:
waveform_ss = self.vocoder.decode_batch(mel_out)
inf_sample_audio = os.path.join(self.hparams.progress_sample_path, str(self.hparams.epoch_counter.current), 'inf_output_audio.wav')
torchaudio.save(inf_sample_audio, waveform_ss.squeeze(1).cpu(), self.hparams.sample_rate)
if self.hparams.use_tensorboard:
self.tensorboard_logger.log_audio(f'{stage}/inf_audio_target', sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0), self.hparams.sample_rate)
if self.hparams.log_audio_samples:
self.tensorboard_logger.log_audio(f'{stage}/inf_audio_pred', waveform_ss.squeeze(1), self.hparams.sample_rate)
try:
self.tensorboard_logger.log_figure(f'{stage}/inf_mel_target', targets[0][0])
self.tensorboard_logger.log_figure(f'{stage}/inf_mel_pred', mel_out)
except Exception:
                    pass
def simplify_abs_trig(expr):
w0 = SR.wild()
if (expr.has(abs_symbolic(sin(w0))) or expr.has(abs_symbolic(cos(w0)))):
return SimplifyAbsTrig(expr)()
    return expr
@test_utils.test(debug=True)
def test_assign_assign():
    @ti.kernel
    def func_assign():
a = 0
a = 1
assert (a == 1)
    func_assign()
def url_to_path(url):
assert url.startswith('file:'), 'You can only turn file: urls into filenames (not {url!r})'.format(**locals())
(_, netloc, path, _, _) = urllib_parse.urlsplit(url)
if ((not netloc) or (netloc == 'localhost')):
netloc = ''
elif (sys.platform == 'win32'):
netloc = ('\\\\' + netloc)
else:
raise ValueError('non-local file URIs are not supported on this platform: {url!r}'.format(**locals()))
path = urllib_request.url2pathname((netloc + path))
    return path
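# Hedged usage sketch (POSIX; urllib_parse/urllib_request are assumed to be the
# module's aliases for urllib.parse/urllib.request):
print(url_to_path('file:///tmp/data.txt'))  # -> /tmp/data.txt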
class ReflectionPad2d(_ReflectionPadNd):
padding: _size_4_t
def __init__(self, padding: _size_4_t) -> None:
super(ReflectionPad2d, self).__init__()
        self.padding = _quadruple(padding)
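# Usage sketch via the stock torch.nn.ReflectionPad2d (same semantics as the
# subclass above); the padding 4-tuple is (left, right, top, bottom).
import torch

pad = torch.nn.ReflectionPad2d((1, 1, 2, 0))
x = torch.arange(9.0).reshape(1, 1, 3, 3)
print(pad(x).shape)  # torch.Size([1, 1, 5, 5])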
class SamConfig(PretrainedConfig):
model_type = 'sam'
is_composition = True
def __init__(self, vision_config=None, prompt_encoder_config=None, mask_decoder_config=None, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
vision_config = (vision_config if (vision_config is not None) else {})
prompt_encoder_config = (prompt_encoder_config if (prompt_encoder_config is not None) else {})
mask_decoder_config = (mask_decoder_config if (mask_decoder_config is not None) else {})
if isinstance(vision_config, SamVisionConfig):
vision_config = vision_config.to_dict()
if isinstance(prompt_encoder_config, SamPromptEncoderConfig):
prompt_encoder_config = prompt_encoder_config.to_dict()
if isinstance(mask_decoder_config, SamMaskDecoderConfig):
mask_decoder_config = mask_decoder_config.to_dict()
self.vision_config = SamVisionConfig(**vision_config)
self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
self.initializer_range = initializer_range
def to_dict(self):
output = copy.deepcopy(self.__dict__)
output['vision_config'] = self.vision_config.to_dict()
output['prompt_encoder_config'] = self.prompt_encoder_config.to_dict()
output['mask_decoder_config'] = self.mask_decoder_config.to_dict()
output['model_type'] = self.__class__.model_type
        return output
def test_listoffsetarray():
with open((SAMPLES_DIR / 'awkward1-listoffsetarray.pkl'), 'rb') as file:
array = pickle.load(file)
assert (array.to_list() == [[1.1, 2.2, 3.3], [], [4.4, 5.5]])
    assert (pickle.loads(pickle.dumps(array)).layout.form == array.layout.form)
class PET_Prompt():
def __init__(self, dataset_name=''):
(self.label_texts, self.template) = ([], '')
if (dataset_name in ['SST-2', 'MR', 'CR']):
self.label_texts = ['terrible', 'great']
self.template = '[sentence1] It was [label].'
elif (dataset_name == 'Subj'):
self.label_texts = ['subjective', 'objective']
self.template = '[sentence1] This is [label].'
elif (dataset_name == 'MPQA'):
self.label_texts = ['terrible', 'great']
self.template = '[sentence1] It was [label].'
elif (dataset_name == 'SST-5'):
self.label_texts = ['terrible', 'bad', 'okay', 'good', 'great']
self.template = '[sentence1] It was [label] .'
elif (dataset_name == 'AGNews'):
self.label_texts = ['political', 'sports', 'business', 'technology']
self.template = 'A [label] news : [sentence1] '
elif (dataset_name == 'Yahoo'):
self.label_texts = ['Society', 'Science', 'Health', 'Education', 'Computer', 'Sports', 'Business', 'Entertainment', 'Relationship', 'Politics']
self.template = '[label] question: [sentence1]'
elif (dataset_name in ['EPRSTMT']):
self.label_texts = ['', '']
self.template = '[label]. [sentence1]'
elif (dataset_name in ['TNEWS', 'TNEWSK']):
self.label_texts = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
self.template = '[label]. [sentence1]'
elif (dataset_name in ['CSLDCP']):
self.label_texts = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
self.template = '[label]. [sentence1]'
elif (dataset_name in ['IFLYTEK']):
self.label_texts = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
self.template = '[label]. [sentence1]'
try:
self.is_pre = (True if (self.template.index('[label]') <= self.template.index('[sentence1]')) else False)
except:
print('Prompt template error!') |
def random_sample_cls(sentences: List[str], labels: List[str], n_support: int, n_query: int, label: str):
data = [sentences[i] for (i, lab) in enumerate(labels) if (lab == label)]
perm = torch.randperm(len(data))
idx = perm[:n_support]
support = [data[i] for i in idx]
idx = perm[n_support:(n_support + n_query)]
query = [data[i] for i in idx]
    return (support, query)
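# Minimal usage sketch (toy data; torch is assumed imported): draw a 2-shot
# support set and a 1-item query set for the label 'pos'.
sentences = ['a', 'b', 'c', 'd']
labels = ['pos', 'neg', 'pos', 'pos']
support, query = random_sample_cls(sentences, labels, n_support=2, n_query=1, label='pos')
print(support, query)  # e.g. ['c', 'a'] ['d']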
class wordEmbedding(object):
def __init__(self, filename):
f = open(filename)
self.vocab2id = {}
self.id2vocab = {}
self.vectors = []
id = 0
for line in f.readlines():
word = line.strip().split()[0]
            vector = np.asarray(line.split()[1:], dtype=float)
self.id2vocab[id] = word
self.vocab2id[word] = id
self.vectors.append(vector)
id += 1
self.id2vocab[len(self.id2vocab)] = 'UNK'
self.vocab2id['UNK'] = len(self.vocab2id)
self.vectors.append(np.zeros(50))
self.vectors = np.asarray(self.vectors)
def get_index(self, word):
return (self.vocab2id[word] if (word in self.vocab2id) else self.vocab2id['UNK'])
def get_index_list(self, tokens):
        return [self.get_index(t) for t in tokens]
def initialize_train_state(config, device):
params = []
nnet = get_nnet(**config.nnet)
params += nnet.parameters()
nnet_ema = get_nnet(**config.nnet)
nnet_ema.eval()
logging.info(f'nnet has {cnt_params(nnet)} parameters')
optimizer = get_optimizer(params, **config.optimizer)
lr_scheduler = get_lr_scheduler(optimizer, **config.lr_scheduler)
train_state = TrainState(optimizer=optimizer, lr_scheduler=lr_scheduler, step=0, nnet=nnet, nnet_ema=nnet_ema)
train_state.ema_update(0)
train_state.to(device)
    return train_state
class CIFAR10_BASE_DRP05(nn.Module):
def __init__(self, dropout=0.5):
super(CIFAR10_BASE_DRP05, self).__init__()
self.dropout = dropout
        self.conv_layer = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=self.dropout),
            nn.Linear(4096, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=self.dropout),
            nn.Linear(512, 10),
        )
def forward(self, x):
x = self.conv_layer(x)
x = x.view(x.size(0), (- 1))
x = self.fc_layer(x)
        return x
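# Shape-check sketch: a CIFAR-10-sized batch (3x32x32) flows through the
# network and yields one 10-way logit vector per image.
import torch

net = CIFAR10_BASE_DRP05(dropout=0.5).eval()
with torch.no_grad():
    out = net(torch.randn(2, 3, 32, 32))
print(out.shape)  # torch.Size([2, 10])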
def make_discriminator(kind, **kwargs):
logging.info(f'Make discriminator {kind}')
if (kind == 'pix2pixhd_nlayer_multidilated'):
return MultidilatedNLayerDiscriminator(**kwargs)
if (kind == 'pix2pixhd_nlayer'):
return NLayerDiscriminator(**kwargs)
    raise ValueError(f'Unknown discriminator kind {kind}')
def load(root):
root = os.path.expanduser(root)
system = compat_system(root)
builder = functools.partial(build, source_dir=root, system=system)
path = Path(build_as_zip(builder))
    return imp_meta.PathDistribution(path)
class VariationalRecurrentDropout(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, dropout, mean_field_inference=False):
if (mean_field_inference is True):
return x
else:
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_((1 - dropout))
mask = (m / (1 - dropout))
mask = mask.expand_as(x)
            return (mask * x)
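# Usage sketch: the Bernoulli mask is drawn once per (batch, feature) slot and
# broadcast over time, which is what makes the dropout "variational".
import torch

vrd = VariationalRecurrentDropout()
x = torch.ones(5, 2, 4)              # (time, batch, features)
y = vrd(x, dropout=0.5)
print(torch.equal(y[0], y[-1]))      # True: same mask at every timestep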
class BertForQuestionAnswering(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def register_Ns3DefaultSimulatorImpl_methods(root_module, cls):
cls.add_constructor([param('ns3::DefaultSimulatorImpl const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_virtual=True)
cls.add_method('Destroy', 'void', [], is_virtual=True)
cls.add_method('GetContext', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_const=True, is_virtual=True)
cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_const=True, is_virtual=True)
cls.add_method('GetSystemId', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_const=True, is_virtual=True)
cls.add_method('IsFinished', 'bool', [], is_const=True, is_virtual=True)
cls.add_method('Now', 'ns3::Time', [], is_const=True, is_virtual=True)
cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_virtual=True)
cls.add_method('Run', 'void', [], is_virtual=True)
cls.add_method('Schedule', 'ns3::EventId', [param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')], is_virtual=True)
cls.add_method('ScheduleDestroy', 'ns3::EventId', [param('ns3::EventImpl *', 'event')], is_virtual=True)
cls.add_method('ScheduleNow', 'ns3::EventId', [param('ns3::EventImpl *', 'event')], is_virtual=True)
cls.add_method('ScheduleWithContext', 'void', [param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')], is_virtual=True)
cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_virtual=True)
cls.add_method('Stop', 'void', [], is_virtual=True)
cls.add_method('Stop', 'void', [param('ns3::Time const &', 'delay')], is_virtual=True)
cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
class GroupMode(enum.Enum):
NoneGroup = enum_auto()
SingleGroup = enum_auto()
MultipleGroup = enum_auto()
    Depthwise = enum_auto()
def train(hparams, run_opts):
    test(hparams, run_opts, hparams['base_locales'], 'wer_test_before.txt')
for (i, locale) in enumerate(hparams['new_locales']):
old_mas_params = hparams.pop('mas_params', None)
if (not hparams['skip_mas']):
if (i == 0):
mas_params = compute_mas_params(hparams, run_opts, hparams['base_locales'])
else:
mas_params = compute_mas_params(hparams, run_opts, [hparams['new_locales'][(i - 1)]])
for (name, importance) in mas_params[1].items():
                if ((old_mas_params is not None) and (name in old_mas_params[1])):
old_importance = old_mas_params[1][name]
if ('embed_tokens.weight' in name):
diff = (importance.shape[0] - old_importance.shape[0])
old_importance = torch.nn.functional.pad(old_importance, [0, 0, 0, diff])
mas_params[1][name] *= (1 - hparams['mas_alpha'])
mas_params[1][name] += (hparams['mas_alpha'] * old_importance)
hparams['mas_params'] = mas_params
run_on_main(prepare_common_voice, kwargs={'locales': [locale], 'data_folder': hparams['data_folder'], 'max_durations': hparams['max_durations']})
new_tokens = [f'<|{locale.lower()}|>']
tokenizer = hparams['whisper'].tokenizer
tokenizer._additional_special_tokens += new_tokens
tokenizer.supported_languages.update({locale.lower(): locale.lower()})
tokenizer.to_language_codes.update({locale.lower(): locale.lower()})
new_tokens = sorted(list((set(new_tokens) - set(tokenizer.get_vocab().keys()))))
tokenizer.add_tokens(new_tokens)
logging.info(f"Total number of tokens: {hparams['whisper'].model.decoder.embed_tokens.num_embeddings}")
hparams['whisper'].model.resize_token_embeddings(len(tokenizer))
logging.info(f"Total number of tokens: {hparams['whisper'].model.decoder.embed_tokens.num_embeddings}")
hparams['forced_decoder_locale'] = locale
(train_data, valid_data, _) = dataio_prepare(hparams, tokenizer)
checkpoint_folder = os.path.join(hparams['save_folder'], locale)
os.makedirs(checkpoint_folder, exist_ok=True)
hparams['checkpointer'].checkpoints_dir = pathlib.Path(checkpoint_folder)
hparams['lr_annealing'].hyperparam_value = hparams['lr']
hparams['lr_annealing'].metric_values.clear()
hparams['lr_annealing'].current_patient = 0
asr_brain = ASR(modules=hparams['modules'], hparams=hparams, run_opts=run_opts, opt_class=hparams['opt_class'], checkpointer=hparams['checkpointer'])
asr_brain.tokenizer = tokenizer
hparams['valid_dataloader_kwargs'].pop('ckpt_prefix', None)
hparams['epoch_counter'].current = 0
asr_brain.fit(hparams['epoch_counter'], train_data, valid_data, train_loader_kwargs=hparams['train_dataloader_kwargs'], valid_loader_kwargs=hparams['valid_dataloader_kwargs'])
        test(hparams, run_opts, (hparams['base_locales'] + hparams['new_locales'][:(i + 1)]), f'wer_test_after_{locale}.txt')
def _activation(input, activation=None):
    assert (activation in ['relu', 'leaky', 'tanh', 'sigmoid', 'prelu', None])
if (activation == 'relu'):
return tf.nn.relu(input)
elif (activation == 'leaky'):
return tf.contrib.keras.layers.LeakyReLU(0.1)(input)
elif (activation == 'tanh'):
return tf.tanh(input)
elif (activation == 'sigmoid'):
return tf.sigmoid(input)
elif (activation == 'prelu'):
alphas = tf.get_variable('alpha', input.get_shape()[(- 1)], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
pos = tf.nn.relu(input)
neg = ((alphas * (input - abs(input))) * 0.5)
return (pos + neg)
else:
        return input
def compile_single(source, options, full_module_name=None):
    return run_pipeline(source, options, full_module_name)
class EdgeConv2(MessagePassing):
def __init__(self, nn, aggr='max', **kwargs):
super(EdgeConv2, self).__init__(aggr=aggr, **kwargs)
self.nn = nn
self.reset_parameters()
def reset_parameters(self):
reset(self.nn)
def forward(self, x, edge_index):
x = (x.unsqueeze((- 1)) if (x.dim() == 1) else x)
return self.propagate(edge_index, x=x)
def message(self, x_i, x_j):
return self.nn(torch.cat([x_i, x_j], dim=1))
def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
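# Hedged usage sketch (assumes torch_geometric provides MessagePassing/reset):
# edge messages are [x_i || x_j], so `nn` must accept 2 * in_channels features.
import torch
from torch.nn import Linear

conv = EdgeConv2(Linear(2 * 16, 32), aggr='max')
x = torch.randn(10, 16)
edge_index = torch.randint(0, 10, (2, 40))
print(conv(x, edge_index).shape)  # torch.Size([10, 32])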
class UninitializedParameter(UninitializedTensorMixin, Parameter):
cls_to_become = Parameter
def __new__(cls, requires_grad=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
data = torch.tensor([], **factory_kwargs)
        return torch.Tensor._make_subclass(cls, data, requires_grad)
def verify_assignments(assignments):
for cur in assignments:
for (x, y) in zip(cur[0:(- 1)], cur[1:]):
            assert (x[1].used < y[1].defined)
@test_utils.test()
def test_atomic_mul_expr_evaled():
c = ti.field(ti.i32)
base = 2
ti.root.place(c)
    @ti.kernel
    def func():
c[None] = 1
for i in range(16):
ti.atomic_mul(c[None], base)
func()
    assert (c[None] == (base ** 16))
def load_ckpt(model: torch.nn.Module, optimizer: Optional[torch.optim.Optimizer]=None, scheduler: Optional[Any]=None, epoch: int=(- 1)) -> int:
epoch = get_ckpt_epoch(epoch)
path = get_ckpt_path(epoch)
if (not osp.exists(path)):
return 0
ckpt = torch.load(path)
model.load_state_dict(ckpt[MODEL_STATE])
if ((optimizer is not None) and (OPTIMIZER_STATE in ckpt)):
optimizer.load_state_dict(ckpt[OPTIMIZER_STATE])
if ((scheduler is not None) and (SCHEDULER_STATE in ckpt)):
scheduler.load_state_dict(ckpt[SCHEDULER_STATE])
    return (epoch + 1)
class lora_sdr_lora_rx(gr.hier_block2):
    # NOTE: the original default for center_freq was lost in extraction; 868.1e6 (EU868) is assumed here
    def __init__(self, center_freq=868.1e6, bw=125000, cr=1, has_crc=True, impl_head=False, pay_len=255, samp_rate=250000, sf=7, sync_word=[18], soft_decoding=False, ldro_mode=2, print_rx=[True, True]):
gr.hier_block2.__init__(self, 'lora_sdr_lora_rx', gr.io_signature(1, 1, (gr.sizeof_gr_complex * 1)), gr.io_signature(1, 1, (gr.sizeof_char * 1)))
self.message_port_register_hier_out('out')
self.bw = bw
self.cr = cr
self.has_crc = has_crc
self.impl_head = impl_head
self.pay_len = pay_len
self.samp_rate = samp_rate
self.sf = sf
self.soft_decoding = soft_decoding
self.print_header = print_rx[0]
self.print_payload = print_rx[1]
self.center_freq = center_freq
self.sync_word = sync_word
self.lora_sdr_header_decoder_0 = lora_sdr.header_decoder(impl_head, cr, pay_len, has_crc, ldro_mode, self.print_header)
self.lora_sdr_hamming_dec_0 = lora_sdr.hamming_dec(soft_decoding)
self.lora_sdr_gray_mapping_0 = lora_sdr.gray_mapping(soft_decoding)
self.lora_sdr_frame_sync_0 = lora_sdr.frame_sync(center_freq, bw, sf, impl_head, sync_word, int((samp_rate / bw)), 8)
self.lora_sdr_fft_demod_0 = lora_sdr.fft_demod(soft_decoding, True)
self.lora_sdr_dewhitening_0 = lora_sdr.dewhitening()
self.lora_sdr_deinterleaver_0 = lora_sdr.deinterleaver(soft_decoding)
self.lora_sdr_crc_verif_0 = lora_sdr.crc_verif(self.print_payload, False)
self.msg_connect((self.lora_sdr_crc_verif_0, 'msg'), (self, 'out'))
self.msg_connect((self.lora_sdr_header_decoder_0, 'frame_info'), (self.lora_sdr_frame_sync_0, 'frame_info'))
self.connect((self.lora_sdr_crc_verif_0, 0), (self, 0))
self.connect((self.lora_sdr_deinterleaver_0, 0), (self.lora_sdr_hamming_dec_0, 0))
self.connect((self.lora_sdr_dewhitening_0, 0), (self.lora_sdr_crc_verif_0, 0))
self.connect((self.lora_sdr_fft_demod_0, 0), (self.lora_sdr_gray_mapping_0, 0))
self.connect((self.lora_sdr_frame_sync_0, 0), (self.lora_sdr_fft_demod_0, 0))
self.connect((self.lora_sdr_gray_mapping_0, 0), (self.lora_sdr_deinterleaver_0, 0))
self.connect((self.lora_sdr_hamming_dec_0, 0), (self.lora_sdr_header_decoder_0, 0))
self.connect((self.lora_sdr_header_decoder_0, 0), (self.lora_sdr_dewhitening_0, 0))
self.connect((self, 0), (self.lora_sdr_frame_sync_0, 0))
def get_bw(self):
return self.bw
def set_bw(self, bw):
self.bw = bw
def get_cr(self):
return self.cr
def set_cr(self, cr):
self.cr = cr
def get_has_crc(self):
return self.has_crc
def set_has_crc(self, has_crc):
self.has_crc = has_crc
def get_impl_head(self):
return self.impl_head
def set_impl_head(self, impl_head):
self.impl_head = impl_head
def get_pay_len(self):
return self.pay_len
def set_pay_len(self, pay_len):
self.pay_len = pay_len
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
def get_sf(self):
return self.sf
def set_sf(self, sf):
self.sf = sf
def get_soft_decoding(self):
return self.soft_decoding
def set_soft_decoding(self, soft_decoding):
        self.soft_decoding = soft_decoding
class GoogleCalendarCreateOrUpdateEvent(VirtualFunctionTool):
name = 'GoogleCalendarCreateOrUpdateEvent'
summary = 'Create a new event or update an existing event in the calendar.'
parameters: List[ArgParameter] = [{'name': 'event_id', 'type': 'string', 'description': 'The unique identifier of the event to be updated. If not provided, a new event will be created.', 'required': False}, {'name': 'event_name', 'type': 'string', 'description': 'The name of the event. Required when creating a new event.', 'required': False}, {'name': 'content', 'type': 'string', 'description': 'The content of the event.', 'required': False}, {'name': 'start_time', 'type': 'string', 'description': 'The start time of the event in ISO 8601 format. Required when creating a new event.', 'required': False}, {'name': 'end_time', 'type': 'string', 'description': 'The end time of the event in ISO 8601 format. Required when creating a new event.', 'required': False}, {'name': 'timezone', 'type': 'string', 'description': 'The timezone of the event, e.g. UTC-04:00.', 'required': False}, {'name': 'location', 'type': 'string', 'description': 'The location of the event.', 'required': False}, {'name': 'attendees', 'type': 'array', 'description': 'An array of email addresses of the attendees.', 'required': False}, {'name': 'recurrence', 'type': 'string', 'description': 'The recurrence of the event in Google Calendar format. For instance, use `RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO` to set the event to repeat every week on Monday.', 'required': False}]
returns: List[ArgReturn] = [{'name': 'event_id', 'type': 'string', 'description': 'The unique identifier of the created or updated event.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': 'The start_time is later than the end_time, the format of start_time, end_time or recurrence is incorrect, or start_time, end_time and event_name are not provided when creating a new event.'}, {'name': 'NotFoundException', 'description': 'The provided event_id does not exist.'}]
def fOptProps(cid, length, direction, fOptDict=None):
if ((cid < 0) or (cid > 255)):
raise ValueError('cid must be between 0 and 255')
if (length < 0):
        raise ValueError('length must be non-negative')
if (not isinstance(direction, FOptsDir)):
raise ValueError('direction must be an instance of FOptsDir')
def addFOptProps(cls):
class FOptPropClass(cls):
pass
FOptPropClass.cid = cid
FOptPropClass.length = length
FOptPropClass.direction = direction
if isinstance(fOptDict, dict):
fOptDict[cid] = FOptPropClass
return FOptPropClass
    return addFOptProps
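# Hypothetical usage sketch: FOptsDir is assumed to be an Enum with an UP
# member (it comes from the surrounding module); the registry dict is ours.
registry = {}

@fOptProps(cid=0x02, length=1, direction=FOptsDir.UP, fOptDict=registry)
class LinkCheckReq:
    pass

print(registry[0x02].length)  # 1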
def _load_stop_words(stop_words_path):
with stop_words_path.open(encoding='utf8') as f:
        stop_words = set((l.strip() for l in f if l.strip()))
    return stop_words
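# Usage sketch: the function expects a pathlib.Path-like object exposing .open().
from pathlib import Path
import tempfile

demo = Path(tempfile.gettempdir()) / 'stop_words_demo.txt'
demo.write_text('the\nand\n\n', encoding='utf8')
print(_load_stop_words(demo))  # {'the', 'and'}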
class ERFNet(nn.Sequential):
def __init__(self, n_classes=19):
        super().__init__(
            Downsampler(3, 16, 0.0),
            Downsampler(16, 64, 0.03),
            NonBottleneck1D(64, 0.03),
            NonBottleneck1D(64, 0.03),
            NonBottleneck1D(64, 0.03),
            NonBottleneck1D(64, 0.03),
            NonBottleneck1D(64, 0.03),
            Downsampler(64, 128, 0.3),
            NonBottleneck1D(128, 0.3, 2),
            NonBottleneck1D(128, 0.3, 4),
            NonBottleneck1D(128, 0.3, 8),
            NonBottleneck1D(128, 0.3, 16),
            NonBottleneck1D(128, 0.3, 2),
            NonBottleneck1D(128, 0.3, 4),
            NonBottleneck1D(128, 0.3, 8),
            NonBottleneck1D(128, 0.3, 16),
            Upsampler(128, 64),
            NonBottleneck1D(64),
            NonBottleneck1D(64),
            Upsampler(64, 16),
            NonBottleneck1D(16),
            NonBottleneck1D(16),
            nn.ConvTranspose2d(16, (n_classes + 1), (3, 3), 2, 1, 1),
        )
def downsample_module(data, num_filter, kernel, stride, pad, b_h_w, name, aggre_type=None):
assert isinstance(data, list)
data = mx.sym.concat(*data, dim=0)
ret = conv2d_act(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, act_type=cfg.MODEL.CNN_ACT_TYPE, name=(name + '_conv'))
    return ret
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
cls.add_constructor([param('char const *', 'name')])
cls.add_constructor([])
cls.add_constructor([param('ns3::TypeId const &', 'o')])
cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True)
cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('std::size_t', 'i')], is_const=True)
cls.add_method('GetAttributeFullName', 'std::string', [param('std::size_t', 'i')], is_const=True)
cls.add_method('GetAttributeN', 'std::size_t', [], is_const=True)
cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
cls.add_method('GetGroupName', 'std::string', [], is_const=True)
cls.add_method('GetHash', 'ns3::TypeId::hash_t', [], is_const=True)
cls.add_method('GetName', 'std::string', [], is_const=True)
cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint16_t', 'i')], is_static=True)
cls.add_method('GetRegisteredN', 'uint16_t', [], is_static=True)
cls.add_method('GetSize', 'std::size_t', [], is_const=True)
cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('std::size_t', 'i')], is_const=True)
cls.add_method('GetTraceSourceN', 'std::size_t', [], is_const=True)
cls.add_method('GetUid', 'uint16_t', [], is_const=True)
cls.add_method('HasConstructor', 'bool', [], is_const=True)
cls.add_method('HasParent', 'bool', [], is_const=True)
cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True)
cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
cls.add_method('SetAttributeInitialValue', 'bool', [param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')])
    return
@pytest.mark.parametrize('device', devices)
def test_spline_weighting_backward(device):
pseudo = torch.rand((4, 2), dtype=torch.double, device=device)
kernel_size = tensor([5, 5], torch.long, device)
is_open_spline = tensor([1, 1], torch.uint8, device)
degree = 1
(basis, weight_index) = spline_basis(pseudo, kernel_size, is_open_spline, degree)
basis.requires_grad_()
x = torch.rand((4, 2), dtype=torch.double, device=device)
x.requires_grad_()
weight = torch.rand((25, 2, 4), dtype=torch.double, device=device)
weight.requires_grad_()
data = (x, weight, basis, weight_index)
    assert (gradcheck(spline_weighting, data, eps=1e-06, atol=0.0001) is True)
class IndexedRawTextDataset(IndexedDataset):
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
        self.size = len(self.tokens_list)
        self.token_blob = None
def read_data(self, path, dictionary):
with open(path, 'r') as f:
for line in f:
self.lines.append(line.strip('\n'))
tokens = Tokenizer.tokenize(line, dictionary, add_if_not_exist=False, reverse_order=self.reverse_order).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def read_into(self, start, dst):
if (self.token_blob is None):
self.token_blob = [t for l in self.tokens_list for t in l]
np.copyto(dst, self.token_blob[start:])
def __del__(self):
pass
def __len__(self):
return self.size
    @staticmethod
    def exists(path):
        return os.path.exists(path)
def get_emotion(wav_path):
num = wav_path.split('_')[(- 1)][:(- 4)]
num = (int(num) - 1)
if ((num // 350) == 1):
return 'angry'
elif ((num // 350) == 2):
return 'happy'
elif ((num // 350) == 3):
        return 'sad'
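# Usage sketch: utterance numbers are assumed to be grouped in blocks of 350
# per emotion, so index 351 falls in block 1 ('angry').
print(get_emotion('0011_000351.wav'))  # angry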
def get_device():
is_device_available = {'npu': is_npu_available(), 'cuda': torch.cuda.is_available(), 'mlu': is_mlu_available()}
device_list = [k for (k, v) in is_device_available.items() if v]
    return (device_list[0] if (len(device_list) >= 1) else 'cpu')
def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
if (not torch.jit.is_scripting()):
if ((type(input) is not Tensor) and has_torch_function((input,))):
return _unique_impl(input, sorted, return_inverse, return_counts, dim)
(output, _, _) = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output
@route('/dump', method='POST')  # assuming bottle's route decorator, stripped during extraction
def dump_data():
data = measurer.get_data()
samples_counter = len(data)
with open(out_file, 'w') as f:
csv_writer = csv.writer(f)
csv_writer.writerow(metrics[args.metric].header())
for val in data:
csv_writer.writerow(val)
    measurer.cleanup()
def test_specify_label(simpledf: dd.DataFrame) -> None:
    plot_diff([simpledf, simpledf], config={'diff.label': ['label_1', 'label_2']})
@_decorator('')
def get_orignalmid(html):
if is_root(html):
return get_mid(html)
else:
cont = _get_statushtml(html)
soup = BeautifulSoup(cont, 'lxml')
        return soup.find(attrs={'action-type': 'feed_list_item'})['omid']
class ReLU(tf.keras.layers.ReLU):
def compute_output_shape(self, input_shape):
        return tf.TensorShape(input_shape)
def fusion_two_mat(input1, input2, hn=None, scope=None, wd=0.0, keep_prob=1.0, is_train=None):
ivec1 = input1.get_shape()[(- 1)]
ivec2 = input2.get_shape()[(- 1)]
if (hn is None):
hn = ivec1
with tf.variable_scope((scope or 'fusion_two_mat')):
part1 = linear(input1, hn, False, 0.0, 'linear_1', False, wd, keep_prob, is_train)
part2 = linear(input2, hn, True, 0.0, 'linear_2', False, wd, keep_prob, is_train)
        return (part1 + part2)
class CriteoDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path=None, cache_path='.criteo', rebuild_cache=False, min_threshold=10, category_only=False):
self.NUM_FEATS = 39
self.NUM_INT_FEATS = 13
self.min_threshold = min_threshold
self.category_only = category_only
self.item_idx = 0
if (rebuild_cache or (not Path(cache_path).exists())):
shutil.rmtree(cache_path, ignore_errors=True)
if (dataset_path is None):
raise ValueError('create cache: failed: dataset_path is None')
self.__build_cache(dataset_path, cache_path)
self.env = lmdb.open(cache_path, create=False, lock=False, readonly=True)
print(self.item_idx)
with self.env.begin(write=False) as txn:
stat = txn.stat()
self.length = (stat['entries'] - 1)
self.field_dims = np.frombuffer(txn.get(b'field_dims'), dtype=np.uint32)
def __getitem__(self, index):
with self.env.begin(write=False) as txn:
name = struct.pack('>I', index)
stream = txn.get(name)
if (stream is None):
print('None')
print(index)
            np_array = np.frombuffer(stream, dtype=np.uint32).astype(dtype=np.int64)
if self.category_only:
return (np_array[(1 + self.NUM_INT_FEATS):], np_array[0])
else:
return (np_array[1:], np_array[0])
def __len__(self):
return self.length
def __build_cache(self, path, cache_path):
(feat_mapper, defaults, field_dims) = self.__get_feat_mapper(path)
        # map_size literal was lost in extraction; int(1e11) is assumed (a typical size for the full Criteo cache)
        with lmdb.open(cache_path, map_size=int(1e11)) as env:
with env.begin(write=True) as txn:
txn.put(b'field_dims', field_dims.tobytes())
for buffer in self.__yield_buffer(path, feat_mapper, defaults):
with env.begin(write=True) as txn:
for (key, value) in buffer:
txn.put(key, value)
def __get_feat_mapper(self, path):
feat_cnts = defaultdict((lambda : defaultdict(int)))
new_feat_cnts = defaultdict((lambda : defaultdict(int)))
with open(path) as f:
pbar = tqdm(f, mininterval=1, smoothing=0.1)
pbar.set_description('Create criteo dataset cache: counting features')
for line in pbar:
values = line.rstrip('\n').split('\t')
if (len(values) != (self.NUM_FEATS + 1)):
continue
for i in range(1, (self.NUM_INT_FEATS + 1)):
feat_cnts[i][convert_numeric_feature(values[i])] += 1
for i in range((self.NUM_INT_FEATS + 1), (self.NUM_FEATS + 1)):
feat_cnts[i][values[i]] += 1
feat_mapper = {i: {feat for (feat, c) in cnt.items() if (c >= self.min_threshold)} for (i, cnt) in feat_cnts.items()}
feat_mapper = {i: {feat: idx for (idx, feat) in enumerate(cnt)} for (i, cnt) in feat_mapper.items()}
defaults = {i: len(cnt) for (i, cnt) in feat_mapper.items()}
for (field, sub_dict) in feat_cnts.items():
for key in list(sub_dict.keys()):
if (sub_dict[key] < self.min_threshold):
sub_dict['default'] += 1
else:
new_feat_cnts[field][feat_mapper[field][key]] = sub_dict[key]
if (sub_dict['default'] != 0):
new_feat_cnts[field][len(feat_mapper[field])] = sub_dict['default']
field_dims = self.__get_field_dims(new_feat_cnts)
return (feat_mapper, defaults, field_dims)
def __yield_buffer(self, path, feat_mapper, defaults, buffer_size=int(100000.0)):
item_idx = 0
buffer = list()
with open(path) as f:
pbar = tqdm(f, mininterval=1, smoothing=0.1)
pbar.set_description('Create criteo dataset cache: setup lmdb')
for line in pbar:
values = line.rstrip('\n').split('\t')
if (len(values) != (self.NUM_FEATS + 1)):
continue
np_array = np.zeros((self.NUM_FEATS + 1), dtype=np.uint32)
np_array[0] = int(values[0])
for i in range(1, (self.NUM_INT_FEATS + 1)):
np_array[i] = feat_mapper[i].get(convert_numeric_feature(values[i]), defaults[i])
for i in range((self.NUM_INT_FEATS + 1), (self.NUM_FEATS + 1)):
np_array[i] = feat_mapper[i].get(values[i], defaults[i])
name = struct.pack('>I', item_idx)
if (name is None):
print('None')
buffer.append((name, np_array.tobytes()))
item_idx += 1
if ((item_idx % buffer_size) == 0):
(yield buffer)
buffer.clear()
self.item_idx = item_idx
(yield buffer)
def __get_field_dims(self, data):
all_freq = None
index_offset = 0
field_dims = np.zeros(self.NUM_FEATS, dtype=np.uint32)
for (i, col) in enumerate(data.keys()):
freq = pd.Series(data[col]).sort_values(ascending=False)
freq.index = (freq.index + index_offset)
if (all_freq is None):
all_freq = freq
else:
all_freq = pd.concat([all_freq, freq], axis=0)
index_offset += len(freq)
field_dims[i] = len(freq)
        return field_dims
def calc_shifted_geometric_mean(list_of_numbers, shift_by=10.0):
geometric_mean = 1.0
nitems = 0
for number in list_of_numbers:
nitems = (nitems + 1)
nextnumber = (number + shift_by)
geometric_mean = (pow(geometric_mean, ((nitems - 1) / float(nitems))) * pow(nextnumber, (1 / float(nitems))))
    return (geometric_mean - shift_by)
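# Worked example: with shift 10, the running update reduces to
# sqrt((0 + 10) * (90 + 10)) - 10 = sqrt(1000) - 10 = 21.6227...
print(calc_shifted_geometric_mean([0, 90], shift_by=10.0))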
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
ret = Forward()
lastExpr = (baseExpr | ((lpar + ret) + rpar))
for (i, operDef) in enumerate(opList):
(opExpr, arity, rightLeftAssoc, pa) = (operDef + (None,))[:4]
termName = (('%s term' % opExpr) if (arity < 3) else ('%s%s term' % opExpr))
if (arity == 3):
if ((opExpr is None) or (len(opExpr) != 2)):
raise ValueError('if numterms=3, opExpr must be a tuple or list of two expressions')
(opExpr1, opExpr2) = opExpr
thisExpr = Forward().setName(termName)
if (rightLeftAssoc == opAssoc.LEFT):
if (arity == 1):
matchExpr = (FollowedBy((lastExpr + opExpr)) + Group((lastExpr + OneOrMore(opExpr))))
elif (arity == 2):
if (opExpr is not None):
matchExpr = (FollowedBy(((lastExpr + opExpr) + lastExpr)) + Group((lastExpr + OneOrMore((opExpr + lastExpr)))))
else:
matchExpr = (FollowedBy((lastExpr + lastExpr)) + Group((lastExpr + OneOrMore(lastExpr))))
elif (arity == 3):
matchExpr = (FollowedBy(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)) + Group(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)))
else:
raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
elif (rightLeftAssoc == opAssoc.RIGHT):
if (arity == 1):
if (not isinstance(opExpr, Optional)):
opExpr = Optional(opExpr)
matchExpr = (FollowedBy((opExpr.expr + thisExpr)) + Group((opExpr + thisExpr)))
elif (arity == 2):
if (opExpr is not None):
matchExpr = (FollowedBy(((lastExpr + opExpr) + thisExpr)) + Group((lastExpr + OneOrMore((opExpr + thisExpr)))))
else:
matchExpr = (FollowedBy((lastExpr + thisExpr)) + Group((lastExpr + OneOrMore(thisExpr))))
elif (arity == 3):
matchExpr = (FollowedBy(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)) + Group(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)))
else:
raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
else:
raise ValueError('operator must indicate right or left associativity')
if pa:
matchExpr.setParseAction(pa)
thisExpr <<= (matchExpr.setName(termName) | lastExpr)
lastExpr = thisExpr
ret <<= lastExpr
    return ret
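# Classic usage sketch (pyparsing): a small arithmetic grammar built with the
# helper above, with '*'/'/' binding tighter than '+'/'-'.
from pyparsing import oneOf, opAssoc, pyparsing_common

arith = infixNotation(pyparsing_common.integer,
                      [(oneOf('* /'), 2, opAssoc.LEFT),
                       (oneOf('+ -'), 2, opAssoc.LEFT)])
print(arith.parseString('1 + 2 * 3'))  # [[1, '+', [2, '*', 3]]]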
def max_unconstrained(weights, lengths, max_ratio):
max_tokens = math.ceil((sum(lengths) * max_ratio))
glob_sort = global_argsort(weights)
    return tuple([g[(- max_tokens):] for g in glob_sort])
def _scikit_umfpack_version(pkg_name):
try:
import scikits.umfpack
scikits.umfpack
try:
return scikits.umfpack.__version__
except AttributeError:
return '<0.3.1'
    except ImportError:
        return None
class IndexedSequence(SageObject):
def __init__(self, L, index_object):
try:
ind = index_object.list()
except AttributeError:
ind = list(index_object)
self._index_object = index_object
self._list = Sequence(L)
self._base_ring = self._list.universe()
dict = {}
for i in range(len(ind)):
dict[ind[i]] = L[i]
self._dict = dict
def dict(self):
return self._dict
def list(self):
return self._list
def base_ring(self):
return self._base_ring
def index_object(self):
return self._index_object
def _repr_(self):
return ((('Indexed sequence: ' + str(self.list())) + '\n indexed by ') + str(self.index_object()))
def plot_histogram(self, clr=(0, 0, 1), eps=0.4):
from sage.rings.real_mpfr import RR
I = self.index_object()
N = len(I)
S = self.list()
P = [polygon([[(RR(I[i]) - eps), 0], [(RR(I[i]) - eps), RR(S[i])], [(RR(I[i]) + eps), RR(S[i])], [(RR(I[i]) + eps), 0], [RR(I[i]), 0]], rgbcolor=clr) for i in range(N)]
T = [text(str(I[i]), (RR(I[i]), (- 0.8)), fontsize=15, rgbcolor=(1, 0, 0)) for i in range(N)]
return (sum(P) + sum(T))
def plot(self):
from sage.rings.real_mpfr import RR
I = self.index_object()
S = self.list()
return line([[RR(I[i]), RR(S[i])] for i in range((len(I) - 1))])
def dft(self, chi=None):
if (chi is None):
chi = (lambda x: x)
J = self.index_object()
N = len(J)
S = self.list()
F = self.base_ring()
if (J[0] not in ZZ):
G = J[0].parent()
if ((J[0] in ZZ) and (F.base_ring().fraction_field() == QQ)):
zeta = CyclotomicField(N).gen()
FT = [sum([(S[i] * chi((zeta ** (i * j)))) for i in J]) for j in J]
elif (((J[0] not in ZZ) and G.is_abelian() and (F == ZZ)) or (F.is_field() and (F.base_ring() == QQ))):
if isinstance(J[0], PermutationGroupElement):
n = G.order()
a = list(n.factor())
invs = [(x[0] ** x[1]) for x in a]
G = AbelianGroup(len(a), invs)
Gd = G.dual_group()
FT = [sum([(S[i] * chid(G.list()[i])) for i in range(N)]) for chid in Gd]
elif (((J[0] not in ZZ) and G.is_finite() and (F == ZZ)) or (F.is_field() and (F.base_ring() == QQ))):
chi = G.character_table()
FT = [sum([(S[i] * chi[(i, j)]) for i in range(N)]) for j in range(N)]
else:
raise ValueError(f'list elements must be in QQ(zeta_{N})')
return IndexedSequence(FT, J)
    def idft(self):
        F = self.base_ring()
        J = self.index_object()
        N = len(J)
        S = self.list()
        if ((J[0] not in ZZ) or (F.base_ring().fraction_field() != QQ)):
            raise NotImplementedError('Sorry, this type of idft is not implemented yet.')
        zeta = CyclotomicField(N).gen()
        iFT = [sum([(S[i] * (zeta ** ((- i) * j))) for i in J]) for j in J]
        return (IndexedSequence(iFT, J) * (Integer(1) / N))
def dct(self):
F = self.base_ring()
try:
pi = F.pi()
except AttributeError:
from sage.symbolic.constants import pi
pi = F(pi)
J = self.index_object()
N = len(J)
S = self.list()
PI = ((2 * pi) / N)
FT = [sum([(S[i] * cos(((PI * i) * j))) for i in J]) for j in J]
return IndexedSequence(FT, J)
def dst(self):
F = self.base_ring()
try:
pi = F.pi()
except AttributeError:
from sage.symbolic.constants import pi
pi = F(pi)
J = self.index_object()
N = len(J)
S = self.list()
PI = ((2 * F(pi)) / N)
FT = [sum([(S[i] * sin(((PI * i) * j))) for i in J]) for j in J]
return IndexedSequence(FT, J)
def convolution(self, other):
S = self.list()
T = other.list()
I0 = self.index_object()
J0 = other.index_object()
F = self.base_ring()
E = other.base_ring()
if (F != E):
raise TypeError('IndexedSequences must have same base ring')
if (I0 != J0):
raise TypeError('IndexedSequences must have same index set')
M = len(S)
N = len(T)
if (M < N):
a = ([S[i] for i in range(M)] + [F(0) for i in range((2 * N))])
b = (T + [E(0) for i in range((2 * M))])
if (M > N):
b = ([T[i] for i in range(N)] + [E(0) for i in range((2 * M))])
a = (S + [F(0) for i in range((2 * M))])
if (M == N):
a = (S + [F(0) for i in range((2 * M))])
b = (T + [E(0) for i in range((2 * M))])
N = max(M, N)
return [sum([(a[i] * b[(j - i)]) for i in range(N)]) for j in range(((2 * N) - 1))]
def convolution_periodic(self, other):
S = self.list()
T = other.list()
I = self.index_object()
J = other.index_object()
F = self.base_ring()
E = other.base_ring()
if (F != E):
raise TypeError('IndexedSequences must have same parent')
if (I != J):
raise TypeError('IndexedSequences must have same index set')
M = len(S)
N = len(T)
if (M < N):
a = ([S[i] for i in range(M)] + [F(0) for i in range((N - M))])
            b = T
if (M > N):
b = ([T[i] for i in range(N)] + [E(0) for i in range((M - N))])
            a = S
if (M == N):
a = S
b = T
N = max(M, N)
return [sum([(a[i] * b[((j - i) % N)]) for i in range(N)]) for j in range(((2 * N) - 1))]
def __mul__(self, other):
S = self.list()
S1 = [(S[i] * other) for i in range(len(self.index_object()))]
return IndexedSequence(S1, self.index_object())
def __eq__(self, other):
if (type(self) is not type(other)):
return False
S = self.list()
T = other.list()
I = self.index_object()
J = other.index_object()
if (I != J):
return False
for i in I:
try:
if (abs((S[i] - T[i])) > (10 ** (- 8))):
return False
except TypeError:
pass
return True
def fft(self):
from sage.rings.cc import CC
I = CC.gen()
J = self.index_object()
N = len(J)
S = self.list()
a = FastFourierTransform(N)
for i in range(N):
a[i] = S[i]
a.forward_transform()
return IndexedSequence([(a[j][0] + (I * a[j][1])) for j in J], J)
def ifft(self):
from sage.rings.cc import CC
I = CC.gen()
J = self.index_object()
N = len(J)
S = self.list()
a = FastFourierTransform(N)
for i in range(N):
a[i] = S[i]
a.inverse_transform()
return IndexedSequence([(a[j][0] + (I * a[j][1])) for j in J], J)
def dwt(self, other='haar', wavelet_k=2):
from sage.rings.real_mpfr import RR
J = self.index_object()
N = len(J)
S = self.list()
if ((other == 'haar') or (other == 'haar_centered')):
if (wavelet_k in [2]):
a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be = 2')
if ((other == 'daubechies') or (other == 'daubechies_centered')):
if (wavelet_k in [4, 6, 8, 10, 12, 14, 16, 18, 20]):
a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be in {4,6,8,10,12,14,16,18,20}')
if ((other == 'bspline') or (other == 'bspline_centered')):
if (wavelet_k in [103, 105, 202, 204, 206, 208, 301, 305, 307, 309]):
                a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be in {103,105,202,204,206,208,301,305,307,309}')
for i in range(N):
a[i] = S[i]
a.forward_transform()
return IndexedSequence([RR(a[j]) for j in J], J)
def idwt(self, other='haar', wavelet_k=2):
from sage.rings.real_mpfr import RR
J = self.index_object()
N = len(J)
S = self.list()
k = wavelet_k
if ((other == 'haar') or (other == 'haar_centered')):
if (k in [2]):
a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be = 2')
if ((other == 'daubechies') or (other == 'daubechies_centered')):
if (k in [4, 6, 8, 10, 12, 14, 16, 18, 20]):
a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be in {4,6,8,10,12,14,16,18,20}')
if ((other == 'bspline') or (other == 'bspline_centered')):
if (k in [103, 105, 202, 204, 206, 208, 301, 305, 307, 309]):
                a = WaveletTransform(N, other, wavelet_k)
else:
raise ValueError('wavelet_k must be in {103,105,202,204,206,208,301,305,307,309}')
for i in range(N):
a[i] = S[i]
a.backward_transform()
return IndexedSequence([RR(a[j]) for j in J], J) |
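# Hedged usage sketch for IndexedSequence above (doctest style; a Sage
# session with ZZ available is assumed):
#     sage: J = list(range(6))
#     sage: s = IndexedSequence([ZZ(1) for _ in J], J)
#     sage: fs = s.dft()        # values land in QQ(zeta_6); only FT[0] is nonzero
#     sage: fs.idft() == s      # the inverse transform round-trips
#     True |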
def virtualenv_no_global():
if _running_under_venv():
return _no_global_under_venv()
if _running_under_regular_virtualenv():
return _no_global_under_regular_virtualenv()
return False |
def build_batch_data_sampler(sampler, images_per_batch, group_bin_edges=None, grouping_features=None):
if (group_bin_edges and grouping_features):
assert isinstance(group_bin_edges, (list, tuple))
assert isinstance(grouping_features, (list, tuple))
group_ids = _quantize(grouping_features, group_bin_edges)
batch_sampler = samplers.GroupedBatchSampler(sampler, group_ids, images_per_batch)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, images_per_batch, drop_last=True)
return batch_sampler |
def supplementary_difference_set(q, existence=False, check=True):
from sage.misc.superseded import deprecation
deprecation(35211, 'This function is deprecated, please use supplementary_difference_set_from_rel_diff_set instead.')
if existence:
return supplementary_difference_set_from_rel_diff_set(q, existence=True)
(_, s) = supplementary_difference_set_from_rel_diff_set(q, check=check)
return s |
class IndicatorBox(BasePenalty):
def __init__(self, alpha):
self.alpha = alpha
def get_spec(self):
spec = (('alpha', float64),)
return spec
def params_to_dict(self):
return dict(alpha=self.alpha)
def value(self, w):
if (np.max(w) > self.alpha):
return np.inf
elif (np.min(w) < 0):
return np.inf
return 0.0
def prox_1d(self, value, stepsize, j):
return box_proj(value, 0, self.alpha)
def subdiff_distance(self, w, grad, ws):
subdiff_dist = np.zeros_like(grad)
for (idx, j) in enumerate(ws):
if (w[j] == 0):
subdiff_dist[idx] = max(0, (- grad[idx]))
elif (w[j] == self.alpha):
subdiff_dist[idx] = max(0, grad[idx])
else:
subdiff_dist[idx] = np.abs(grad[idx])
return subdiff_dist
def is_penalized(self, n_features):
return np.ones(n_features, bool_)
def generalized_support(self, w):
return np.logical_and((w != 0), (w != self.alpha)) |
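# A minimal sketch of the box projection that IndicatorBox.prox_1d above
# delegates to; box_proj is not defined in this snippet, so box_proj_sketch
# mirrors its presumed behavior (coordinate-wise clamping onto [low, high]).
import numpy as np

def box_proj_sketch(value, low, high):
    # the prox of an indicator function is the projection onto its set
    return np.clip(value, low, high)

print(box_proj_sketch(np.array([-0.5, 0.4, 2.0]), 0.0, 1.0))  # [0.  0.4 1. ] |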
class LabelMapper():
UNCERTAIN = (- 1)
MISSING = (- 2)
def __init__(self, from_seq, to_seq):
assert (len(set(from_seq)) == len(from_seq))
assert (len(set(to_seq)) == len(to_seq))
assert (len(set(to_seq.values())) == len(to_seq.values()))
assert (len(set(from_seq.values())) == len(from_seq.values()))
self.from_seq = from_seq
self.to_seq = to_seq
self.mapping_matrix = self._get_map_matrix(from_seq, to_seq)
missing_tasks_indeces = np.where((np.sum(self.mapping_matrix, axis=1) == 0))
self.missing_bias = np.zeros(len(to_seq))
self.missing_bias[missing_tasks_indeces] = LabelMapper.MISSING
def map(self, label):
new_label = (np.dot(self.mapping_matrix, label) + self.missing_bias)
return new_label
def _get_map_matrix(self, from_seq, to_seq):
num_from_tasks = len(from_seq)
num_to_tasks = len(to_seq)
map_matrix = np.zeros((num_to_tasks, num_from_tasks))
for target_pathology in to_seq:
to_id = to_seq[target_pathology]
if (target_pathology in from_seq):
from_id = from_seq[target_pathology]
map_matrix[(to_id, from_id)] = 1
return map_matrix
def label_overlap(self):
overlap = set(self.from_seq).intersection(set(self.to_seq))
return list(overlap)
    @staticmethod
    def display(sequence, array):
tasks = list(sequence)
array = array.tolist()
assert (len(tasks) == len(array))
path_label_dict = dict(zip(tasks, array))
print(json.dumps(path_label_dict, indent=4))
return dict(zip(tasks, array)) |
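# Hedged usage sketch for LabelMapper above (assumed importable from the
# snippet): remap a 3-task label vector onto a 2-task target space; 'd' has no
# source counterpart, so its output slot gets the MISSING bias.
import numpy as np

from_seq = {'a': 0, 'b': 1, 'c': 2}   # pathology name -> index in source head
to_seq = {'b': 0, 'd': 1}             # pathology name -> index in target head
mapper = LabelMapper(from_seq, to_seq)
print(mapper.map(np.array([1.0, 0.0, 1.0])))  # [0., -2.]: 'b' copied over, 'd' marked MISSING |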
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs=None, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
if ((target_accept_rate <= 0.0) or (target_accept_rate >= 1.0)):
raise ValueError('target_accept_rate has to be in range (0, 1)')
if ((stepwise_factor <= 0.0) or (stepwise_factor >= 1.0)):
raise ValueError('stepwise_factor has to be in range (0, 1)')
x0 = np.array(x0)
rng = check_random_state(seed)
if (minimizer_kwargs is None):
minimizer_kwargs = dict()
wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, **minimizer_kwargs)
if (take_step is not None):
if (not callable(take_step)):
raise TypeError('take_step must be callable')
if hasattr(take_step, 'stepsize'):
take_step_wrapped = AdaptiveStepsize(take_step, interval=interval, accept_rate=target_accept_rate, factor=stepwise_factor, verbose=disp)
else:
take_step_wrapped = take_step
else:
displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
take_step_wrapped = AdaptiveStepsize(displace, interval=interval, accept_rate=target_accept_rate, factor=stepwise_factor, verbose=disp)
accept_tests = []
if (accept_test is not None):
if (not callable(accept_test)):
raise TypeError('accept_test must be callable')
accept_tests = [accept_test]
metropolis = Metropolis(T, random_gen=rng)
accept_tests.append(metropolis)
if (niter_success is None):
niter_success = (niter + 2)
bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, accept_tests, disp=disp)
if callable(callback):
callback(bh.storage.minres.x, bh.storage.minres.fun, True)
(count, i) = (0, 0)
message = ['requested number of basinhopping iterations completed successfully']
for i in range(niter):
new_global_min = bh.one_cycle()
if callable(callback):
val = callback(bh.xtrial, bh.energy_trial, bh.accept)
if (val is not None):
if val:
                    message = ['callback function requested stop early by returning True']
break
count += 1
if new_global_min:
count = 0
elif (count > niter_success):
message = ['success condition satisfied']
break
res = bh.res
res.lowest_optimization_result = bh.storage.get_lowest()
res.x = np.copy(res.lowest_optimization_result.x)
res.fun = res.lowest_optimization_result.fun
res.message = message
res.nit = (i + 1)
res.success = res.lowest_optimization_result.success
return res |
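# Minimal usage sketch of the public SciPy entry point implemented above
# (scipy.optimize.basinhopping; 1-D test function taken from the SciPy docs).
import numpy as np
from scipy.optimize import basinhopping

func = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]
res = basinhopping(func, x0=[1.0], minimizer_kwargs={'method': 'BFGS'}, niter=100, seed=1234)
print(res.x, res.fun)  # converges to the global minimum near x ~ -0.195 |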
class DeepSpeech2Extractor(CNNExtractor):
def __init__(self, activation: str='hardtanh', mask_conv: bool=False) -> None:
super(DeepSpeech2Extractor, self).__init__(activation)
self.mask_conv = mask_conv
self.conv = nn.Sequential(nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5), bias=False), nn.BatchNorm2d(32), self.activation, nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5), bias=False), nn.BatchNorm2d(32), self.activation)
if mask_conv:
self.conv = MaskConv(self.conv)
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Optional[Any]:
if self.mask_conv:
return self.conv(inputs, input_lengths)
return self.conv(inputs) |
@WordSplitter.register('just_spaces')
class JustSpacesWordSplitter(WordSplitter):
def split_words(self, sentence: str) -> List[Token]:
return [Token(t) for t in sentence.split()] |
def load_non_english_user_set():
non_english_user_set = set(np.load('uids.npz')['data'])
return non_english_user_set |
def test_clustering_ADP_pure_python_with_merging():
cl = Clustering(coordinates=X)
_ = cl.compute_density_kNN(k=5)
cl.kstar = (np.ones(cl.N, dtype=int) * 5)
_ = cl.compute_clustering_ADP_pure_python()
assert (cl.N_clusters == 2)
assert (cl.cluster_assignment == expected_cluster_assignment).all() |
def get_dense_json_path(data_dir: str, data_type: str, split: str='1.0') -> str:
json_path = f'{data_dir}/visdial_{split}_{data_type}_dense_annotations.json'
return json_path |
class SS3Prompt(Cmd):
    @requires_args
def do_new(self, args):
global CLF
args = split_args(args)
model_name = args[0].lower()
if args:
if (model_name in MODELS):
print()
Print.warn(WARN_OVERWRITE, False)
if (input() == 'Y'):
overwrite_model(SS3.__models_folder__, model_name)
print()
else:
print()
return
CLF = SS3(name=model_name)
Evaluation.set_classifier(CLF)
else:
Print.error("Empty model's name: please provide your model's name")
    @requires_args
def do_load(self, args):
global CLF, ARGS_CATS
args = split_args(args)
try:
new_clf = SS3(name=args[0])
new_clf.load_model()
CLF = new_clf
Evaluation.set_classifier(new_clf)
ARGS_CATS = CLF.get_categories()
except IOError:
Print.error(("Failed to load the model: No model named '%s' was found in folder ./%s" % (args[0], SS3.__models_folder__)))
    @requires_model
def do_rename(self, args):
args = split_args(args)
if (len(args) == 1):
m_folder = CLF.__models_folder__
model_file = path.join(m_folder, ('%s.%s' % (CLF.__name__, STR_MODEL_EXT)))
model_new_file = path.join(m_folder, ('%s.%s' % (args[0], STR_MODEL_EXT)))
cache_file = path.join(m_folder, (CLF.__name__ + EVAL_CACHE_EXT))
cache_new_file = path.join(m_folder, (args[0] + EVAL_CACHE_EXT))
rename = True
if path.exists(model_new_file):
print()
Print.warn(WARN_OVERWRITE, False)
if (input() != 'Y'):
rename = False
if rename:
CLF.__name__ = args[0]
Evaluation.__cache_file__ = path.join(CLF.__models_folder__, (CLF.__name__ + EVAL_CACHE_EXT))
overwrite_model(m_folder, CLF.__name__)
if path.exists(cache_file):
rename_file(cache_file, cache_new_file)
if path.exists(model_file):
rename_file(model_file, model_new_file)
else:
Print.error((ERROR_WAN % (1, len(args))))
    @requires_model
def do_clone(self, args):
args = split_args(args)
if (len(args) == 1):
Evaluation.__cache_load__()
CLF.__name__ = args[0]
CLF.save_model()
Evaluation.__cache_file__ = path.join(CLF.__models_folder__, (CLF.__name__ + EVAL_CACHE_EXT))
Evaluation.__cache_update__()
Evaluation.set_classifier(CLF)
else:
Print.error((ERROR_WAN % (1, len(args))))
    @requires_model
    @requires_args
def do_train(self, args):
try:
(train_path, folder_label, n_grams) = self.args_train(args)
try:
(x_train, y_train) = Dataset.load_from_files(train_path, folder_label, True)
except OSError:
Print.error((ERROR_NSD % train_path))
return
train(x_train, y_train, n_grams, train_path, folder_label)
except ArgsParseError:
pass
    @requires_model
    @requires_args
def do_k_fold(self, args):
try:
(data_path, folder_label, def_cat, n_grams, k_fold, hparams, cache) = self.args_k_fold(args)
(x_data, y_data) = load_data(data_path, folder_label, cmd_name='k_fold')
(s, l, p, a) = CLF.get_hyperparameters()
CLF.set_hyperparameters(hparams['s'], hparams['l'], hparams['p'], hparams['a'])
Evaluation.kfold_cross_validation(CLF, x_data, y_data, k_fold, n_grams, def_cat, data_path, cache=cache)
CLF.set_hyperparameters(s, l, p, a)
except (ArgsParseError, LoadDataError):
return
except InvalidCategoryError:
Print.error((ERROR_ICN % def_cat))
except KeyboardInterrupt:
Print.warn('Interrupted!\n')
Print.set_verbosity(VERBOSITY.VERBOSE)
    @requires_model
    @requires_args
def do_test(self, args):
try:
(test_path, folder_label, def_cat, hparams, cache) = self.args_test(args)
(x_test, y_test) = load_data(test_path, folder_label)
(s, l, p, a) = CLF.get_hyperparameters()
CLF.set_hyperparameters(hparams['s'], hparams['l'], hparams['p'], hparams['a'])
Evaluation.test(CLF, x_test, y_test, def_cat, test_path, cache=cache)
CLF.set_hyperparameters(s, l, p, a)
except (ArgsParseError, LoadDataError):
return
except InvalidCategoryError:
Print.error((ERROR_ICN % def_cat))
except KeyboardInterrupt:
Print.warn('Interrupted!\n')
Print.set_verbosity(VERBOSITY.VERBOSE)
    @requires_model
def do_live_test(self, args):
try:
(test_path, folder_label, verbose) = self.args_live_test(args)
except ArgsParseError:
return
if test_path:
try:
success = Server.set_testset_from_files(test_path, folder_label)
if (not success):
Print.warn(('Suggestion: live_test %s %s' % (test_path, (STR_FOLDER if (not folder_label) else STR_FILE))))
return
except OSError:
Print.error((ERROR_NSD % test_path))
return
else:
Server.set_model(CLF)
Server.set_testset([], [])
Server.serve(CLF, quiet=(not verbose))
    @requires_model
    @requires_args
def do_grid_search(self, args):
try:
(data_path, folder_label, def_cat, n_grams, k_fold, hparams, cache) = self.args_grid_search(args)
(x_data, y_data) = load_data(data_path, folder_label, cmd_name='grid_search')
Evaluation.grid_search(CLF, x_data, y_data, hparams['s'], hparams['l'], hparams['p'], hparams['a'], k_fold, n_grams, def_cat, data_path, cache=cache, extended_pbar=True)
Print.warn(("Suggestion: use the command 'plot %s' to visualize the results" % STR_EVALUATIONS))
print('\n')
except InvalidCategoryError:
Print.error((ERROR_ICN % def_cat))
except (ArgsParseError, LoadDataError):
pass
except KeyboardInterrupt:
Print.warn('Interrupted!\n')
Print.set_verbosity(VERBOSITY.VERBOSE)
    @requires_model
def do_evaluations(self, args):
try:
(cmd, data_path, method, def_cat, hparams) = self.args_evaluations(args)
except ArgsParseError:
return
if (cmd == STR_INFO):
Evaluation.show_best(data_path, method)
elif (cmd == STR_PLOT):
evaluation_plot()
elif (cmd == STR_SAVE):
evaluation_plot(open_browser=False)
elif (cmd == STR_REMOVE):
evaluation_remove(data_path, method, def_cat, hparams)
else:
Print.error((ERROR_UA % cmd))
    @requires_model
def do_classify(self, args):
try:
document = self.args_classify(args)
except ArgsParseError:
return
result = CLF.classify(document)
print()
print('SS3 prediction is:\n')
for (i, catinfo) in enumerate(result):
if catinfo[1]:
cat_result = (' %d. %s (confidence value is %.1f)' % ((i + 1), CLF.get_category_name(catinfo[0]).upper(), catinfo[1]))
if (i == 0):
cat_result = Print.style.bold(cat_result)
print(cat_result)
print()
    @requires_model
    @requires_args
def do_learn(self, args):
global ARGS_CATS
try:
(cat, n_grams, document) = self.args_learn(args)
if document.strip():
CLF.learn(document, cat, n_grams=n_grams)
ARGS_CATS = CLF.get_categories()
else:
Print.info('empty document')
except ArgsParseError:
pass
    @requires_model
def do_update(self, args):
CLF.update_values()
Print.warn("Remember to use the 'save' command if you want these changes to be permanently stored")
    @requires_model
def do_save(self, args):
try:
(arg, value) = self.args_save(args)
except ArgsParseError:
return
if (arg == STR_MODEL):
CLF.save_model()
elif (arg == STR_VOCABULARY):
if value:
try:
CLF.save_cat_vocab(value)
except InvalidCategoryError:
Print.error((ERROR_ICN % value))
else:
CLF.save_vocab()
elif (arg == STR_EVALUATIONS):
evaluation_plot(open_browser=False)
elif (arg == STR_STOPWORDS):
if value:
stopwords = CLF.get_stopwords(value)
else:
stopwords = CLF.get_stopwords()
if stopwords:
stopwords_file = (STOPWORDS_FILE % CLF.__name__)
with open(stopwords_file, 'w', encoding=ENCODING) as fstopws:
fstopws.write(u'\n'.join(stopwords))
Print.info(("stopwords saved in '%s'" % stopwords_file))
else:
Print.warn(WARN_NO_STOPWORDS)
    @requires_model
def do_info(self, args):
args = split_args(args)
if (args and (args[0] == STR_EVALUATIONS)):
Evaluation.show_best()
else:
CLF.print_model_info()
all_on = ((not args) or (args[0] == STR_ALL))
if (all_on or (args[0] == STR_PARAMETERS)):
CLF.print_hyperparameters_info()
if (all_on or (args[0] == STR_CATEGORIES)):
CLF.print_categories_info()
    @requires_model
    @requires_args
def do_debug_term(self, args):
args = args.strip('"\'')
if args:
CLF.print_ngram_info(args)
else:
Print.error((ERROR_WAN % (1, 0)))
    @requires_model
    @requires_args
def do_plot(self, args):
args = split_args(args)
if (args[0] == STR_DISTRIBUTION):
try:
CLF.plot_value_distribution(args[1])
except InvalidCategoryError:
Print.error((ERROR_ICN % args[1]))
return
except IndexError:
Print.error((ERROR_WAN % (2, 1)))
Print.warn(('Suggestion: add the category name (e.g. plot distribution %s)' % CLF.get_category_name(0)))
return
elif (args[0] == STR_EVALUATIONS):
evaluation_plot()
else:
Print.error((ERROR_UA % args[0]))
    @requires_model
    @requires_args
def do_set(self, args):
try:
hparams = self.args_set(args)
except ArgsParseError:
return
CLF.set_hyperparameters(hparams['s'], hparams['l'], hparams['p'], hparams['a'])
Print.warn("Remember to use the 'update' command to update the model")
    @requires_model
    @requires_args
def do_get(self, args):
args = split_args(args)
hp = args[0]
if (len(args) != 1):
Print.error((ERROR_WAN % (1, len(args))))
return
if (hp == STR_S):
value = CLF.get_smoothness()
elif (hp == STR_L):
value = CLF.get_significance()
elif (hp == STR_P):
value = CLF.get_sanction()
elif (hp == STR_A):
value = CLF.get_alpha()
else:
Print.error((ERROR_UA % hp))
return
print(Print.style.warning(value))
    @requires_model
    @requires_args
def do_next_word(self, args):
if (not args):
Print.error((ERROR_WAN % (1, 0)))
return
args = args.strip('"\'')
print()
for cat in CLF.get_categories():
next_w = [('%s%s' % (Print.style.green(w.upper()), Print.style.blue(('(%.1f%%)' % (P * 100))))) for (w, fr, P) in CLF.get_next_words(args, cat, 3)]
if next_w:
print((' %s: %s' % (Print.style.bold(cat), ' '.join(next_w))))
print()
def do_license(self, args):
print('The MIT License (MIT)\n\nCopyright (c) 2019 Sergio Burdisso (sergio.)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n')
def do_exit(self, args=''):
print('Bye')
if readline:
readline.set_history_length(HISTFILE_SIZE)
try:
readline.write_history_file(HISTFILE)
except IOError:
pass
raise SystemExit
def complete_info(self, text, line, begidx, endidx):
return [a for a in ARGS['info'] if a.startswith(text)]
def complete_save(self, text, line, begidx, endidx):
return [a for a in (ARGS['save'] + ARGS_CATS) if a.startswith(text)]
def complete_load(self, text, line, begidx, endidx):
return [a for a in ARGS['load'] if a.startswith(text)]
def complete_train(self, text, line, begidx, endidx):
return [a for a in ARGS['train'] if a.startswith(text)]
def complete_test(self, text, line, begidx, endidx):
return [a for a in ((ARGS['test'] + ARGS['train']) + ARGS_CATS) if a.startswith(text)]
def complete_live_test(self, text, line, begidx, endidx):
return [a for a in ARGS['live_test'] if a.startswith(text)]
def complete_learn(self, text, line, begidx, endidx):
return [a for a in (ARGS['learn'] + ARGS_CATS) if a.startswith(text)]
def complete_set(self, text, line, begidx, endidx):
return [a for a in ARGS['set'] if a.startswith(text)]
def complete_plot(self, text, line, begidx, endidx):
return [a for a in (ARGS['plot'] + ARGS_CATS) if a.startswith(text)]
def complete_grid_search(self, text, line, begidx, endidx):
return [a for a in (((ARGS['grid_search'] + ARGS['test']) + ARGS['train']) + ARGS_CATS) if a.startswith(text)]
def complete_evaluations(self, text, line, begidx, endidx):
return [a for a in ARGS['evaluations'] if a.startswith(text)]
def args_train(self, args):
args = split_args(args)
n_grams = 1
folder_label = True
if (not args):
Print.error((ERROR_WAN % (1, 0)), raises=ArgsParseError)
op_args = args[1:]
op_args_ix = []
folder_arg = intersect([STR_FILE, STR_FOLDER], op_args)
if folder_arg:
folder_label = (folder_arg[0] == STR_FOLDER)
op_args_ix.append(op_args.index(folder_arg[0]))
n_grams_arg = re_in(('(.+)-' + STR_NGRAMS), op_args)
if n_grams_arg:
op_args_ix.append(op_args.index(n_grams_arg.group(0)))
try:
n_grams = int(n_grams_arg.group(1))
if (n_grams <= 0):
raise Exception
except BaseException:
Print.error(ERROR_WNGRAM, raises=ArgsParseError)
        unknown_args = subtract(range(len(op_args)), op_args_ix)
        if (len(unknown_args) > 0):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
return (args[0], folder_label, n_grams)
def args_test(self, args):
args = split_args(args)
def_cat = STR_MOST_PROBABLE
folder_label = True
cache = True
if (not CLF.get_categories()):
Print.error(ERROR_MNT, raises=ArgsParseError)
if (not args):
Print.error((ERROR_WAN % (1, 0)), raises=ArgsParseError)
op_args = args[1:]
op_args_ix = []
folder_arg = intersect([STR_FILE, STR_FOLDER], op_args)
if folder_arg:
folder_label = (folder_arg[0] == STR_FOLDER)
op_args_ix.append(op_args.index(folder_arg[0]))
if (STR_NO_CACHE in op_args):
cache = False
op_args_ix.append(op_args.index(STR_NO_CACHE))
(hparams, used_args_ix) = parse_hparams_args(op_args)
op_args_ix.extend(used_args_ix)
def_cat_arg = subtract(range(len(op_args)), op_args_ix)
if (len(def_cat_arg) == 1):
def_cat = op_args[def_cat_arg[0]]
elif (len(def_cat_arg) > 1):
Print.error((ERROR_WAN % ((len(op_args_ix) + 2), len(args))), raises=ArgsParseError)
return (args[0], folder_label, def_cat, hparams, cache)
def args_k_fold(self, args):
args = split_args(args)
def_cat = STR_MOST_PROBABLE
folder_label = True
cache = True
k_fold = 4
n_grams = 1
if (not CLF.get_categories()):
Print.error(ERROR_MNT, raises=ArgsParseError)
if (len(args) < 1):
Print.error((ERROR_WAN % (2, len(args))), raises=ArgsParseError)
op_args = args[1:]
op_args_ix = []
if (STR_NO_CACHE in op_args):
cache = False
op_args_ix.append(op_args.index(STR_NO_CACHE))
n_grams_arg = re_in(('(.+)-' + STR_NGRAMS), op_args)
if n_grams_arg:
op_args_ix.append(op_args.index(n_grams_arg.group(0)))
try:
n_grams = int(n_grams_arg.group(1))
if (n_grams <= 0):
raise Exception
except BaseException:
Print.error(ERROR_WNGRAM, raises=ArgsParseError)
k_fold_arg = re_in(('(.+)-' + STR_FOLD), op_args)
if k_fold_arg:
op_args_ix.append(op_args.index(k_fold_arg.group(0)))
try:
k_fold = int(k_fold_arg.group(1))
if (k_fold < 2):
raise Exception
except BaseException:
Print.error(ERROR_WKFOLD, raises=ArgsParseError)
folder_arg = intersect([STR_FILE, STR_FOLDER], op_args)
if folder_arg:
folder_label = (folder_arg[0] == STR_FOLDER)
op_args_ix.append(op_args.index(folder_arg[0]))
(hparams, used_args_ix) = parse_hparams_args(op_args)
op_args_ix.extend(used_args_ix)
def_cat_arg = subtract(range(len(op_args)), op_args_ix)
if (len(def_cat_arg) == 1):
def_cat = op_args[def_cat_arg[0]]
elif (len(def_cat_arg) > 1):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
return (args[0], folder_label, def_cat, n_grams, k_fold, hparams, cache)
def args_grid_search(self, args):
args = split_args(args)
def_cat = STR_MOST_PROBABLE
folder_label = True
cache = True
hparams = {}
k_fold = 0
n_grams = len(CLF.__max_fr__[0])
if (not CLF.get_categories()):
Print.error(ERROR_MNT, raises=ArgsParseError)
if (len(args) < 2):
Print.error((ERROR_WAN % (2, len(args))), raises=ArgsParseError)
op_args = args[1:]
op_args_ix = []
if (STR_NO_CACHE in op_args):
cache = False
op_args_ix.append(op_args.index(STR_NO_CACHE))
n_grams_arg = re_in(('(.+)-' + STR_NGRAMS), op_args)
if n_grams_arg:
op_args_ix.append(op_args.index(n_grams_arg.group(0)))
try:
n_grams = int(n_grams_arg.group(1))
if (n_grams <= 0):
raise Exception
except BaseException:
Print.error(ERROR_WNGRAM, raises=ArgsParseError)
k_fold_arg = re_in(('(.+)-' + STR_FOLD), op_args)
if k_fold_arg:
op_args_ix.append(op_args.index(k_fold_arg.group(0)))
try:
k_fold = int(k_fold_arg.group(1))
if (k_fold < 2):
raise Exception
except BaseException:
Print.error(ERROR_WKFOLD, raises=ArgsParseError)
folder_arg = intersect([STR_FILE, STR_FOLDER], op_args)
if folder_arg:
folder_label = (folder_arg[0] == STR_FOLDER)
op_args_ix.append(op_args.index(folder_arg[0]))
(s, l, p, a) = CLF.get_hyperparameters()
no_hparams = True
for key_args in ((STR_S, s), (STR_L, l), (STR_P, p), (STR_A, a)):
(hp_str, h_v) = key_args
arg = intersect(key_args, op_args)
if arg:
argi = op_args.index(arg[0])
op_args_ix.extend([argi, (argi + 1)])
try:
hparams[hp_str] = eval(op_args[(argi + 1)])
except IndexError:
Print.error((ERROR_HVM % hp_str), raises=ArgsParseError)
except BaseException:
Print.error(("[python] error: the value for the hyperparameter '%s' is not valid" % hp_str), raises=ArgsParseError)
try:
hparams[hp_str] = [float(hparams[hp_str])]
except BaseException:
pass
try:
hparams[hp_str] = [float(v) for v in hparams[hp_str]]
except ValueError:
Print.error(("Wrong hyperparameter value type: Some of values for the hyperparameter '%s' are not numbers" % hp_str), raises=ArgsParseError)
no_hparams = False
else:
hparams[hp_str] = [h_v]
if no_hparams:
Print.error('hyperparameters missing: at least one hyperparameter value range must be given', raises=ArgsParseError)
def_cat_arg = subtract(range(len(op_args)), op_args_ix)
if (len(def_cat_arg) == 1):
def_cat = op_args[def_cat_arg[0]]
if (def_cat not in [STR_MOST_PROBABLE, STR_UNKNOWN]):
if (CLF.get_category_index(def_cat) == IDX_UNKNOWN_CATEGORY):
Print.error((ERROR_ICN % def_cat), raises=ArgsParseError)
return
elif (len(def_cat_arg) > 1):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
return (args[0], folder_label, def_cat, n_grams, k_fold, hparams, cache)
def args_evaluations(self, args):
args = split_args(args)
(data_path, method, def_cat) = (None, None, None)
hparams = {}
cmd = (args[0] if args else STR_INFO)
if (cmd in [STR_REMOVE, STR_INFO]):
op_args = args[1:]
(hparams, used_args_ix) = parse_hparams_args(op_args, defaults=False)
k_fold_arg = re_in(('(.+)-' + STR_FOLD), op_args)
if k_fold_arg:
method = k_fold_arg.group(0)
elif (STR_TEST in op_args):
method = STR_TEST
if method:
used_args_ix.append(op_args.index(method))
free_args_ix = subtract(range(len(op_args)), used_args_ix)
if len(free_args_ix):
data_path = op_args[free_args_ix[0]]
if (len(free_args_ix) > 1):
def_cat = op_args[free_args_ix[1]]
elif (len(free_args_ix) > 2):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
elif (len(args) > 1):
Print.error(ERROR_WNAUA)
Print.warn(('Suggestion: evaluations %s' % cmd), raises=ArgsParseError)
return (cmd, data_path, method, def_cat, hparams)
def args_classify(self, args):
args = args.strip('"\'')
if (not args):
Print.info(MSG_USER_INPUT_DOC)
document = ''.join(sys.stdin.readlines())
print('\n---')
else:
try:
with open(args, 'r', encoding=ENCODING) as fdoc:
document = fdoc.read()
except IOError:
Print.error((ERROR_NSF % args), raises=ArgsParseError)
return document
def args_live_test(self, args):
args = split_args(args)
path = None
verbose = False
folder_label = True
path_required = False
op_args = args
op_args_ix = []
folder_arg = intersect([STR_FILE, STR_FOLDER], op_args)
if folder_arg:
path_required = True
folder_label = (folder_arg[0] == STR_FOLDER)
op_args_ix.append(op_args.index(folder_arg[0]))
if (STR_VERBOSE in op_args):
verbose = True
op_args_ix.append(op_args.index(STR_VERBOSE))
path_ix = subtract(range(len(op_args)), op_args_ix)
if (len(path_ix) == 1):
path = op_args[path_ix[0]]
elif (len(path_ix) > 1):
Print.error((ERROR_UA % op_args[path_ix[1]]), raises=ArgsParseError)
elif ((len(path_ix) == 0) and path_required):
Print.error('A path must be given', raises=ArgsParseError)
return (path, folder_label, verbose)
def args_learn(self, args):
args = split_args(args)
n_grams = 1
doc_path = None
if (not args):
Print.error((ERROR_WAN % (1, 0)), raises=ArgsParseError)
if (args[0] not in CLF.get_categories()):
Print.error((ERROR_ICN % args[0]), raises=ArgsParseError)
op_args = args[1:]
op_args_ix = []
n_grams_arg = re_in(('(.+)-' + STR_NGRAMS), op_args)
if n_grams_arg:
op_args_ix.append(op_args.index(n_grams_arg.group(0)))
try:
n_grams = int(n_grams_arg.group(1))
if (n_grams <= 0):
raise Exception
except BaseException:
Print.error(ERROR_WNGRAM, raises=ArgsParseError)
def_cat_arg = subtract(range(len(op_args)), op_args_ix)
if (len(def_cat_arg) == 1):
doc_path = op_args[def_cat_arg[0]]
elif (len(def_cat_arg) > 1):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
if (not doc_path):
Print.info(MSG_USER_INPUT_DOC)
document = ''.join(sys.stdin.readlines())
print()
else:
try:
with open(doc_path, 'r', encoding=ENCODING) as fdoc:
document = fdoc.read()
except IOError:
Print.error((ERROR_NSF % doc_path), raises=ArgsParseError)
return (args[0], n_grams, document)
def args_save(self, args):
args = split_args(args)
if ((not args) or (args[0] == STR_MODEL)):
return (STR_MODEL, None)
elif (args[0] == STR_VOCABULARY):
return (STR_VOCABULARY, (args[1] if (len(args) == 2) else ''))
elif (args[0] == STR_EVALUATIONS):
return (STR_EVALUATIONS, None)
elif (args[0] == STR_STOPWORDS):
threshold = None
if ((len(args) > 1) and args[1]):
try:
threshold = float(args[1])
except ValueError:
Print.error((ERROR_WAT % 'float'), raises=ArgsParseError)
return (STR_STOPWORDS, threshold)
else:
Print.error((ERROR_UA % args[0]), raises=ArgsParseError)
def args_set(self, args):
args = split_args(args)
if (not args):
Print.error((ERROR_AR % 'set'), raises=ArgsParseError)
(hparams, used_args_ix) = parse_hparams_args(args)
if (len(used_args_ix) != len(args)):
Print.error(ERROR_WNAUA, raises=ArgsParseError)
return hparams
def preloop(self):
if (readline and path.exists(HISTFILE)):
readline.read_history_file(HISTFILE)
def precmd(self, line):
if (line != 'EOF'):
line = line.lower()
else:
print('')
return line
def default(self, line):
Print.error(('Unknown command: %s' % line))
do_EOF = do_exit
complete_get = complete_set
complete_ld = complete_load
complete_sv = complete_save
complete_k_fold = complete_grid_search |
def cal_running_avg_loss(loss, running_avg_loss, decay=0.99):
if (running_avg_loss == 0):
return loss
else:
running_avg_loss = ((running_avg_loss * decay) + ((1 - decay) * loss))
return running_avg_loss |
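# Worked example of the exponential moving average above: with decay=0.99 each
# new loss nudges the running value by 1% (cal_running_avg_loss from the
# snippet above is assumed importable).
avg = 0
for loss in [4.0, 2.0, 3.0]:
    avg = cal_running_avg_loss(loss, avg)
    print(avg)  # 4.0, then 3.98, then 3.9702 |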
@cached_function
def cmunu1(mu, nu):
(q, t) = QQqt.gens()
for (i, val) in enumerate(nu._list):
if (val < mu._list[i]):
A = prod(((((t ** mu.leg_length(i, s)) - (q ** (mu.arm_length(i, s) + 1))) / ((t ** nu.leg_length(i, s)) - (q ** (nu.arm_length(i, s) + 1)))) for s in range(val)))
B = prod(((((q ** mu.arm_length(*s)) - (t ** (mu.leg_length(*s) + 1))) / ((q ** nu.arm_length(*s)) - (t ** (nu.leg_length(*s) + 1)))) for s in nu.cells() if (s[1] == val)))
return QQqt((A * B))
return QQqt(prod(((((q ** mu.arm_length(s, 0)) - (t ** (mu.leg_length(s, 0) + 1))) / ((q ** nu.arm_length(s, 0)) - (t ** (nu.leg_length(s, 0) + 1)))) for s in range(len(nu._list))))) |
def load_proto(fpath):
with open(fpath, 'rb') as f:
loaded = f.read()
model = base_pb2.ModelProto().FromString(loaded)
return model |
def coarse_model(F, bcs, J, y, u, p, config_ocsm):
return ocsm.CoarseModel(F, bcs, J, y, u, p, config=config_ocsm) |
def func_set_import_onnx_config(config):
def handle_source_func_list(func):
source_func_list.append(func)
def handle_target_func_list(func, opset):
if opset.startswith('opset_'):
opset = opset[len('opset_'):]
target_func_list.append('{}{}'.format(func, opset))
def map_target_to_source(func_list):
_func_list = []
target_set = set(func_list)
for (nnabla_func, impl_funcs) in _onnx_func_info.items():
if (set(impl_funcs) <= target_set):
_func_list.append(nnabla_func)
return _func_list
source_func_list = []
target_func_list = []
with open(config, 'r') as f:
for func_decl in f.readlines():
            func_decl = func_decl.strip().split('@')  # '@' separator assumed; the original literal was lost
if func_decl[0].startswith(';'):
continue
elif (len(func_decl) == 1):
handle_source_func_list(func_decl[0])
elif (len(func_decl) == 2):
handle_target_func_list(func_decl[0], func_decl[1])
if ((not source_func_list) and (not target_func_list)):
print('WARNING: function list seems empty!')
return set()
if target_func_list:
func_list = map_target_to_source(target_func_list)
else:
func_list = source_func_list
return set(func_list) |
def test_ifloordiv():
value = 42
copy = proxy = tt.ObjectProxy(value)
value //= 3
proxy //= 3
assert (value == proxy)
assert (int in tt.UsageTraceNode.from_proxy(copy).children['__ifloordiv__'].arg_types[0]) |
class TriStageLRScheduler(LearningRateScheduler):
def __init__(self, optimizer, init_lr, peak_lr, final_lr, init_lr_scale, final_lr_scale, warmup_steps, total_steps):
        assert isinstance(warmup_steps, int), 'warmup_steps should be an integer'
        assert isinstance(total_steps, int), 'total_steps should be an integer'
super(TriStageLRScheduler, self).__init__(optimizer, init_lr)
self.init_lr *= init_lr_scale
self.final_lr = final_lr
self.peak_lr = peak_lr
self.warmup_steps = warmup_steps
self.hold_steps = (int((total_steps >> 1)) - warmup_steps)
self.decay_steps = int((total_steps >> 1))
self.warmup_rate = (((self.peak_lr - self.init_lr) / self.warmup_steps) if (self.warmup_steps != 0) else 0)
self.decay_factor = ((- math.log(final_lr_scale)) / self.decay_steps)
self.lr = self.init_lr
self.update_step = 0
def _decide_stage(self):
if (self.update_step < self.warmup_steps):
return (0, self.update_step)
offset = self.warmup_steps
if (self.update_step < (offset + self.hold_steps)):
return (1, (self.update_step - offset))
offset += self.hold_steps
if (self.update_step <= (offset + self.decay_steps)):
return (2, (self.update_step - offset))
offset += self.decay_steps
return (3, (self.update_step - offset))
def step(self):
(stage, steps_in_stage) = self._decide_stage()
if (stage == 0):
self.lr = (self.init_lr + (self.warmup_rate * steps_in_stage))
elif (stage == 1):
self.lr = self.peak_lr
elif (stage == 2):
self.lr = (self.peak_lr * math.exp(((- self.decay_factor) * steps_in_stage)))
elif (stage == 3):
self.lr = self.final_lr
else:
raise ValueError('Undefined stage')
self.set_lr(self.optimizer, self.lr)
self.update_step += 1
return self.lr |
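# Hedged usage sketch for the tri-stage schedule above: linear warmup to
# peak_lr, hold through the first half of training, then exponential decay
# toward peak_lr * final_lr_scale. The LearningRateScheduler base class is not
# shown here, so its optimizer/set_lr plumbing is assumed.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-7)
scheduler = TriStageLRScheduler(optimizer, init_lr=1e-7, peak_lr=1e-3, final_lr=1e-5,
                                init_lr_scale=1.0, final_lr_scale=0.01,
                                warmup_steps=10, total_steps=100)
lrs = [scheduler.step() for _ in range(100)]  # rises for 10 steps, holds for 40, decays for 50 |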
class BaseMetric(ABC):
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs
    @abstractmethod
    def compute(self, y_true, y_pred):
        pass |
def run_bo_table1_tf(landscape, wt, problem_name, start_num):
alphabet = s_utils.DNAA
def _robustness(landscape: flexs.Landscape, make_explorer: Callable[([flexs.Model, float, str], flexs.Explorer)]):
results = []
for ss in [0.0, 0.5, 0.9, 1.0]:
print(f'Evaluating for robustness with model accuracy; signal_strength: {ss}')
model = flexs.Ensemble([baselines.models.NoisyAbstractModel(landscape, signal_strength=ss)], combine_with=(lambda x: x))
explorer = make_explorer(model, ss, tag=f'ss{ss}')
res = explorer.run(landscape, verbose=False)
results.append((ss, res))
return results
def make_explorer(model, ss, tag):
return baselines.explorers.BO(model=model, rounds=10, starting_sequence=wt, sequences_batch_size=sequences_batch_size, model_queries_per_batch=model_queries_per_batch, alphabet=alphabet, log_file=f'runs/bo/{problem_name}_start{start_num}_{tag}')
results = _robustness(landscape, make_explorer)
return results |
@dataclasses.dataclass()
class FQEConfig(LearnableConfig):
learning_rate: float = 0.0001
optim_factory: OptimizerFactory = make_optimizer_field()
encoder_factory: EncoderFactory = make_encoder_field()
q_func_factory: QFunctionFactory = make_q_func_field()
batch_size: int = 100
gamma: float = 0.99
n_critics: int = 1
target_update_interval: int = 100
def create(self, device: DeviceArg=False) -> '_FQEBase':
raise NotImplementedError('Config object must be directly given to constructor')
    @staticmethod
    def get_type() -> str:
return 'fqe' |
class Evaluator():
def __init__(self, case_sensitive=False):
self.case_sensitive = case_sensitive
self.get_edit_distance = editdistance.eval
self.anls_threshold = 0.5
self.total_accuracies = []
self.total_anls = []
self.best_accuracy = 0
self.best_epoch = 0
def get_metrics(self, gt_answers, preds, answer_types=None, update_global_metrics=True):
answer_types = (answer_types if (answer_types is not None) else ['string' for batch_idx in range(len(gt_answers))])
batch_accuracy = []
batch_anls = []
for batch_idx in range(len(preds)):
gt = [self._preprocess_str(gt_elm) for gt_elm in gt_answers[batch_idx]]
pred = self._preprocess_str(preds[batch_idx])
batch_accuracy.append(self._calculate_accuracy(gt, pred, answer_types[batch_idx]))
batch_anls.append(self._calculate_anls(gt, pred, answer_types[batch_idx]))
return {'accuracy': batch_accuracy, 'anls': batch_anls}
def get_retrieval_metric(self, gt_answer_page, pred_answer_page):
retrieval_precision = [(1 if (gt == pred) else 0) for (gt, pred) in zip(gt_answer_page, pred_answer_page)]
return retrieval_precision
def update_global_metrics(self, accuracy, anls, current_epoch):
if (accuracy > self.best_accuracy):
self.best_accuracy = accuracy
self.best_epoch = current_epoch
return True
else:
return False
def _preprocess_str(self, string):
if (not self.case_sensitive):
string = string.lower()
return string.strip()
def _calculate_accuracy(self, gt, pred, answer_type):
if (answer_type == 'not-answerable'):
return (1 if (pred in ['', 'none', 'NA', None, []]) else 0)
if ((pred == 'none') and (answer_type != 'not-answerable')):
return 0
for gt_elm in gt:
if (gt_elm == pred):
return 1
return 0
def _calculate_anls(self, gt, pred, answer_type):
if (len(pred) == 0):
return 0
if (answer_type == 'not-answerable'):
return (1 if (pred in ['', 'none', 'NA', None, []]) else 0)
if ((pred == 'none') and (answer_type != 'not-answerable')):
return 0
answers_similarity = [(1 - (self.get_edit_distance(gt_elm, pred) / max(len(gt_elm), len(pred)))) for gt_elm in gt]
max_similarity = max(answers_similarity)
anls = (max_similarity if (max_similarity >= self.anls_threshold) else 0)
return anls |
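# Worked ANLS example matching Evaluator._calculate_anls above: similarity is
# 1 - edit_distance / max_len, and scores below the 0.5 threshold are zeroed
# (the editdistance package used by the class is assumed installed).
import editdistance

gt, pred = 'answer', 'anser'
similarity = 1 - editdistance.eval(gt, pred) / max(len(gt), len(pred))
print(similarity)  # ~0.833 -> kept; a similarity below 0.5 would score 0 |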
def pad_starts_stops(starts, stops, length):
outstarts = []
outstops = []
for x in starts:
outstarts.append(x)
for y in range(len(stops)):
outstops.append((starts[y] + length))
return (outstarts, outstops) |
def test_inductor_Q():
ind = Inductor(10, 'GHz', Q=1)
assert (ind.Q((- 1), 0) == 1)
assert (ind.Q(10, 12) == 1)
assert (ind.Q(0, 11) == 1)
ind = Inductor(10, 'GHz', Q=.0)
assert (ind.Q((- 1), 5) == .0)
assert (ind.Q(10, 6) == .0)
assert (ind.Q(0, 1) == .0)
ind = Inductor(10, 'GHz')
assert np.isclose(ind.Q(((2 * np.pi) * .0), 10), .0)
assert np.isclose(ind.Q(((2 * np.pi) * .0), 1), .0)
Q = (lambda omega, T: (omega ** 2))
ind = Inductor(10, 'GHz', Q=Q)
assert (ind.Q(2, 10) == 4) |
class DummyTransf(Transf):
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
self.timestamp_ = time.time()
return self |
class AverageMetric(NumericMetric):
def show(self):
return ('%.2f' % (1.0 * self.value())) |
class Adafactor(torch.optim.Optimizer):
def __init__(self, params, lr=None, eps=1e-30, eps_scale=0.001, clip_threshold=1.0, decay_rate=(- 0.8), betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
relative_step = (not lr)
if (warmup_init and (not relative_step)):
raise ValueError('warmup_init requires relative_step=True')
beta1 = (None if (betas is None) else betas[0])
defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
    @staticmethod
    def _get_lr(param_group, param_state):
if param_group['relative_step']:
min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
lr_t = min(min_step, (1.0 / math.sqrt(param_state['step'])))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps_scale'], param_state['RMS'])
param_group['lr'] = (lr_t * param_scale)
return param_group['lr']
    @staticmethod
    def _get_options(param_group, param_shape):
factored = (len(param_shape) >= 2)
use_first_moment = (param_group['beta1'] is not None)
return (factored, use_first_moment)
    @staticmethod
    def _rms(tensor):
return (tensor.norm(2) / (tensor.numel() ** 0.5))
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1), keepdim=True)).rsqrt_().unsqueeze((- 1))
c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt()
return torch.mul(r_factor, c_factor)
    @torch.no_grad()
def step(self, closure=None):
loss = None
if (closure is not None):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad
if (grad.dtype in {torch.float16, torch.bfloat16}):
grad = grad.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
(factored, use_first_moment) = self._get_options(group, grad.shape)
if (len(state) == 0):
state['step'] = 0
if use_first_moment:
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad.shape[:(- 1)]).to(grad)
state['exp_avg_sq_col'] = torch.zeros((grad.shape[:(- 2)] + grad.shape[(- 1):])).to(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].to(grad)
if factored:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
p_fp32 = p
if (p.dtype in {torch.float16, torch.bfloat16}):
p_fp32 = p_fp32.float()
state['step'] += 1
state['RMS'] = self._rms(p_fp32)
lr_t = self._get_lr(group, state)
beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
update = ((grad ** 2) + group['eps'])
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=(- 1)), alpha=(1.0 - beta2t))
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=(- 2)), alpha=(1.0 - beta2t))
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
update.mul_(lr_t)
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_(update, alpha=(1 - group['beta1']))
update = exp_avg
if (group['weight_decay'] != 0):
p_fp32.add_(p_fp32, alpha=((- group['weight_decay']) * lr_t))
p_fp32.add_((- update))
if (p.dtype in {torch.float16, torch.bfloat16}):
p.copy_(p_fp32)
return loss |
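# Hedged usage sketch for the Adafactor implementation above: with lr=None the
# relative-step schedule is used, and factored second moments keep optimizer
# state at O(rows + cols) per weight matrix instead of O(rows * cols).
import torch

model = torch.nn.Linear(16, 4)
optimizer = Adafactor(model.parameters(), lr=None, scale_parameter=True)
loss = model(torch.randn(8, 16)).pow(2).mean()
loss.backward()
optimizer.step()  # one update using the factored rsqrt moments |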
def write_cov(cpath, cov, cargs):
cfile = open(cpath, 'a')
for f in cov:
cfile.write(('File: %s\n' % f))
for ctype in sorted(cov[f]):
if (ctype == 'function'):
for val in sorted(cov[f][ctype]):
cfile.write((' %s: %s\n' % (ctype, val)))
elif (ctype == 'line'):
if cargs.coverage_include_lines:
for val in sorted(cov[f][ctype], key=int):
cfile.write((' %s: %s\n' % (ctype, val)))
cfile.close()
return |
def _get_global_builtins():
supported_builtins = ['print', 'tuple', 'float', 'int', 'bool', 'str', 'getattr', 'hasattr', 'isinstance', 'len', 'hex', 'oct', 'round', 'hash', 'min', 'max', 'abs', 'all', 'divmod', 'list', 'ord', 'chr', 'bin', 'range', 'zip', 'enumerate', 'sorted']
op_renames = {'bool': 'aten::Bool', 'int': 'aten::Int', 'float': 'aten::Float', 'abs': 'prim::abs', 'max': 'prim::max', 'min': 'prim::min', 'range': 'fake::does_not_exist'}
schemaless_op_explanations = {'print': 'Print any value', 'tuple': 'Lists cannot be converted to tuples with this method since their size is not statically known', 'getattr': 'Attribute name must be a literal string', 'hasattr': 'Attribute name must be a literal string', 'isinstance': 'Result is static', 'zip': 'Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.', 'enumerate': 'Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.', 'range': 'Can only be used as an iterator in a for loop'}
magic_methods = [('float', '__float__'), ('int', '__int__'), ('bool', '__bool__'), ('str', '__str__'), ('len', '__len__'), ('hex', '__hex__'), ('oct', '__oct__')]
magic_methods_rows = []
for (fn, magic_method) in magic_methods:
magic_methods_rows.append('"{}", "``{}``"'.format(fn, magic_method))
schematized_ops = []
schemaless_ops = []
for fn in supported_builtins:
op_name = 'aten::{}'.format(fn)
if (fn in op_renames):
op_name = op_renames[fn]
schemas = torch._C._jit_get_schemas_for_operator(op_name)
for s in schemas:
schematized_ops.append(_emit_schema(None, fn, s, padding=0))
if (len(schemas) > 0):
schematized_ops.append('')
else:
table_row = '":any:`{}`", "{}"'.format(fn, schemaless_op_explanations[fn])
schemaless_ops.append(table_row)
schematized_ops_str = '\n'.join(schematized_ops)
schemaless_ops_str = '\n'.join(schemaless_ops)
magic_methods_rows_str = '\n'.join(magic_methods_rows)
schematized_ops_str = textwrap.indent(schematized_ops_str, '\t')
schemaless_ops_str = textwrap.indent(schemaless_ops_str, '\t')
magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, '\t')
section = '\nThe functions in the following table are supported but do not have a static schema\n\n.. csv-table::\n :header: "Function", "Note"\n\n{}\n\nThe following functions will use the corresponding magic method on :any:`TorchScript classes`\n\n.. csv-table::\n :header: "Function", "Magic Method"\n\n{}\n\nThese built-in functions use the schema\n\n.. rst-class:: codeblock-height-limiter\n\n::\n\n{}\n '.format(schemaless_ops_str, magic_methods_rows_str, schematized_ops_str)
return ('Python Built-in Functions', section) |
def register_Ns3PbbAddressBlockIpv4_methods(root_module, cls):
cls.add_constructor([param('ns3::PbbAddressBlockIpv4 const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_const=True, visibility='protected', is_virtual=True)
cls.add_method('GetAddressLength', 'uint8_t', [], is_const=True, visibility='protected', is_virtual=True)
cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
return |
def kendall_top_k(a, b, k=None, p=0.5):
a = np.array(a)
b = np.array(b)
if (k is None):
k = a.size
    if (a.size != b.size):
        raise ValueError('The two arrays need to have the same length')
k = min(k, a.size)
a_top_k = np.argpartition(a, (- k))[(- k):]
b_top_k = np.argpartition(b, (- k))[(- k):]
common_items = np.intersect1d(a_top_k, b_top_k)
only_in_a = np.setdiff1d(a_top_k, common_items)
only_in_b = np.setdiff1d(b_top_k, common_items)
kendall = ((1 - ((stats.kendalltau(a[common_items], b[common_items])[0] / 2) + 0.5)) * (common_items.size ** 2))
if np.isnan(kendall):
kendall = 0
for i in common_items:
for j in only_in_a:
if (a[i] < a[j]):
kendall += 1
for j in only_in_b:
if (b[i] < b[j]):
kendall += 1
kendall += ((2 * p) * special.binom((k - common_items.size), 2))
kendall /= (((only_in_a.size + only_in_b.size) + common_items.size) ** 2)
return np.array([kendall, 1.0]) |
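# Hedged usage sketch for kendall_top_k above (assumed importable along with
# its numpy/scipy imports): compare the top-3 items of two score vectors;
# 0 means the top-k sets and orderings agree, larger values mean more
# displaced pairs, and p penalizes items missing from one of the top-k sets.
import numpy as np

a = np.array([0.1, 0.9, 0.3, 0.8])
b = np.array([0.2, 0.7, 0.9, 0.1])
distance, norm = kendall_top_k(a, b, k=3)
print(distance, norm)  # normalized disagreement in [0, 1], and the constant 1.0 |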
class GoalFollower(RandomAgent):
def __init__(self, success_distance, goal_sensor_uuid):
super().__init__(success_distance, goal_sensor_uuid)
self.pos_th = self.dist_threshold_to_stop
self.angle_th = float(np.deg2rad(15))
self.random_prob = 0
def normalize_angle(self, angle):
if (angle < (- pi)):
angle = ((2.0 * pi) + angle)
if (angle > pi):
angle = (((- 2.0) * pi) + angle)
return angle
def turn_towards_goal(self, angle_to_goal):
if ((angle_to_goal > pi) or ((angle_to_goal < 0) and (angle_to_goal > (- pi)))):
action = HabitatSimActions.TURN_RIGHT
else:
action = HabitatSimActions.TURN_LEFT
return action
def act(self, observations):
if self.is_goal_reached(observations):
action = HabitatSimActions.STOP
else:
angle_to_goal = self.normalize_angle(np.array(observations[self.goal_sensor_uuid][1]))
if (abs(angle_to_goal) < self.angle_th):
action = HabitatSimActions.MOVE_FORWARD
else:
action = self.turn_towards_goal(angle_to_goal)
return {'action': action} |
def test_train_gpt2():
dataset = TextDataset(DATASET_OTHER_EXAMPLE_DICT)
model = BaseModel.create('distilgpt2')
finetuning_config = model.finetuning_config()
finetuning_config.num_train_epochs = 1
model.finetune(dataset=dataset)
generation_config = model.generation_config()
generation_config.do_sample = False
generation_config.max_new_tokens = None
generation_config.top_k = 50
generation_config.top_p = 1.0
result = model.generate(dataset=dataset)
assert (len(result) == 2) |
class TFImageClassifierOutputWithNoAttention(ModelOutput):
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[(tf.Tensor, ...)]] = None |
def _get_ctx59_meta():
stuff_ids = [k['id'] for k in PASCAL_CTX_59_CATEGORIES]
assert (len(stuff_ids) == 59), len(stuff_ids)
stuff_dataset_id_to_contiguous_id = {k: i for (i, k) in enumerate(stuff_ids)}
stuff_classes = [k['name'] for k in PASCAL_CTX_59_CATEGORIES]
ret = {'stuff_dataset_id_to_contiguous_id': stuff_dataset_id_to_contiguous_id, 'stuff_classes': stuff_classes}
return ret |
class WaveStream():
def __init__(self, filename, is_tmp=False):
self.is_tmp = None
self.file = open(filename, 'rb')
self.wave = wave.open(HackExtensibleWave(self.file))
self.smpsize = (self.wave.getnchannels() * self.wave.getsampwidth())
self.sample_rate = self.wave.getframerate()
self.nchannels = self.wave.getnchannels()
if (self.wave.getsampwidth() != 2):
raise NotImplementedError('wave stream currently only supports 16bit wav')
self.stream = self.gen_stream()
self.is_tmp = (filename if is_tmp else None)
def gen_stream(self):
num = (yield np.array([], dtype=np.int16))
if (not num):
num = 1024
while True:
dat = self.wave.readframes(num)
num = (yield np.frombuffer(dat, dtype=np.int16))
if (not num):
num = 1024
if (len(dat) < (num * self.smpsize)):
break
def __del__(self):
if self.is_tmp:
os.unlink(self.is_tmp) |
class TrainableSupportsPredictJointHasReparamSampler(TrainableSupportsPredictJoint, HasReparamSampler, Protocol):
pass |
def test_render_coverage_report(sample_report, tmp_path: Path):
report_path = (tmp_path / 'report.html')
render_coverage_report(sample_report, report_path, datetime.datetime(1970, 1, 1))
with report_path.open(encoding='utf-8', mode='r') as file:
content = file.readlines()
    assert (content == ['<!DOCTYPE html>\n', '<html lang="en">\n', '<head>\n', ' <meta charset="UTF-8">\n', ' <title>Pynguin coverage report</title>\n', ' <style>\n', '\n', 'pre { line-height: 125%; }\n', 'td.linenos .normal { color: #586e75; background-color: #073642; padding-left: 5px; padding-right: 5px; }\n', 'span.linenos { color: #586e75; background-color: #073642; padding-left: 5px; padding-right: 5px; }\n', 'td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }\n', 'span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }\n', '.highlight .hll { background-color: #073642 }\n', '.highlight { background: #002b36; color: #839496 }\n', '.highlight .c { color: #586e75; font-style: italic } /* Comment */\n', '.highlight .err { color: #839496; background-color: #dc322f } /* Error */\n', '.highlight .esc { color: #839496 } /* Escape */\n', '.highlight .g { color: #839496 } /* Generic */\n', '.highlight .k { color: #859900 } /* Keyword */\n', '.highlight .l { color: #839496 } /* Literal */\n', '.highlight .n { color: #839496 } /* Name */\n', '.highlight .o { color: #586e75 } /* Operator */\n', '.highlight .x { color: #839496 } /* Other */\n', '.highlight .p { color: #839496 } /* Punctuation */\n', '.highlight .ch { color: #586e75; font-style: italic } /* Comment.Hashbang */\n', '.highlight .cm { color: #586e75; font-style: italic } /* Comment.Multiline */\n', '.highlight .cp { color: #d33682 } /* Comment.Preproc */\n', '.highlight .cpf { color: #586e75 } /* Comment.PreprocFile */\n', '.highlight .c1 { color: #586e75; font-style: italic } /* Comment.Single */\n', '.highlight .cs { color: #586e75; font-style: italic } /* Comment.Special */\n', '.highlight .gd { color: #dc322f } /* Generic.Deleted */\n', '.highlight .ge { color: #839496; font-style: italic } /* Generic.Emph */\n', '.highlight .ges { color: #839496; font-weight: bold; font-style: italic } /* Generic.EmphStrong */\n', '.highlight .gr { color: #dc322f } /* Generic.Error */\n', '.highlight .gh { color: #839496; font-weight: bold } /* Generic.Heading */\n', '.highlight .gi { color: #859900 } /* Generic.Inserted */\n', '.highlight .go { color: #839496 } /* Generic.Output */\n', '.highlight .gp { color: #268bd2; font-weight: bold } /* Generic.Prompt */\n', '.highlight .gs { color: #839496; font-weight: bold } /* Generic.Strong */\n', '.highlight .gu { color: #839496; text-decoration: underline } /* Generic.Subheading */\n', '.highlight .gt { color: #268bd2 } /* Generic.Traceback */\n', '.highlight .kc { color: #2aa198 } /* Keyword.Constant */\n', '.highlight .kd { color: #2aa198 } /* Keyword.Declaration */\n', '.highlight .kn { color: #cb4b16 } /* Keyword.Namespace */\n', '.highlight .kp { color: #859900 } /* Keyword.Pseudo */\n', '.highlight .kr { color: #859900 } /* Keyword.Reserved */\n', '.highlight .kt { color: #b58900 } /* Keyword.Type */\n', '.highlight .ld { color: #839496 } /* Literal.Date */\n', '.highlight .m { color: #2aa198 } /* Literal.Number */\n', '.highlight .s { color: #2aa198 } /* Literal.String */\n', '.highlight .na { color: #839496 } /* Name.Attribute */\n', '.highlight .nb { color: #268bd2 } /* Name.Builtin */\n', '.highlight .nc { color: #268bd2 } /* Name.Class */\n', '.highlight .no { color: #268bd2 } /* Name.Constant */\n', '.highlight .nd { color: #268bd2 } /* Name.Decorator */\n', '.highlight .ni { color: #268bd2 } /* Name.Entity */\n', '.highlight .ne { color: #268bd2 } /* Name.Exception */\n', '.highlight .nf { color: #268bd2 } /* Name.Function */\n', '.highlight .nl { color: #268bd2 } /* Name.Label */\n', '.highlight .nn { color: #268bd2 } /* Name.Namespace */\n', '.highlight .nx { color: #839496 } /* Name.Other */\n', '.highlight .py { color: #839496 } /* Name.Property */\n', '.highlight .nt { color: #268bd2 } /* Name.Tag */\n', '.highlight .nv { color: #268bd2 } /* Name.Variable */\n', '.highlight .ow { color: #859900 } /* Operator.Word */\n', '.highlight .pm { color: #839496 } /* Punctuation.Marker */\n', '.highlight .w { color: #839496 } /* Text.Whitespace */\n', '.highlight .mb { color: #2aa198 } /* Literal.Number.Bin */\n', '.highlight .mf { color: #2aa198 } /* Literal.Number.Float */\n', '.highlight .mh { color: #2aa198 } /* Literal.Number.Hex */\n', '.highlight .mi { color: #2aa198 } /* Literal.Number.Integer */\n', '.highlight .mo { color: #2aa198 } /* Literal.Number.Oct */\n', '.highlight .sa { color: #2aa198 } /* Literal.String.Affix */\n', '.highlight .sb { color: #2aa198 } /* Literal.String.Backtick */\n', '.highlight .sc { color: #2aa198 } /* Literal.String.Char */\n', '.highlight .dl { color: #2aa198 } /* Literal.String.Delimiter */\n', '.highlight .sd { color: #586e75 } /* Literal.String.Doc */\n', '.highlight .s2 { color: #2aa198 } /* Literal.String.Double */\n', '.highlight .se { color: #2aa198 } /* Literal.String.Escape */\n', '.highlight .sh { color: #2aa198 } /* Literal.String.Heredoc */\n', '.highlight .si { color: #2aa198 } /* Literal.String.Interpol */\n', '.highlight .sx { color: #2aa198 } /* Literal.String.Other */\n', '.highlight .sr { color: #cb4b16 } /* Literal.String.Regex */\n', '.highlight .s1 { color: #2aa198 } /* Literal.String.Single */\n', '.highlight .ss { color: #2aa198 } /* Literal.String.Symbol */\n', '.highlight .bp { color: #268bd2 } /* Name.Builtin.Pseudo */\n', '.highlight .fm { color: #268bd2 } /* Name.Function.Magic */\n', '.highlight .vc { color: #268bd2 } /* Name.Variable.Class */\n', '.highlight .vg { color: #268bd2 } /* Name.Variable.Global */\n', '.highlight .vi { color: #268bd2 } /* Name.Variable.Instance */\n', '.highlight .vm { color: #268bd2 } /* Name.Variable.Magic */\n', '.highlight .il { color: #2aa198 } /* Literal.Number.Integer.Long */\n', '\n', 'body{\n', ' color: #c9d1d9;\n', ' background: #0d1117;\n', ' font-family: monospace;\n', ' font-size: 16px;\n', '}\n', '\n', 'td.lines span{\n', ' display: block;\n', ' padding-right: 8px;\n', ' line-height: 125%;\n', '}\n', '\n', '.notCovered{\n', ' border-right: 5px solid darkred;\n', '}\n', '.partiallyCovered{\n', ' border-right: 5px solid orangered;\n', '}\n', '.fullyCovered{\n', ' border-right: 5px solid darkgreen;\n', '}\n', '.notRelevant{\n', ' border-right: 5px solid transparent;\n', '}\n', '\n', '</style>\n', '</head>\n', '<body>\n', "<h1>Pynguin coverage report for module 'cov_demo'</h1>\n", '<p>Achieved 37.50% branch coverage:\n', '1/2 branchless code objects covered.\n', '2/6 branches covered.</p>\n', '<p>Achieved 25.00% line coverage:\n', '2/8 lines covered. </p>\n', '<table>\n', ' <tbody>\n', ' <tr>\n', ' <td style="width: 40px; text-align: right;" class="lines">\n', ' <span class="partiallyCovered" title="1/2 branchless code objects covered; Line 1 covered">1</span>\n', ' <span class="notCovered" title="Line 2 not covered">2</span>\n', ' <span class="notRelevant">3</span>\n', ' <span class="notRelevant">4</span>\n', ' <span class="fullyCovered" title="Line 5 covered">5</span>\n', ' <span class="partiallyCovered" title="2/4 branches covered; Line 6 not covered">6</span>\n', ' <span class="notRelevant">7</span>\n', ' <span class="notRelevant">8</span>\n', ' <span class="notCovered" title="Line 9 not covered">9</span>\n', ' <span class="notCovered" title="0/2 branches covered; Line 10 not covered">10</span>\n', ' <span class="notCovered" title="Line 11 not covered">11</span>\n', ' <span class="notRelevant">12</span>\n', ' <span class="notCovered" title="Line 13 not covered">13</span>\n', ' </td>\n', ' <td style="width: 100%;"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">foo</span><span class="p">():</span>\n', ' <span class="k">pass</span>\n', '\n', '\n', '<span class="k">def</span> <span class="nf">baz</span><span class="p">():</span>\n', ' <span class="k">assert</span> <span class="mi">3</span> <span class="o">==</span> <span class="mi">5</span> <span class="ow">and</span> <span class="mi">3</span> <span class="o">==</span> <span class="o">-</span><span class="mi">3</span>\n', '\n', '\n', '<span class="k">def</span> <span class="nf">bar</span><span class="p">(</span><span class="n">x</span><span class="p">:</span> <span class="nb">int</span><span class="p">):</span>\n', ' <span class="k">if</span> <span class="n">x</span><span class="p">:</span>\n', ' <span class="k">return</span> <span class="mi">5</span>\n', ' <span class="k">else</span><span class="p">:</span>\n', ' <span class="k">return</span> <span class="mi">6</span>\n', '</pre></div>\n', '</td>\n', ' </tr>\n', ' </tbody>\n', '</table>\n', '<footer>\n', ' <p>Created at 1970-01-01 00:00:00</p>\n', '</footer>\n', '</body>\n', '</html>']) |
@hpopt_mode('generic')  # assumed decorator: the original line was truncated to "_mode('generic')"
class GenericHyperparameterOptimizationReporter(HyperparameterOptimizationReporter):
def __init__(self, reference_date=None, output=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output = (output or sys.stdout)
self.reference_date = reference_date
self._trial_id = None
def report_objective(self, result):
json.dump(dict(result, objective=result[self.objective_key]), self.output)
    @property
    def trial_id(self):
if (self._trial_id is None):
date = (self.reference_date or datetime.now())
self._trial_id = date.strftime(FORMAT_TIMESTAMP)
return self._trial_id |
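A minimal usage sketch for the reporter above. The objective_key kwarg and the FORMAT_TIMESTAMP pattern are assumptions: both are defined elsewhere in the original codebase and only stubbed here.

import io
from datetime import datetime

FORMAT_TIMESTAMP = '%Y%m%d%H%M%S%f'  # assumed pattern; the real constant lives in the source module

buf = io.StringIO()
reporter = GenericHyperparameterOptimizationReporter(
    reference_date=datetime(2024, 1, 1),
    output=buf,
    objective_key='valid_loss',  # assuming the base class stores this key
)
reporter.report_objective({'valid_loss': 0.42, 'epoch': 3})
print(buf.getvalue())   # {"valid_loss": 0.42, "epoch": 3, "objective": 0.42}
print(reporter.trial_id)  # timestamp derived from reference_date, so reproducible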
def register_Ns3RrcAsn1Header_methods(root_module, cls):
cls.add_constructor([param('ns3::RrcAsn1Header const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetMessageType', 'int', [])
cls.add_method('BandwidthToEnum', 'int', [param('uint8_t', 'bandwidth')], is_const=True, visibility='protected')
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_pure_virtual=True, visibility='protected', is_virtual=True)
cls.add_method('DeserializeCellIdentification', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::CellIdentification *', 'ci'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeDrbToAddModList', 'ns3::Buffer::Iterator', [param('std::list< ns3::LteRrcSap::DrbToAddMod > *', 'drbToAddModList'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeLogicalChannelConfig', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::LogicalChannelConfig *', 'logicalChannelConfig'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeMeasConfig', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::MeasConfig *', 'measConfig'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeMeasResults', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::MeasResults *', 'measResults'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeNonCriticalExtensionConfig', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::NonCriticalExtensionConfiguration *', 'nonCriticalExtension'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializePhysicalConfigDedicated', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::PhysicalConfigDedicated *', 'physicalConfigDedicated'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializePhysicalConfigDedicatedSCell', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::PhysicalConfigDedicatedSCell *', 'pcdsc'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializePlmnIdentity', 'ns3::Buffer::Iterator', [param('uint32_t *', 'plmnId'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeQoffsetRange', 'ns3::Buffer::Iterator', [param('int8_t *', 'qOffsetRange'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRachConfigCommon', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RachConfigCommon *', 'rachConfigCommon'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRadioResourceConfigCommon', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RadioResourceConfigCommon *', 'radioResourceConfigCommon'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRadioResourceConfigCommonSCell', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RadioResourceConfigCommonSCell *', 'rrccsc'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRadioResourceConfigCommonSib', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RadioResourceConfigCommonSib *', 'radioResourceConfigCommonSib'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRadioResourceConfigDedicated', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RadioResourceConfigDedicated *', 'radioResourceConfigDedicated'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeRadioResourceConfigDedicatedSCell', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::RadioResourceConfigDedicatedSCell *', 'rrcdsc'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeSrbToAddModList', 'ns3::Buffer::Iterator', [param('std::list< ns3::LteRrcSap::SrbToAddMod > *', 'srbToAddModList'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeSystemInformationBlockType1', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::SystemInformationBlockType1 *', 'systemInformationBlockType1'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeSystemInformationBlockType2', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::SystemInformationBlockType2 *', 'systemInformationBlockType2'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('DeserializeThresholdEutra', 'ns3::Buffer::Iterator', [param('ns3::LteRrcSap::ThresholdEutra *', 'thresholdEutra'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
cls.add_method('EnumToBandwidth', 'uint8_t', [param('int', 'n')], is_const=True, visibility='protected')
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, visibility='protected', is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True, visibility='protected')
cls.add_method('PreSerialize', 'void', [], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='protected', is_virtual=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('ns3::LteRrcSap::RadioResourceConfigDedicated', 'radioResourceConfigDedicated')], is_const=True, visibility='protected')
cls.add_method('SerializeDrbToAddModList', 'void', [param('std::list< ns3::LteRrcSap::DrbToAddMod >', 'drbToAddModList')], is_const=True, visibility='protected')
cls.add_method('SerializeLogicalChannelConfig', 'void', [param('ns3::LteRrcSap::LogicalChannelConfig', 'logicalChannelConfig')], is_const=True, visibility='protected')
cls.add_method('SerializeMeasConfig', 'void', [param('ns3::LteRrcSap::MeasConfig', 'measConfig')], is_const=True, visibility='protected')
cls.add_method('SerializeMeasResults', 'void', [param('ns3::LteRrcSap::MeasResults', 'measResults')], is_const=True, visibility='protected')
cls.add_method('SerializeNonCriticalExtensionConfiguration', 'void', [param('ns3::LteRrcSap::NonCriticalExtensionConfiguration', 'nonCriticalExtensionConfiguration')], is_const=True, visibility='protected')
cls.add_method('SerializePhysicalConfigDedicated', 'void', [param('ns3::LteRrcSap::PhysicalConfigDedicated', 'physicalConfigDedicated')], is_const=True, visibility='protected')
cls.add_method('SerializePhysicalConfigDedicatedSCell', 'void', [param('ns3::LteRrcSap::PhysicalConfigDedicatedSCell', 'pcdsc')], is_const=True, visibility='protected')
cls.add_method('SerializePlmnIdentity', 'void', [param('uint32_t', 'plmnId')], is_const=True, visibility='protected')
cls.add_method('SerializeQoffsetRange', 'void', [param('int8_t', 'qOffsetRange')], is_const=True, visibility='protected')
cls.add_method('SerializeRachConfigCommon', 'void', [param('ns3::LteRrcSap::RachConfigCommon', 'rachConfigCommon')], is_const=True, visibility='protected')
cls.add_method('SerializeRadioResourceConfigCommon', 'void', [param('ns3::LteRrcSap::RadioResourceConfigCommon', 'radioResourceConfigCommon')], is_const=True, visibility='protected')
cls.add_method('SerializeRadioResourceConfigCommonSCell', 'void', [param('ns3::LteRrcSap::RadioResourceConfigCommonSCell', 'rrccsc')], is_const=True, visibility='protected')
cls.add_method('SerializeRadioResourceConfigCommonSib', 'void', [param('ns3::LteRrcSap::RadioResourceConfigCommonSib', 'radioResourceConfigCommonSib')], is_const=True, visibility='protected')
cls.add_method('SerializeRadioResourceConfigDedicated', 'void', [param('ns3::LteRrcSap::RadioResourceConfigDedicated', 'radioResourceConfigDedicated')], is_const=True, visibility='protected')
cls.add_method('SerializeRadioResourceDedicatedSCell', 'void', [param('ns3::LteRrcSap::RadioResourceConfigDedicatedSCell', 'rrcdsc')], is_const=True, visibility='protected')
cls.add_method('SerializeSrbToAddModList', 'void', [param('std::list< ns3::LteRrcSap::SrbToAddMod >', 'srbToAddModList')], is_const=True, visibility='protected')
cls.add_method('SerializeSystemInformationBlockType1', 'void', [param('ns3::LteRrcSap::SystemInformationBlockType1', 'systemInformationBlockType1')], is_const=True, visibility='protected')
cls.add_method('SerializeSystemInformationBlockType2', 'void', [param('ns3::LteRrcSap::SystemInformationBlockType2', 'systemInformationBlockType2')], is_const=True, visibility='protected')
cls.add_method('SerializeThresholdEutra', 'void', [param('ns3::LteRrcSap::ThresholdEutra', 'thresholdEutra')], is_const=True, visibility='protected')
return |
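The add_method/param calls above follow pybindgen's registration API. Below is a self-contained toy sketch of the same pattern, assuming pybindgen is installed; it does not reproduce the ns-3 LTE bindings, and the Toy class and header are hypothetical.

import sys
from pybindgen import FileCodeSink, Module, param, retval

def build_toy_module():
    # Same registration pattern as register_Ns3RrcAsn1Header_methods, on a toy class.
    mod = Module('toy')
    mod.add_include('"toy.h"')  # hypothetical C++ header declaring Toy
    cls = mod.add_class('Toy')
    cls.add_constructor([])
    cls.add_method('BandwidthToEnum', retval('int'),
                   [param('int', 'bandwidth')], is_const=True)
    return mod

if __name__ == '__main__':
    # Emits the generated CPython extension source to stdout.
    build_toy_module().generate(FileCodeSink(sys.stdout))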
def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger):
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (GradnormMeter, LossMeter, ValAccMeter) = (AverageMeter(), AverageMeter(), AverageMeter())
    (EntropyMeter, BaselineMeter, RewardMeter) = (AverageMeter(), AverageMeter(), AverageMeter())
    xend = time.time()
controller_num_aggregate = 20
controller_train_steps = 50
controller_bl_dec = 0.99
controller_entropy_weight = 0.0001
network.eval()
network.controller.train()
network.controller.zero_grad()
loader_iter = iter(xloader)
for step in range((controller_train_steps * controller_num_aggregate)):
        try:
            (inputs, targets) = next(loader_iter)
        except StopIteration:
            # restart the data loader once it is exhausted
            loader_iter = iter(xloader)
            (inputs, targets) = next(loader_iter)
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
data_time.update((time.time() - xend))
(log_prob, entropy, sampled_arch) = network.controller()
with torch.no_grad():
network.set_cal_mode('dynamic', sampled_arch)
(_, logits) = network(inputs)
(val_top1, val_top5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
val_top1 = (val_top1.view((- 1)) / 100)
reward = (val_top1 + (controller_entropy_weight * entropy))
if (prev_baseline is None):
baseline = val_top1
else:
baseline = (prev_baseline - ((1 - controller_bl_dec) * (prev_baseline - reward)))
loss = (((- 1) * log_prob) * (reward - baseline))
RewardMeter.update(reward.item())
BaselineMeter.update(baseline.item())
ValAccMeter.update((val_top1.item() * 100))
LossMeter.update(loss.item())
EntropyMeter.update(entropy.item())
loss = (loss / controller_num_aggregate)
loss.backward(retain_graph=True)
batch_time.update((time.time() - xend))
xend = time.time()
if (((step + 1) % controller_num_aggregate) == 0):
grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
GradnormMeter.update(grad_norm)
optimizer.step()
network.controller.zero_grad()
if ((step % print_freq) == 0):
Sstr = (('*Train-Controller* ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, (controller_train_steps * controller_num_aggregate)))
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Wstr) + ' ') + Estr))
return (LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg) |
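The baseline update in the loop above is an exponential moving average: b <- b - (1 - dec) * (b - r) is algebraically dec * b + (1 - dec) * r. A minimal sketch of that policy-gradient step, with illustrative tensors standing in for the controller's outputs (the original also backpropagates through the entropy bonus, which is omitted here):

import torch

controller_bl_dec = 0.99  # same decay constant as in train_controller

def reinforce_step(log_prob, reward, prev_baseline):
    # EMA baseline: b <- b - (1 - dec) * (b - r), i.e. dec * b + (1 - dec) * r
    if prev_baseline is None:
        baseline = reward.detach()
    else:
        baseline = prev_baseline - ((1 - controller_bl_dec) * (prev_baseline - reward.detach()))
    # REINFORCE loss: -log pi(arch) * (reward - baseline)
    loss = (-log_prob) * (reward.detach() - baseline)
    return loss, baseline

log_prob = torch.tensor(-1.2, requires_grad=True)  # illustrative controller log-probability
loss, baseline = reinforce_step(log_prob, torch.tensor(0.7), torch.tensor(0.5))
loss.backward()  # gradients reach only the policy via log_prob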