def run(flags):
flags.device = None
flags.fixed_seed = None
if torch.cuda.is_available():
print('Using CUDA.')
flags.device = torch.device('cuda')
else:
print('Not using CUDA.')
flags.device = torch.device('cpu')
keys = ['episode_return', 'episode_step', 'episode_win', 'interactions', 'visited_states']
envs = flags.to_env.split(',')
stats = dict()
tmp_env = make_gym_env(envs[0])
model = PolicyNet(tmp_env.observation_space.shape, tmp_env.action_space.n, envs[0])
tmp_env.close()
for env_id in envs:
flags.env = env_id
if ('MiniGrid' in env_id):
flags.no_reward = False
else:
flags.no_reward = True
print(' ', env_id)
for seed in range(1, (flags.n_seeds + 1)):
flags.seed = seed
flags.run_id = seed
flags.xpid = ''
flags.savedir = ''
if flags.checkpoint:
checkpoint = torch.load(flags.checkpoint)
model.load_state_dict(checkpoint['actor_model_state_dict'])
model.share_memory()
stats.update({(env_id, seed): test_model(model, keys, flags)})
for env_id in envs:
recap = {k: 0 for k in keys}
for seed in range(1, (flags.n_seeds + 1)):
for k in keys:
recap[k] += (np.mean(stats[(env_id, seed)][k]) / flags.n_seeds)
print(env_id, recap)
print()
@dataclass
class Tracker:
module: nn.Module
traced: List[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
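# trace only leaf modules: those with no submodules, plus Conv2d/BatchNorm2d treated as leaves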
has_not_submodules = ((len(list(m.modules())) == 1) or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d))
if has_not_submodules:
if (not isinstance(m, VanLayerScaling)):
self.traced.append(m)
def __call__(self, x: Tensor):
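# hook every submodule, run one forward pass to record the traced modules, then detach the hooks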
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
[x.remove() for x in self.handles]
return self
def parametrized(self):
return list(filter((lambda x: (len(list(x.state_dict().keys())) > 0)), self.traced))
def test_nonlinearity_init(pretrain_file):
model = build_model(pretrain_file, '--nonlinearity', 'relu')
run_forward_checks(model)
model = build_model(pretrain_file, '--nonlinearity', 'tanh')
run_forward_checks(model)
model = build_model(pretrain_file, '--nonlinearity', 'silu')
run_forward_checks(model)
@opparam_converter_regitstry('sSGL')
def sSGL_converter(context: 'BM1688Context', reg: sSGL_reg):
(n, c, h, w) = (reg[f'res0_{d}'] for d in 'nchw')
opd0 = dict(address=reg.opd0_addr, dtype=DType(reg.opt_res0_prec), layout=Layout.stride)
res0 = dict(address=reg.res0_addr, dtype=DType(reg.opt_res0_prec), shape=(n, c, h, w), layout=Layout.stride)
opd1 = dict(address=reg.opd0_addr, dtype=(reg.opt_opd1_prec, 0), layout=Layout.compact)
opd0['shape'] = (1, c, reg.opd0_h, w)
if (reg.short_short_opd0_str == 3):
opd0['layout'] = Layout.T3
else:
opd0['layout'] = Layout.T4
rets = [res0]
assert (reg.tsk_eu_typ in [17, 18])
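# only EU op types 17 and 18 are handled; opd1's height follows the result for 17 and opd0 for 18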
opd1_h = (reg.res0_h if (reg.tsk_eu_typ == 17) else reg.opd0_h)
opd1['shape'] = (n, c, opd1_h, 1)
res0['layout'] = Layout.T3
attr = dict(limit_enable=bool(reg.opt_opd3_const), fill_const=bool(reg.opt_opd2_const), const_value=reg.opd2_addr)
operands = [get_value(context, **x) for x in (opd0, opd1)]
results = [get_value(context, **x) for x in rets]
return (results, attr, operands)
def videohandler(extension, data):
if (extension not in 'mp4 ogv mjpeg avi mov h264 mpg webm wmv'.split()):
return None
try:
import torchvision.io
except ImportError as e:
raise ModuleNotFoundError('Package `torchvision` is required to be installed for default video file loader. Please use `pip install torchvision` or `conda install torchvision -c pytorch` to install the package') from e
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f'file.{extension}')
with open(fname, 'wb') as stream:
stream.write(data)
return torchvision.io.read_video(fname)
def writefile(body, fname):
with open(fname, 'w') as out:
for line in body:
out.write('{}\n'.format(line))
def test_interpolators_public_api():
assert (dir(pyhf.interpolators) == ['code0', 'code1', 'code2', 'code4', 'code4p'])
def transformer(inputs, seq_lengths, head_size, num_heads, attn_dropout, ff_dropout, prepost_dropout, relu_hidden_size, special_attention, special_values):
with tf.name_scope('transformer_layer'):
mask = attention_bias_ignore_padding(seq_lengths)
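# build an additive attention bias that masks out padded positions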
with tf.variable_scope('self_attention'):
x = nn_utils.layer_norm(inputs)
(y, attn_weights) = multihead_attention(x, mask, num_heads, head_size, attn_dropout, special_attention, special_values)
x = tf.add(x, tf.nn.dropout(y, prepost_dropout))
with tf.variable_scope('ffnn'):
x = nn_utils.layer_norm(x)
y = conv_hidden_relu(x, relu_hidden_size, (num_heads * head_size), ff_dropout)
x = tf.add(x, tf.nn.dropout(y, prepost_dropout))
return x
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, 'vision')
self.check_model_type((TF_MODEL_FOR_VISION_2_SEQ_MAPPING if (self.framework == 'tf') else MODEL_FOR_VISION_2_SEQ_MAPPING))
def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None):
forward_kwargs = {}
if (generate_kwargs is not None):
forward_kwargs['generate_kwargs'] = generate_kwargs
if (max_new_tokens is not None):
if ('generate_kwargs' not in forward_kwargs):
forward_kwargs['generate_kwargs'] = {}
if ('max_new_tokens' in forward_kwargs['generate_kwargs']):
raise ValueError("'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter, please use only one")
forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
return ({}, forward_kwargs, {})
def __call__(self, images: Union[(str, List[str], 'Image.Image', List['Image.Image'])], **kwargs):
return super().__call__(images, **kwargs)
def preprocess(self, image):
image = load_image(image)
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
return model_inputs
def _forward(self, model_inputs, generate_kwargs=None):
if (generate_kwargs is None):
generate_kwargs = {}
inputs = model_inputs.pop(self.model.main_input_name)
model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
return model_outputs
def postprocess(self, model_outputs):
records = []
for output_ids in model_outputs:
record = {'generated_text': self.tokenizer.decode(output_ids, skip_special_tokens=True)}
records.append(record)
return records
class BoxERFNet(nn.Sequential):
def __init__(self, n_classes=19, max_input_h=512, max_input_w=1024):
(h, w) = (max_input_h, max_input_w)
super().__init__(Downsampler(3, 16, 0.0), Downsampler(16, 64, 0.03), NonBottleneck1D(64, 0.03), BottleneckBoxConv(64, 4, (h // 4), (w // 4), 0.03), Downsampler(64, 128, 0.3), NonBottleneck1D(128, 0.3, 2), BottleneckBoxConv(128, 4, (h // 8), (w // 8), 0.3), NonBottleneck1D(128, 0.3, 4), BottleneckBoxConv(128, 4, (h // 8), (w // 8), 0.3), NonBottleneck1D(128, 0.3, 2), BottleneckBoxConv(128, 4, (h // 8), (w // 8), 0.3), NonBottleneck1D(128, 0.3, 4), BottleneckBoxConv(128, 4, (h // 8), (w // 8), 0.3), Upsampler(128, 64), NonBottleneck1D(64), Upsampler(64, 16), NonBottleneck1D(16), nn.ConvTranspose2d(16, (n_classes + 1), (3, 3), 2, 1, 1))
def process_file(in_tsv, out_json):
with open(in_tsv, 'r', encoding='utf8') as tsv:
lines = tsv.readlines()
res_list = []
for line in lines:
(one_text, one_label) = parse_one_instance(line)
if (one_text is None):
continue
one_dict = {'text': one_text, 'paraphrase': one_label}
res_list.append(one_dict)
with open(out_json, 'w') as output:
json.dump(res_list, output, indent=4)
def print_performance(jasonfile, model_name='model_1', figsize=(5, 5)):
records = json.load(open(jasonfile, 'r'))
print(('\n' + model_name))
print(' train_best_loss: {}'.format(records['train_best_loss']))
print(' valid_best_loss: {}'.format(records['valid_best_loss']))
print(' test_best_loss: {}'.format(records['test_best_loss']))
fig = plt.figure(figsize=figsize)
plt.plot(np.arange(len(records['test_epoch_loss'])), np.array(records['test_epoch_loss']), linestyle='-.', color='b', label='test epoch loss')
plt.plot(np.arange(len(records['train_epoch_loss']), dtype=float), np.array(records['train_epoch_loss']), color='r', linestyle='-', label='train epoch loss')
plt.legend(loc='upper right')
plt.ylabel('epoch wise loss (average CE loss)')
plt.xlabel('epoch number')
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:6'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.18', 'l_1': 'encoder.block.19', 'l_2': 'encoder.block.20'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[:2]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[:2]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[:2]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
@pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings)
def test_std_share_network_output_values(input_dim, output_dim, hidden_sizes):
module = GaussianMLPTwoHeadedModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
dist = module(torch.ones(input_dim))
exp_mean = torch.full((output_dim,), (input_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
exp_variance = (input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item()
assert dist.mean.equal(exp_mean)
assert dist.variance.equal(torch.full((output_dim,), exp_variance, dtype=torch.float))
assert (dist.rsample().shape == (output_dim,))
def to_google_drive_download_url(view_url: str) -> str:
splits = view_url.split('/')
assert (splits[(- 1)] == 'view')
file_id = splits[(- 2)]
return f'https://drive.google.com/uc?export=download&id={file_id}'
class BaseDataset(object):
def get_imagedata_info(self, data):
(pids, cams) = ([], [])
for (_, pid, camid) in data:
pids += [pid]
cams += [camid]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_imgs = len(data)
return (num_pids, num_imgs, num_cams)
def get_videodata_info(self, data, return_tracklet_stats=False):
(pids, cams, tracklet_stats) = ([], [], [])
for (img_paths, pid, camid) in data:
pids += [pid]
cams += [camid]
tracklet_stats += [len(img_paths)]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_tracklets = len(data)
if return_tracklet_stats:
return (num_pids, num_tracklets, num_cams, tracklet_stats)
return (num_pids, num_tracklets, num_cams)
def print_dataset_statistics(self):
raise NotImplementedError
class MOABBBrain(sb.Brain):
def init_model(self, model):
for mod in model.modules():
if hasattr(mod, 'weight'):
if (not ('Norm' in mod.__class__.__name__)):
init.xavier_uniform_(mod.weight, gain=1)
else:
init.constant_(mod.weight, 1)
if hasattr(mod, 'bias'):
if (mod.bias is not None):
init.constant_(mod.bias, 0)
def compute_forward(self, batch, stage):
inputs = batch[0].to(self.device)
if ((stage == sb.Stage.TRAIN) and hasattr(self.hparams, 'augment')):
(inputs, _) = self.hparams.augment(inputs.squeeze(3), lengths=torch.ones(inputs.shape[0], device=self.device))
inputs = inputs.unsqueeze(3)
if hasattr(self.hparams, 'normalize'):
inputs = self.hparams.normalize(inputs)
return self.modules.model(inputs)
def compute_objectives(self, predictions, batch, stage):
targets = batch[1].to(self.device)
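# predictions may stack several augmented copies of the batch; tile the targets to match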
N_augments = int((predictions.shape[0] / targets.shape[0]))
targets = torch.cat((N_augments * [targets]), dim=0)
loss = self.hparams.loss(predictions, targets, weight=torch.FloatTensor(self.hparams.class_weights).to(self.device))
if (stage != sb.Stage.TRAIN):
tmp_preds = torch.exp(predictions)
self.preds.extend(tmp_preds.detach().cpu().numpy())
self.targets.extend(batch[1].detach().cpu().numpy())
elif hasattr(self.hparams, 'lr_annealing'):
self.hparams.lr_annealing.on_batch_end(self.optimizer)
return loss
def on_fit_start(self):
self.init_model(self.hparams.model)
self.init_optimizers()
in_shape = (((1,) + tuple(np.floor(self.hparams.input_shape[1:(- 1)]).astype(int))) + (1,))
model_summary = summary(self.hparams.model, input_size=in_shape)
with open(os.path.join(self.hparams.exp_dir, 'model.txt'), 'w') as text_file:
text_file.write(str(model_summary))
def on_stage_start(self, stage, epoch=None):
if (stage != sb.Stage.TRAIN):
self.preds = []
self.targets = []
def on_stage_end(self, stage, stage_loss, epoch=None):
if (stage == sb.Stage.TRAIN):
self.train_loss = stage_loss
else:
preds = np.array(self.preds)
y_pred = np.argmax(preds, axis=(- 1))
y_true = self.targets
self.last_eval_stats = {'loss': stage_loss}
for metric_key in self.hparams.metrics.keys():
self.last_eval_stats[metric_key] = self.hparams.metrics[metric_key](y_true=y_true, y_pred=y_pred)
if (stage == sb.Stage.VALID):
if hasattr(self.hparams, 'lr_annealing'):
(old_lr, new_lr) = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(stats_meta={'epoch': epoch, 'lr': old_lr}, train_stats={'loss': self.train_loss}, valid_stats=self.last_eval_stats)
else:
self.hparams.train_logger.log_stats(stats_meta={'epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=self.last_eval_stats)
if (epoch == 1):
self.best_eval_stats = self.last_eval_stats
is_best = self.check_if_best(self.last_eval_stats, self.best_eval_stats, keys=[self.hparams.test_key])
is_last = (epoch > (self.hparams.number_of_epochs - self.hparams.avg_models))
if ((self.hparams.test_with == 'last') and is_last):
save_ckpt = True
elif ((self.hparams.test_with == 'best') and is_best):
save_ckpt = True
else:
save_ckpt = False
if save_ckpt:
(min_keys, max_keys) = ([], [])
if (self.hparams.test_key == 'loss'):
min_keys = [self.hparams.test_key]
else:
max_keys = [self.hparams.test_key]
meta = {}
for eval_key in self.last_eval_stats.keys():
if (eval_key != 'cm'):
meta[str(eval_key)] = float(self.last_eval_stats[eval_key])
self.checkpointer.save_and_keep_only(meta=meta, num_to_keep=self.hparams.avg_models, min_keys=min_keys, max_keys=max_keys)
elif (stage == sb.Stage.TEST):
self.hparams.train_logger.log_stats(stats_meta={'epoch loaded': self.hparams.epoch_counter.current}, test_stats=self.last_eval_stats)
if (self.hparams.avg_models > 1):
(min_keys, max_keys) = ([], [])
if (self.hparams.test_key == 'loss'):
min_keys = [self.hparams.test_key]
fake_meta = {self.hparams.test_key: 0.0, 'epoch': epoch}
else:
max_keys = [self.hparams.test_key]
fake_meta = {self.hparams.test_key: 1.1, 'epoch': epoch}
self.checkpointer.save_and_keep_only(meta=fake_meta, min_keys=min_keys, max_keys=max_keys, num_to_keep=1)
def on_evaluate_start(self, max_key=None, min_key=None):
super().on_evaluate_start()
ckpts = self.checkpointer.find_checkpoints(max_key=max_key, min_key=min_key)
ckpt = sb.utils.checkpoints.average_checkpoints(ckpts, recoverable_name='model', device=self.device)
self.hparams.model.load_state_dict(ckpt, strict=True)
self.hparams.model.eval()
def check_if_best(self, last_eval_stats, best_eval_stats, keys):
is_best = False
for key in keys:
if (key == 'loss'):
if (last_eval_stats[key] < best_eval_stats[key]):
is_best = True
best_eval_stats[key] = last_eval_stats[key]
break
elif (last_eval_stats[key] > best_eval_stats[key]):
is_best = True
best_eval_stats[key] = last_eval_stats[key]
break
return is_best
class WindTemplate(object):
def __init__(self):
pass
def update(self, t, position):
return np.array([0, 0, 0])
class DAU(nn.Module):
def __init__(self, n_feat, kernel_size=3, reduction=8, bias=False, bn=False, act=nn.PReLU(), res_scale=1):
super(DAU, self).__init__()
modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
self.body = nn.Sequential(*modules_body)
self.SA = spatial_attn_layer()
self.CA = ca_layer(n_feat, reduction, bias=bias)
self.conv1x1 = nn.Conv2d((n_feat * 2), n_feat, kernel_size=1, bias=bias)
def forward(self, x):
res = self.body(x)
sa_branch = self.SA(res)
ca_branch = self.CA(res)
res = torch.cat([sa_branch, ca_branch], dim=1)
res = self.conv1x1(res)
res += x
return res
class PointNetDenseCls(nn.Module):
def __init__(self, k=2):
super(PointNetDenseCls, self).__init__()
self.k = k
self.feat = PointNetfeat(global_feat=False)
self.conv1 = torch.nn.Conv1d(1088, 512, 1)
self.conv2 = torch.nn.Conv1d(512, 256, 1)
self.conv3 = torch.nn.Conv1d(256, 128, 1)
self.conv4 = torch.nn.Conv1d(128, self.k, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
def forward(self, x):
(x, trans) = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
x = x.transpose(2, 1).contiguous()
x = F.log_softmax(x, dim=(- 1))
return (x, trans)
class mTEDx(Dataset):
SPLITS = ['train', 'valid', 'test']
LANGPAIRS = ['es-es', 'fr-fr', 'pt-pt', 'it-it', 'ru-ru', 'el-el', 'ar-ar', 'de-de', 'es-en', 'es-fr', 'es-pt', 'es-it', 'fr-en', 'fr-es', 'fr-pt', 'pt-en', 'pt-es', 'it-en', 'it-es', 'ru-en', 'el-en']
def __init__(self, root: str, lang: str, split: str) -> None:
assert ((split in self.SPLITS) and (lang in self.LANGPAIRS))
_root = (((Path(root) / f'{lang}') / 'data') / split)
(wav_root, txt_root) = ((_root / 'wav'), (_root / 'txt'))
assert (_root.is_dir() and wav_root.is_dir() and txt_root.is_dir())
try:
import yaml
except ImportError:
print('Please install PyYAML to load the Multilingual TEDx YAML files')
with open((txt_root / f'{split}.yaml')) as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
(src, tgt) = lang.split('-')
for _lang in [src, tgt]:
with open((txt_root / f'{split}.{_lang}')) as f:
utterances = [r.strip() for r in f]
assert (len(segments) == len(utterances))
for (i, u) in enumerate(utterances):
segments[i][_lang] = u
self.data = []
for (wav_filename, _seg_group) in groupby(segments, (lambda x: x['wav'])):
wav_filename = wav_filename.replace('.wav', '.flac')
wav_path = (wav_root / wav_filename)
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=(lambda x: float(x['offset'])))
for (i, segment) in enumerate(seg_group):
offset = int((float(segment['offset']) * sample_rate))
n_frames = int((float(segment['duration']) * sample_rate))
_id = f'{wav_path.stem}_{i}'
self.data.append((wav_path.as_posix(), offset, n_frames, sample_rate, segment[src], segment[tgt], segment['speaker_id'], tgt, _id))
def __getitem__(self, n: int) -> Tuple[(torch.Tensor, int, str, str, str, str, str)]:
(wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id) = self.data[n]
(waveform, _) = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return (waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id)
def __len__(self) -> int:
return len(self.data)
class Conv2d(torch.nn.Conv2d):
def __init__(self, *args, **kwargs):
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if ((x.numel() == 0) and self.training):
assert (not isinstance(self.norm, torch.nn.SyncBatchNorm)), 'SyncBatchNorm does not support empty inputs!'
if ((x.numel() == 0) and (TORCH_VERSION <= (1, 4))):
assert (not isinstance(self.norm, torch.nn.GroupNorm)), 'GroupNorm does not support empty inputs in PyTorch <=1.4!'
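# compute the convolution's output spatial size analytically: floor((i + 2p - (d*(k-1) + 1)) / s) + 1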
output_shape = [((((i + (2 * p)) - ((di * (k - 1)) + 1)) // s) + 1) for (i, p, di, k, s) in zip(x.shape[(- 2):], self.padding, self.dilation, self.kernel_size, self.stride)]
output_shape = ([x.shape[0], self.weight.shape[0]] + output_shape)
empty = _NewEmptyTensorOp.apply(x, output_shape)
if self.training:
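# multiply a sum of parameters by zero so autograd still records a (zero) gradient for empty batches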
_dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
return (empty + _dummy)
else:
return empty
x = super().forward(x)
if (self.norm is not None):
x = self.norm(x)
if (self.activation is not None):
x = self.activation(x)
return x
class SymbolicLogic():
def statement(self, s):
global vars, vars_order
(toks, vars, vars_order) = (['OPAREN'], {}, [])
tokenize(s, toks)
statement = [toks, vars, vars_order]
try:
eval(toks)
except (KeyError, RuntimeError):
print('Malformed Statement')
return []
return statement
def truthtable(self, statement, start=0, end=(- 1)):
global vars, vars_order
(toks, vars, vars_order) = statement
if (end == (- 1)):
end = (2 ** len(vars))
table = [statement]
keys = vars_order
for i in range(start, end):
j = 0
row = []
for key in reversed(keys):
bit = get_bit(i, j)
vars[key] = bit
j += 1
row.insert(0, bit)
row.append(eval(toks))
table.append(row)
return table
def print_table(self, table):
statement = table[0]
table = table[1:]
vars_order = statement[2].copy()
vars_len = []
line = s = ''
vars_order.append('value')
for var in vars_order:
vars_len.append(len(var))
s = (var + ' ')
while (len(s) < len('False ')):
s += ' '
s += '| '
line += s
print(line)
print((len(line) * '-'))
for row in table:
line = s = ''
i = 0
for e in row:
if (e == 'True'):
j = 2
else:
j = 1
s = (e + (' ' * j))
if (i < len(vars_len)):
while (len(s) <= vars_len[i]):
s += ' '
s += '| '
line += s
i += 1
print(line)
print('')
def combine(self, statement1, statement2):
toks = ((((['OPAREN'] + statement1[0]) + ['OR']) + statement2[0]) + ['CPAREN'])
variables = dict(statement1[1])
variables.update(statement2[1])
var_order = (statement1[2] + statement2[2])
return [toks, variables, var_order]
def simplify(self, table):
raise NotImplementedError
def prove(self, statement):
raise NotImplementedError
class Proposer():
def __init__(self, model_name, template_path):
self.proposer_name = model_name
self.prompt_template = open(template_path).read().strip()
def preprocess_texts(self, x2score):
return [self.normalize(x) for x in sort_by_score(x2score)]
def create_prompt(self, A_block, B_block):
prompt = self.prompt_template.format(A_block=A_block, B_block=B_block)
return prompt
def propose_hypothesis(self, pos2score, neg2score, hyp_count, num_incontext_samples, temperature):
raise NotImplementedError
def normalize(self, x):
raise NotImplementedError
@pytest.mark.cpublas
@pytest.mark.pure
def test_bert(sdfg_name, gpu):
batch_size = 2
seq_len = 512
hidden_size = 768
class BertTokenSoftmaxClf(nn.Module):
def __init__(self):
super(BertTokenSoftmaxClf, self).__init__()
self.bert = BertLayer(BertConfig(hidden_act='relu')).eval()
self.sm = nn.LogSoftmax(dim=(- 1))
def forward(self, x):
embs = self.bert(x)[0]
return self.sm(embs.sum(dim=(- 1)))
input = torch.randn([batch_size, seq_len, hidden_size])
labels = torch.tensor([0, 123], dtype=torch.long)
training_step(BertTokenSoftmaxClf(), BertTokenSoftmaxClf(), (input, labels), sdfg_name, gpu)
class SparseGTMetrics(object):
def __init__(self):
self._rank_list = []
def observe(self, predicted_scores: torch.Tensor, target_ranks: torch.Tensor):
predicted_scores = predicted_scores.detach()
predicted_ranks = scores_to_ranks(predicted_scores)
(batch_size, num_rounds, num_options) = predicted_ranks.size()
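# fold the round dimension into the batch and look up the rank assigned to each ground-truth option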
predicted_ranks = predicted_ranks.view((batch_size * num_rounds), num_options)
target_ranks = target_ranks.view((batch_size * num_rounds)).long()
predicted_gt_ranks = predicted_ranks[(torch.arange((batch_size * num_rounds)), target_ranks)]
self._rank_list.extend(list(predicted_gt_ranks.cpu().numpy()))
def retrieve(self, reset: bool=True):
num_examples = len(self._rank_list)
if (num_examples > 0):
__rank_list = torch.tensor(self._rank_list).float()
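# r@k: fraction of ground-truth answers ranked within the top k; plus mean rank and mean reciprocal rank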
metrics = {'r@1': torch.mean((__rank_list <= 1).float()).item(), 'r@5': torch.mean((__rank_list <= 5).float()).item(), 'r@10': torch.mean((__rank_list <= 10).float()).item(), 'mean': torch.mean(__rank_list).item(), 'mrr': torch.mean(__rank_list.reciprocal()).item()}
else:
metrics = {}
if reset:
self.reset()
return metrics
def reset(self):
self._rank_list = []
def fully_conneted(x, units, use_bias=True, sn=False, name='fully_0', is_training=None):
x = tf.compat.v1.layers.flatten(x)
shape = x.get_shape().as_list()
channels = shape[(- 1)]
if sn:
w = tf.compat.v1.get_variable(f'{name}_kernel', [channels, units], tf.float32, initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform'))
if use_bias:
bias = tf.compat.v1.get_variable(f'{name}_bias', [units], initializer=tf.compat.v1.constant_initializer(0.0))
x = (tf.matmul(x, spectral_norm(w, name=name, is_training=is_training)) + bias)
else:
x = tf.matmul(x, spectral_norm(w, name=name, is_training=is_training))
else:
x = tf.compat.v1.layers.dense(x, units=units, kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform'), use_bias=use_bias, name=name)
return x
def test_from_jax_tolist():
jax_array_1d = jax.numpy.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
ak_jax_array_1d = ak.from_jax(jax_array_1d)
assert (ak.to_list(ak_jax_array_1d.layout) == [9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
def Repeat(t, max=4294967295, ctx=None):
t = _to_tactic(t, ctx)
return Tactic(Z3_tactic_repeat(t.ctx.ref(), t.tactic, max), t.ctx)
def test_ufunc_add_where1():
A = np.random.randint(1, 10, size=(1,), dtype=np.int32)
B = np.random.randint(1, 10, size=(1,), dtype=np.int32)
W = np.random.randint(2, size=(1,), dtype=np.bool_)
C = ufunc_add_where1(A, B, W)
if W[0]:
assert np.array_equal((A + B), C)
else:
assert (not np.array_equal((A + B), C))
class WebServer(Server):
__port: int
__index: str
def __init__(self):
super().__init__()
self.__port = 80
self.__index = '<h1>{nodeName} at {asn}</h1>'
def setPort(self, port: int) -> 'WebServer':
self.__port = port
return self
def setIndexContent(self, content: str) -> 'WebServer':
self.__index = content
return self
def install(self, node: Node):
node.addSoftware('nginx-light')
node.setFile('/var/www/html/index.html', self.__index.format(asn=node.getAsn(), nodeName=node.getName()))
node.setFile('/etc/nginx/sites-available/default', WebServerFileTemplates['nginx_site'].format(port=self.__port))
node.appendStartCommand('service nginx start')
node.appendClassName('WebService')
def print(self, indent: int) -> str:
out = (' ' * indent)
out += 'Web server object.\n'
return out
def load_library(libname):
import sys
if sys.platform.startswith('win'):
from ctypes.util import find_library
lib_fname = find_library(libname)
if (lib_fname is None):
lib_fname = find_library(('lib' + libname))
else:
lib_fname = (('lib' + libname) + '.so')
lib = ctypes.cdll.LoadLibrary(lib_fname)
return lib
class Sandpile(DiGraph):
@staticmethod
def version():
print('Sage Sandpiles Version 2.4')
@staticmethod
def help(verbose=True):
_sandpile_help(Sandpile, dedent(' For detailed help with any method FOO listed below,\n enter "Sandpile.FOO?" or enter "S.FOO?" for any Sandpile S.'), verbose=verbose)
def __init__(self, g, sink=None):
if isinstance(g, (Graph, DiGraph)):
name = g.name()
if (name == ''):
name = 'sandpile graph'
else:
p = name.lower().find('graph')
if (p == (- 1)):
name = (name + ' sandpile graph')
else:
name = ((name[:p] + 'sandpile graph') + name[(p + 5):])
self._name = name
else:
self._name = 'sandpile graph'
if (isinstance(g, dict) and isinstance(next(iter(g.values())), dict)):
pass
elif (isinstance(g, dict) and isinstance(next(iter(g.values())), list)):
processed_g = {i: dict(Counter(g[i])) for i in g}
g = processed_g
elif isinstance(g, (Graph, DiGraph)):
if (not g.weighted()):
h = g.to_dictionary(multiple_edges=True)
g = {i: dict(Counter(h[i])) for i in h}
else:
vi = {v: i for (i, v) in enumerate(g.vertices(sort=True))}
ad = g.weighted_adjacency_matrix()
g = {v: {w: ad[(vi[v], vi[w])] for w in g.neighbor_iterator(v)} for v in g}
else:
raise SyntaxError(g)
DiGraph.__init__(self, g, weighted=True)
self._dict = deepcopy(g)
vertices = self.vertices(sort=True)
if (sink is None):
self._sink = vertices[0]
self._sink_ind = 0
else:
self._sink = sink
self._sink_ind = vertices.index(sink)
self._nonsink_vertices = vertices
del self._nonsink_vertices[self._sink_ind]
self._laplacian = self.laplacian_matrix(indegree=False)
temp = list(range(self.num_verts()))
del temp[self._sink_ind]
self._reduced_laplacian = self._laplacian[(temp, temp)]
def __copy__(self):
return self.__class__(self, self._sink)
def __getattr__(self, name):
if (name not in self.__dict__):
if (name == '_max_stable'):
self._set_max_stable()
return deepcopy(self.__dict__[name])
if (name == '_max_stable_div'):
self._set_max_stable_div()
return deepcopy(self.__dict__[name])
elif (name == '_out_degrees'):
self._set_out_degrees()
return deepcopy(self.__dict__[name])
elif (name == '_in_degrees'):
self._set_in_degrees()
return deepcopy(self.__dict__[name])
elif ((name == '_burning_config') or (name == '_burning_script')):
self._set_burning_config()
return deepcopy(self.__dict__[name])
elif (name == '_identity'):
self._set_identity()
return deepcopy(self.__dict__[name])
elif (name == '_recurrents'):
self._set_recurrents()
return deepcopy(self.__dict__[name])
elif (name == '_min_recurrents'):
self._set_min_recurrents()
return deepcopy(self.__dict__[name])
elif (name == '_superstables'):
self._set_superstables()
return deepcopy(self.__dict__[name])
elif (name == '_group_gens'):
self._set_group_gens()
return deepcopy(self.__dict__[name])
elif (name == '_group_order'):
self.__dict__[name] = det(self._reduced_laplacian.dense_matrix())
return self.__dict__[name]
elif (name == '_invariant_factors'):
self._set_invariant_factors()
return deepcopy(self.__dict__[name])
elif (name == '_smith_form'):
self._set_smith_form()
return deepcopy(self.__dict__[name])
elif (name == '_jacobian_representatives'):
self._set_jacobian_representatives()
return deepcopy(self.__dict__[name])
elif (name == '_avalanche_polynomial'):
self._set_avalanche_polynomial()
return deepcopy(self.__dict__[name])
elif (name == '_stationary_density'):
self._set_stationary_density()
return self.__dict__[name]
elif (name == '_betti_complexes'):
self._set_betti_complexes()
return deepcopy(self.__dict__[name])
elif (name in ['_postulation', '_h_vector', '_hilbert_function']):
self._set_hilbert_function()
return deepcopy(self.__dict__[name])
elif ((name == '_ring') or (name == '_unsaturated_ideal')):
self._set_ring()
return self.__dict__[name]
elif (name == '_ideal'):
self._set_ideal()
return self.__dict__[name]
elif (name in ['_resolution', '_betti', '_singular_resolution']):
self._set_resolution()
return self.__dict__[name]
elif (name == '_groebner'):
self._set_groebner()
return self.__dict__[name]
elif (name == '_points'):
self._set_points()
return self.__dict__[name]
else:
raise AttributeError(name)
def __str__(self):
return self.name()
def _repr_(self):
return ((((self._name + ': ') + str(self.num_verts())) + ' vertices, sink = ') + str(self.sink()))
def show(self, **kwds):
if self.is_undirected():
Graph(self).show(**kwds)
else:
DiGraph(self).show(**kwds)
def show3d(self, **kwds):
if self.is_undirected():
Graph(self).show3d(**kwds)
else:
DiGraph(self).show3d(**kwds)
def dict(self):
return deepcopy(self._dict)
def sink(self):
return self._sink
def laplacian(self):
return deepcopy(self._laplacian)
def reduced_laplacian(self):
return deepcopy(self._reduced_laplacian)
def group_order(self):
return self._group_order
def _set_max_stable(self):
m = {v: (self.out_degree(v) - 1) for v in self._nonsink_vertices}
self._max_stable = SandpileConfig(self, m)
def max_stable(self):
return deepcopy(self._max_stable)
def _set_max_stable_div(self):
m = {v: (self.out_degree(v) - 1) for v in self.vertices(sort=False)}
self._max_stable_div = SandpileDivisor(self, m)
def max_stable_div(self):
return deepcopy(self._max_stable_div)
def _set_out_degrees(self):
self._out_degrees = {v: 0 for v in self.vertices(sort=False)}
for v in self.vertices(sort=False):
for e in self.edges_incident(v):
self._out_degrees[v] += e[2]
def out_degree(self, v=None):
if (v is not None):
return self._out_degrees[v]
return self._out_degrees
def _set_in_degrees(self):
self._in_degrees = {v: 0 for v in self}
for e in self.edge_iterator():
self._in_degrees[e[1]] += e[2]
def in_degree(self, v=None):
if (v is not None):
return self._in_degrees[v]
return self._in_degrees
def _set_burning_config(self):
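# Dhar's burning algorithm: keep adding rows of the reduced Laplacian until the burning configuration is nonnegative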
d = self._reduced_laplacian.nrows()
burn = sum(self._reduced_laplacian)
script = ([1] * d)
done = False
while (not done):
bad = (- 1)
for i in range(d):
if (burn[i] < 0):
bad = i
break
if (bad == (- 1)):
done = True
else:
burn += self._reduced_laplacian[bad]
script[bad] += 1
b = iter(burn)
s = iter(script)
bc = {}
bs = {}
for v in self._nonsink_vertices:
bc[v] = next(b)
bs[v] = next(s)
self._burning_config = SandpileConfig(self, bc)
self._burning_script = SandpileConfig(self, bs)
def burning_config(self):
return deepcopy(self._burning_config)
def burning_script(self):
return deepcopy(self._burning_script)
def nonsink_vertices(self):
return self._nonsink_vertices
def all_k_config(self, k):
return SandpileConfig(self, ([k] * (self.num_verts() - 1)))
def zero_config(self):
return self.all_k_config(0)
def _set_identity(self):
m = self._max_stable
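# '&' adds two configurations and stabilizes the result; the group identity is built from the maximal stable configuration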
self._identity = ((m & m).dualize() & m)
def identity(self, verbose=True):
if verbose:
return deepcopy(self._identity)
else:
return self._identity.values()
def _set_recurrents(self):
if (self.name() == 'Complete sandpile graph'):
n = self.num_verts()
self._recurrents = [SandpileConfig(self, [((n - 1) - i) for i in p]) for p in ParkingFunctions((n - 1))]
elif (self.name() == 'Cycle sandpile graph'):
n = self.num_verts()
one = ([1] * (n - 2))
self._recurrents = ([SandpileConfig(self, ([1] * (n - 1)))] + [SandpileConfig(self, ((one[:i] + [0]) + one[i:])) for i in range((n - 1))])
else:
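# generic graph: depth-first search from the maximal stable configuration, adding a grain and stabilizing (~) at each step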
self._recurrents = []
active = [self._max_stable]
while active:
c = active.pop()
self._recurrents.append(c)
for v in self._nonsink_vertices:
cnext = deepcopy(c)
cnext[v] += 1
cnext = (~ cnext)
if ((cnext not in active) and (cnext not in self._recurrents)):
active.append(cnext)
self._recurrents = self._recurrents
def recurrents(self, verbose=True):
if verbose:
return deepcopy(self._recurrents)
else:
return [r.values() for r in self._recurrents]
def _set_superstables(self):
self._superstables = [c.dualize() for c in self._recurrents]
def superstables(self, verbose=True):
if verbose:
return deepcopy(self._superstables)
else:
return [s.values() for s in self._superstables]
def _set_group_gens(self):
(D, U, _) = self.reduced_laplacian().transpose().smith_form()
F = U.inverse()
self._group_gens = [SandpileConfig(self, [Integer(j) for j in F.column(i)]).equivalent_recurrent() for i in range(F.nrows()) if (D[i][i] != 1)]
def group_gens(self, verbose=True):
if verbose:
return deepcopy(self._group_gens)
else:
return [c.values() for c in self._group_gens]
def genus(self):
if self.is_undirected():
return (((self.laplacian().trace() / 2) - self.num_verts()) + 1)
else:
raise UserWarning('The underlying graph must be undirected.')
def is_undirected(self):
return self.laplacian().is_symmetric()
def _set_min_recurrents(self):
if self.is_undirected():
m = min([r.deg() for r in self.recurrents()])
rec = [r for r in self.recurrents() if (r.deg() == m)]
else:
rec = list(self.recurrents())
for r in self.recurrents():
if exists(rec, (lambda x: (r > x)))[0]:
rec.remove(r)
self._min_recurrents = rec
def min_recurrents(self, verbose=True):
if verbose:
return deepcopy(self._min_recurrents)
else:
return [r.values() for r in self._min_recurrents]
def max_superstables(self, verbose=True):
result = [r.dualize() for r in self.min_recurrents()]
if verbose:
return result
else:
return [r.values() for r in result]
def tutte_polynomial(self):
if self.is_undirected():
return Graph(self).tutte_polynomial()
else:
raise UserWarning('The underlying graph must be undirected.')
def _set_avalanche_polynomial(self):
n = (self.num_verts() - 1)
R = PolynomialRing(QQ, 'x', n)
A = R(0)
V = []
for i in range(n):
c = self.zero_config()
c[self.nonsink_vertices()[i]] += 1
V.append(c)
for r in self.recurrents():
for i in range(n):
e = tuple((r + V[i]).stabilize(True)[1].values())
A += R({e: 1})
self._avalanche_polynomial = A
def avalanche_polynomial(self, multivariable=True):
if multivariable:
return deepcopy(self._avalanche_polynomial)
X = self._avalanche_polynomial.parent().gens()
return self._avalanche_polynomial.subs({X[i]: X[0] for i in range(1, (self.num_verts() - 1))})
def nonspecial_divisors(self, verbose=True):
if self.is_undirected():
result = []
for s in self.max_superstables():
D = dict(s)
D[self._sink] = (- 1)
D = SandpileDivisor(self, D)
result.append(D)
if verbose:
return result
else:
return [r.values() for r in result]
else:
raise UserWarning('The underlying graph must be undirected.')
def canonical_divisor(self):
if self.is_undirected():
return SandpileDivisor(self, [(self.laplacian()[i][i] - 2) for i in range(self.num_verts())])
raise UserWarning('Only for undirected graphs.')
def _set_invariant_factors(self):
A = self.reduced_laplacian().dense_matrix()
self._invariant_factors = A.elementary_divisors()
def invariant_factors(self):
return deepcopy(self._invariant_factors)
def _set_hilbert_function(self):
v = [i.deg() for i in self._superstables]
self._postulation = max(v)
self._h_vector = [v.count(i) for i in range((self._postulation + 1))]
self._hilbert_function = [1]
for i in range(self._postulation):
self._hilbert_function.append((self._hilbert_function[i] + self._h_vector[(i + 1)]))
def h_vector(self):
return deepcopy(self._h_vector)
def hilbert_function(self):
return deepcopy(self._hilbert_function)
def postulation(self):
return self._postulation
def _set_smith_form(self):
self._smith_form = self.laplacian().transpose().smith_form()
def smith_form(self):
return deepcopy(self._smith_form)
def reorder_vertices(self):
distance_to_sink = self.reverse().shortest_path_lengths(self._sink)
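# relabel vertices by decreasing distance to the sink, so the sink receives the largest index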
verts = sorted(self, key=(lambda v: distance_to_sink[v]), reverse=True)
perm = {v: i for (i, v) in enumerate(verts)}
old = self.dict()
new = {}
for i in old:
entry = {}
for j in old[i]:
entry[perm[j]] = old[i][j]
new[perm[i]] = entry
return Sandpile(new, (len(verts) - 1))
def _set_jacobian_representatives(self):
if self.is_undirected():
easy = True
else:
ker = self.laplacian().left_kernel().basis()
tau = abs(ker[self._sink_ind])
easy = bool((tau == 1))
if easy:
result = []
for r in self.superstables():
D = {v: r[v] for v in self._nonsink_vertices}
D[self._sink] = (- r.deg())
result.append(SandpileDivisor(self, D))
self._jacobian_representatives = result
else:
result = []
sr = self.superstables()
order = (self.group_order() / tau)
while (len(result) < order):
r = sr.pop()
active = {v: r[v] for v in self._nonsink_vertices}
active[self._sink] = (- r.deg())
active = SandpileDivisor(self, active)
repeated = False
for D in result:
if active.is_linearly_equivalent(D):
repeated = True
break
if (not repeated):
result.append(active)
self._jacobian_representatives = result
def jacobian_representatives(self, verbose=True):
if verbose:
return deepcopy(self._jacobian_representatives)
else:
return [D.values() for D in self._jacobian_representatives]
def picard_representatives(self, d, verbose=True):
D = self.zero_div()
D[self._sink] = d
if verbose:
return [(E + D) for E in self._jacobian_representatives]
else:
return [(E + D).values() for E in self._jacobian_representatives]
def stable_configs(self, smax=None):
if (smax is None):
smax = self.max_stable().values()
else:
c = SandpileConfig(self, smax)
if (not (c <= self.max_stable())):
smax = [min(c[v], self.max_stable()[v]) for v in self.nonsink_vertices()]
else:
smax = c.values()
for c in IntegerVectorsIterator(smax):
(yield SandpileConfig(self, c))
def markov_chain(self, state, distrib=None):
st = deepcopy(state)
V = self.vertices(sort=True)
n = len(V)
if isinstance(st, list):
if (len(st) == (self.num_verts() - 1)):
st = SandpileConfig(self, st)
elif (len(st) == self.num_verts()):
st = SandpileDivisor(self, st)
else:
raise SyntaxError(state)
if (distrib is None):
distrib = ([(QQ.one() / n)] * n)
X = GeneralDiscreteDistribution(distrib)
if isinstance(st, SandpileConfig):
while True:
i = X.get_random_element()
if (V[i] != self.sink()):
st[V[i]] += 1
st = st.stabilize()
(yield st)
elif isinstance(st, SandpileDivisor):
alive = st.is_alive()
while True:
i = X.get_random_element()
st[V[i]] += 1
if alive:
(yield st)
else:
if st.is_alive():
alive = True
else:
st = st.stabilize()
(yield st)
else:
raise SyntaxError(state)
def _set_stationary_density(self):
if (self.name() == 'Complete sandpile graph'):
n = Integer(self.num_verts())
self._stationary_density = ((((n + (QQ.one() / n)) + sum(((falling_factorial(n, i) / (n ** i)) for i in range(1, (n + 1))))) - 3) / 2)
elif (self.is_undirected() and ('_h_vector' not in self.__dict__)):
t = Graph(self).tutte_polynomial().subs(x=1)
myR = PolynomialRing(QQ, 'y')
y = myR.gens()[0]
t = myR(t)
dt = derivative(t, y).subs(y=1)
t = t.subs(y=1)
self._stationary_density = (((self.num_edges() / 2) + (dt / t)) / self.num_verts())
else:
sink_deg = self.out_degree(self.sink())
h = vector(ZZ, self.h_vector())
m = self.max_stable().deg()
d = vector(ZZ, range(m, (m - len(h)), (- 1)))
self._stationary_density = ((((h * d) / self.group_order()) + sink_deg) / self.num_verts())
def stationary_density(self):
return self._stationary_density
def all_k_div(self, k):
return SandpileDivisor(self, ([k] * self.num_verts()))
def zero_div(self):
return self.all_k_div(0)
def _set_betti_complexes(self):
results = []
verts = self.vertices(sort=True)
r = self.recurrents()
for D in r:
d = D.deg()
D = dict(D)
D[self.sink()] = (- d)
D = SandpileDivisor(self, D)
test = True
while test:
D[self.sink()] += 1
complex = D.Dcomplex()
if (sum(complex.betti().values()) > 1):
results.append([deepcopy(D), complex])
if ((len(complex.maximal_faces()) == 1) and (list(complex.maximal_faces()[0]) == verts)):
test = False
self._betti_complexes = results
def betti_complexes(self):
return deepcopy(self._betti_complexes)
def _set_ring(self):
distance_to_sink = self.reverse().shortest_path_lengths(self._sink)
verts = sorted(self, key=(lambda v: distance_to_sink[v]))
vertex_to_int = {v: i for (i, v) in enumerate(self.vertices(sort=True))}
names = [vertex_to_int[v] for v in reversed(verts)]
vars = ''
for i in names:
vars += (('x' + str(i)) + ',')
vars = vars[:(- 1)]
self._ring = PolynomialRing(QQ, vars)
gens = []
for i in self.nonsink_vertices():
new_gen = ('x' + str(vertex_to_int[i]))
new_gen += ('^' + str(self.out_degree(i)))
new_gen += '-'
for j in self._dict[i]:
new_gen += ('x' + str(vertex_to_int[j]))
new_gen += (('^' + str(self._dict[i][j])) + '*')
new_gen = new_gen[:(- 1)]
gens.append(new_gen)
self._unsaturated_ideal = self._ring.ideal(gens)
def _set_ideal(self):
from sage.libs.singular.function_factory import ff
try:
sat = ff.elim__lib.sat_with_exp
except NameError:
sat = ff.elim__lib.sat
R = self.ring()
I = self._unsaturated_ideal
I_sat_gens = sat(I, prod(R.gens()))[0]
self._ideal = R.ideal(I_sat_gens)
def unsaturated_ideal(self):
return self._unsaturated_ideal
def ideal(self, gens=False):
if gens:
return self._ideal.gens()
else:
return self._ideal
def ring(self):
return self._ring
def _set_resolution(self):
res = self.ideal()._singular_().mres(0)
self._betti = ([1] + [len(x) for x in res])
result = []
zero = (self._ring.gens()[0] * 0)
for i in range(1, (len(res) + 1)):
syz_mat = []
new = [res[i][j] for j in range(1, (int(res[i].size()) + 1))]
for j in range(self._betti[i]):
row = new[j].transpose().sage_matrix(self._ring)
row = [r for r in row[0]]
if (len(row) < self._betti[(i - 1)]):
row += ([zero] * (self._betti[(i - 1)] - len(row)))
syz_mat.append(row)
syz_mat = matrix(self._ring, syz_mat).transpose()
result.append(syz_mat)
self._resolution = result
self._singular_resolution = res
def resolution(self, verbose=False):
if verbose:
return self._resolution
else:
r = [('R^' + str(i)) for i in self._betti]
return ' <-- '.join(r)
def _set_groebner(self):
self._groebner = self._ideal.groebner_basis()
def groebner(self):
return self._groebner
def betti(self, verbose=True):
if verbose:
print(singular.eval(('print(betti(%s), "betti")' % self._singular_resolution.name())))
else:
return self._betti
def solve(self):
singular.setring(self._ring._singular_())
v = [singular.var(i) for i in range(1, int(singular.nvars(self._ring)))]
vars_ = '({})'.format(','.join((str(i) for i in v)))
L = singular.subst(self._ideal, singular.var(singular.nvars(self._ring)), 1)
_ = singular.ring(0, vars_, 'lp')
K = singular.fetch(self._ring, L)
K = singular.groebner(K)
singular.LIB('solve.lib')
M = K.solve(5, 1)
singular.setring(M)
sol = singular('SOL').sage_structured_str_list()
sol = sol[0][0]
sol = [[SR(j) for j in k] for k in sol]
return sol
def _set_points(self):
L = self._reduced_laplacian.transpose().dense_matrix()
n = (self.num_verts() - 1)
(D, U, V) = L.smith_form()
self._points = []
one = ([1] * n)
twopii = ((2 * pi) * I)
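# the points are exp(2*pi*i * U[k,t] / D[k,k]): roots of unity read off the Smith normal form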
for k in range(n):
x = [exp(((twopii * U[(k, t)]) / D[(k, k)])) for t in range(n)]
if ((x not in self._points) and (x != one)):
self._points.append(x)
def points(self):
return self._points
def symmetric_recurrents(self, orbits):
sym_recurrents = []
active = [self._max_stable]
while active:
c = active.pop()
sym_recurrents.append(c)
for orb in orbits:
cnext = deepcopy(c)
for v in orb:
cnext[v] += 1
cnext = cnext.stabilize()
if ((cnext not in active) and (cnext not in sym_recurrents)):
active.append(cnext)
return deepcopy(sym_recurrents)
def check_list_path_option(options):
if (options.path and (options.user or options.local)):
raise CommandError("Cannot combine '--path' with '--user' or '--local'")
def sorted_stage_to_device_map(n_partitions, stages_on_same_gpu):
pipeline_representation_stage_to_device_map = list()
seen_devices = set()
for stage_id in range(n_partitions):
if (stage_id in stages_on_same_gpu):
device_id = min(stages_on_same_gpu[stage_id])
else:
device_id = len(seen_devices)
seen_devices.add(device_id)
pipeline_representation_stage_to_device_map.append(device_id)
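# compress the collected device ids to a contiguous range 0..k-1, preserving their order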
tmp = sorted(set(pipeline_representation_stage_to_device_map))
tmp = {v: i for (i, v) in enumerate(tmp)}
pipeline_representation_stage_to_device_map = [tmp[i] for i in pipeline_representation_stage_to_device_map]
return pipeline_representation_stage_to_device_map
def setup_module():
Image = pytest.importorskip('PIL.Image')
global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix='scikit_learn_lfw_test_')
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix='scikit_learn_empty_test_')
if (not os.path.exists(LFW_HOME)):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if (not os.path.exists(folder_name)):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, (name + ('_%04d.jpg' % i)))
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
img = Image.fromarray(uniface.astype(np.uint8))
img.save(file_path)
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(b'Text file to be ignored by the dataset loader.')
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(b'10\n')
more_than_two = [name for (name, count) in counts.items() if (count >= 2)]
for i in range(5):
name = random_state.choice(more_than_two)
(first, second) = random_state.sample(range(counts[name]), 2)
f.write(('%s\t%d\t%d\n' % (name, first, second)).encode())
for i in range(5):
(first_name, second_name) = random_state.sample(FAKE_NAMES, 2)
first_index = np_rng.choice(np.arange(counts[first_name]))
second_index = np_rng.choice(np.arange(counts[second_name]))
f.write(('%s\t%d\t%s\t%d\n' % (first_name, first_index, second_name, second_index)).encode())
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(b"Fake place holder that won't be tested")
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(b"Fake place holder that won't be tested") |
def main(args):
(model, device, confidence_estimators, estimator_filenames, ned_model) = init(args)
server = Server(args, model, device, confidence_estimators, estimator_filenames, ned_model)
server.run()
def MSD_processor(msd_path):
meta_path = os.path.join(msd_path, 'track_metadata.db')
lastfm_path = os.path.join(msd_path, 'lastfm_annotation')
allmusic_path = os.path.join(msd_path, 'allmusic_annotation')
msd500_path = os.path.join(msd_path, 'msd500_annotation')
cals_path = os.path.join(msd_path, 'cals_annotation')
ecals_path = os.path.join(msd_path, 'ecals_annotation')
os.makedirs(ecals_path, exist_ok=True)
MSD_id_to_7D_id = pickle.load(open(os.path.join(lastfm_path, 'MSD_id_to_7D_id.pkl'), 'rb'))
id_to_path = pickle.load(open(os.path.join(lastfm_path, '7D_id_to_path.pkl'), 'rb'))
lastfm_tags = [i.lower() for i in open(os.path.join(lastfm_path, '50tagList.txt'), 'r').read().splitlines()]
cals_split = pd.read_csv(os.path.join(cals_path, 'msd_splits.tsv'), sep='\t').rename(columns={'clip_ids': 'track_id'}).set_index('track_id')
df_msdmeta = getMsdInfo(meta_path)
df_cals = cals_processor(cals_path)
(df_lastfm, _) = lastfm_processor(lastfm_path)
df_msd500 = msd500_processor(msd500_path)
df_allmusic = allmusic_processor(allmusic_path)
cals_lastfm = pd.merge(df_cals, df_lastfm, how='outer', on='track_id')
cals_lastfm_msd500 = pd.merge(cals_lastfm, df_msd500, how='outer', on='track_id')
cals_lastfm_msd500_allmusic = pd.merge(cals_lastfm_msd500, df_allmusic, how='outer', on='track_id')
df_tags = pd.merge(cals_split, cals_lastfm_msd500_allmusic, how='outer', on='track_id')
df_tags['length'] = (df_tags['length'] / 22050)
for column in ['cals', 'lastfm', 'msd500', 'allmusic', 'is_cals', 'is_lastfm', 'is_msd500', 'is_allmusic']:
if ('is_' in column):
df_tags[column] = df_tags[column].fillna(False)
else:
df_tags[column] = df_tags[column].apply(NaN_to_emptylist)
df_merge = pd.merge(df_tags, df_msdmeta, how='left', on='track_id')
df_merge['splits'] = df_merge['splits'].fillna('NONE')
df_final = df_merge[(df_merge['splits'] != 'NONE')]
target_col = ['splits', 'length', 'cals', 'lastfm', 'msd500', 'allmusic', 'is_cals', 'is_lastfm', 'is_msd500', 'is_allmusic', 'release', 'artist_name', 'year', 'title']
df_target = df_final[target_col]
(df_target, mp3_path) = _check_mp3_file(df_target, id_to_path, MSD_id_to_7D_id)
with poolcontext(processes=multiprocessing.cpu_count()) as pool:
pool.starmap(msd_resampler, zip(list(mp3_path.keys()), list(mp3_path.values())))
print('finish extract')
error_ids = [msdid.replace('.npy', '') for msdid in os.listdir(os.path.join(msd_path, 'error'))]
df_target = df_target.drop(error_ids)
tr_track = list(df_target[(df_target['splits'] == 'TRAIN')].index)
va_track = list(df_target[(df_target['splits'] == 'VALID')].index)
te_track = list(df_target[(df_target['splits'] == 'TEST')].index)
(filtered_tag, df_binary) = filtering(df_target, tr_track, va_track, te_track)
df_target['tag'] = filtered_tag
binary_error = [i for i in error_ids if (i in df_binary.index)]
df_binary = df_binary.drop(binary_error)
df_binary.to_csv(os.path.join(ecals_path, 'ecals_binary.csv'))
df_target['track_id'] = df_target.index
track_split = _track_split(df_target, ecals_path, types='ecals')
ecals_track = (((track_split['train_track'] + track_split['valid_track']) + track_split['test_track']) + track_split['extra_track'])
annotation_dict = df_target[['tag', 'release', 'artist_name', 'year', 'title', 'track_id']].to_dict('index')
target_anotation_dict = {i: annotation_dict[i] for i in ecals_track}
print(len(target_anotation_dict))
with open(os.path.join(ecals_path, 'annotation.json'), mode='w') as io:
json.dump(target_anotation_dict, io)
def _make_dense_dataset(float_dtype):
if (float_dtype == np.float32):
return ArrayDataset32(X32, y32, sample_weight32, seed=42)
return ArrayDataset64(X64, y64, sample_weight64, seed=42)
def train_one_epoch(train_loader, model, device, criterion, optimizer, epoch, writer, cfg, update_train_step):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
for (i, (input, target)) in enumerate(train_loader):
update_train_step += 1
target = target.to(device)
input = input.to(device)
cls_logits = model(input, return_cam=False)
loss = criterion(cls_logits, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
(prec1, prec5) = accuracy(cls_logits.data.contiguous(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
writer.add_scalar('loss_iter/train', loss.item(), update_train_step)
writer.add_scalar('acc_iter/train_top1', prec1.item(), update_train_step)
writer.add_scalar('acc_iter/train_top5', prec5.item(), update_train_step)
if (((i % cfg.BASIC.DISP_FREQ) == 0) or (i == (len(train_loader) - 1))):
print('Train Epoch: [{0}][{1}/{2}],lr: {lr:.5f}\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, (i + 1), len(train_loader), loss=losses, top1=top1, top5=top5, lr=optimizer.param_groups[(- 1)]['lr']))
return (update_train_step, losses.avg, top1.avg, top5.avg)
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
class MELD_loader(Dataset):
def __init__(self, txt_file, dataclass):
self.dialogs = []
f = open(txt_file, 'r')
dataset = f.readlines()
f.close()
temp_speakerList = []
self.speakerNum = []
emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
self.emoSet = set()
self.sentiSet = set()
for (i, data) in enumerate(dataset):
if (i < 2):
continue
if ((data == '\n') and (len(self.dialogs) > 0)):
continue
(speaker, utt, emo, senti) = data.strip().split('\t')
self.dialogs.append([utt, emodict[emo], senti])
self.emoSet.add(emodict[emo])
self.sentiSet.add(senti)
self.emoList = sorted(self.emoSet)
self.sentiList = sorted(self.sentiSet)
if (dataclass == 'emotion'):
self.labelList = self.emoList
else:
self.labelList = self.sentiList
self.speakerNum.append(len(temp_speakerList))
def __len__(self):
return len(self.dialogs)
def __getitem__(self, idx):
return (self.dialogs[idx], self.labelList, self.sentidict)
def main(cfg):
pl.seed_everything(cfg.General.seed)
cfg.load_loggers = load_loggers(cfg)
cfg.callbacks = load_callbacks(cfg)
DataInterface_dict = {'train_batch_size': cfg.Data.train_dataloader.batch_size, 'train_num_workers': cfg.Data.train_dataloader.num_workers, 'test_batch_size': cfg.Data.test_dataloader.batch_size, 'test_num_workers': cfg.Data.test_dataloader.num_workers, 'dataset_name': cfg.Data.dataset_name, 'dataset_cfg': cfg.Data}
dm = DataInterface(**DataInterface_dict)
ModelInterface_dict = {'model': cfg.Model, 'loss': cfg.Loss, 'optimizer': cfg.Optimizer, 'data': cfg.Data, 'log': cfg.log_path}
model = ModelInterface(**ModelInterface_dict)
trainer = Trainer(num_sanity_val_steps=0, logger=cfg.load_loggers, callbacks=cfg.callbacks, max_epochs=cfg.General.epochs, gpus=cfg.General.gpus, amp_level=cfg.General.amp_level, precision=cfg.General.precision, accumulate_grad_batches=cfg.General.grad_acc, deterministic=True, check_val_every_n_epoch=1)
if (cfg.General.server == 'train'):
trainer.fit(model=model, datamodule=dm)
else:
model_paths = list(cfg.log_path.glob('*.ckpt'))
model_paths = [str(model_path) for model_path in model_paths if ('epoch' in str(model_path))]
for path in model_paths:
print(path)
new_model = model.load_from_checkpoint(checkpoint_path=path, cfg=cfg)
trainer.test(model=new_model, datamodule=dm) |
class FFHQValidation(FacesBase):
def __init__(self, size, keys=None):
super().__init__()
root = 'data/ffhq'
with open('data/ffhqvalidation.txt', 'r') as f:
relpaths = f.read().splitlines()
paths = [os.path.join(root, relpath) for relpath in relpaths]
self.data = ImagePaths(paths=paths, size=size, random_crop=False)
self.keys = keys |
def test_base_functions_values(gels):
from sfepy.base.base import ordered_iteritems
from sfepy.discrete import PolySpace
ok = True
for (key, val) in ordered_iteritems(test_bases):
gel = gels[key[:3]]
diff = (key[(- 4):] == 'grad')
order = int(key[5])
force_bubble = (key[6:7] == 'B')
ps = PolySpace.any_from_args('aux', gel, order, base='lagrange', force_bubble=force_bubble)
dim = ps.geometry.dim
coors = nm.r_[(ps.geometry.coors, [([0.2] * dim)])]
bf = ps.eval_base(coors, diff=diff)
_ok = nm.allclose(val, bf, rtol=0.0, atol=1e-14)
if (not diff):
_ok = (_ok and nm.allclose(bf.sum(axis=2), 1.0, rtol=0.0, atol=1e-14))
tst.report(('%s: %s' % (key, _ok)))
ok = (ok and _ok)
assert ok |
class CylinderFlow(ShapeFlow):
radius: float = 100
height: float = 100
num_points: int = 32 |
@register_model
def convformer_s18(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
model.load_state_dict(state_dict)
return model |
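Typical use of the factory above; the 224x224 input size and the forwarded num_classes kwarg are assumptions based on common MetaFormer configurations:
import torch

model = convformer_s18(pretrained=False, num_classes=1000)  # num_classes assumed to be forwarded to MetaFormer
logits = model(torch.randn(1, 3, 224, 224))                 # expected shape: (1, 1000) |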
class TFMPNetForMaskedLM(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
def test_stop_event_stream_immediately(event_stream):
event_stream.stop()
assert isinstance(next(event_stream), events.Finished)
assert (next(event_stream, None) is None) |
@parse_args('v', 'v', 'v', 'i', 'i', 'i', 'v', 'i')
def embedding_bag(g, embedding_matrix, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset):
if (scale_grad_by_freq and sym_help._training_mode):
return sym_help._onnx_unsupported('embedding_bag with scale_grad_by_freq for training mode')
from torch.onnx.symbolic_opset9 import select
import warnings
warnings.warn("Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. Please use opset 11 or higher to export model for dynamic input shape.'")
if (offsets.type().sizes() is not None):
if include_last_offset:
offset_len = (offsets.type().sizes()[0] - 1)
offsets_extended = offsets
else:
offset_len = offsets.type().sizes()[0]
offsets_extended = [offsets, g.op('Constant', value_t=torch.tensor([maxsize]))]
offsets_extended = g.op('Concat', *offsets_extended, axis_i=0)
list_ = []
for i in range(offset_len):
start_ = g.op('Unsqueeze', select(g, offsets_extended, torch.tensor(0), torch.tensor(i)), axes_i=[0])
end_ = g.op('Unsqueeze', select(g, offsets_extended, torch.tensor(0), torch.tensor((i + 1))), axes_i=[0])
axes_ = g.op('Constant', value_t=torch.tensor([0]))
indices_row = g.op('Slice', indices, start_, end_, axes_)
embeddings = g.op('Gather', embedding_matrix, indices_row)
if (not sym_help._is_none(per_sample_weights)):
per_sample_weights_row = g.op('Slice', per_sample_weights, start_, end_, axes_)
per_sample_weights_row = g.op('Unsqueeze', per_sample_weights_row, axes_i=[1])
embeddings = g.op('Mul', embeddings, per_sample_weights_row)
if (mode == 0):
embeddings = g.op('ReduceSum', embeddings, axes_i=[0], keepdims_i=0)
elif (mode == 1):
embeddings = g.op('ReduceMean', embeddings, axes_i=[0], keepdims_i=0)
else:
embeddings = g.op('ReduceMax', embeddings, axes_i=[0], keepdims_i=0)
embeddings = g.op('Unsqueeze', embeddings, axes_i=[0])
list_.append(embeddings)
output = g.op('Concat', *list_, axis_i=0)
return (output, None, None, None)
else:
return sym_help._onnx_unsupported('embedding_bag with unknown shape of offsets for opset 10 is not supported. please use opset 11 or higher.') |
class FlattenAgents(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
sa_action_space = [len(Action), *(env.msg_bits * (2,))]
if ((len(sa_action_space) == 1) and (self.n_agents == 1)):
sa_action_space = spaces.Discrete(sa_action_space[0])
else:
sa_action_space = spaces.MultiDiscrete((self.n_agents * sa_action_space))
self.action_space = sa_action_space
self.observation_space = spaces.Tuple(tuple((space for space in self.observation_space)))
def reset(self, **kwargs):
observation = super().reset(**kwargs)
return np.concatenate([spaces.flatten(s, o) for (s, o) in zip(self.observation_space, observation)])
def step(self, action):
try:
action = np.split(action, self.n_agents)
except (AttributeError, IndexError):
action = [action]
(observation, reward, done, info) = super().step(action)
observation = np.concatenate([spaces.flatten(s, o) for (s, o) in zip(self.observation_space, observation)])
reward = np.sum(reward)
done = all(done)
return (observation, reward, done, info) |
class StandardPermutations_n_abstract(Permutations):
def __init__(self, n, category=None):
self.n = ZZ(n)
if (category is None):
category = FiniteEnumeratedSets()
Permutations.__init__(self, category=category)
@rename_keyword(deprecation=35233, check_input='check')
def _element_constructor_(self, x, check=True):
if (len(x) < self.n):
x = (list(x) + list(range((len(x) + 1), (self.n + 1))))
return self.element_class(self, x, check=check)
def __contains__(self, x):
return (Permutations.__contains__(self, x) and (len(x) == self.n)) |
def evaluate(dataset, predictions):
count = 0
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if (qa['id'] not in predictions):
message = (('Unanswered question ' + qa['id']) + ' will receive score 0.')
print(message)
count += 1
continue
ground_truths = list(map((lambda x: x['text']), qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = ((100.0 * exact_match) / total)
f1 = ((100.0 * f1) / total)
if count:
print(('There are %d unanswered question(s)' % count))
return {'exact_match': exact_match, 'f1': f1} |
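A toy invocation with the SQuAD v1.1 structures the function walks (metric_max_over_ground_truths, exact_match_score and f1_score are assumed to be in scope):
dataset = [{'paragraphs': [{'qas': [{'id': 'q1', 'answers': [{'text': 'Denver Broncos'}]}]}]}]
predictions = {'q1': 'Denver Broncos'}
print(evaluate(dataset, predictions))  # {'exact_match': 100.0, 'f1': 100.0} |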
class NodeSpec():
def __init__(self, op, target):
self.op = op
self.target = target
@classmethod
def call_function(cls, target):
return NodeSpec('call_function', target)
@classmethod
def call_method(cls, target):
return NodeSpec('call_method', target)
@classmethod
def call_module(cls, target):
return NodeSpec('call_module', target)
def __hash__(self):
return hash((self.op, self.target))
def __eq__(self, other):
if (not isinstance(other, NodeSpec)):
return NotImplemented
return ((self.op == other.op) and (self.target == other.target))
def __repr__(self):
return ((repr(self.op) + ' ') + repr(self.target)) |
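With the @classmethod fix above, NodeSpec values hash and compare by (op, target), so they can key dicts and populate sets:
import operator

spec = NodeSpec.call_function(operator.add)
assert spec == NodeSpec('call_function', operator.add)
assert spec in {NodeSpec('call_function', operator.add)} |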
def read_data(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data |
def binary_crossentropy(output, target, from_logits=False):
if (not from_logits):
epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon, (1 - epsilon))
output = tf.log((output / (1 - output)))
try:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
except TypeError:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target) |
def get_salient_frequent_verb_lemmas(verb2local_freq, verb2global_freq, top_ratio=0.8, min_freq=5):
verb2salience = get_salience(verb2local_freq, verb2global_freq)
stopword_verbs = (stop_words | {'could', 'can', 'may', 'might', 'will', 'would', 'should', 'shall', 'be', "'d'", ',', '', 'take', 'use', 'make', 'have', 'go', 'come', 'get', 'do', 'give', 'put', 'set', 'argue', 'say', 'claim', 'suggest', 'tell'})
V = int((len(verb2salience) * top_ratio))
salient_verbs = {}
for ele in sorted(verb2salience.items(), key=(lambda x: (- x[1]))):
if (ele[0] not in stopword_verbs):
salient_verbs[ele[0]] = ele[1]
if (len(salient_verbs) == V):
break
print(f'Select {len(salient_verbs)} salient verbs')
frequent_salient_verbs = {}
for (verb, saliency) in salient_verbs.items():
if (verb2local_freq[verb] >= min_freq):
frequent_salient_verbs[verb] = saliency
print(f'Select {len(frequent_salient_verbs)} frequent and salient verbs')
return frequent_salient_verbs |
def validate(model=None, data_loader=None, args=None):
(preds, gts, cams, cams_aux) = ([], [], [], [])
model.eval()
avg_meter = AverageMeter()
with torch.no_grad():
for (_, data) in tqdm(enumerate(data_loader), total=len(data_loader), ncols=100, ascii=' >='):
(name, inputs, labels, cls_label) = data
inputs = inputs.cuda()
labels = labels.cuda()
cls_label = cls_label.cuda()
inputs = F.interpolate(inputs, size=[args.crop_size, args.crop_size], mode='bilinear', align_corners=False)
(cls, segs, _, _) = model(inputs)
cls_pred = (cls > 0).type(torch.int16)
_f1 = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
avg_meter.add({'cls_score': _f1})
(_cams, _cams_aux) = multi_scale_cam2(model, inputs, args.cam_scales)
resized_cam = F.interpolate(_cams, size=labels.shape[1:], mode='bilinear', align_corners=False)
cam_label = cam_to_label(resized_cam, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
resized_cam_aux = F.interpolate(_cams_aux, size=labels.shape[1:], mode='bilinear', align_corners=False)
cam_label_aux = cam_to_label(resized_cam_aux, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
resized_segs = F.interpolate(segs, size=labels.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_segs, dim=1).cpu().numpy().astype(np.int16))
cams += list(cam_label.cpu().numpy().astype(np.int16))
gts += list(labels.cpu().numpy().astype(np.int16))
cams_aux += list(cam_label_aux.cpu().numpy().astype(np.int16))
cls_score = avg_meter.pop('cls_score')
seg_score = evaluate.scores(gts, preds)
cam_score = evaluate.scores(gts, cams)
cam_aux_score = evaluate.scores(gts, cams_aux)
model.train()
tab_results = format_tabs([cam_score, cam_aux_score, seg_score], name_list=['CAM', 'aux_CAM', 'Seg_Pred'], cat_list=voc.class_list)
return (cls_score, tab_results) |
class Dataset(data.Dataset):
def __init__(self, dataPath, loadSize, fineSize, test=False, video=False):
super(Dataset, self).__init__()
self.dataPath = dataPath
self.image_list = [x for x in os.listdir(dataPath) if is_image_file(x)]
self.image_list = sorted(self.image_list)
if video:
self.image_list = sorted(self.image_list)
if (not test):
self.transform = transforms.Compose([transforms.Resize(fineSize), transforms.RandomCrop(fineSize), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
else:
self.transform = transforms.Compose([transforms.Resize(fineSize), transforms.ToTensor()])
self.test = test
def __getitem__(self, index):
dataPath = os.path.join(self.dataPath, self.image_list[index])
Img = default_loader(dataPath)
ImgA = self.transform(Img)
imgName = self.image_list[index]
imgName = imgName.split('.')[0]
return (ImgA, imgName)
def __len__(self):
return len(self.image_list) |
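Feeding the dataset to a DataLoader (the path and sizes are illustrative; is_image_file and default_loader are assumed to be the usual torchvision-style helpers):
from torch.utils.data import DataLoader

dataset = Dataset('data/images', loadSize=286, fineSize=256, test=True)  # illustrative path
loader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=2)
images, names = next(iter(loader))  # images: (8, 3, 256, 256) for RGB inputs |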
def commits_since_previous(*seed_commits: Commit) -> Tuple[(Dict[(bytes, Commit)], Optional[Commit])]:
stack = list(seed_commits)
commits = {}
previous = None
while stack:
commit = stack.pop()
if (commit.binsha in commits):
continue
matches = VERSION_REG.findall(commit.message)
if matches:
previous = commit
continue
commits[commit.binsha] = commit
stack.extend(commit.parents)
return (commits, previous) |
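With GitPython, the walk can start from the current HEAD; VERSION_REG is assumed to match release-style commit messages:
from git import Repo

repo = Repo('.')
commits, previous_release = commits_since_previous(repo.head.commit)
print(len(commits), 'commits since', previous_release) |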
def get_args():
parser = argparse.ArgumentParser(description='RL')
parser.add_argument('--env-name', default='simple_spread', help='one from {simple_spread, simple_formation, simple_line})')
parser.add_argument('--num-agents', type=int, default=3)
parser.add_argument('--masking', action='store_true', help='restrict communication to within some threshold')
parser.add_argument('--mask-dist', type=float, default=1.0, help='distance to restrict comms')
parser.add_argument('--dropout-masking', action='store_true', help='dropout masking enabled')
parser.add_argument('--entity-mp', action='store_true', help='enable entity message passing')
parser.add_argument('--identity-size', default=0, type=int, help='size of identity vector')
parser.add_argument('--seed', type=int, default=None, help='random seed (default: None)')
parser.add_argument('--num-processes', type=int, default=32, help='how many training CPU processes to use (default: 32)')
parser.add_argument('--num-steps', type=int, default=128, help='number of forward steps in PPO (default: 128)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--num-frames', type=int, default=int(50e6), help='number of frames to train (default: 50e6)')
parser.add_argument('--arena-size', type=int, default=1, help='size of arena')
parser.add_argument('--num-eval-episodes', type=int, default=30, help='number of episodes to evaluate with')
parser.add_argument('--dist-threshold', type=float, default=0.1, help='distance within landmark is considered covered (for simple_spread)')
parser.add_argument('--render', action='store_true')
parser.add_argument('--record-video', action='store_true', default=False, help='record evaluation video')
parser.add_argument('--algo', default='ppo', help='algorithm to use: a2c | ppo | acktr')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate (default: 1e-4)')
parser.add_argument('--gamma', type=float, default=0.99, help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.95, help='gae parameter (default: 0.95)')
parser.add_argument('--entropy-coef', type=float, default=0.01, help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5, help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=0.5, help='max norm of gradients (default: 0.5)')
parser.add_argument('--ppo-epoch', type=int, default=4, help='number of ppo epochs (default: 4)')
parser.add_argument('--num-mini-batch', type=int, default=32, help='number of batches for ppo (default: 32)')
parser.add_argument('--clip-param', type=float, default=0.2, help='ppo clip parameter (default: 0.2)')
parser.add_argument('--save-dir', default='tmp', help='directory to save models (default: tmp)')
parser.add_argument('--log-dir', default='logs', help='directory to save logs')
parser.add_argument('--save-interval', type=int, default=200, help='save interval, one save per n updates (default: 200)')
parser.add_argument('--log-interval', type=int, default=10, help='log interval, one log per n updates (default: 10)')
parser.add_argument('--test', action='store_true')
parser.add_argument('--load-dir', default=None, help='filename to load all policies from')
parser.add_argument('--eval-interval', default=50, type=int)
parser.add_argument('--continue-training', action='store_true')
parser.add_argument('--no-clipped-value-loss', action='store_true')
args = parser.parse_args()
args.clipped_value_loss = (not args.no_clipped_value_loss)
args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
args.device = torch.device(('cuda' if args.cuda else 'cpu'))
args.save_dir = ('../marlsave/save_new/' + args.save_dir)
args.log_dir = ((args.save_dir + '/') + args.log_dir)
if args.continue_training:
assert ((args.load_dir is not None) and os.path.exists(args.load_dir)), 'Please specify valid model file to load if you want to continue training'
if (args.identity_size > 0):
assert (args.identity_size >= args.num_agents), 'identity size should either be 0 or >= number of agents!'
if (not args.masking):
args.mask_dist = None
elif (args.masking and args.dropout_masking):
args.mask_dist = (- 10)
if (not args.test):
if os.path.exists(args.save_dir):
print('\nSave directory exists already!')
ch = input('Enter c (rename the existing directory with _old and continue)\ns (stop)\ndel (delete existing dir): ')
if (ch == 's'):
sys.exit(0)
elif (ch == 'c'):
os.rename(args.save_dir, (args.save_dir + '_old'))
elif (ch == 'del'):
shutil.rmtree(args.save_dir)
else:
raise NotImplementedError('Unknown input')
os.makedirs(args.save_dir)
return args |
@compare_numpy_output()
def test_reduce_global_None(A: dace.float64[(10, 5, 3)]):
return np.mean(A, axis=my_none) |
def p_positional_and_keyword_args(s, end_sy_set, templates=None):
positional_args = []
keyword_args = []
pos_idx = 0
while (s.sy not in end_sy_set):
if ((s.sy == '*') or (s.sy == '**')):
s.error('Argument expansion not allowed here.', fatal=False)
parsed_type = False
if ((s.sy == 'IDENT') and (s.peek()[0] == '=')):
ident = s.systring
s.next()
s.next()
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates=templates)
declarator = p_c_declarator(s, empty=1)
arg = Nodes.CComplexBaseTypeNode(base_type.pos, base_type=base_type, declarator=declarator)
parsed_type = True
keyword_node = ExprNodes.IdentifierStringNode(arg.pos, value=ident)
keyword_args.append((keyword_node, arg))
was_keyword = True
else:
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates=templates)
declarator = p_c_declarator(s, empty=1)
arg = Nodes.CComplexBaseTypeNode(base_type.pos, base_type=base_type, declarator=declarator)
parsed_type = True
positional_args.append(arg)
pos_idx += 1
if (len(keyword_args) > 0):
s.error('Non-keyword arg following keyword arg', pos=arg.pos)
if (s.sy != ','):
if (s.sy not in end_sy_set):
if parsed_type:
s.error(('Unmatched %s' % ' or '.join(end_sy_set)))
break
s.next()
return (positional_args, keyword_args) |
def _try_get_shapes(nets):
try:
(shapes, _) = workspace.InferShapesAndTypes(nets)
return shapes
except Exception as e:
logging.warning('Failed to compute shapes: %s', e)
return {} |
def result_level(finding):
level = finding.get('level', '').strip().lower()
return (level if (level in ('none', 'note', 'warning', 'error')) else None) |
def rebuild_tensor(cls, storage, metadata):
(storage_offset, size, stride) = metadata
return torch._utils._rebuild_tensor(storage, storage_offset, size, stride) |
def load_constituency_tree(parents, words):
trees = []
root = None
size = len(parents)
for i in range(size):
trees.append(None)
word_idx = 0
for i in range(size):
if (not trees[i]):
idx = i
prev = None
prev_idx = None
word = words[word_idx]
word_idx += 1
while True:
tree = ConstTree()
parent = (parents[idx] - 1)
(tree.word, tree.parent, tree.idx) = (word, parent, idx)
word = None
if (prev is not None):
if (tree.left is None):
tree.left = prev
else:
tree.right = prev
trees[idx] = tree
if ((parent >= 0) and (trees[parent] is not None)):
if (trees[parent].left is None):
trees[parent].left = tree
else:
trees[parent].right = tree
break
elif (parent == (- 1)):
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root |
class LocalWindowService(WindowService, ABC):
def __init__(self, service: TokenizerService):
self.service: TokenizerService = service
def encode(self, text: str, truncation: bool=False, max_length: Optional[int]=None) -> EncodeResult:
if (max_length is None):
max_length = self.max_request_length
response: TokenizationRequestResult = self.service.tokenize(TokenizationRequest(text, tokenizer=self.tokenizer_name, encode=True, truncation=truncation, max_length=max_length))
return EncodeResult(text=text, tokens=response.tokens)
def decode(self, tokens: List[TokenizationToken], normalized_text: Optional[str]=None) -> str:
response: DecodeRequestResult = self.service.decode(DecodeRequest([token.value for token in tokens], tokenizer=self.tokenizer_name, clean_up_tokenization_spaces=False))
return response.text
def tokenize(self, text: str) -> List[str]:
response: TokenizationRequestResult = self.service.tokenize(TokenizationRequest(text, tokenizer=self.tokenizer_name))
tokens: List[str] = cast(List[str], response.raw_tokens)
tokens = cleanup_tokens(tokens, self.tokenizer_name)
return tokens
def get_num_tokens(self, text: str) -> int:
return len(self.encode(text).tokens)
def fits_within_context_window(self, text: str, expected_completion_token_length: int=0) -> bool:
return ((self.get_num_tokens(text) + expected_completion_token_length) <= self.max_request_length)
def truncate_from_right(self, text: str, expected_completion_token_length: int=0) -> str:
max_length: int = (self.max_request_length - expected_completion_token_length)
result: str = self.decode(self.encode(text, truncation=True, max_length=max_length).tokens)
while (not self.fits_within_context_window(result, expected_completion_token_length)):
result = result[:(- 1)]
return result |
class DownloadImage():
def __init__(self, out_path, img_url_file, proxies, header, retries, timeout):
self.header = header
self.proxies = proxies
self.out_path = out_path
self.retries = retries
self.timeout = timeout
self.img_url_file = img_url_file
self.file_path = os.path.join(out_path, 'image')
if (not os.path.isdir(self.file_path)):
os.makedirs(self.file_path)
self.url_json = os.path.join(out_path, 'urls.json')
self.failedpath = os.path.join(out_path, 'failedurl.json')
self.successurl = []
self.failedUrl = []
logfilepath = os.path.join(out_path, 'errors.log')
formatter = logging.Formatter('Time:%(asctime)s Level: %(levelname)s URL: %(URL)s STATSUSCODE: %(STATSUSCODE)s MESSAGE: %(message)s')
self.logger = logging.getLogger()
fileHandler = logging.FileHandler(logfilepath)
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
self.logger.setLevel(logging.ERROR)
self.urllist = self.read_url()
def read_url(self):
raw_urls = set()
with open(self.img_url_file, 'r', encoding='utf-8') as f:
res = [json.loads(i) for i in f.readlines()]
for weibo in res:
if (not weibo['weibo_img']):
continue
for url in weibo['weibo_img'].split(';'):
raw_urls.add(url)
if ('' in raw_urls):
raw_urls.remove('')
if os.path.exists(self.url_json):
with open(self.url_json, 'r', encoding='utf-8') as f:
existing_urls = set([i.strip() for i in f.readlines() if (len(i.strip()) != 0)])
print('{} imgs downloaded'.format(len(existing_urls)))
if (len(raw_urls) > len(existing_urls)):
return list((raw_urls - existing_urls))
else:
return None
else:
return list(raw_urls)
def get_proxies(self):
if (self.proxies is not None):
return random.choice(self.proxies)
else:
return None
def set_request(self):
s = requests.Session()
s.proxies = self.get_proxies()
s.mount('https://', HTTPAdapter(max_retries=self.retries))
s.keep_alive = False
return s
def getDownload(self, url_list):
if url_list:
for (i, url) in enumerate(url_list):
try:
down_res = self.set_request().get(url, timeout=self.timeout, verify=False, allow_redirects=False)
if (down_res.status_code == 200):
filenamepath = os.path.join(self.file_path, url2imgid(url))
with open(filenamepath, 'wb') as fb:
fb.write(down_res.content)
with open(self.url_json, 'a+') as fp:
fp.write((url + '\n'))
self.successurl.append(url)
else:
self.failedUrl.append(url)
self.logger.error(down_res.reason, extra={'URL': url, 'STATSUSCODE': down_res.status_code})
except requests.exceptions.RequestException as e:
self.failedUrl.append(url)
self.logger.error(e, extra={'URL': url, 'STATSUSCODE': ''})
else:
tqdm.write('')
def dump_failed_url(self):
with open(self.failedpath, 'w', encoding='utf-8') as fp:
for url in self.failedUrl:
fp.write((url + '\n'))
print('Failed URLs written to {}: {}'.format(self.failedpath, len(self.failedUrl)))
def run(self):
with trange(len(self.urllist), dynamic_ncols=True) as t:
for i in t:
time.sleep(0.3)
t.set_description('FAILED NUM {},SUCCESS NUM {}'.format(len(self.failedUrl), len(self.successurl)))
thread = threading.Thread(target=self.getDownload, args=([self.urllist[i]],))
thread.start()
self.dump_failed_url() |
def register_Ns3Queue__Ns3QueueDiscItem_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], is_pure_virtual=True, is_virtual=True)
cls.add_method('Dequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('Remove', 'ns3::Ptr< ns3::QueueDiscItem >', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('Peek', 'ns3::Ptr< ns3::QueueDiscItem const >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('Flush', 'void', [])
cls.add_constructor([param('ns3::Queue< ns3::QueueDiscItem > const &', 'arg0')])
cls.add_method('Head', 'ns3::Queue< ns3::QueueDiscItem >::ConstIterator', [], is_const=True, visibility='protected')
cls.add_method('Tail', 'ns3::Queue< ns3::QueueDiscItem >::ConstIterator', [], is_const=True, visibility='protected')
cls.add_method('DoEnqueue', 'bool', [param('std::list< ns3::Ptr< ns3::QueueDiscItem > >::const_iterator', 'pos'), param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], visibility='protected')
cls.add_method('DoDequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [param('std::list< ns3::Ptr< ns3::QueueDiscItem > >::const_iterator', 'pos')], visibility='protected')
cls.add_method('DoRemove', 'ns3::Ptr< ns3::QueueDiscItem >', [param('std::list< ns3::Ptr< ns3::QueueDiscItem > >::const_iterator', 'pos')], visibility='protected')
cls.add_method('DoPeek', 'ns3::Ptr< ns3::QueueDiscItem const >', [param('std::list< ns3::Ptr< ns3::QueueDiscItem > >::const_iterator', 'pos')], is_const=True, visibility='protected')
cls.add_method('DropBeforeEnqueue', 'void', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], visibility='protected')
cls.add_method('DropAfterDequeue', 'void', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], visibility='protected')
return |
def grey_closing(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0):
if ((size is not None) and (footprint is not None)):
warnings.warn('ignoring size because footprint is set', UserWarning, stacklevel=2)
tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin)
return grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin) |
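Behaviorally this is a grey dilation followed by a grey erosion, matching scipy.ndimage.grey_closing; a quick check on a toy array where closing fills the central dip:
import numpy as np
from scipy import ndimage

a = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=float)
print(ndimage.grey_closing(a, size=(3, 3)))  # all ones: the 0 is closed |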
def _init_impl(path, trigger_lazy=True):
with dll_lock:
_IMPORTED_DYNDEPS.add(path)
with extension_loader.DlopenGuard():
ctypes.CDLL(path)
core.RefreshRegisteredOperators(trigger_lazy) |
def house(metadata: bool=False) -> Union[(sparse.csr_matrix, Bunch)]:
row = np.array([0, 0, 1, 1, 2, 3])
col = np.array([1, 4, 2, 4, 3, 4])
adjacency = sparse.csr_matrix((np.ones(len(row), dtype=int), (row, col)), shape=(5, 5))
adjacency = (adjacency + adjacency.T).astype(bool)
if metadata:
x = np.array([0, (- 1), (- 1), 1, 1])
y = np.array([2, 1, (- 1), (- 1), 1])
graph = Bunch()
graph.adjacency = adjacency
graph.position = np.vstack((x, y)).T
graph.name = 'house'
return graph
else:
return adjacency |
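Loading the toy graph both ways (mirrors the scikit-network-style API the function implements):
adjacency = house()            # 5x5 boolean CSR adjacency
graph = house(metadata=True)   # Bunch with .adjacency, .position, .name
print(adjacency.shape, graph.name)  # (5, 5) house |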
def Attention_transfer(student, teacher, beta=1000.0):
def Attention(source, target):
with tf.variable_scope('Attention'):
(B, _, _, Ds) = source.get_shape().as_list()
Dt = target.get_shape().as_list()[(- 1)]
if (Ds != Dt):
with tf.variable_scope('Map'):
source = tf.contrib.layers.fully_connected(source, Dt, biases_initializer=None, trainable=True, scope='fc')
Qs = tf.reduce_mean(tf.square(source), (- 1))
Qs = tf.nn.l2_normalize(Qs, [1, 2])
Qt = tf.reduce_mean(tf.square(target), (- 1))
Qt = tf.nn.l2_normalize(Qt, [1, 2])
return ((tf.reduce_mean(tf.square((Qt - Qs))) * beta) / 2)
return tf.add_n([Attention(std, tch) for (std, tch) in zip(student, teacher)]) |
def count_arithmetic_ops_code(code_or_block: Union[(List[ast.AST], ast.AST, str)]) -> int:
ctr = ArithmeticCounter()
if isinstance(code_or_block, (tuple, list)):
for stmt in code_or_block:
ctr.visit(stmt)
elif isinstance(code_or_block, str):
ctr.visit(ast.parse(code_or_block))
else:
ctr.visit(code_or_block)
return ctr.count |
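The counter class itself is not shown; a minimal ArithmeticCounter consistent with the dispatcher above would visit BinOp and UnaryOp nodes (a sketch under the assumption that only those count as arithmetic):
import ast

class ArithmeticCounter(ast.NodeVisitor):
    def __init__(self):
        self.count = 0

    def visit_BinOp(self, node):
        self.count += 1  # +, -, *, /, **, ...
        self.generic_visit(node)

    def visit_UnaryOp(self, node):
        self.count += 1  # unary -, +, ~
        self.generic_visit(node)

print(count_arithmetic_ops_code('a + b * c'))  # 2 |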
class MLPPreprocessor(MLPFunction):
def __init__(self, env_spec, layer_sizes=(128, 16), output_nonlinearity=None, name='observations_preprocessor'):
Parameterized.__init__(self)
Serializable.quick_init(self, locals())
self._name = name
self._Do = env_spec.observation_space.flat_dim
obs_ph = tf.placeholder(tf.float32, shape=(None, self._Do), name='observations')
self._input_pls = (obs_ph,)
self._layer_sizes = layer_sizes
self._output_nonlinearity = output_nonlinearity
self._output_t = self.get_output_for(obs_ph, reuse=tf.AUTO_REUSE) |
class TweedieRegressor(_GeneralizedLinearRegressor):
_parameter_constraints: dict = {**_GeneralizedLinearRegressor._parameter_constraints, 'power': [Interval(Real, None, None, closed='neither')], 'link': [StrOptions({'auto', 'identity', 'log'})]}
def __init__(self, *, power=0.0, alpha=1.0, fit_intercept=True, link='auto', solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, tol=tol, warm_start=warm_start, verbose=verbose)
self.link = link
self.power = power
def _get_loss(self):
if (self.link == 'auto'):
if (self.power <= 0):
return HalfTweedieLossIdentity(power=self.power)
else:
return HalfTweedieLoss(power=self.power)
if (self.link == 'log'):
return HalfTweedieLoss(power=self.power)
if (self.link == 'identity'):
return HalfTweedieLossIdentity(power=self.power) |
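Standard scikit-learn usage; power=1.5 selects a compound Poisson-Gamma deviance, and the data here is synthetic:
import numpy as np
from sklearn.linear_model import TweedieRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = np.exp(X @ np.array([0.3, -0.2, 0.1])) + rng.gamma(1.0, 0.1, size=100)  # strictly positive targets
reg = TweedieRegressor(power=1.5, alpha=0.5, link='log').fit(X, y)
print(reg.score(X, y)) |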
class TestStacking2(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='stacking2')
self.env = CausalWorld(task=self.task, enable_visualization=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print(i)
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_1(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((observations_1[i] - observations_2[i]))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_2(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
intervention = {'joint_positions': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
self.env.set_starting_state(interventions_dict=intervention)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
success_signal = self.env.do_intervention({'tool_block_1': {'cylindrical_position': [0, 0, 0.2]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2) |
def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
packing_specs = ('contig', 'strided', 'follow')
access_specs = ('direct', 'ptr', 'full')
has_contig = has_follow = has_strided = has_generic_contig = False
last_indirect_dimension = (- 1)
for (idx, (access, packing)) in enumerate(specs):
if (access == 'ptr'):
last_indirect_dimension = idx
for (idx, (pos, (access, packing))) in enumerate(zip(positions, specs)):
if (not ((access in access_specs) and (packing in packing_specs))):
raise CompileError(pos, 'Invalid axes specification.')
if (packing == 'strided'):
has_strided = True
elif (packing == 'contig'):
if has_contig:
raise CompileError(pos, 'Only one direct contiguous axis may be specified.')
valid_contig_dims = ((last_indirect_dimension + 1), (len(specs) - 1))
if ((idx not in valid_contig_dims) and (access != 'ptr')):
if ((last_indirect_dimension + 1) != (len(specs) - 1)):
dims = ('dimensions %d and %d' % valid_contig_dims)
else:
dims = ('dimension %d' % valid_contig_dims[0])
raise CompileError(pos, ('Only %s may be contiguous and direct' % dims))
has_contig = (access != 'ptr')
elif (packing == 'follow'):
if has_strided:
raise CompileError(pos, 'A memoryview cannot have both follow and strided axis specifiers.')
if (not (is_c_contig or is_f_contig)):
raise CompileError(pos, 'Invalid use of the follow specifier.')
if (access in ('ptr', 'full')):
has_strided = False |
def _should_use_custom_op():
if (not enabled):
return False
if (LooseVersion(torch.__version__) >= LooseVersion('1.7.0')):
return True
warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
return False |
(0.2)
def collect_info(entities, *argv, **kargs):
en_name = kargs['cur_entity_name']
return resp(True, msg=('Info collected: ' + str(entities[en_name]))) |
def register_Ns3TcpOptionSack_methods(root_module, cls):
cls.add_constructor([param('ns3::TcpOptionSack const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddSackBlock', 'void', [param('std::pair< ns3::SequenceNumber< unsigned int, int >, ns3::SequenceNumber< unsigned int, int > >', 's')])
cls.add_method('ClearSackList', 'void', [])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
cls.add_method('GetKind', 'uint8_t', [], is_const=True, is_virtual=True)
cls.add_method('GetNumSackBlocks', 'uint32_t', [], is_const=True)
cls.add_method('GetSackList', 'ns3::TcpOptionSack::SackList', [], is_const=True)
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
return |
def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
prompt_text = ((args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text)
return prompt_text |
def write_release_task(filename='NOTES.txt'):
idirs = Path('release')
source = Path(get_latest_release_doc('doc/source/release'))
target = Path(filename)
if target.exists():
target.unlink()
tmp_target = Path((filename + '.txt'))
os.system(f'cp {source} {tmp_target}')
with open(str(tmp_target), 'a') as ftarget:
ftarget.writelines('\nChecksums\n\n\nMD5\n~~~\n\n')
ftarget.writelines([('%s\n' % c) for c in compute_md5(idirs)])
ftarget.writelines('\nSHA256\n~~~~~~\n\n')
ftarget.writelines([('%s\n' % c) for c in compute_sha256(idirs)])
print('Release README generated successfully') |
def retrieve_tigge_data():
date1 = [(str(i) + '-01-01') for i in range(2016, 2017)]
date2 = [(str(i) + '-12-31') for i in range(2016, 2017)]
dates = date1
for j in range(len(dates)):
dates[j] = ((date1[j] + '/to/') + date2[j])
data_dir = '/media/sebastian/Elements/Postproc_NN/data/forecasts/auxiliary/'
for date in dates:
target = (((data_dir + 'ecmwf_aux_pl850_') + date[:4]) + '.grib')
tigge_request(date, target) |
def register_Ns3Icmpv4DestinationUnreachable_methods(root_module, cls):
cls.add_constructor([param('ns3::Icmpv4DestinationUnreachable const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetData', 'void', [param('uint8_t *', 'payload')], is_const=True)
cls.add_method('GetHeader', 'ns3::Ipv4Header', [], is_const=True)
cls.add_method('GetNextHopMtu', 'uint16_t', [], is_const=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetData', 'void', [param('ns3::Ptr< ns3::Packet const >', 'data')])
cls.add_method('SetHeader', 'void', [param('ns3::Ipv4Header', 'header')])
cls.add_method('SetNextHopMtu', 'void', [param('uint16_t', 'mtu')])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], visibility='private', is_virtual=True)
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, visibility='private', is_virtual=True)
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, visibility='private', is_virtual=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='private', is_virtual=True)
cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, visibility='private', is_virtual=True)
return |
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x) |
def test_unconstrained0():
def fg(x):
f = (x ** 2)
g = (2 * x)
return (f, g)
res = minimize(fg, 100.0, np=np)
print(res)
assert_allclose(res.x, [0], atol=0.0001) |
def get_p_and_g_mean_norm2(it):
size = 1e-08
su_p = 0
su_g = 0
for x in it:
if (x.grad is None):
continue
size += 1.0
su_p += x.norm()
su_g += x.grad.norm()
return ((su_p / size), (su_g / size)) |
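Typical call on a model's parameters after backward(); the two returns are the mean parameter norm and the mean gradient norm (0-dim tensors):
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
model(torch.randn(8, 4)).sum().backward()
p_norm, g_norm = get_p_and_g_mean_norm2(model.parameters())
print(float(p_norm), float(g_norm)) |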
def _test_torch_onnx_inference_seq_lens_in_out(out_onnx_model: str):
print(out_onnx_model)
import onnxruntime as ort
torch.manual_seed(42)
dummy_data = torch.randn([3, 50, 9])
dummy_seq_lens = torch.tensor([27, 50, 43], dtype=torch.int32)
session = ort.InferenceSession(out_onnx_model)
outputs_onnx = session.run(None, {'data': dummy_data.numpy(), 'data:size1': dummy_seq_lens.numpy()})
out_result = torch.FloatTensor(outputs_onnx[0])
out_seq_lens = torch.IntTensor(outputs_onnx[1])
print('*** Result:', out_result)
print('*** Sequence lengths:', out_seq_lens)
assert (out_result.shape == torch.Size([3, 50, 2])) |
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action, phi=0.05):
super(Actor, self).__init__()
self.l1 = nn.Linear((state_dim + action_dim), 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
self.phi = phi
def forward(self, state, action):
a = F.relu(self.l1(torch.cat([state, action], 1)))
a = F.relu(self.l2(a))
a = ((self.phi * self.max_action) * torch.tanh(self.l3(a)))
return (a + action).clamp((- self.max_action), self.max_action) |
def test_metricscallback_init():
def dummy_metric(model) -> float:
return 0.0
callback = MetricsCallback(dummy_metric)
assert (callback.metric_fns == {'dummy_metric': dummy_metric})
metrics = [dummy_metric]
callback = MetricsCallback(metrics)
assert (callback.metric_fns == {'dummy_metric': dummy_metric})
metrics = {'dummy_metric': dummy_metric, 'dummy_metric2': dummy_metric}
callback = MetricsCallback(metrics)
assert (len(callback.metric_fns) == 2)
with pytest.raises(TypeError):
MetricsCallback(0)
with pytest.raises(TypeError):
MetricsCallback([0]) |
class _JumpF():
def __init__(self):
self.nfe = 0
def __call__(self, t, x):
self.nfe += 1
if (t < 0.5):
return ((- 0.5) * x)
else:
return (x ** 2) |
class RandomSplitter(Splitter):
_init_arg_names = ['test_size', 'drop_cold_users', 'drop_cold_items', 'seed', 'query_column', 'item_column']
def __init__(self, test_size: float, drop_cold_items: bool=False, drop_cold_users: bool=False, seed: Optional[int]=None, query_column: str='query_id', item_column: str='item_id'):
super().__init__(drop_cold_items=drop_cold_items, drop_cold_users=drop_cold_users, query_column=query_column, item_column=item_column)
self.seed = seed
if ((test_size < 0) or (test_size > 1)):
raise ValueError('test_size must be between 0 and 1')
self.test_size = test_size
def _random_split_spark(self, interactions: SparkDataFrame, threshold: float) -> Tuple[(SparkDataFrame, SparkDataFrame)]:
(train, test) = interactions.randomSplit([(1 - threshold), threshold], self.seed)
if self.session_id_column:
test = test.withColumn('is_test', sf.lit(True))
interactions = interactions.join(test, on=interactions.schema.names, how='left').na.fill({'is_test': False})
interactions = self._recalculate_with_session_id_column(interactions)
train = interactions.filter((~ sf.col('is_test'))).drop('is_test')
test = interactions.filter(sf.col('is_test')).drop('is_test')
return (train, test)
def _random_split_pandas(self, interactions: PandasDataFrame, threshold: float) -> Tuple[(PandasDataFrame, PandasDataFrame)]:
train = interactions.sample(frac=(1 - threshold), random_state=self.seed)
test = interactions.drop(train.index)
if self.session_id_column:
interactions['is_test'] = False
interactions.loc[(test.index, 'is_test')] = True
interactions = self._recalculate_with_session_id_column(interactions)
train = interactions[(~ interactions['is_test'])].drop(columns=['is_test'])
test = interactions[interactions['is_test']].drop(columns=['is_test'])
interactions = interactions.drop(columns=['is_test'])
return (train, test)
def _core_split(self, interactions: DataFrameLike) -> SplitterReturnType:
split_method = self._random_split_spark
if isinstance(interactions, PandasDataFrame):
split_method = self._random_split_pandas
return split_method(interactions, self.test_size) |
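Pandas usage of the splitter; the split entry point is assumed to come from the Splitter base class, and session handling stays off while session_id_column is unset:
import pandas as pd

log = pd.DataFrame({'query_id': [1, 1, 2, 2, 3], 'item_id': [10, 11, 10, 12, 13]})
splitter = RandomSplitter(test_size=0.4, seed=42)
train, test = splitter.split(log)  # split() assumed from the Splitter base class |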
class TFBertForTokenClassification():
def __init__(self, *args, **kwargs):
requires_tf(self)
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
class CosineLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(default=0, metadata={'help': 'warmup the learning rate linearly for the first N updates'})
warmup_init_lr: float = field(default=(- 1), metadata={'help': 'initial learning rate during warmup phase; default is cfg.lr'})
lr: List[float] = field(default=II('optimization.lr'), metadata={'help': 'max learning rate, must be more than cfg.min_lr'})
min_lr: float = field(default=0.0, metadata={'help': 'min learning rate'})
t_mult: float = field(default=1.0, metadata={'help': 'factor to grow the length of each period'})
lr_period_updates: float = field(default=(- 1), metadata={'help': 'initial number of updates per period'})
lr_shrink: float = field(default=0.1, metadata={'help': 'shrink factor for annealing'})
max_update: int = II('optimization.max_update') |
def check_files(check_str, output_folder, recipe_id, pattern='file_exists=\\[(.*?)\\]'):
check = True
files_to_check = re.search(pattern, check_str)
files_to_check = files_to_check.group(1).split(',')
for file_to_check in files_to_check:
check_path = os.path.join(output_folder, file_to_check)
if (not os.path.exists(check_path)):
print(('\tERROR: The recipe %s does not contain the expected file %s' % (recipe_id, check_path)))
check = False
return check |
def broadcast_types(src, dst):
n = abs((src.ndim - dst.ndim))
if (src.ndim < dst.ndim):
return (insert_newaxes(src, n), dst)
else:
return (src, insert_newaxes(dst, n)) |
class SpeedtestBenchmark(Benchmark):
def __init__(self, server_id=13658):
self.server_id = server_id
super().__init__(name='SpeedTest')
def run(self):
logger.debug('Launching Speedtest CLI')
docker_client = docker.from_env()
terminal_workstation = docker_client.containers.get(os.getenv('WS_ST_CONTAINER_NAME'))
speedtest_results = terminal_workstation.exec_run(('python3 /tmp/speedtest.py --json --server ' + str(self.server_id)))
json_string = speedtest_results.output.decode('unicode_escape').rstrip('\n')
json_data = json.loads(json_string)
logger.success('Speedtest Complete: {}/{} bps'.format(json_data['upload'], json_data['download']))
return {'sent_bytes': json_data['bytes_sent'], 'received_bytes': json_data['bytes_received'], 'sent_bps': json_data['upload'], 'received_bps': json_data['download']} |
class KaldiDecoderConfig(FairseqDataclass):
hlg_graph_path: Optional[str] = None
output_dict: str = MISSING
kaldi_initializer_config: Optional[KaldiInitializerConfig] = None
acoustic_scale: float = 0.5
max_active: int = 10000
beam_delta: float = 0.5
hash_ratio: float = 2.0
is_lattice: bool = False
lattice_beam: float = 10.0
prune_interval: int = 25
determinize_lattice: bool = True
prune_scale: float = 0.1
max_mem: int = 0
phone_determinize: bool = True
word_determinize: bool = True
minimize: bool = True
num_threads: int = 1 |