code stringlengths 101 5.91M |
|---|
def has_nonnegative_entries(input_matrix: Union[sparse.csr_matrix, np.ndarray]) -> bool:
    """Return True if every (stored) entry of ``input_matrix`` is >= 0.

    Parameters
    ----------
    input_matrix : scipy.sparse.csr_matrix or numpy.ndarray
        Matrix to check.

    Returns
    -------
    bool
        True when all entries are non-negative.
    """
    # isinstance (not ``type(...) ==``) also accepts csr subclasses.
    if isinstance(input_matrix, sparse.csr_matrix):
        # Only explicitly stored values need checking; implicit zeros are >= 0.
        return bool(np.all(input_matrix.data >= 0))
    # Wrap in bool() so the annotated return type is honored (np.all
    # returns a numpy bool scalar).
    return bool(np.all(input_matrix >= 0))
def generate_stack(node_name, in_name, out_name, axis, base_name, func_counter):
    """Build an NNabla ``Stack`` function protobuf message.

    Stacks the inputs listed in ``in_name`` along ``axis`` into the single
    output ``out_name``.  ``base_name``/``func_counter`` feed the shared
    name-generation helper so generated function names stay unique.
    """
    sp = nnabla_pb2.Function()
    sp.type = 'Stack'
    # Derives a unique function name from node/base name and the counter.
    set_function_name(sp, node_name, base_name, func_counter)
    sp.input.extend(in_name)
    sp.output.extend([out_name])
    spp = sp.stack_param
    spp.axis = axis
    return sp
class FixedProblemSet(Dataset):
    """Dataset over a fixed list of (problem class, solver args) pairs.

    Each item is solved on access and the x/y texts are run through the
    vocabulary before being returned alongside the label.
    """

    def __init__(self, probs: list[tuple[type[Problem], tuple]], paradigm, vocab):
        self.probs = probs
        self.paradigm = paradigm
        self.vocab = vocab

    def __getitem__(self, item):
        prob_cls, solver_args = self.probs[item]
        x, y, label = prob_cls.solve(solver_args, paradigm=self.paradigm)
        return self.vocab(x), self.vocab(y), label

    def __len__(self):
        return len(self.probs)
def main():
    """Scanner demo: blur a sample video and export it three different ways
    (default table, compressed mp4 at quality 35, lossless)."""
    sc = sp.Client()

    def make_blurred_frame(streams):
        # Pipeline: read frames -> 3x3 Gaussian blur (sigma 0.5) -> keep
        # only frames in the range [0, 30).
        frames = sc.io.Input(streams)
        blurred_frames = sc.ops.Blur(frame=frames, kernel_size=3, sigma=0.5)
        sampled_frames = sc.streams.Range(blurred_frames, [(0, 30)])
        return (frames, sampled_frames)

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)
    # Run 1: write blurred frames to a named table, then discard it.
    (frame, blurred_frame) = make_blurred_frame([video_stream])
    stream = sp.NamedVideoStream(sc, 'output_table_name')
    output = sc.io.Output(blurred_frame, [stream])
    sc.run(output, sp.PerfParams.estimate())
    stream.delete(sc)
    # Run 2: same pipeline, stored re-encoded at low quality (CRF 35).
    (frame, blurred_frame) = make_blurred_frame([video_stream])
    low_quality_frame = blurred_frame.compress_video(quality=35)
    low_quality_stream = sp.NamedVideoStream(sc, 'low_quality_video')
    output = sc.io.Output(low_quality_frame, [low_quality_stream])
    sc.run(output, sp.PerfParams.estimate())
    # Run 3: same pipeline, stored losslessly.
    (frame, blurred_frame) = make_blurred_frame([video_stream])
    lossless_frame = blurred_frame.lossless()
    lossless_stream = sp.NamedVideoStream(sc, 'lossless_video')
    output = sc.io.Output(lossless_frame, [lossless_stream])
    sc.run(output, sp.PerfParams.estimate())
    # Export the low-quality run to an mp4 file, then clean up both tables.
    low_quality_stream.save_mp4('low_quality_video')
    low_quality_stream.delete(sc)
    lossless_stream.delete(sc)
class MetricLogger(object):
    """Tracks a set of named meters and pretty-prints them during training.

    NOTE(review): relies on the project's ``SmoothedValue`` (windowed
    average with optional cross-process sync) -- confirm its API.
    """

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update of each key.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar per keyword argument into the matching meter."""
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails: expose meters
        # (and, defensively, instance attrs) by name.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        """Render every meter as 'name: value', joined by the delimiter."""
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def global_avg(self):
        """Like __str__, but shows each meter's global average."""
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {:.4f}'.format(name, meter.global_avg))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Synchronize every meter across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from ``iterable``, printing progress every
        ``print_freq`` steps plus a final summary.

        Reports per-iteration data-loading/total times, ETA, all meters and
        (on CUDA) peak memory.  NOTE(review): ``len(iterable)`` must be
        defined and non-zero -- the final summary divides by it.
        """
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Width of the iteration counter, padded to len(iterable)'s digits.
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # Time spent fetching obj from the iterable (data loading).
            data_time.update((time.time() - end))
            (yield obj)
            # Full iteration time: data fetch plus the caller's work
            # between yields.
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
class ComparisonModule(nn.Module):
    """Compare two encodings by projecting their difference through an MLP.

    The projection maps dim_v -> 128 -> dim_v with a ReLU in between, so the
    output lives in the same space as the inputs.
    """

    def __init__(self, dim_v):
        super().__init__()
        hidden = 128
        self.projection = nn.Sequential(
            nn.Linear(dim_v, hidden),
            nn.ReLU(),
            nn.Linear(hidden, dim_v),
        )

    def forward(self, enc1, enc2):
        # Element-wise difference carries the comparison signal.
        diff = enc1 - enc2
        return self.projection(diff)
class NILMDataloader():
    """Builds train/validation DataLoaders for NILM experiments.

    With ``pretrain=True`` the parser supplies masked pretraining datasets
    (using ``args.mask_prob``); otherwise the regular training datasets.
    """

    def __init__(self, args, ds_parser, pretrain=False):
        self.args = args
        self.mask_prob = args.mask_prob
        self.batch_size = args.batch_size
        if pretrain:
            datasets = ds_parser.get_pretrain_datasets(mask_prob=self.mask_prob)
        else:
            datasets = ds_parser.get_train_datasets()
        self.train_dataset, self.val_dataset = datasets

    def get_dataloaders(self):
        """Return the (train, validation) loader pair."""
        return (self._get_loader(self.train_dataset),
                self._get_loader(self.val_dataset))

    def _get_loader(self, dataset):
        # shuffle=False keeps the time-series ordering intact.
        return data_utils.DataLoader(dataset, batch_size=self.batch_size,
                                     shuffle=False, pin_memory=True)
.usefixtures('spark', 'schema')
()
def dataframe_two_columns_no_cut(spark, schema):
    """Pytest fixture data: growing item/date histories without cutting.

    NOTE(review): the decorator lines above were mangled during extraction
    (likely ``@pytest.fixture`` + ``@pytest.mark.usefixtures``) -- confirm
    against the original test module.
    Each row is (user_id, item-id sequence padded with 0, date sequence
    padded with -1).
    """
    data_two_columns_no_cut = [(1, [2, 0, 0, 0, 0], [19842, (- 1), (- 1), (- 1), (- 1)]), (1, [2, 4, 0, 0, 0], [19842, 19844, (- 1), (- 1), (- 1)]), (1, [2, 4, 3, 0, 0], [19842, 19844, 19843, (- 1), (- 1)]), (1, [2, 4, 3, 5, 0], [19842, 19844, 19843, 19845, (- 1)]), (1, [2, 4, 3, 5, 6], [19842, 19844, 19843, 19845, 19846]), (1, [2, 4, 3, 5, 6, 7], [19842, 19844, 19843, 19845, 19846, 19847]), (2, [1, 0, 0, 0, 0], [19841, (- 1), (- 1), (- 1), (- 1)]), (2, [1, 2, 0, 0, 0], [19841, 19842, (- 1), (- 1), (- 1)]), (2, [1, 2, 3, 0, 0], [19841, 19842, 19843, (- 1), (- 1)]), (2, [1, 2, 3, 4, 0], [19841, 19842, 19843, 19844, (- 1)]), (3, [10, 0, 0, 0, 0], [19844, (- 1), (- 1), (- 1), (- 1)]), (4, [10, 11, 0, 0, 0], [19844, 19843, (- 1), (- 1), (- 1)]), (4, [10, 11, 12, 0, 0], [19844, 19843, 19845, (- 1), (- 1)]), (10, [1, 0, 0, 0, 0], [19841, (- 1), (- 1), (- 1), (- 1)])]
    return spark.createDataFrame(data_two_columns_no_cut, schema=schema)
def variable_recurrent_factory(inner, reverse=False):
    """Wrap ``inner`` in a variable-length recurrent runner.

    Returns a VariableRecurrentReverse when ``reverse`` is True, otherwise a
    forward VariableRecurrent.
    """
    wrapper_cls = VariableRecurrentReverse if reverse else VariableRecurrent
    return wrapper_cls(inner)
class ConvCrossAttentionBlock(nn.Module):
    """Transformer block combining a conv stem, self-attention, cross-attention
    and an MLP, with optional spatial rescaling of ``x``.

    ``resolution`` controls the rescale: 1.0 = identity, < 1.0 = average-pool
    downsample by int(1/resolution), > 1.0 = bilinear upsample.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, resolution=1.0):
        super().__init__()
        self.norm0 = norm_layer(dim)
        self.conv0 = nn.Conv2d(dim, dim, 3, 1, 1)
        self.selfattn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path0 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.act0 = act_layer()
        self.norm1 = norm_layer(dim)
        self.attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.interpolate_scale = resolution
        self.resolution = resolution
        # Fix: removed the dead, misspelled ``interploate_layer = None``
        # attribute; the correctly spelled layer is always assigned below.
        if resolution == 1.0:
            self.interpolate_layer = nn.Identity()
        elif resolution < 1.0:
            # Integer-factor downsampling via average pooling.
            self.interpolate_layer = nn.AvgPool2d(kernel_size=int(1 / resolution), stride=int(1 / resolution))
        else:
            self.interpolate_layer = nn.Upsample(scale_factor=resolution, mode='bilinear', align_corners=False)

    def forward(self, x, y):
        """x: (N, H*W, C) token sequence (square grid assumed);
        y: context sequence for cross-attention."""
        if self.resolution != 1:
            # Conv residual + rescale only when the resolution changes.
            x = self.seq_to_2d(x)
            x = x + self.act0(self.conv0(x))
            x = self.interpolate_layer(x)
            x = self._2d_to_seq(x)
        x = x + self.drop_path0(self.selfattn(self.norm0(x)))
        x = x + self.drop_path1(self.attn(self.norm1(x), y))
        x = x + self.drop_path2(self.mlp(self.norm2(x)))
        return x

    def seq_to_2d(self, x):
        """(N, H*W, C) -> (N, C, H, W); assumes H == W == sqrt(H*W)."""
        n, hw, c = x.shape
        h = w = int(math.sqrt(hw))
        return x.transpose(1, 2).reshape(n, c, h, w)

    def _2d_to_seq(self, x):
        """(N, C, H, W) -> (N, H*W, C)."""
        n, c, h, w = x.shape
        return x.reshape(n, c, h * w).transpose(1, 2)
class TestCpow(object):
    """Tests for complex power (**) against Python's built-in complex pow."""

    def setup(self):
        # Silence invalid-value FP warnings triggered by inf/nan powers.
        self.olderr = np.seterr(invalid='ignore')

    def teardown(self):
        np.seterr(**self.olderr)

    def test_simple(self):
        x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan])
        expected = x ** 2
        actual = np.power(x, 2)
        # Element-wise comparison so a failure pinpoints the bad entry.
        for got, want in zip(actual, expected):
            assert_almost_equal(got, want)

    def test_scalar(self):
        x = np.array([1, 1j, 2, 2.5 + 0.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3])
        # Reference values from Python's complex pow.
        p_r = [complex(a) ** complex(b) for a, b in zip(x, y)]
        # Substitute the C99-allowed result for the inf case.
        p_r[4] = complex(np.inf, np.nan)
        n_r = [a ** b for a, b in zip(x, y)]
        for i, (got, want) in enumerate(zip(n_r, p_r)):
            assert_almost_equal(got, want, err_msg=('Loop %d\n' % i))

    def test_array(self):
        x = np.array([1, 1j, 2, 2.5 + 0.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3])
        p_r = [complex(a) ** complex(b) for a, b in zip(x, y)]
        # Substitute the C99-allowed result for the inf case.
        p_r[4] = complex(np.inf, np.nan)
        n_r = x ** y
        for i, (got, want) in enumerate(zip(n_r, p_r)):
            assert_almost_equal(got, want, err_msg=('Loop %d\n' % i))
def sub(g, self, other, alpha):
    """ONNX symbolic for subtraction; only alpha == 1 is exportable."""
    if _scalar(alpha) == 1:
        return g.op('Sub', self, _if_scalar_type_as(other, self), **_broadcast_if_scalar(other))
    # aten::sub's alpha-scaled form has no direct ONNX counterpart.
    return _unimplemented('sub', 'alpha != 1')
def get_results(deployment, experiment, output_dir):
    """Dispatch result collection by deployment backend.

    Returns None for deployments other than 'aws' (the only supported
    backend in this dispatcher).
    """
    if deployment.name != 'aws':
        return None
    return get_results_aws(deployment, experiment, output_dir)
class TilingStrategy():
    """Abstract base for spatial tiling/windowing strategies.

    Subclasses decide how many windows each image yields and where each
    window is placed.
    """

    def __init__(self, window_size: Tuple[int, int] = None, image_shape: Tuple[int, int] = None, **kwargs):
        # Fall back to the full image when no explicit window is given.
        self.window_size = window_size if window_size else image_shape
        self.image_width, self.image_height = image_shape
        self.number_of_spatial_sample_per_image = self.get_number_of_spatial_sample_per_image()

    def get_number_of_spatial_sample_per_image(self):
        """Number of windows produced per image; subclass responsibility."""
        raise NotImplementedError

    def get_window(self, idx):
        """Window geometry for sample ``idx``; subclass responsibility."""
        raise NotImplementedError
_function_dispatch(_irr_dispatcher)
def irr(values):
    """Internal rate of return for a series of periodic cash flows.

    Finds real, positive roots of the NPV polynomial in x = 1/(1 + rate)
    and returns the rate closest to zero, or nan when none exists.
    """
    # np.roots expects highest-degree coefficient first, hence the reversal.
    candidates = np.roots(values[::(- 1)])
    real_positive = (candidates.imag == 0) & (candidates.real > 0)
    if not real_positive.any():
        return np.nan
    rates = (1 / candidates[real_positive].real) - 1
    # Pick the rate of smallest magnitude (the conventional IRR choice).
    return rates.item(np.argmin(np.abs(rates)))
def test_convert():
    """habitat -> mp3d -> habitat coordinate conversion must round-trip."""
    pt = np.array([3.76632, 0.072447, 0.30173])
    round_tripped = mp3d_to_habitat(habitat_to_mp3d(pt))
    assert np.allclose(pt, round_tripped)
def register_bdd_panoptic(name, metadata, image_root, panoptic_root, panoptic_json):
    """Register a BDD panoptic dataset in Detectron2's catalogs.

    Registers a lazy loader under ``name`` and attaches the paths plus
    evaluation metadata to the dataset's MetadataCatalog entry.
    """
    def _loader():
        return load_bdd_panoptic_json(panoptic_json, image_root, panoptic_root, metadata)

    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        evaluator_type='bdd_panoptic_pano',
        ignore_label=0,
        label_divisor=1000,
        **metadata,
    )
class OutputSceneJmol(OutputBase):
    """Jmol scene output: a zipped .spt scene plus a PNG preview image."""

    def __init__(self, scene_zip, preview_png):
        self.scene_zip = OutputBuffer(scene_zip)
        self.preview_png = OutputBuffer(preview_png)

    def launch_script_filename(self):
        """Write the scene and a launcher script to a temp dir.

        Returns the path of the launcher script, which points Jmol's
        default directory at the saved scene zip.
        """
        from sage.misc.temporary_file import tmp_dir
        basedir = tmp_dir()
        scene_filename = os.path.join(basedir, 'scene.spt.zip')
        script_filename = os.path.join(basedir, 'scene.spt')
        self.scene_zip.save_as(scene_filename)
        with open(script_filename, 'w') as f:
            f.write('set defaultdirectory "{0}"\n'.format(scene_filename))
            f.write('script SCRIPT\n')
        return script_filename

    @classmethod
    def example(cls):
        """Construct a sample instance from packaged example resources.

        Fix: the method takes ``cls`` but was missing the @classmethod
        decorator, so ``OutputSceneJmol.example()`` could not work.
        """
        example_png = importlib.resources.read_binary(__package__, 'example.png')
        scene_zip = importlib.resources.read_binary(__package__, 'example_jmol.spt.zip')
        return cls(scene_zip, example_png)
class _conv_dw(nn.Module):
def __init__(self, inp, oup, stride):
super(_conv_dw, self).__init__()
self.conv = nn.Sequential(nn.Sequential(Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), nn.BatchNorm2d(inp), nn.ReLU6(inplace=True)), nn.Sequential(Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.ReLU6(inplace=True)))
self.depth = oup
def forward(self, x):
return self.conv(x) |
def save_model_state(model, optimizer, trn_param, filename):
    """Persist a training checkpoint (params + model/optimizer state) to
    ``filename`` via torch.save."""
    checkpoint = {
        'trn_param': trn_param,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)
def handle_encoding_declaration(contents, out):
    """Move a PEP 263 coding declaration from ``contents`` to ``out``.

    Scans the first two lines for a coding declaration.  If found, it is
    written to ``out`` and the remaining source (declaration removed) is
    returned.  Otherwise a UTF-8 declaration is written and ``contents``
    is returned unchanged.
    """
    coding_re = re.compile('coding[:=]\\s*([-\\w.]+)')
    lines = contents.splitlines()
    # PEP 263 only allows the declaration on line 1 or 2.
    for num, line in enumerate(lines[:2]):
        if coding_re.search(line):
            out.write(line + '\n')
            remaining = lines[:num] + lines[num + 1:]
            return '\n'.join(remaining)
    out.write('# -*- coding: utf-8 -*-\n')
    return contents
def load_tf_basicConv2d(weights, layer):
    """Copy TF weights into a BasicConv2d-style layer.

    ``weights[0]`` holds the conv kernel; the remaining entries hold the
    batch-norm parameters.
    """
    load_tf_conv2d(weights[0], layer.conv)
    load_tf_batchNorm(weights[1:], layer.bn)
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
    """Recover the (url, etag) metadata stored next to a cached file.

    Looks up ``filename`` under ``cache_dir`` (defaulting to the global
    pretrained-BERT cache) and reads its ``<name>.json`` sidecar.

    Raises FileNotFoundError when the cached file or its metadata sidecar
    is missing.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError('file {} not found'.format(cache_path))
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError('file {} not found'.format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
def deconv(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, activation_fn=None, use_batchnorm=False, pre_activation=False, bias=True, weight_init_fn=None):
    """Build a ConvTranspose2d block with optional BatchNorm/activation.

    With ``pre_activation`` the BN/activation run before the deconv (on the
    input channels); otherwise after it (on the output channels).  The
    deconv weights are initialized by ``weight_init_fn`` (or a default
    chosen from ``activation_fn``).
    """
    if (not pre_activation) and use_batchnorm:
        # BatchNorm subsumes the conv bias; require it to be disabled.
        assert not bias

    def _norm_act(channels):
        # BN + activation stack for the given channel count.
        block = []
        if use_batchnorm:
            block.append(nn.BatchNorm2d(channels))
        if activation_fn is not None:
            block.append(activation_fn())
        return block

    layers = []
    if pre_activation:
        layers.extend(_norm_act(in_channels))
    deconv_layer = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=bias)
    init_fn = weight_init_fn if weight_init_fn is not None else get_weight_init_fn(activation_fn)
    init_fn(deconv_layer.weight)
    layers.append(deconv_layer)
    if not pre_activation:
        layers.extend(_norm_act(out_channels))
    return nn.Sequential(*layers)
class AlexNet(nn.Module):
    """AlexNet with the final classifier split out so callers can retrieve
    latent (pre-ReLU) and post-ReLU features, optionally through a
    FakeReLUM (straight-through ReLU) for gradient tricks."""

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        feature_layers = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
        )
        self.last_relu = nn.ReLU(inplace=True)
        self.last_relu_fake = FakeReLUM()
        self.last_layer = nn.Linear(4096, num_classes)

    def forward(self, x, with_latent=False, fake_relu=False, no_relu=False):
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = pooled.view(pooled.size(0), 256 * 6 * 6)
        x_latent = self.classifier(flat)
        # Optionally route through the straight-through ReLU variant.
        relu_fn = self.last_relu_fake if fake_relu else self.last_relu
        x_relu = relu_fn(x_latent)
        x_out = self.last_layer(x_relu)
        if with_latent:
            return (x_out, x_latent) if no_relu else (x_out, x_relu)
        return x_out
def perfect_uplift_curve(y_true: np.ndarray, treatment: np.ndarray) -> np.ndarray:
    """Score each sample for the theoretically perfect uplift ranking.

    For a binary target: treated responders get +2, control responders -1,
    control non-responders +1, treated non-responders 0 -- so sorting by
    the returned score yields the best achievable uplift curve.

    Raises NotImplementedError for continuous targets and RuntimeError for
    any other target type.
    """
    # Fix: call type_of_target once instead of twice (it scans the array).
    target_type = type_of_target(y_true)
    if target_type == 'binary':
        # Control samples: +1 for non-responders, -1 for responders.
        perfect_control_score = (treatment == 0).astype(int) * (2 * (y_true != 1).astype(int) - 1)
        # Treated samples: +2 for responders, 0 otherwise.
        perfect_treatment_score = (treatment == 1).astype(int) * 2 * (y_true == 1).astype(int)
        perfect_uplift = perfect_treatment_score + perfect_control_score
    elif target_type == 'continuous':
        raise NotImplementedError("Can't calculate perfect curve for continuous target")
    else:
        raise RuntimeError("Only 'binary' and 'continuous' targets are available")
    return perfect_uplift
def test_cartesian():
    """ak.cartesian must refuse mixed concrete/typetracer backends but
    accept (and propagate) a typetracer operand paired with another
    typetracer-compatible array.

    NOTE(review): ``left``, ``right`` and ``typetracer`` are module-level
    fixtures defined outside this chunk.
    """
    with pytest.raises(ValueError, match='cannot operate on arrays with incompatible backends'):
        ak.cartesian((left, right), axis=0)
    result = ak.cartesian((left, typetracer), axis=0)
    assert (ak.backend(result) == 'typetracer')
def load_rcv1():
    """Load the RCV1 multiclass train/test split from the data home dir."""
    data_home = get_data_home()
    train_file = os.path.join(data_home, 'rcv1_train.multiclass')
    test_file = os.path.join(data_home, 'rcv1_test.multiclass')
    # Delegate parsing/caching to the shared loader.
    return _load(train_file, test_file, 'rcv1')
def _assert_equal_on_sequences(actual, desired, err_msg=''):
assert_equal(len(actual), len(desired), err_msg)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], ('item=%r\n%s' % (k, err_msg)))
return |
def pad_tensor(x, max_len, mode='zero'):
    """Pad ``x`` along axis 0 up to ``max_len`` rows.

    mode='zero' pads with zeros shaped like one element; mode='last'
    repeats the final element.
    """
    if mode == 'last':
        filler = x[-1]
    else:
        filler = np.zeros_like(x[0])
    # Repeat the filler (max_len - len(x)) times along axis 0 only.
    reps = (max_len - len(x),) + (1,) * np.ndim(x[0])
    return np.concatenate([x, np.tile(filler, reps)])
def base_transform(image, size, mean):
    """Resize ``image`` to (size, size), convert to float32 and subtract
    the per-channel ``mean``.

    NOTE(review): cv2.resize takes (width, height); square output so the
    order is irrelevant here.
    """
    x = cv2.resize(image, (size, size)).astype(np.float32)
    # In-place subtraction keeps the float32 dtype; the original re-cast
    # to float32 afterwards, which was a redundant extra copy.
    x -= mean
    return x
('Eltwise')
def TranslateElementWise(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe Eltwise layer to a Caffe2 Sum op.

    Only the plain SUM operation (operation == 1) without coefficients is
    supported.  NOTE(review): the ``('Eltwise')`` line above is a mangled
    registration decorator from the extraction -- confirm upstream.
    """
    param = layer.eltwise_param
    # Coefficients or non-SUM operations have no direct Sum equivalent.
    if (len(param.coeff) or (param.operation != 1)):
        raise RuntimeError('This eltwise layer is not yet supported.')
    caffe_op = BaseTranslate(layer, 'Sum')
    # No pretrained parameters accompany an element-wise sum.
    return (caffe_op, [])
def test_dbscan_metric_params():
    """DBSCAN: passing p via metric_params must match passing p directly,
    minkowski(p=1) must match manhattan, and a conflicting ``p`` argument
    must warn and be ignored in favor of metric_params.

    NOTE(review): ``X`` is a module-level fixture defined outside this chunk.
    """
    eps = 0.8
    min_samples = 10
    p = 1
    # No warning expected when p is supplied only through metric_params.
    with warnings.catch_warnings(record=True) as warns:
        db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps, p=None, min_samples=min_samples, algorithm='ball_tree').fit(X)
    assert (not warns), warns[0].message
    (core_sample_1, labels_1) = (db.core_sample_indices_, db.labels_)
    # Same clustering when p is passed as a constructor argument instead.
    db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples, algorithm='ball_tree', p=p).fit(X)
    (core_sample_2, labels_2) = (db.core_sample_indices_, db.labels_)
    assert_array_equal(core_sample_1, core_sample_2)
    assert_array_equal(labels_1, labels_2)
    # Minkowski with p=1 is the manhattan metric.
    db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples, algorithm='ball_tree').fit(X)
    (core_sample_3, labels_3) = (db.core_sample_indices_, db.labels_)
    assert_array_equal(core_sample_1, core_sample_3)
    assert_array_equal(labels_1, labels_3)
    # Conflicting p in both places: metric_params wins, with a SyntaxWarning.
    with pytest.warns(SyntaxWarning, match='Parameter p is found in metric_params. The corresponding parameter from __init__ is ignored.'):
        db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps, p=(p + 1), min_samples=min_samples, algorithm='ball_tree').fit(X)
    (core_sample_4, labels_4) = (db.core_sample_indices_, db.labels_)
    assert_array_equal(core_sample_1, core_sample_4)
    assert_array_equal(labels_1, labels_4)
def fit_encoder_only(surrogate, optimizer, mll, train_loader, num_epochs):
    """Train only the encoder of ``surrogate`` for ``num_epochs`` epochs.

    All other parameters are frozen.  Returns the average training loss of
    the final epoch as a float.

    Fix: the original initialized ``avg_loss`` inside the epoch loop and
    unconditionally called ``.item()``, crashing with UnboundLocalError
    when ``num_epochs == 0`` and AttributeError when the loader is empty.
    """
    assert hasattr(surrogate, 'encoder')
    # Freeze everything except the encoder.
    surrogate.requires_grad_(False)
    surrogate.encoder.requires_grad_(True)
    avg_loss = 0.0
    for epoch_idx in range(num_epochs):
        surrogate.train()
        avg_loss = 0.0  # reset so the last epoch's average is returned
        for (inputs, targets) in train_loader:
            loss = gp_train_step(surrogate, optimizer, inputs, targets, mll)
            avg_loss += (loss.detach() / len(train_loader))
    # avg_loss is a tensor after a non-empty epoch, otherwise a float.
    return avg_loss.item() if torch.is_tensor(avg_loss) else float(avg_loss)
def save_model(model, optimizer, save_variable_list, args):
    """Write the run configuration (config.json) and a training checkpoint
    into ``args.save_path``.

    The checkpoint merges ``save_variable_list`` with the model and
    optimizer state dicts.
    """
    config_path = os.path.join(args.save_path, 'config.json')
    with open(config_path, 'w') as fjson:
        json.dump(vars(args), fjson)
    checkpoint = dict(save_variable_list)
    checkpoint['model_state_dict'] = model.state_dict()
    checkpoint['optimizer_state_dict'] = optimizer.state_dict()
    torch.save(checkpoint, os.path.join(args.save_path, 'checkpoint'))
()
('workspace-one', default='-')
('workspace-two', default='-')
('-j', '--join', default='none', type=click.Choice(Workspace.valid_joins), help='The join operation to apply when combining the two workspaces.')
('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
('--merge-channels/--no-merge-channels', help='Whether or not to deeply merge channels. Can only be done with left/right outer joins.', default=False)
def combine(workspace_one, workspace_two, join, output_file, merge_channels):
    """CLI command: combine two workspace JSON specs into one.

    Reads both specs (``-`` means stdin via click.open_file), combines them
    with the chosen join strategy, then prints the result or writes it to
    ``output_file``.  NOTE(review): the click option decorators above were
    mangled during extraction; the parameters mirror them.
    """
    with click.open_file(workspace_one, 'r', encoding='utf-8') as specstream:
        spec_one = json.load(specstream)
    with click.open_file(workspace_two, 'r', encoding='utf-8') as specstream:
        spec_two = json.load(specstream)
    ws_one = Workspace(spec_one)
    ws_two = Workspace(spec_two)
    combined_ws = Workspace.combine(ws_one, ws_two, join=join, merge_channels=merge_channels)
    if (output_file is None):
        # No output file: pretty-print the combined spec to stdout.
        click.echo(json.dumps(combined_ws, indent=4, sort_keys=True))
    else:
        with open(output_file, 'w+', encoding='utf-8') as out_file:
            json.dump(combined_ws, out_file, indent=4, sort_keys=True)
        log.debug(f'Written to {output_file:s}')
class TCN_GCN_unit(nn.Module):
    """Spatial GCN followed by a temporal TCN, with a residual connection.

    The residual path is: zero when disabled, identity when the shapes
    match, otherwise a 1x1 temporal conv to adapt channels/stride.
    """

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU(inplace=True)
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            # Shape mismatch: project the skip path with a 1x1 conv.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        out = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(out)
def test_inout_connector_validation_success():
    """A nested SDFG using the same connector 'C' for both input and output
    (read-modify-write through a tasklet) must pass validation."""
    sdfg = dace.SDFG('test_inout_connector_validation_success')
    sdfg.add_array('A', [1], dace.int32)
    sdfg.add_array('B', [1], dace.int32)
    # Inner SDFG: C -> tasklet (+5) -> C.
    nsdfg = dace.SDFG('nested_sdfg')
    nsdfg.add_array('C', [1], dace.int32)
    nstate = nsdfg.add_state()
    read_c = nstate.add_access('C')
    write_c = nstate.add_access('C')
    tasklet = nstate.add_tasklet('tasklet', {'__inp'}, {'__out'}, '__out = __inp + 5')
    nstate.add_edge(read_c, None, tasklet, '__inp', dace.Memlet.from_array('C', nsdfg.arrays['C']))
    nstate.add_edge(tasklet, '__out', write_c, None, dace.Memlet.from_array('C', nsdfg.arrays['C']))
    # Outer state: B feeds the nested SDFG through 'C' and receives it back.
    state = sdfg.add_state()
    read_b = state.add_access('B')
    write_b = state.add_access('B')
    tasklet = state.add_nested_sdfg(nsdfg, sdfg, {'C'}, {'C'})
    state.add_edge(read_b, None, tasklet, 'C', dace.Memlet.from_array('B', sdfg.arrays['B']))
    state.add_edge(tasklet, 'C', write_b, None, dace.Memlet.from_array('B', sdfg.arrays['B']))
    try:
        sdfg.validate()
    except dace.sdfg.InvalidSDFGError:
        assert False, 'SDFG should validate'
    return
class UnetSimpleCondMerge(nn.Module):
    """U-Net-style retouching network conditioned on a merged code from the
    input image and a reference image.

    A condition network ``netC`` embeds both images; the two codes are
    merged by ``merge_cond`` and injected at every decoder level through
    RetouchBlocks, while SpatialOffsetBlocks fuse skip connections.
    """

    def __init__(self, in_ch, out_ch, nf=3, cond_nf=64, norm_layer=nn.InstanceNorm2d):
        super(UnetSimpleCondMerge, self).__init__()
        self.downscale = 16
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.nf = nf
        self.cond_nf = cond_nf
        # Merges the stacked (x, ref) condition codes into a single code.
        self.merge_cond = nn.Sequential(nn.Conv1d(cond_nf, cond_nf, 2, 1, 0, bias=True), nn.LeakyReLU(0.1, inplace=True), nn.Conv1d(cond_nf, cond_nf, 1, 1, 0, bias=True), nn.LeakyReLU(0.1, inplace=True))
        if (self.nf != self.in_ch):
            self.conv_in = nn.Conv2d(in_ch, nf, 1, 1, 0, bias=False)
        # Encoder: 1x1 channel expansion followed by depthwise stride-2 down.
        self.down_conv_0 = nn.Conv2d(nf, (nf * 2), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_0 = nn.Conv2d((nf * 2), (nf * 2), 4, 2, 1, groups=(nf * 2), padding_mode='reflect', bias=False)
        self.down_conv_1 = nn.Conv2d((nf * 2), (nf * 4), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_1 = nn.Conv2d((nf * 4), (nf * 4), 4, 2, 1, groups=(nf * 4), padding_mode='reflect', bias=False)
        self.down_conv_2 = nn.Conv2d((nf * 4), (nf * 8), 1, 1, 0, padding_mode='reflect', bias=False)
        self.down_2 = nn.Conv2d((nf * 8), (nf * 8), 4, 2, 1, groups=(nf * 8), padding_mode='reflect', bias=False)
        if (self.downscale == 16):
            self.down_conv_3 = nn.Conv2d((nf * 8), (nf * 16), 1, 1, 0, padding_mode='reflect', bias=False)
            self.down_3 = nn.Conv2d((nf * 16), (nf * 16), 4, 2, 1, groups=(nf * 16), padding_mode='reflect', bias=False)
            self.up_3 = nn.Conv2d((nf * 16), (nf * 16), 3, 1, 1, padding_mode='reflect', groups=(nf * 16), bias=False)
            self.conv_up_3 = nn.Conv2d((nf * 16), (nf * 8), 1, 1, 0, padding_mode='reflect', bias=False)
            self.modulate_3 = SpatialOffsetBlock((nf * 8), (nf * 8), ks=3)
            self.retouch_3 = RetouchBlock((nf * 8), (nf * 8), base_nf=cond_nf, cond_nf=cond_nf)
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.up_2 = nn.Conv2d((nf * 8), (nf * 8), 3, 1, 1, padding_mode='reflect', groups=(nf * 8), bias=False)
        self.conv_up_2 = nn.Conv2d((nf * 8), (nf * 4), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_2 = SpatialOffsetBlock((nf * 4), (nf * 4), ks=3)
        self.retouch_2 = RetouchBlock((nf * 4), (nf * 4), base_nf=cond_nf, cond_nf=cond_nf)
        self.up_1 = nn.Conv2d((nf * 4), (nf * 4), 3, 1, 1, padding_mode='reflect', groups=(nf * 4), bias=False)
        self.conv_up_1 = nn.Conv2d((nf * 4), (nf * 2), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_1 = SpatialOffsetBlock((nf * 2), (nf * 2), ks=5)
        self.retouch_1 = RetouchBlock((nf * 2), (nf * 2), base_nf=cond_nf, cond_nf=cond_nf)
        self.up_0 = nn.Conv2d((nf * 2), (nf * 2), 3, 1, 1, padding_mode='reflect', groups=(nf * 2), bias=False)
        self.conv_up_0 = nn.Conv2d((nf * 2), (nf * 1), 1, 1, 0, padding_mode='reflect', bias=False)
        self.modulate_0 = SpatialOffsetBlock((nf * 1), (nf * 1), ks=5)
        self.retouch_0 = RetouchBlock((nf * 1), (nf * 1), base_nf=cond_nf, cond_nf=cond_nf)
        if (self.nf != self.out_ch):
            self.conv_out = nn.Conv2d(nf, out_ch, 1, 1, 0, bias=False)

    def forward(self, netC, x, ref):
        # Global condition codes: spatially averaged embeddings of x and ref.
        cond_x_code = torch.mean(netC(x), dim=[2, 3], keepdim=False)
        cond_ref_code = torch.mean(netC(ref), dim=[2, 3], keepdim=False)
        cond_stack = torch.stack([cond_x_code, cond_ref_code], dim=2)
        cond_retouch_code = self.merge_cond(cond_stack).squeeze(2)
        # NOTE(review): this pad_tensor takes a `divide` kwarg (pads so H/W
        # are divisible by downscale) -- a different helper than the numpy
        # pad_tensor elsewhere in this dump.
        (x, pad_left, pad_right, pad_top, pad_bottom) = pad_tensor(x, divide=self.downscale)
        if (self.nf != self.in_ch):
            x0 = self.conv_in(x)
        else:
            x0 = x
        x1 = self.down_0(self.down_conv_0(x0))
        x2 = self.down_1(self.down_conv_1(x1))
        x3 = self.down_2(self.down_conv_2(x2))
        if (self.downscale == 16):
            x4 = self.down_3(self.down_conv_3(x3))
            up_x3 = self.conv_up_3(self.up_3(self.up(x4)))
            up_x3 = self.modulate_3(up_x3, x3)
            up_x3 = self.retouch_3(up_x3, cond_retouch_code)
        else:
            up_x3 = x3
        up_x2 = self.conv_up_2(self.up_2(self.up(up_x3)))
        up_x2 = self.modulate_2(up_x2, x2)
        up_x2 = self.retouch_2(up_x2, cond_retouch_code)
        up_x1 = self.conv_up_1(self.up_1(self.up(up_x2)))
        up_x1 = self.modulate_1(up_x1, x1)
        up_x1 = self.retouch_1(up_x1, cond_retouch_code)
        up_x0 = self.conv_up_0(self.up_0(self.up(up_x1)))
        up_x0 = self.modulate_0(up_x0, x0)
        up_x0 = self.retouch_0(up_x0, cond_retouch_code)
        # Fix: gate the output projection on out_ch (conv_out is created
        # iff nf != out_ch).  The original tested ``nf != in_ch`` here,
        # which raises AttributeError or skips the projection whenever
        # in_ch != out_ch.
        if (self.nf != self.out_ch):
            out = self.conv_out(up_x0)
        else:
            out = up_x0
        out = pad_tensor_back(out, pad_left, pad_right, pad_top, pad_bottom)
        return out
def do_setup():
    """Versioneer installation: write the version file, patch __init__.py
    and register the VCS hooks.  Returns 0 on success, 1 on config error."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        # Missing/incomplete setup.cfg: append a sample config to help the
        # user, then report the error.
        if isinstance(e, (OSError, configparser.NoSectionError)):
            print('Adding sample versioneer config to setup.cfg', file=sys.stderr)
            with open(os.path.join(root, 'setup.cfg'), 'a') as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Generate the in-tree _version.py from the VCS-specific template.
    print((' creating %s' % cfg.versionfile_source))
    with open(cfg.versionfile_source, 'w') as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), '__init__.py')
    if os.path.exists(ipy):
        try:
            with open(ipy, 'r') as f:
                old = f.read()
        except OSError:
            old = ''
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        # Upgrade an old snippet, append a missing one, or leave as-is.
        if (OLD_SNIPPET in old):
            print((' replacing boilerplate in %s' % ipy))
            with open(ipy, 'w') as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif (snippet not in old):
            print((' appending to %s' % ipy))
            with open(ipy, 'a') as f:
                f.write(snippet)
        else:
            print((' %s unmodified' % ipy))
    else:
        print((" %s doesn't exist, ok" % ipy))
        ipy = None
    # Install VCS-side hooks (e.g. .gitattributes) for the version file.
    do_vcs_install(cfg.versionfile_source, ipy)
    return 0
class FreeGradedModule(CombinatorialFreeModule):
def __classcall__(cls, algebra, generator_degrees, category=None, names=None, prefix=None, **kwds):
if (algebra.base_ring() not in PrincipalIdealDomains()):
raise ValueError('the ground ring of the algebra must be a PID')
generator_degrees = tuple(generator_degrees)
category = GradedModules(algebra).WithBasis().FiniteDimensional().or_subcategory(category)
if (names is not None):
from sage.structure.category_object import normalize_names
names = normalize_names((- 1), names)
if (len(generator_degrees) > 1):
if (len(names) == 1):
if (prefix is None):
prefix = names[0]
names = None
if ((names is not None) and (len(names) != len(generator_degrees))):
raise ValueError('the names do not correspond to the generators')
if (prefix is None):
prefix = 'g'
return super().__classcall__(cls, algebra=algebra, generator_degrees=generator_degrees, category=category, names=names, prefix=prefix, **kwds)
def __init__(self, algebra, generator_degrees, category, names=None, **kwds):
keys = list(enumerate(generator_degrees))
self._generator_degrees = generator_degrees
keys = []
degs_so_far = {}
unique = True
for i in generator_degrees:
if (i in degs_so_far):
idx = (degs_so_far[i] + 1)
degs_so_far[i] += 1
unique = False
else:
idx = 0
degs_so_far[i] = 0
keys.append((i, idx))
if unique:
keys = [i[0] for i in keys]
kwds['iterate_key'] = True
CombinatorialFreeModule.__init__(self, algebra, basis_keys=keys, category=category, names=names, **kwds)
Element = FreeGradedModuleElement
def change_ring(self, algebra):
return type(self).__base__(algebra, self.generator_degrees(), prefix=self.prefix(), names=self._names)
def _repr_(self):
return ('Free graded left module on %s generator%s over %s' % (len(self._generator_degrees), ('' if (len(self._generator_degrees) == 1) else 's'), self.base_ring()))
def generator_degrees(self):
return self._generator_degrees
def is_trivial(self):
return (not self._generator_degrees)
def connectivity(self):
return min((self.generator_degrees() + (infinity,)))
def _element_constructor_(self, coefficients):
if isinstance(coefficients, self.element_class):
return coefficients
if (not coefficients):
return self.zero()
A = self.base_ring()
return self._from_dict({b: A(c) for (c, b) in zip(coefficients, self._indices) if c}, remove_zeros=False)
def an_element(self, n=None):
if (not self._generator_degrees):
return self.zero()
if (n is None):
n = (max(self.generator_degrees()) + 7)
coefficients = []
for g in self.generator_degrees():
basis = (self.base_ring().basis((n - g)) if (n >= g) else ())
l = len(basis)
if l:
coefficients.append(basis[(g % l)])
else:
coefficients.append(self.base_ring().zero())
return self(coefficients)
def basis_elements(self, n):
return tuple([self.term(self._indices[i], coeff) for i in range(len(self._generator_degrees)) for coeff in self._basis_coeffs(n, i)])
def _basis_coeffs(self, d, i):
return self._cached_basis_coeffs((d - self._generator_degrees[i]))
_method
def _cached_basis_coeffs(self, d):
return tuple(self.base_ring().basis(d))
_method
def element_from_coordinates(self, coordinates, n):
D = self.vector_presentation(n).dimension()
if (len(coordinates) != D):
raise ValueError(('the given coordinate vector has incorrect length (%d); it should have length %d' % (len(coordinates), D)))
ret = {}
A = self.base_ring()
j = 0
for (i, key) in enumerate(self._indices):
B = self._basis_coeffs(n, i)
coeff = A.linear_combination(((b, coordinates[(j + ind)]) for (ind, b) in enumerate(B)))
if coeff:
ret[key] = coeff
j += len(B)
if (not ret):
return self.zero()
return self.element_class(self, ret)
_method
def vector_presentation(self, n):
m = len(self._generator_degrees)
return FreeModule(self.base_ring().base_ring(), sum((len(self._basis_coeffs(n, i)) for i in range(m))))
__getitem__ = vector_presentation
    def generator(self, index):
        """Return the ``index``-th module generator; ValueError when out of range."""
        try:
            return self.gens()[index]
        except IndexError:
            raise ValueError(('the parent module has generators in the index range [0, %s]; generator %s does not exist' % ((len(self.generator_degrees()) - 1), index)))

    # Conventional short alias.
    gen = generator
    def generators(self):
        """Return all module generators (alias for ``gens()``)."""
        return self.gens()

    def _Hom_(self, Y, category):
        """Homspace construction hook used by the category framework."""
        from .free_homspace import FreeGradedModuleHomspace
        return FreeGradedModuleHomspace(self, Y, category)
    def suspension(self, t):
        """Return the same free module with every generator degree shifted by ``t``."""
        degs = tuple(((g + t) for g in self.generator_degrees()))
        return FreeGradedModule(algebra=self.base_ring(), generator_degrees=degs)
    def has_relations(self):
        """Free modules never have relations."""
        return False

    def relations(self):
        """Return the (empty) tuple of relations."""
        return ()
    def resolution(self, k, top_dim=None, verbose=False):
        """Return a length-``k`` free resolution of this (already free) module.

        The identity map, followed by zero maps from/between trivial modules.
        ``top_dim`` and ``verbose`` are accepted for interface compatibility
        only and are not used here.
        """
        if (k < 0):
            raise ValueError('the length of the resolution must be non-negative')
        ret_complex = [Hom(self, self).identity()]
        if (k == 0):
            return ret_complex
        # T is the trivial (generator-free) module over the same algebra.
        T = self.base_ring().free_graded_module(())
        ret_complex.append(Hom(T, self).zero())
        if (k == 1):
            return ret_complex
        return (ret_complex + ([Hom(T, T).zero()] * (k - 1)))
    def minimal_presentation(self, top_dim=None, verbose=False):
        """A free module is its own minimal presentation: return the identity map."""
        return Hom(self, self).identity()
def filter_attrs(attr_list):
    """Return the names of public data attributes worth checking.

    Filters out private names, reserved framework words, attributes already
    present on TestFlowReferenceWithExclude, and bound methods.
    """
    reserved_words = ['next', 'runtime', 'execute_next']
    valid_attrs = []
    for attr in attr_list:
        name, value = attr[0], attr[1]
        if name.startswith('_'):
            continue
        if name in reserved_words:
            continue
        if hasattr(TestFlowReferenceWithExclude, name):
            continue
        if isinstance(value, MethodType):
            continue
        valid_attrs.append(name)
    return valid_attrs
class ChannelPool(nn.Module):
    """Collapse the channel axis (dim 1) of a tensor by averaging."""

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Average over the channel dimension, keeping a singleton channel axis.
        return torch.mean(x, dim=1, keepdim=True)
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip BatchNorm/Scale layers from a Caffe net and fold them into affine tensors.

    Removes every 'BatchNorm' and 'Scale' layer from both the net definition
    and the weights net, then converts each (BatchNorm, Scale) pair into a
    single scale/bias tensor pair.

    Returns the list of new Caffe2 TensorProtos (alternating scale, bias).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Iterate in reverse so popping does not shift the remaining indices.
        for i in reversed(range(len(net.layer))):
            if (net.layer[i].type in remove_types):
                net.layer.pop(i)
    _remove_layers(caffenet)
    # Keep the BN/Scale weight layers before deleting them from the weights net.
    bn_layers = [layer for layer in caffenet_weights.layer if (layer.type in remove_types)]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Wrap a flat float array with the given dims in a Caffe2 TensorProto.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert (len(t.float_data) == np.prod(t.dims)), 'Data size, shape mismatch'
        return t
    bn_tensors = []
    # bn_layers alternates BatchNorm / Scale entries; walk them pairwise.
    for (bn, scl) in zip(bn_layers[0::2], bn_layers[1::2]):
        # Layer names are expected to share a suffix: bn<X> paired with scale<X>.
        assert (bn.name[len('bn'):] == scl.name[len('scale'):]), 'Pair mismatch'
        blob_out = (('res' + bn.name[len('bn'):]) + '_bn')
        # NOTE(review): Caffe BatchNorm usually stores a scale factor in
        # blobs[2]; mean/var are used unnormalized here — confirm the source
        # nets already fold that factor in.
        bn_mean = np.asarray(bn.blobs[0].data)
        bn_var = np.asarray(bn.blobs[1].data)
        scale = np.asarray(scl.blobs[0].data)
        bias = np.asarray(scl.blobs[1].data)
        # Fold BN statistics into an affine transform:
        #   y = scale * (x - mean) / sqrt(var + eps) + bias
        std = np.sqrt((bn_var + 1e-05))
        new_scale = (scale / std)
        new_bias = (bias - ((bn_mean * scale) / std))
        new_scale_tensor = _create_tensor(new_scale, bn.blobs[0].shape, (blob_out + '_s'))
        new_bias_tensor = _create_tensor(new_bias, bn.blobs[0].shape, (blob_out + '_b'))
        bn_tensors.extend([new_scale_tensor, new_bias_tensor])
    return bn_tensors
def register_Ns3Icmpv6DestinationUnreachable_methods(root_module, cls):
    """Register constructors and methods of ns3::Icmpv6DestinationUnreachable on its PyBindGen wrapper."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Icmpv6DestinationUnreachable const &', 'arg0')])
    cls.add_constructor([])
    # Header (de)serialization and type-id interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
    return
def register_Ns3RandomDirection2dMobilityModel_methods(root_module, cls):
    """Register constructors and methods of ns3::RandomDirection2dMobilityModel on its PyBindGen wrapper."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::RandomDirection2dMobilityModel const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Private virtual hooks of the MobilityModel interface.
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'arg0')], visibility='private', is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoGetPosition', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetVelocity', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')], visibility='private', is_virtual=True)
    return
def preprocess_args(args):
    """Fill in a default label for local-mode runs that left it empty."""
    needs_default_label = (args.mode == 'local') and (args.label == '')
    if needs_default_label:
        args.label = 'local'
class Concatenate(Model):
    """Model wrapper around a 'Concatenate' core model, created on demand."""

    def __init__(self, *, input_shape=None, name=None, core_model=None):
        if core_model is None:
            # Look up the registered core-model factory and instantiate it.
            core_model = search_core_model('Concatenate', []).create()
        super(Concatenate, self).__init__(core_model=core_model, input_shape=input_shape, name=name)
def get_max_batch_size(gpu_mem, max_bsz_dict):
    """Look up the max batch size for the floor-quantized GPU-memory bucket."""
    return max_bsz_dict[floor_quantize(gpu_mem, max_bsz_dict.keys())]
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', dir_name='./ckpts/moco'):
    """Serialize *state* under dir_name/filename; mirror it to model_best when best."""
    ckpt_path = os.path.join(dir_name, filename)
    torch.save(state, ckpt_path)
    if is_best:
        # Keep a stable copy of the best-so-far checkpoint.
        shutil.copyfile(ckpt_path, os.path.join(dir_name, 'model_best.pth.tar'))
# NOTE(review): bare `_wrapped_func` looks like a mangled decorator (e.g. a
# thread/process wrapping helper) — confirm against the original source.
_wrapped_func
def async_update(args, emb, queue):
    """Apply Adagrad-style sparse embedding updates pulled from *queue*.

    Runs as a worker loop: each queue item is
    (grad_indices, grad_values, gpu_id); a None grad_indices is the
    shutdown sentinel.
    """
    th.set_num_threads(args.num_thread)
    while True:
        (grad_indices, grad_values, gpu_id) = queue.get()
        clr = emb.args.lr
        if (grad_indices is None):
            # Shutdown sentinel: stop the update loop.
            return
        with th.no_grad():
            # Accumulator update: mean of squared gradients per row.
            grad_sum = (grad_values * grad_values).mean(1)
            device = emb.state_sum.device
            # Move indices/accumulator terms to the state tensor's device.
            if (device != grad_indices.device):
                grad_indices = grad_indices.to(device)
            if (device != grad_sum.device):
                grad_sum = grad_sum.to(device)
            emb.state_sum.index_add_(0, grad_indices, grad_sum)
            std = emb.state_sum[grad_indices]
            if (gpu_id >= 0):
                std = std.cuda(gpu_id)
            # Scaled step: -lr * g / (sqrt(state_sum) + eps).
            std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
            tmp = (((- clr) * grad_values) / std_values)
            if (tmp.device != device):
                tmp = tmp.to(device)
            emb.emb.index_add_(0, grad_indices, tmp)
# NOTE(review): the line below is a mangled `@pytest.mark.parametrize`
# decorator; the URL string literals appear truncated in this copy —
# restore them from the original source.
.parametrize('url, is_login, is_ajax', [(' 0, 0), (' 0, 1), (' 1, 0), (' 1, 1)], ids=['req_without_login', 'ajax_req_without_login', 'req_with_login', 'ajax_req_with_login'])
def test_parse_home_info(url, is_login, is_ajax, cookies, session):
    """Fetch the home page (anonymous or logged-in, plain or ajax) and check parsing yields data."""
    if (is_login == 1):
        # Logged-in requests reuse the authenticated session fixture.
        content = session.get(url).text
        if (not is_ajax):
            assert (len(home.get_data(content)) > 0)
        else:
            assert (len(home.get_ajax_data(content)) > 0)
    else:
        # Anonymous requests go through plain requests with raw cookies.
        content = requests.get(url, cookies=cookies).text
        if (not is_ajax):
            assert (len(home.get_data(content)) > 0)
        else:
            assert (len(home.get_ajax_data(content)) > 0)
    # Throttle between live HTTP requests.
    time.sleep(REQUEST_INTERNAL)
def print_losses(current_losses, i_iter):
    """Log every named loss of the current iteration on one line."""
    parts = [f'{loss_name} = {to_numpy(loss_value):.6f} '
             for loss_name, loss_value in current_losses.items()]
    print(f'iter = {i_iter} {" ".join(parts)}')
class SawyerDrawerOpenV2Policy(Policy):
    """Scripted policy for the Sawyer drawer-open task."""

    # NOTE(review): bare `_fully_parsed` looks like mangled decorators
    # (`_parse_obs` takes no self, so likely `@staticmethod` plus a
    # parse-assertion decorator) — confirm against the original source.
    _fully_parsed
    def _parse_obs(obs):
        # Observation layout: hand xyz, drawer xyz, remainder unused.
        return {'hand_pos': obs[:3], 'drwr_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        """Return the action array (xyz delta + grab effort) for this step."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        pos_curr = o_d['hand_pos']
        # Target slightly below the reported drawer position.
        pos_drwr = (o_d['drwr_pos'] + np.array([0.0, 0.0, (- 0.02)]))
        if (np.linalg.norm((pos_curr[:2] - pos_drwr[:2])) > 0.06):
            # Stage 1: while far away in the xy plane, hover above the target.
            to_pos = (pos_drwr + np.array([0.0, 0.0, 0.3]))
            action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=4.0)
        elif (abs((pos_curr[2] - pos_drwr[2])) > 0.04):
            # Stage 2: descend onto the target.
            to_pos = pos_drwr
            action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=4.0)
        else:
            # Stage 3: pull along -y with a much stiffer gain
            # (presumably the drawer-opening direction — confirm).
            to_pos = (pos_drwr + np.array([0.0, (- 0.06), 0.0]))
            action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=50.0)
        action['grab_effort'] = (- 1.0)
        return action.array
def convconcat(tensor_in, condition, reshape_shape):
    """Broadcast a conditioning vector over the spatial dims and concat on channels."""
    cond = tf.reshape(condition, reshape_shape)
    batch = tf.shape(tensor_in)[0]
    spatial_dims = tensor_in.get_shape().as_list()[1:(- 1)]
    cond_channels = condition.get_shape().as_list()[1]
    # Multiply by ones of the full target shape to tile the condition map.
    broadcast = cond * tf.ones([batch] + spatial_dims + [cond_channels])
    return tf.concat([tensor_in, broadcast], (- 1))
def build(num_classes, num_keypoints=0, pretrained=True, freeze_base=False, use_dcn=False, use_skip=False, rotated_boxes=False):
    """Construct a CenterNet-style MobileNetV2 with the requested output heads.

    Heads: 'hm' (per-class heatmap), 'wh' (box size, 3 channels when boxes
    are rotated), 'reg' (offset), plus 'kps' when keypoints are requested.
    """
    heads = {'hm': num_classes, 'wh': 3 if rotated_boxes else 2, 'reg': 2}
    if num_keypoints > 0:
        heads['kps'] = 2 * num_keypoints
    return CenterMobileNetV2(heads, pretrained=pretrained, freeze_base=freeze_base, use_dcn=use_dcn, use_skip=use_skip, rotated_boxes=rotated_boxes)
# NOTE(review): `_HEADS.register_module` below looks like a mangled decorator
# (likely `@<REGISTRY>_HEADS.register_module`) — confirm against the original.
_HEADS.register_module
class ResLayer(nn.Module):
    """A single ResNet stage built from the ResNet arch registry, usable as a head."""

    def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None):
        super(ResLayer, self).__init__()
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        # Block type and per-stage block counts come from the ResNet registry.
        (block, stage_blocks) = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # Channel widths follow the standard ResNet doubling scheme.
        planes = (64 * (2 ** stage))
        inplanes = ((64 * (2 ** (stage - 1))) * block.expansion)
        res_layer = make_res_layer(block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn)
        self.add_module('layer{}'.format((stage + 1)), res_layer)

    def init_weights(self, pretrained=None):
        """Load a checkpoint when *pretrained* is a path, else Kaiming/constant init."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    # NOTE(review): `_fp16()` looks like a mangled decorator (possibly
    # `@auto_fp16()` or `@force_fp32()`) — confirm against the original.
    _fp16()
    def forward(self, x):
        # Forward through the single registered res-layer for this stage.
        res_layer = getattr(self, 'layer{}'.format((self.stage + 1)))
        out = res_layer(x)
        return out

    def train(self, mode=True):
        """Switch train/eval; keep BatchNorm modules in eval when norm_eval is set."""
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
def auto_select_c(d):
    """Heuristic coefficient c derived from the dimension *d*.

    Computes R = (gamma(d/2 + 1) / pi**(d/2 - 1)) ** (1/d) and returns 1/R**2.
    """
    half_d = d / 2.0
    radius = gamma(half_d + 1) / (np.pi ** (half_d - 1))
    radius = radius ** (1 / float(d))
    return 1 / (radius ** 2)
class MgpstrEncoder(nn.Module):
    """Stack of MgpstrLayer blocks with linearly spaced stochastic-depth rates."""

    def __init__(self, config: MgpstrConfig):
        super().__init__()
        # Drop-path rate grows linearly from 0 to drop_path_rate across layers.
        drop_rates = [r.item() for r in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.blocks = nn.Sequential(
            *[MgpstrLayer(config=config, drop_path=drop_rates[i]) for i in range(config.num_hidden_layers)]
        )

    def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for blk in self.blocks:
            if output_hidden_states:
                # Record the state *entering* this layer.
                all_hidden_states += (hidden_states,)
            layer_outputs = blk(hidden_states)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions += (layer_outputs[1],)
        if output_hidden_states:
            # Include the final state as well.
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple(v for v in (hidden_states, all_hidden_states, all_self_attentions) if v is not None)
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def animate_imlist(im_list, anim_name='movie'):
    """Render a list of images as an MP4 animation saved to '<anim_name>.mp4'.

    Each entry of *im_list* becomes one animation frame.

    Fixes vs. the previous version: removed a leftover `ipdb.set_trace()`
    debugger breakpoint that halted execution, and wrapped each frame's
    artist in a list as ArtistAnimation expects a sequence of artist
    collections (one collection per frame).
    """
    fig, ax = plt.subplots()
    frames = []
    for img in im_list:
        artist = ax.imshow(img)
        # One frame == one list of artists.
        frames.append([artist])
    ani = animation.ArtistAnimation(fig, frames, interval=10, blit=True, repeat_delay=1000)
    ani.save(f'{anim_name}.mp4')
class MPNetForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder for the real model when its backend is unavailable.

    Instantiation defers to requires_backends with the 'torch' backend
    (transformers dummy-object pattern — presumably raising an informative
    error about the missing dependency; confirm against requires_backends).
    """
    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TestsOmniglot(unittest.TestCase):
    """Smoke tests: one training epoch plus a checkpoint-restore pass on Omniglot.

    The three tests differ only in batch size; previously the full config
    dict was copy-pasted three times — it is now built by one helper.
    """

    @staticmethod
    def _make_config(batch):
        """Return the shared training configuration for the given batch size."""
        return {'data.dataset_path': '/home/igor/dl/siamese-networks-tf/data/omniglot', 'data.dataset': 'omniglot', 'data.train_way': 2, 'data.test_way': 2, 'data.split': 'vinyals', 'data.batch': batch, 'data.episodes': 2, 'data.cuda': 1, 'data.gpu': gpu_num, 'train.epochs': 1, 'train.lr': 0.001, 'train.patience': 100, 'train.tb_dir': 'results/logs/gradient_tape/', 'train.log_dir': 'results/logs', 'train.restore': 0, 'model.x_dim': '105,105,1', 'model.save_dir': 'results/models/omniglot'}

    def _train_then_restore(self, batch):
        """Run a fresh training pass, then a second pass restoring the checkpoint."""
        config = self._make_config(batch)
        train(config)
        config['train.restore'] = 1
        train(config)

    def test_2_way_batch_4(self):
        self._train_then_restore(4)

    def test_2_way_batch_16(self):
        self._train_then_restore(16)

    def test_2_way_batch_1(self):
        self._train_then_restore(1)
class EncodeText(Dataset):
    """Dataset that tokenizes raw text (optionally with IOB tags) on access."""

    def __init__(self, text: List[str], tokenizer: Tokenizer, iob: List[str]=None) -> None:
        super().__init__()
        self.text = text
        self.iob = iob
        if (iob is not None):
            # Texts and tag sequences must be parallel lists.
            assert (len(text) == len(iob))
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.text)

    def __getitem__(self, index: int):
        raw = self.text[index]
        if self.iob is None:
            token_ids = self.tokenizer.encode(raw)
            label_text = raw
        else:
            token_ids = self.tokenizer.encode(raw, self.iob[index])
            # With tags, the label text is the round-tripped (decoded) form.
            label_text = self.tokenizer.decode(token_ids)
        return {'labels': label_text, 'class_ids': torch.LongTensor(token_ids)}
def wel_maker(file_name, min_weight, max_weight, vertices, min_edge, max_edge, sign, direct, self_loop, multigraph):
    """Generate a random weighted edge list and write it to '<file_name>.wel'.

    Returns the number of generated edges.
    """
    edges, weights, edge_count = edge_gen(vertices, min_weight, max_weight, min_edge, max_edge, sign, direct, self_loop, multigraph)
    with open(file_name + '.wel', 'w') as out:
        _write_separated_file(out, edges, weights, separator=' ')
    return edge_count
def _remove_dup_items(lst):
new_lst = []
for item in lst:
if (item not in new_lst):
new_lst.append(item)
return new_lst |
def get_kernel_embedding(args, train_files, val_files, test_files):
    """Compute Weisfeiler-Lehman graph-kernel features for the three splits.

    Fits the WL kernel on the training graphs, then transforms val/test
    against the fitted kernel. Returns (x_train, x_val, x_test).
    """
    print('\n******Running WL Kernel on train set******')
    gk = GraphKernel(kernel=[{'name': 'weisfeiler_lehman', 'n_iter': args['n_iter']}, 'subtree_wl'], normalize=True, n_jobs=args['n_cores'])
    # Load/convert the training graphs in parallel before fitting.
    graphs = Parallel(n_jobs=args['n_cores'])((delayed(process_file_grakel)(file) for file in tqdm(train_files)))
    x_train = gk.fit_transform(graphs)
    print('\n******Running WL Kernel on val set******')
    x_val = kernel_transform(args, val_files, gk)
    print('\n******Running WL Kernel on test set******')
    x_test = kernel_transform(args, test_files, gk)
    return (x_train, x_val, x_test)
def extract_nth_traceback(trace: (TracebackType | None), n: int) -> (TracebackType | None):
depth = 0
while ((depth < n) and (trace is not None)):
trace = trace.tb_next
depth += 1
return trace |
class PoolingLayer(My2DLayer):
    """Average/max pooling layer in the My2DLayer framework."""

    def __init__(self, in_channels, out_channels, pool_type, kernel_size=2, stride=2, use_bn=False, act_func=None, dropout_rate=0, ops_order='weight_bn_act'):
        # pool_type: 'avg' or 'max' (anything else raises in weight_op).
        self.pool_type = pool_type
        self.kernel_size = kernel_size
        self.stride = stride
        super(PoolingLayer, self).__init__(in_channels, out_channels, use_bn, act_func, dropout_rate, ops_order)

    def weight_op(self):
        """Build the pooling op; 'same' padding is used only when stride == 1."""
        if (self.stride == 1):
            padding = get_same_padding(self.kernel_size)
        else:
            padding = 0
        weight_dict = OrderedDict()
        if (self.pool_type == 'avg'):
            weight_dict['pool'] = nn.AvgPool2d(self.kernel_size, stride=self.stride, padding=padding, count_include_pad=False)
        elif (self.pool_type == 'max'):
            weight_dict['pool'] = nn.MaxPool2d(self.kernel_size, stride=self.stride, padding=padding)
        else:
            raise NotImplementedError
        return weight_dict

    # NOTE(review): decorators appear stripped in this copy — `config` is read
    # as an attribute on the base class (`super(...).config` below), so it is
    # presumably a @property, and `build_from_config` takes no self, so it is
    # presumably a @staticmethod. Confirm against the original source.
    def module_str(self):
        """Short human-readable description, e.g. '2x2_AVGPool'."""
        if isinstance(self.kernel_size, int):
            kernel_size = (self.kernel_size, self.kernel_size)
        else:
            kernel_size = self.kernel_size
        return ('%dx%d_%sPool' % (kernel_size[0], kernel_size[1], self.pool_type.upper()))

    def config(self):
        """Serializable config dict merged with the base-layer config."""
        return {'name': PoolingLayer.__name__, 'pool_type': self.pool_type, 'kernel_size': self.kernel_size, 'stride': self.stride, **super(PoolingLayer, self).config}

    def build_from_config(config):
        """Recreate a PoolingLayer from a config dict."""
        return PoolingLayer(**config)
def test_array_index_function_result():
    """An array index applied to a function call parses as a single token group."""
    tokens = sqlparse.parse('somefunc()[1]')[0].tokens
    assert len(tokens) == 1
    indices = list(tokens[0].get_array_indices())
    assert len(indices) == 1
def plotPoseDataset():
    """Render every pose/hand skeleton stored in the poseDataset table.

    Reads the 110 coordinate columns per row — right hand Rx/Ry1-21
    (columns 0-41), left hand Lx/Ly1-21 (columns 42-83) and body Px/Py1-13
    (columns 84-109) — draws each skeleton on a background image and shows
    it with matplotlib.

    Fixes vs. the previous version: the left-hand points were read from
    columns 0-41 (the right hand's columns) instead of 42-83, so the right
    hand was drawn twice; a leftover debug print was also removed.
    """
    # Bone connectivity for the body and for a 21-point hand.
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 9], [9, 11], [0, 10], [10, 12]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # One BGR color per hand bone.
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    connection = sqlite3.connect('..\\data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Build the 110-column SELECT: Rx/Ry1-21, Lx/Ly1-21, Px/Py1-13 in order.
    sql = 'SELECT Rx1,Ry1'
    for x in range(2, 22):
        sql = ((((sql + ',Rx') + str(x)) + ',Ry') + str(x))
    for x in range(1, 22):
        sql = ((((sql + ',Lx') + str(x)) + ',Ly') + str(x))
    for x in range(1, 14):
        sql = ((((sql + ',Px') + str(x)) + ',Py') + str(x))
    sql = (sql + ' FROM poseDataset WHERE 1')
    crsr.execute(sql)
    feature_res = crsr.fetchall()
    feature_res = np.asarray(feature_res)
    features = []
    for x in feature_res:
        features.append(x)
    for i in range(len(features)):
        # Body points: columns 84-109.
        posePoints = []
        for x in range(84, 110, 2):
            posePoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        # Right hand: columns 0-41.
        handRightPoints = []
        for x in range(0, 42, 2):
            handRightPoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        # Left hand: columns 42-83 (BUG FIX — previously re-read columns 0-41).
        handLeftPoints = []
        for x in range(42, 84, 2):
            handLeftPoints.append((int(features[i][x]), int(features[i][(x + 1)])))
        # The background color is hard-coded to black, so the 'White' drawing
        # branches below are currently dead code.
        color = 'black'
        color = color.capitalize()
        background = (color + '_background.jpg')
        frame = cv2.imread(background)
        count = 0
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            # Skip bones with missing ((0, 0)-valued) endpoints.
            if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
                if (color == 'White'):
                    cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                    cv2.circle(frame, posePoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, posePoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
                    cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
            count += 1
        count = 0
        for pair in HAND_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handRightPoints[partA] and handRightPoints[partB]):
                if (color == 'White'):
                    cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                    cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                    cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                    cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
            count += 1
        count = 0
        for pair in HAND_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handLeftPoints[partA] and handLeftPoints[partB]):
                if (color == 'White'):
                    cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                    cv2.circle(frame, handLeftPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                    cv2.circle(frame, handLeftPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
                else:
                    cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
                    cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                    cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
            count += 1
        # OpenCV is BGR; convert for matplotlib display.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        fig2 = plt.figure(figsize=(10, 10))
        ax3 = fig2.add_subplot(111)
        ax3.imshow(frame, interpolation='none')
        plt.imshow(frame)
        plt.show()
class BatchNormLayer(L.Layer):
    """Lasagne-style batch normalization layer with stored running mean/std."""

    def __init__(self, incoming, axes='auto', epsilon=0.0001, alpha=0.1, mode='low_mem', beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1), mean=lasagne.init.Constant(0), std=lasagne.init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        if (axes == 'auto'):
            # Normalize over all axes except the feature axis (axis 1).
            axes = ((0,) + tuple(range(2, len(self.input_shape))))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes
        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode
        # Parameter shape: the sizes of the non-normalized axes.
        shape = [size for (axis, size) in enumerate(self.input_shape) if (axis not in self.axes)]
        if any(((size is None) for size in shape)):
            raise ValueError('BatchNormLayer needs specified input sizes for all axes not normalized over.')
        # beta/gamma may be disabled by passing None.
        if (beta is None):
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta', trainable=True, regularizable=False)
        if (gamma is None):
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma', trainable=True, regularizable=False)
        # Running statistics (never trained directly).
        self.mean = self.add_param(mean, shape, 'mean', trainable=False, regularizable=False)
        self.std = self.add_param(std, shape, 'std', trainable=False, regularizable=False)

    def get_output_for(self, input, deterministic=False, **kwargs):
        """Normalize *input*: batch statistics in training, running averages otherwise."""
        input_mean = input.mean(self.axes)
        input_std = TT.sqrt((input.var(self.axes) + self.epsilon))
        # Both switches default to the deterministic flag but can be overridden.
        use_averages = kwargs.get('batch_norm_use_averages', deterministic)
        if use_averages:
            mean = self.mean
            std = self.std
        else:
            mean = input_mean
            std = input_std
        update_averages = kwargs.get('batch_norm_update_averages', (not deterministic))
        if update_averages:
            # Attach exponential-moving-average updates to clones so the
            # running statistics are refreshed as a side effect of using
            # this expression graph (theano default_update trick).
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_std = theano.clone(self.std, share_inputs=False)
            running_mean.default_update = (((1 - self.alpha) * running_mean) + (self.alpha * input_mean))
            running_std.default_update = (((1 - self.alpha) * running_std) + (self.alpha * input_std))
            # Force the clones into the graph without changing the value.
            mean += (0 * running_mean)
            std += (0 * running_std)
        # Broadcast the parameters back over the normalized axes.
        param_axes = iter(list(range((input.ndim - len(self.axes)))))
        pattern = [('x' if (input_axis in self.axes) else next(param_axes)) for input_axis in range(input.ndim)]
        beta = (0 if (self.beta is None) else self.beta.dimshuffle(pattern))
        gamma = (1 if (self.gamma is None) else self.gamma.dimshuffle(pattern))
        mean = mean.dimshuffle(pattern)
        std = std.dimshuffle(pattern)
        normalized = (((input - mean) * (gamma * TT.inv(std))) + beta)
        return normalized
# NOTE(review): `_utils.test(...)` below looks like a mangled decorator
# (likely `@test_utils.test(arch=...)`) — confirm against the original.
_utils.test(arch=supported_archs_texture)
def test_rw_texture_wrong_fmt():
    """Writing through an rw_texture declared with a mismatched format must raise."""
    tex = ti.Texture(ti.Format.rgba8, (32, 32))

    # NOTE(review): `write` is presumably meant to be a `@ti.kernel`; the
    # decorator appears stripped in this copy — confirm upstream.
    def write(tex: ti.types.rw_texture(num_dimensions=2, fmt=ti.Format.r32f, lod=0)):
        for (i, j) in tex:
            tex.store(ti.Vector([i, j]), ti.Vector([1.0, 0.0, 0.0, 0.0]))
    # The texture is rgba8 but the kernel argument declares r32f.
    with pytest.raises(ti.TaichiRuntimeError, match='RWTextureType format mismatch for argument tex: expected Format.r32f, got Format.rgba8') as e:
        write(tex)
def restart():
    """Reset the UI to its initial state.

    Returns the tuple of fresh component values expected by the UI callback:
    a cleared video-state dict, a cleared interactive/click-state dict, empty
    click lists, three cleared components, a long run of visibility updates
    (gr.update) for the widgets, an empty string, and the initial operation
    log. Reads the module-level `args` for the mask_save setting.
    """
    operation_log = [('', ''), ('Try to upload your video and click the Get video info button to get started!', 'Normal')]
    return ({'user_name': '', 'video_name': '', 'origin_images': None, 'painted_images': None, 'masks': None, 'inpaint_masks': None, 'logits': None, 'select_frame_number': 0, 'fps': 30}, {'inference_times': 0, 'negative_click_times': 0, 'positive_click_times': 0, 'mask_save': args.mask_save, 'multi_mask': {'mask_names': [], 'masks': []}, 'track_end_number': None}, [[], []], None, None, None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), '', gr.update(visible=True, value=operation_log), gr.update(visible=False, value=operation_log))
def print_fig(input, target=None, title=None, save_dir=None):
    """Plot a batch of images side by side, optionally annotated with model loss.

    NOTE(review): relies on module-level globals `net`, `mean`, `std`,
    `criterion` and `CIFAR100_LABELS_LIST` when *target* is given — confirm
    they are defined in the enclosing script. The parameter name `input`
    also shadows the builtin.
    """
    (fig, axes) = plt.subplots(1, len(input), figsize=((3 * len(input)), 3))
    if title:
        fig.suptitle(title, size=16)
    if (len(input) == 1):
        # subplots returns a bare Axes for a single column; normalize to a list.
        axes = [axes]
    for (i, ax) in enumerate(axes):
        if (len(input.shape) == 4):
            # Channel-first color image -> HWC for imshow.
            ax.imshow(input[i].permute(1, 2, 0).numpy())
        else:
            ax.imshow(input[i].numpy(), cmap='gray', vmin=0.0, vmax=1.0)
        if (target is not None):
            # Run the normalized image through the global net for loss/prediction.
            output = net(((input[i].unsqueeze(0) - mean) / std))
            loss = criterion(output, target[i:(i + 1)])
            ax.set_title('loss: {:.3f}\n pred: {}\n true : {}'.format(loss, CIFAR100_LABELS_LIST[output.max(1)[1][0]], CIFAR100_LABELS_LIST[target[i]]))
        ax.axis('off')
    plt.subplots_adjust(wspace=0.1)
    if (save_dir is not None):
        plt.savefig(save_dir, bbox_inches='tight', pad_inches=0)
    plt.show()
class PolEmoOUTTask(BaseTask):
    """PolEmo out-of-domain sentiment classification task definition."""

    def __init__(self):
        # 4-class, single-label classification task.
        self._spec = TaskSpecification('POLEMO', 'classification', 4, 1)
        self._spec.output_dir = 'POLEMO-OUT'

    def read(self, data_path: str, split: str) -> Iterable[DataExample]:
        """Yield DataExamples from the split file.

        Non-train splits use the 'out-<split>' file names. Each line ends
        with the label; everything before it is the text.
        """
        split = (split if (split == 'train') else f'out-{split}')
        path = self.get_split_path(data_path, split)
        normalizer = TextNormalizer()
        with open(path, 'r', encoding='utf-8') as input_file:
            for line in input_file:
                words = line.split()
                label = words[(- 1)]
                text = ' '.join(words[0:(- 1)])
                # Re-join ' em ', ' smy ', ' m ' onto the preceding word
                # (presumably repairing tokenizer-detached suffixes — confirm).
                text = text.replace(' em ', 'em ').replace(' smy ', 'smy ').replace(' m ', 'm ')
                text = normalizer.process(text)
                (yield DataExample(text, label))
# NOTE(review): the bare tuple below looks like a mangled registration
# decorator (likely `@Model.register('decomposable_attention')`) — confirm.
('decomposable_attention')
class DecomposableAttention(Model):
    """Decomposable Attention model for premise/hypothesis entailment.

    Attend -> compare -> aggregate pipeline: soft-align the two sentences
    with a learned attention matrix, compare each token with its aligned
    summary, sum the comparisons, and classify the concatenation.
    """

    def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, attend_feedforward: FeedForward, similarity_function: SimilarityFunction, compare_feedforward: FeedForward, aggregate_feedforward: FeedForward, premise_encoder: Optional[Seq2SeqEncoder]=None, hypothesis_encoder: Optional[Seq2SeqEncoder]=None, initializer: InitializerApplicator=InitializerApplicator(), regularizer: Optional[RegularizerApplicator]=None) -> None:
        super(DecomposableAttention, self).__init__(vocab, regularizer)
        self._text_field_embedder = text_field_embedder
        # Applied token-wise, hence the TimeDistributed wrappers.
        self._attend_feedforward = TimeDistributed(attend_feedforward)
        self._matrix_attention = MatrixAttention(similarity_function)
        self._compare_feedforward = TimeDistributed(compare_feedforward)
        self._aggregate_feedforward = aggregate_feedforward
        self._premise_encoder = premise_encoder
        # The hypothesis encoder defaults to sharing the premise encoder.
        self._hypothesis_encoder = (hypothesis_encoder or premise_encoder)
        self._num_labels = vocab.get_vocab_size(namespace='labels')
        # Sanity-check the wiring between components.
        if (text_field_embedder.get_output_dim() != attend_feedforward.get_input_dim()):
            raise ConfigurationError('Output dimension of the text_field_embedder (dim: {}), must match the input_dim of the FeedForward layer attend_feedforward, (dim: {}). '.format(text_field_embedder.get_output_dim(), attend_feedforward.get_input_dim()))
        if (aggregate_feedforward.get_output_dim() != self._num_labels):
            raise ConfigurationError(('Final output dimension (%d) must equal num labels (%d)' % (aggregate_feedforward.get_output_dim(), self._num_labels)))
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self)

    def forward(self, premise: Dict[(str, torch.LongTensor)], hypothesis: Dict[(str, torch.LongTensor)], label: torch.IntTensor=None) -> Dict[(str, torch.Tensor)]:
        """Return label logits/probs (and loss + accuracy update when *label* is given)."""
        embedded_premise = self._text_field_embedder(premise)
        embedded_hypothesis = self._text_field_embedder(hypothesis)
        premise_mask = get_text_field_mask(premise).float()
        hypothesis_mask = get_text_field_mask(hypothesis).float()
        # Optional contextual encoding before attending.
        if self._premise_encoder:
            embedded_premise = self._premise_encoder(embedded_premise, premise_mask)
        if self._hypothesis_encoder:
            embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)
        # Attend: project both sides and build the similarity matrix.
        projected_premise = self._attend_feedforward(embedded_premise)
        projected_hypothesis = self._attend_feedforward(embedded_hypothesis)
        similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)
        # Soft-align each side against the other (masked softmax over rows/cols).
        p2h_attention = last_dim_softmax(similarity_matrix, hypothesis_mask)
        attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)
        h2p_attention = last_dim_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
        attended_premise = weighted_sum(embedded_premise, h2p_attention)
        # Compare: each token concatenated with its aligned summary.
        premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=(- 1))
        hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=(- 1))
        compared_premise = self._compare_feedforward(premise_compare_input)
        compared_premise = (compared_premise * premise_mask.unsqueeze((- 1)))
        # Aggregate: masked sum over the token dimension.
        compared_premise = compared_premise.sum(dim=1)
        compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)
        compared_hypothesis = (compared_hypothesis * hypothesis_mask.unsqueeze((- 1)))
        compared_hypothesis = compared_hypothesis.sum(dim=1)
        aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=(- 1))
        label_logits = self._aggregate_feedforward(aggregate_input)
        label_probs = torch.nn.functional.softmax(label_logits, dim=(- 1))
        output_dict = {'label_logits': label_logits, 'label_probs': label_probs}
        if (label is not None):
            loss = self._loss(label_logits, label.long().view((- 1)))
            self._accuracy(label_logits, label.squeeze((- 1)))
            output_dict['loss'] = loss
        return output_dict

    def get_metrics(self, reset: bool=False) -> Dict[(str, float)]:
        """Return accumulated categorical accuracy."""
        return {'accuracy': self._accuracy.get_metric(reset)}

    # NOTE(review): `from_params` takes `cls` but has no visible
    # `@classmethod` decorator — it appears stripped in this copy; confirm.
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        """Build the model from a Params configuration blob."""
        embedder_params = params.pop('text_field_embedder')
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        premise_encoder_params = params.pop('premise_encoder', None)
        if (premise_encoder_params is not None):
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None
        hypothesis_encoder_params = params.pop('hypothesis_encoder', None)
        if (hypothesis_encoder_params is not None):
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None
        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop('similarity_function'))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        return cls(vocab=vocab, text_field_embedder=text_field_embedder, attend_feedforward=attend_feedforward, similarity_function=similarity_function, compare_feedforward=compare_feedforward, aggregate_feedforward=aggregate_feedforward, premise_encoder=premise_encoder, hypothesis_encoder=hypothesis_encoder, initializer=initializer, regularizer=regularizer)
class NLIDataReader(object):
    """Reads gzipped NLI sentence-pair splits (s1.*, s2.*, labels.*) from a folder.

    Fixes vs. the previous version: the three gzip handles were opened and
    never closed (resource leak) — they are now read inside `with` blocks;
    `get_labels` was defined without `self` yet called as `self.get_labels()`
    (a TypeError) — it is now a proper @staticmethod; the local `id` no
    longer shadows the builtin.
    """

    def __init__(self, dataset_folder):
        # Directory holding the gzipped split files.
        self.dataset_folder = dataset_folder

    def get_examples(self, filename, max_examples=0):
        """Build InputExamples from the three parallel gzip files for *filename*.

        max_examples == 0 means no limit; otherwise reading stops once that
        many examples have been collected.
        """
        def _read_lines(prefix):
            path = os.path.join(self.dataset_folder, prefix + filename)
            # Close each handle deterministically.
            with gzip.open(path, mode='rt', encoding='utf-8') as handle:
                return handle.readlines()

        s1 = _read_lines('s1.')
        s2 = _read_lines('s2.')
        labels = _read_lines('labels.')
        examples = []
        for example_id, (sentence_a, sentence_b, label) in enumerate(zip(s1, s2, labels)):
            guid = '%s-%d' % (filename, example_id)
            examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
            if 0 < max_examples <= len(examples):
                break
        return examples

    @staticmethod
    def get_labels():
        """Label-name -> class-index mapping."""
        return {'contradiction': 0, 'entailment': 1, 'neutral': 2}

    def get_num_labels(self):
        """Number of distinct classes."""
        return len(self.get_labels())

    def map_label(self, label):
        """Map a (possibly padded / mixed-case) label string to its index."""
        return self.get_labels()[label.strip().lower()]
# NOTE(review): the six lines below look like the remains of stripped click
# decorators (plausibly "@click.command()", "@click.argument(...)" and
# "@click.option(...)"); as written they are not valid Python (keyword
# arguments inside tuple displays).  Several other decorators in this file
# are mangled the same way -- restore them against the original source.
()
('db_file', type=click.Path())
('entity_db_file', type=click.Path())
('out_file', type=click.Path())
('--min-word-count', default=5)
('--min-entity-count', default=3)
def build_vocab(db_file, entity_db_file, out_file, **kwargs):
    # CLI entry point: build a Vocab from the abstract DB plus the entity DB
    # and save it to out_file.  The min-count options arrive via **kwargs
    # and are forwarded to Vocab.build unchanged.
    db = AbstractDB(db_file, 'r')
    entity_db = EntityDB.load(entity_db_file)
    vocab = Vocab.build(db, entity_db, **kwargs)
    vocab.save(out_file)
def assert_arctan2_ispzero(x, y):
    """Assert that ``arctan2(x, y)`` is exactly +0.0 (zero with the sign bit clear)."""
    result = ncu.arctan2(x, y)
    is_positive_zero = (result == 0) and not np.signbit(result)
    assert_(is_positive_zero, 'arctan(%s, %s) is %s, not +0' % (x, y, result))
class SymforceLinterTest(TestCase):
    """Runs ``make lint`` over the symforce tree and fails the unit test when
    the linter reports problems."""

    # NOTE(review): the two lines below are the mangled remains of stripped
    # decorators (likely a sympy-only marker and a skipif for the mypy bug
    # on Python 3.10.7); as written they are not valid at class-body
    # execution time -- restore them from the original source.
    _on_sympy
    ((sys.version_info[:3] >= (3, 10, 7)), '\n    Mypy fails on Python 3.10.7 because of this bug, which is fixed in mypy 0.981:\n    ')
    def test_linter(self) -> None:
        """Invoke ``make lint`` in SYMFORCE_DIR using the current interpreter."""
        try:
            python_util.execute_subprocess(['make', 'lint'], cwd=SYMFORCE_DIR, env=dict(os.environ, PYTHON=sys.executable))
        except subprocess.CalledProcessError as exc:
            # Surface the linter output in the log, then fail the test.
            logger.error(exc)
            self.fail('Linter Failed.')
def create_lmdb_for_vimeo90k():
    """Build the GT and LQ LMDB databases for Vimeo-90K training.

    Dataset locations come from the YAML config at module-level ``yml_path``;
    chunking uses module-level ``radius`` (sequence length 2*radius+1).
    GT keys store only the center frame of each chunk; LQ keys store every
    frame.  Finally the dataset root is sym-linked under ./data/vimeo90k.

    NOTE(review): relies on module-level names ``yml_path``, ``radius``,
    ``op`` (os.path), ``yaml`` and ``make_y_lmdb_from_yuv`` -- confirm they
    are defined at import time.
    """
    with open(yml_path, 'r') as fp:
        # fp is re-bound to the parsed config dict; the with-block still
        # closes the underlying file handle.
        fp = yaml.load(fp, Loader=yaml.FullLoader)
    root_dir = fp['dataset']['root']
    gt_folder = fp['dataset']['train']['gt_folder']
    lq_folder = fp['dataset']['train']['lq_folder']
    gt_path = fp['dataset']['train']['gt_path']
    lq_path = fp['dataset']['train']['lq_path']
    meta_path = fp['dataset']['train']['meta_path']
    gt_dir = op.join(root_dir, gt_folder)
    lq_dir = op.join(root_dir, lq_folder)
    lmdb_gt_path = op.join(root_dir, gt_path)
    lmdb_lq_path = op.join(root_dir, lq_path)
    meta_path = op.join(root_dir, meta_path)
    # Scan the meta list: each line "<seq>/<sub>" maps to video "<seq>_<sub>.yuv".
    print('Scaning meta list...')
    gt_video_list = []
    lq_video_list = []
    meta_fp = open(meta_path, 'r')  # NOTE(review): this handle is never closed
    while True:
        new_line = meta_fp.readline().split('\n')[0]
        if (new_line == ''):
            break
        vid_name = ((new_line.split('/')[0] + '_') + new_line.split('/')[1])
        qt_path = op.join(gt_dir, (vid_name + '.yuv'))  # NOTE(review): 'qt_path' reads like a typo for 'gt_path'
        gt_video_list.append(qt_path)
        lq_path = op.join(lq_dir, (vid_name + '.yuv'))  # shadows the config value read above (already consumed)
        lq_video_list.append(lq_path)
    msg = f'> {len(gt_video_list)} videos found.'
    print(msg)
    # GT: index of the center frame (radius) of each (2*radius+1)-frame chunk
    # carved out of the 7-frame Vimeo-90K sequences.
    print('Scaning GT frames (only center frames of each sequence)...')
    frm_list = []
    for gt_video_path in gt_video_list:
        nfs = 7  # every Vimeo-90K sequence has 7 frames
        num_seq = (nfs // ((2 * radius) + 1))
        frm_list.append([(radius + (iter_seq * ((2 * radius) + 1))) for iter_seq in range(num_seq)])
    num_frm_total = sum([len(frms) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    # LMDB keys follow the "VVV/SSS/im4.png" convention (1-based indices).
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(gt_video_list)):
        frms = frm_list[iter_vid]
        for iter_frm in range(len(frms)):
            key_list.append('{:03d}/{:03d}/im4.png'.format((iter_vid + 1), (iter_frm + 1)))
            video_path_list.append(gt_video_list[iter_vid])
            index_frame_list.append(frms[iter_frm])
    print('Writing LMDB for GT data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, yuv_type='444p', h=256, w=448, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_gt_path, multiprocessing_read=True)
    print('> Finish.')
    # LQ: every frame of every chunk.
    print('Scaning LQ frames...')
    len_input = ((2 * radius) + 1)
    frm_list = []
    for lq_video_path in lq_video_list:
        nfs = 7
        num_seq = (nfs // len_input)
        frm_list.append([list(range((iter_seq * len_input), ((iter_seq + 1) * len_input))) for iter_seq in range(num_seq)])
    num_frm_total = sum([(len(frms) * len_input) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(lq_video_list)):
        frm_seq = frm_list[iter_vid]
        for iter_seq in range(len(frm_seq)):
            key_list.extend(['{:03d}/{:03d}/im{:d}.png'.format((iter_vid + 1), (iter_seq + 1), i) for i in range(1, (len_input + 1))])
            video_path_list.extend(([lq_video_list[iter_vid]] * len_input))
            index_frame_list.extend(frm_seq[iter_seq])
    print('Writing LMDB for LQ data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, yuv_type='444p', h=256, w=448, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_lq_path, multiprocessing_read=True)
    print('> Finish.')
    # Sym-link the dataset root under ./data for the training scripts.
    if (not op.exists('data/vimeo90k')):
        if (not op.exists('data/')):
            os.system('mkdir data/')
        os.system(f'ln -s {root_dir} ./data/vimeo90k')
        print('Sym-linking done.')
    else:
        print('data/vimeo90k already exists.')
def get_sequence_list_and_phyche_value(input_data, k, phyche_index, extra_phyche_index, all_property):
    """Resolve the physicochemical-property table for k-mers and load sequences.

    Validates the requested property names against the known di- (k=2) or
    tri-nucleotide (k=3) property lists (or selects all of them when
    ``all_property`` is True), merges in any extra user-supplied property
    values, and returns ``(sequence_list, phyche_value)``.

    Raises NameError if a requested property name is unknown.
    """
    if phyche_index is None:
        phyche_index = []
    if extra_phyche_index is None:
        extra_phyche_index = {}
    diphyche_list = ['Base stacking', 'Protein induced deformability', 'B-DNA twist', 'Dinucleotide GC Content', 'A-philicity', 'Propeller twist', 'Duplex stability:(freeenergy)', 'Duplex tability(disruptenergy)', 'DNA denaturation', 'Bending stiffness', 'Protein DNA twist', 'Stabilising energy of Z-DNA', 'Aida_BA_transition', 'Breslauer_dG', 'Breslauer_dH', 'Breslauer_dS', 'Electron_interaction', 'Hartman_trans_free_energy', 'Helix-Coil_transition', 'Ivanov_BA_transition', 'Lisser_BZ_transition', 'Polar_interaction', 'SantaLucia_dG', 'SantaLucia_dH', 'SantaLucia_dS', 'Sarai_flexibility', 'Stability', 'Stacking_energy', 'Sugimoto_dG', 'Sugimoto_dH', 'Sugimoto_dS', 'Watson-Crick_interaction', 'Twist', 'Tilt', 'Roll', 'Shift', 'Slide', 'Rise']
    triphyche_list = ['Dnase I', 'Bendability (DNAse)', 'Bendability (consensus)', 'Trinucleotide GC Content', 'Nucleosome positioning', 'Consensus_roll', 'Consensus-Rigid', 'Dnase I-Rigid', 'MW-Daltons', 'MW-kg', 'Nucleosome', 'Nucleosome-Rigid']
    # Pick the valid property universe for this k; empty for other k values.
    if k == 2:
        valid_names = diphyche_list
    elif k == 3:
        valid_names = triphyche_list
    else:
        valid_names = []
    if all_property is True:
        phyche_index = valid_names
    else:
        for name in phyche_index:
            if name not in valid_names:
                # Message text preserved verbatim from the original.
                raise NameError(('Sorry, the physicochemical properties ' + name) + ' is not exit.')
    phyche_value = extend_phyche_index(get_phyche_index(k, phyche_index), extra_phyche_index)
    sequence_list = get_data(input_data)
    return (sequence_list, phyche_value)
class RLAlgorithm(Algorithm):
    """Abstract base class for RL training loops.

    Owns the sampler and the epoch/evaluation schedule; concrete
    algorithms implement `_do_training`, `log_diagnostics` and
    `get_snapshot`.
    """

    def __init__(self, sampler, n_epochs=1000, n_train_repeat=1, n_initial_exploration_steps=10000, epoch_length=1000, eval_n_episodes=10, eval_deterministic=True, eval_render=False, control_interval=1):
        # The sampler drives env interaction and owns replay-pool access.
        self.sampler = sampler
        self._n_epochs = int(n_epochs)
        self._n_train_repeat = n_train_repeat          # gradient steps per env step
        self._epoch_length = epoch_length              # env steps per epoch
        self._n_initial_exploration_steps = n_initial_exploration_steps
        self._control_interval = control_interval
        self._eval_n_episodes = eval_n_episodes        # 0 disables evaluation
        self._eval_deterministic = eval_deterministic
        self._eval_render = eval_render
        self._sess = tf_utils.get_default_session()
        # Populated later by _init_training.
        self._env = None
        self._policy = None
        self._pool = None

    def _train(self, env, eval_env, policy, initial_exploration_policy, pool):
        """Run the full training loop: optional initial exploration phase,
        per-epoch sampling + gradient steps, then evaluation/snapshotting."""
        self._init_training(env, eval_env, policy, pool)
        if (initial_exploration_policy is None):
            self.sampler.initialize(env, policy, pool)
            initial_exploration_done = True
        else:
            self.sampler.initialize(env, initial_exploration_policy, pool)
            initial_exploration_done = False
        with self._sess.as_default():
            # gtimer bookkeeping for per-phase wall-clock statistics.
            gt.rename_root('RLAlgorithm')
            gt.reset()
            gt.set_def_unique(False)
            for epoch in gt.timed_for(range((self._n_epochs + 1)), save_itrs=True):
                logger.push_prefix(('Epoch #%d | ' % epoch))
                for t in range(self._epoch_length):
                    # Swap in the learned policy once enough exploration steps elapsed.
                    if (not initial_exploration_done):
                        if ((self._epoch_length * epoch) >= self._n_initial_exploration_steps):
                            self.sampler.set_policy(policy)
                            initial_exploration_done = True
                    self.sampler.sample()
                    if (not self.sampler.batch_ready()):
                        continue
                    gt.stamp('sample')
                    for i in range(self._n_train_repeat):
                        self._do_training(iteration=(t + (epoch * self._epoch_length)), batch=self.sampler.random_batch())
                    gt.stamp('train')
                # Diagnostics over the slice of the pool filled this epoch
                # (modulo the ring-buffer size).
                curr_len = (int((epoch * self._epoch_length)) % self.sampler.pool._max_buffer_size)
                self._policy.log_diagnostics_curr(self.sampler.pool._observations[curr_len:(curr_len + self._epoch_length)])
                self._evaluate(epoch)
                params = self.get_snapshot(epoch)
                logger.save_itr_params(epoch, params)
                times_itrs = gt.get_times().stamps.itrs
                # NOTE(review): eval_time and total_time are computed but never used.
                eval_time = (times_itrs['eval'][(- 1)] if (epoch > 1) else 0)
                total_time = gt.get_times().total
                self.sampler.log_diagnostics()
                logger.dump_tabular(with_prefix=False)
                logger.pop_prefix()
                gt.stamp('eval')
            self.sampler.terminate()

    def _evaluate(self, epoch):
        """Roll out the policy (optionally deterministically) in the eval env
        and log return / episode-length statistics."""
        if (self._eval_n_episodes < 1):
            return
        with self._policy.deterministic(self._eval_deterministic):
            paths = rollouts(self._eval_env, self._policy, self.sampler._max_path_length, self._eval_n_episodes)
        total_returns = [path['rewards'].sum() for path in paths]
        episode_lengths = [len(p['rewards']) for p in paths]
        logger.record_tabular('return-average', np.mean(total_returns))
        logger.record_tabular('return-min', np.min(total_returns))
        logger.record_tabular('return-max', np.max(total_returns))
        logger.record_tabular('return-std', np.std(total_returns))
        logger.record_tabular('episode-length-avg', np.mean(episode_lengths))
        logger.record_tabular('episode-length-min', np.min(episode_lengths))
        logger.record_tabular('episode-length-max', np.max(episode_lengths))
        logger.record_tabular('episode-length-std', np.std(episode_lengths))
        # Extra coverage metrics that only exist for the 2D maze domain.
        if (self.sampler._domain == '2Dmaze-cont'):
            coverage = len(self.sampler._visited_blocks)
            logger.record_tabular('coverage', coverage)
            logger.record_tabular('max-norm', self.sampler._max_norm)
        self._eval_env.log_diagnostics(paths)
        if self._eval_render:
            self._eval_env.render(paths)
        # Also log algorithm diagnostics on a fresh random batch.
        iteration = (epoch * self._epoch_length)
        batch = self.sampler.random_batch()
        self.log_diagnostics(iteration, batch)

    def log_diagnostics(self, iteration, batch):
        # Subclass hook: log algorithm-specific diagnostics for `batch`.
        raise NotImplementedError

    def get_snapshot(self, epoch):
        # Subclass hook: return the parameters to checkpoint for `epoch`.
        raise NotImplementedError

    def _do_training(self, iteration, batch):
        # Subclass hook: run one gradient update on `batch`.
        raise NotImplementedError

    def _init_training(self, env, eval_env, policy, pool):
        """Store env/policy/pool; build the eval env as a deep clone of the
        training env when none is supplied and evaluation is enabled."""
        self._env = env
        if (eval_env is None):
            if (self._eval_n_episodes > 0):
                import tensorflow as tf
                with tf.variable_scope('low_level_policy', reuse=True):
                    self._eval_env = deep_clone(env)
        # NOTE(review): when eval_env is not None it is never stored, so
        # self._eval_env stays unset -- confirm against the original source.
        self._policy = policy
        self._pool = pool

    def policy(self):
        # NOTE(review): reads like a stripped @property accessor -- confirm.
        return self._policy

    def env(self):
        # NOTE(review): reads like a stripped @property accessor -- confirm.
        return self._env

    def pool(self):
        # NOTE(review): reads like a stripped @property accessor -- confirm.
        return self._pool
class BoundaryEntDiscriminator(nn.Module):
    """PatchGAN-style discriminator: five stride-2 4x4 bias-free convolutions
    (3 -> 64 -> 128 -> 256 -> 512 -> 1) with LeakyReLU(0.2) between them.
    The last convolution is left unactivated (raw logits)."""

    def __init__(self):
        super(BoundaryEntDiscriminator, self).__init__()
        # Channel progression; conv1..conv5 are registered in order so the
        # parameter/state-dict layout matches the straight-line version.
        channels = [3, 64, 128, 256, 512, 1]
        for i, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:]), start=1):
            setattr(self, 'conv%d' % i,
                    nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=2, bias=False))
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
        self._initialize_weights()

    def _initialize_weights(self):
        # DCGAN-style init: N(0, 0.02) weights; zero any biases (the convs
        # here are bias-free, so the bias branch is a no-op).
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.data.normal_(0.0, 0.02)
                if module.bias is not None:
                    module.bias.data.zero_()

    def forward(self, x):
        out = x
        for i in range(1, 5):
            out = self.leakyrelu(getattr(self, 'conv%d' % i)(out))
        # Final layer: no activation, raw score map.
        return self.conv5(out)
def test_load_spatialevents():
    """load_spatialevents on the test clip's CSV yields the expected events."""
    dataset = tau2019sse.Dataset(TEST_DATA_HOME)
    clip = dataset.clip('foa_dev/split1_ir0_ov1_1')
    events = tau2019sse.load_spatialevents(clip.csv_path)
    # First and last event labels.
    assert events.labels[0] == 'cough'
    assert events.labels[-1] == 'phone'
    # Onset/offset intervals (seconds).
    assert (events.intervals[0] == [0.0, 1.0]).all()
    assert (events.intervals[-1] == [5.0, 7.0]).all()
    # Spatial annotations of the first event.
    assert events.elevations[0] == -10
    assert events.azimuths[0] == -10
    assert events.distances[0] == 2
# NOTE(review): the three lines below are the mangled remains of stripped
# decorators -- most likely "@pytest.mark.skipif(...)",
# "@pytest.mark.parametrize(...)" and "@test_utils.test(...)"; as written
# this span is not valid Python.  Restore them from the original source.
.skipif((not has_pytorch()), reason='Pytorch not installed.')
.parametrize('size', [[1, 2, 3, 4]])
_utils.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_get_external_tensor_shape_access_ndarray(size):
    # Kernel (presumably @ti.kernel originally -- decorator stripped):
    # returns one entry of the external ndarray's shape, selected by a
    # compile-time template index.
    def func(x: ti.types.ndarray(), index: ti.template()) -> ti.i32:
        return x.shape[index]

    x_hat = ti.ndarray(ti.i32, shape=size)
    # Each axis queried from inside the kernel must match the Python-side size.
    for (idx, y_ref) in enumerate(size):
        y_hat = func(x_hat, idx)
        assert (y_ref == y_hat), 'Size of axis {} should equal {} and not {}.'.format(idx, y_ref, y_hat)
class Discriminator(object):
    """MLP discriminator (TF1 graph-mode) for the pendigit GAN.

    Two 256-unit fully-connected layers with batch-norm + leaky ReLU,
    followed by a single-logit output layer.  All variables live under the
    scope stored in ``self.name``.
    """

    def __init__(self, x_dim=16):
        self.x_dim = x_dim
        self.name = 'pendigit/mlp/d_net'

    def __call__(self, x, keep=1.0, reuse=True):
        """Build (or rebuild, when ``reuse`` is True) the discriminator graph
        on input ``x`` and return the unactivated logit tensor.

        ``keep`` is accepted for interface compatibility but unused.
        """
        init = tf.random_normal_initializer(stddev=0.02)
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            hidden = x
            # Two identical hidden blocks: linear -> batch-norm -> leaky ReLU.
            for _ in range(2):
                hidden = tc.layers.fully_connected(hidden, 256, weights_initializer=init, activation_fn=tf.identity)
                hidden = leaky_relu(tc.layers.batch_norm(hidden))
            # Output logit (no activation).
            return tc.layers.fully_connected(hidden, 1, weights_initializer=init, activation_fn=tf.identity)

    def vars(self):
        """All global variables created under this discriminator's scope."""
        return [v for v in tf.global_variables() if self.name in v.name]
def _finalize_parameters_specs(user_parameters, _paramsets_requirements):
    """Index user parameter configurations by name and reduce them against
    the paramset requirements.

    Raises ``exceptions.InvalidModel`` when the same parameter name appears
    in more than one configuration.
    """
    paramsets_user_configs = {}
    for parameter in user_parameters:
        # Consistency fix: the original checked parameter['name'] but then
        # keyed the dict on parameter.get('name'); use the same access both
        # times (a missing 'name' now fails loudly with KeyError either way).
        name = parameter['name']
        if name in paramsets_user_configs:
            raise exceptions.InvalidModel(f'Multiple parameter configurations for {name} were found.')
        paramsets_user_configs[name] = parameter
    _reqs = reduce_paramsets_requirements(_paramsets_requirements, paramsets_user_configs)
    return _reqs
class FacebookManagerGetPost(VirtualFunctionTool):
    # Declarative spec for a virtual tool that fetches a single Facebook
    # post by its identifier; the class attributes below describe the tool's
    # interface rather than implement behavior.
    name = 'FacebookManagerGetPost'
    summary = 'Get the details of a post by its post_id.'
    # One required string argument identifying the post.
    parameters: List[ArgParameter] = [{'name': 'post_id', 'type': 'string', 'description': 'The unique identifier of the post.', 'required': True}]
    # Returns the post object (post_id, content, timestamp, privacy_setting).
    returns: List[ArgReturn] = [{'name': 'post', 'type': 'object', 'description': 'The post details containing fields such as post_id, content, timestamp, and privacy_setting.'}]
    # Raised when post_id does not match an existing post.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'post_id' parameter is not found."}]
def main():
    """fsner command-line entry point: parse args and dispatch to the chosen
    sub-command (currently only ``trainer``)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--version', action='version', version='fsner-{version}'.format(version=__version__))
    sub_parsers = parser.add_subparsers()
    trainer_parser = sub_parsers.add_parser('trainer')
    trainer_parser = init_trainer_parser(trainer_parser)
    trainer_parser.set_defaults(func=trainer_main)
    args = parser.parse_args()
    if 'trainer' in args:
        # Echo the parsed configuration, skipping the dispatch callable.
        print('Parameters:')
        print('=' * 50)
        for key, value in vars(args).items():
            value = str(value)
            if str(key) == 'func':
                continue
            print(f'{key:<30}{value:>20}')
        print('=' * 50)
    try:
        args.func(args)
    except AttributeError:
        # No sub-command selected: args has no .func, so show the help text.
        parser.print_help()
        parser.exit()
# NOTE(review): the line below is the mangled remnant of a stripped
# decorator -- most likely "@run_spec_function('msmarco')"; restore it from
# the original source.
_spec_function('msmarco')
def get_msmarco_spec(track: str, valid_topk: Optional[int]=None) -> RunSpec:
    """Build the HELM RunSpec for the MSMARCO ranking scenario on `track`,
    optionally restricting validation ranking to the top `valid_topk`."""
    # valid_topk may arrive as a string from the run-spec parser; normalize it.
    valid_topk = (None if (valid_topk is None) else int(valid_topk))
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.msmarco_scenario.MSMARCOScenario', args={'track': track, 'valid_topk': valid_topk})
    adapter_spec: AdapterSpec = get_ranking_binary_adapter_spec(max_train_instances=4, stop_sequences=['\n'])
    return RunSpec(name=f'msmarco:track={track},valid_topk={valid_topk}', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=get_msmarco_metric_specs(track=track, rank=valid_topk), groups=[f'msmarco_{track}'])
def detokenizer(string):
    """Undo PTB/WikiText-style tokenization artifacts in ``string``.

    Collapses split quotes and contractions, restores WikiText ``@``
    number separators, re-attaches punctuation, and normalizes headings,
    brackets and whitespace.
    """
    # PTB-style quote tokens.
    string = string.replace('`` ', '"')
    string = string.replace(" ''", '"')
    string = string.replace('` ', '"')
    string = string.replace(" ' ", '" ')
    # Possessives.
    string = string.replace("s '", "s'")
    string = re.sub("/' [0-9]/", "/'[0-9]/", string)
    # WikiText '@' number separators.  Fix: the original lines had their '@'
    # characters stripped (a file-wide corruption), so e.g. every ' , ' in
    # ordinary prose was collapsed and one replace turned spaces into dots;
    # restored to the standard WikiText detokenizer forms.
    string = string.replace(' @-@ ', '-')
    string = string.replace(' @,@ ', ',')
    string = string.replace(' @.@ ', '.')
    # Punctuation spacing.
    string = string.replace(' :', ':')
    string = string.replace(' ;', ';')
    string = string.replace(' .', '.')
    string = string.replace(' !', '!')
    string = string.replace(' ?', '?')
    string = string.replace(' ,', ',')
    # Tighten brackets: drop spaces just inside (), [] and {}.
    string = re.sub('\\(\\s*([^\\)]*?)\\s*\\)', '(\\1)', string)
    string = re.sub('\\[\\s*([^\\]]*?)\\s*\\]', '[\\1]', string)
    string = re.sub('{\\s*([^}]*?)\\s*}', '{\\1}', string)
    # WikiText heading markers.
    string = string.replace('= = = =', '====')
    string = string.replace('= = =', '===')
    string = string.replace('= =', '==')
    # Degree sign and newline-adjacent spaces.
    string = string.replace(((' ' + chr(176)) + ' '), chr(176))
    string = string.replace(' \n', '\n')
    string = string.replace('\n ', '\n')
    # PTB's number placeholder and remaining contractions.
    string = string.replace(' N ', ' 1 ')
    string = string.replace(" 's", "'s")
    string = string.replace(" n't ", "n't ")
    string = string.replace(" 'd ", "'d ")
    string = string.replace(" 'm ", "'m ")
    string = string.replace(" 're ", "'re ")
    string = string.replace(" 've ", "'ve ")
    return string
def gauss_on_linear(I):
    """Split the ideal's generators into linear and higher-degree parts,
    Gaussian-reduce the linear part, and return reduced-linear followed by
    the untouched non-linear polynomials (zeros are dropped)."""
    linear_part = []
    higher_part = []
    for poly in (Polynomial(p) for p in I):
        if poly.is_zero():
            continue
        bucket = linear_part if poly.deg() <= 1 else higher_part
        bucket.append(poly)
    if not linear_part:
        return higher_part
    return list(gauss_on_polys(linear_part)) + higher_part
def setup_cfg(args):
    """Build a frozen (detectron2-style) config from CLI args.

    Registers the export- and PointRend-specific keys before merging the
    config file, since merging would otherwise reject unknown keys.
    """
    cfg = get_cfg()
    # Single-process data loading for the export/deployment context.
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg = add_export_config(cfg)
    add_pointrend_config(cfg)
    # CLI file first, then individual "KEY VALUE" overrides.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
class Runner(object):
    """Experience collector for PPO-style training on a vectorized env.

    Steps the (possibly recurrent) model for ``nsteps`` transitions, then
    computes GAE(lambda) advantages and discounted returns.
    """

    def __init__(self, *, env, model, nsteps, gamma, lam):
        self.env = env
        self.model = model
        nenv = env.num_envs
        # Persistent observation buffer; dtype mirrors the model's input placeholder.
        self.obs = np.zeros(((nenv,) + env.observation_space.shape), dtype=model.train_model.X.dtype.name)
        self.obs[:] = env.reset()
        self.gamma = gamma  # discount factor
        self.lam = lam      # GAE lambda
        self.nsteps = nsteps
        self.states = model.initial_state  # recurrent state (None for feed-forward models)
        self.dones = [False for _ in range(nenv)]

    def run(self):
        """Collect one rollout of ``nsteps`` transitions.

        Returns the flattened (obs, returns, dones, actions, values,
        neglogpacs) arrays, the initial recurrent states, the collected
        episode-info dicts, and the number of episodes that finished.
        """
        (mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs) = ([], [], [], [], [], [])
        mb_states = self.states
        epinfos = []
        num_episodes = 0
        for _ in range(self.nsteps):
            (actions, values, self.states, neglogpacs) = self.model.step(self.obs, self.states, self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            (self.obs[:], rewards, self.dones, infos) = self.env.step(actions)
            # Count episode terminations across the vectorized envs.
            for (i, done) in enumerate(self.dones):
                if done:
                    num_episodes += 1
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo:
                    epinfos.append(maybeepinfo)
            mb_rewards.append(rewards)
        # Batch of steps -> arrays of shape (nsteps, nenv, ...).
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the documented replacement and is what np.bool aliased.
        mb_dones = np.asarray(mb_dones, dtype=bool)
        last_values = self.model.value(self.obs, self.states, self.dones)
        # GAE(lambda): sweep backwards through time, bootstrapping from
        # last_values and masking across episode boundaries.
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        for t in reversed(range(self.nsteps)):
            if (t == (self.nsteps - 1)):
                nextnonterminal = (1.0 - self.dones)
                nextvalues = last_values
            else:
                nextnonterminal = (1.0 - mb_dones[(t + 1)])
                nextvalues = mb_values[(t + 1)]
            delta = ((mb_rewards[t] + ((self.gamma * nextvalues) * nextnonterminal)) - mb_values[t])
            mb_advs[t] = lastgaelam = (delta + (((self.gamma * self.lam) * nextnonterminal) * lastgaelam))
        mb_returns = (mb_advs + mb_values)
        return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos, num_episodes)
class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Stores the model hyper-parameters (vocab/context sizes, embedding and
    feed-forward dims, layer/head counts, dropout rates) plus the
    sequence-summary options used by downstream heads.
    """
    pretrained_config_archive_map = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size=246534, n_positions=256, n_ctx=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        super(CTRLConfig, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx                  # attention context window
        self.n_positions = n_positions      # maximum position embeddings
        self.n_embd = n_embd                # hidden size
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff                      # inner feed-forward dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary head options.
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels

    # Fix: these accessors are @property in the upstream transformers
    # implementation (decorators were stripped throughout this file);
    # without the decorator, config.max_position_embeddings would yield a
    # bound method instead of an int.
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
.skipif((not _ti_core.GGUI_AVAILABLE), reason='GGUI Not Available')
_utils.test(arch=supported_archs)
def test_draw_part_of_mesh_instances():
N = 10
NV = ((N + 1) ** 2)
NT = (2 * (N ** 2))
NE = (((2 * N) * (N + 1)) + (N ** 2))
pos = ti.Vector.field(3, ti.f32, shape=NV)
tri = ti.field(ti.i32, shape=(3 * NT))
edge = ti.Vector.field(2, ti.i32, shape=NE)
NInstanceRows = 10
NInstanceCols = 10
NInstance = (NInstanceRows * NInstanceCols)
instances_transforms = ti.Matrix.field(4, 4, ti.f32, shape=(NInstance,))
def init_transforms_of_instances():
identity = ti.Matrix.identity(ti.f32, 4)
for i in range(NInstanceRows):
for j in range(NInstanceCols):
index = ((i * NInstanceCols) + j)
instances_transforms[index] = identity
translate_matrix = ti.math.translate((1.2 * j), 0, ((- 1.2) * i))
instances_transforms[index] = (translate_matrix instances_transforms[index])
def init_pos():
for (i, j) in ti.ndrange((N + 1), (N + 1)):
idx = ((i * (N + 1)) + j)
pos[idx] = ti.Vector([(i / N), (1.0 - (j / N)), 0.5])
def init_tri():
for (i, j) in ti.ndrange(N, N):
tri_idx = (6 * ((i * N) + j))
pos_idx = ((i * (N + 1)) + j)
if (((i + j) % 2) == 0):
tri[(tri_idx + 0)] = pos_idx
tri[(tri_idx + 1)] = ((pos_idx + N) + 2)
tri[(tri_idx + 2)] = (pos_idx + 1)
tri[(tri_idx + 3)] = pos_idx
tri[(tri_idx + 4)] = ((pos_idx + N) + 1)
tri[(tri_idx + 5)] = ((pos_idx + N) + 2)
else:
tri[(tri_idx + 0)] = pos_idx
tri[(tri_idx + 1)] = ((pos_idx + N) + 1)
tri[(tri_idx + 2)] = (pos_idx + 1)
tri[(tri_idx + 3)] = (pos_idx + 1)
tri[(tri_idx + 4)] = ((pos_idx + N) + 1)
tri[(tri_idx + 5)] = ((pos_idx + N) + 2)
def init_edge():
for (i, j) in ti.ndrange((N + 1), N):
edge_idx = ((i * N) + j)
pos_idx = ((i * (N + 1)) + j)
edge[edge_idx] = ti.Vector([pos_idx, (pos_idx + 1)])
start = (N * (N + 1))
for (i, j) in ti.ndrange(N, (N + 1)):
edge_idx = ((start + (j * N)) + i)
pos_idx = ((i * (N + 1)) + j)
edge[edge_idx] = ti.Vector([pos_idx, ((pos_idx + N) + 1)])
start = ((2 * N) * (N + 1))
for (i, j) in ti.ndrange(N, N):
edge_idx = ((start + (i * N)) + j)
pos_idx = ((i * (N + 1)) + j)
if (((i + j) % 2) == 0):
edge[edge_idx] = ti.Vector([pos_idx, ((pos_idx + N) + 2)])
else:
edge[edge_idx] = ti.Vector([(pos_idx + 1), ((pos_idx + N) + 1)])
init_transforms_of_instances()
init_pos()
init_tri()
init_edge()
window = ti.ui.Window('test', (1024, 1024), vsync=True, show_window=False)
canvas = window.get_canvas()
scene = window.get_scene()
camera = ti.ui.Camera()
camera.position((- 1.), 2., 2.)
camera.lookat((- 1.), 2., 1.)
camera.fov(90)
def render():
scene.set_camera(camera)
scene.point_light(pos=(0.5, 1, 2), color=(1, 1, 1))
scene.mesh_instance(pos, tri, color=((39 / 255), (123 / 255), (192 / 255)), two_sided=True, transforms=instances_transforms, instance_count=10, instance_offset=2)
canvas.scene(scene)
for _ in range(RENDER_REPEAT):
render()
window.get_image_buffer_as_numpy()
render()
verify_image(window.get_image_buffer_as_numpy(), 'test_draw_part_of_mesh_instances')
window.destroy() |
def _have_importers():
    """Scan sys.meta_path and report which pyximport hooks are installed.

    Returns ``(has_py_importer, has_pyx_importer)``: PyImporter is a
    subclass of PyxImporter, so it is counted separately from plain
    PyxImporter instances.
    """
    found_py = False
    found_pyx = False
    for finder in sys.meta_path:
        if not isinstance(finder, PyxImporter):
            continue
        if isinstance(finder, PyImporter):
            found_py = True
        else:
            found_pyx = True
    return (found_py, found_pyx)
def main(args):
    """Train MAPPO/RMAPPO agents on Overcooked.

    Parses CLI args, validates the algorithm/recurrence combination,
    optionally samples HSP reward weights, selects the device, prepares
    the run directory and wandb (or local run folders), seeds RNGs, builds
    the envs and launches the shared- or separated-policy runner.
    """
    parser = get_config()
    all_args = parse_args(args, parser)
    # Recurrent variants must use a recurrent policy; feed-forward variants must not.
    if ((all_args.algorithm_name == 'rmappo') or (all_args.algorithm_name == 'rmappg')):
        assert (all_args.use_recurrent_policy or all_args.use_naive_recurrent_policy), 'check recurrent policy!'
    elif ((all_args.algorithm_name == 'mappo') or (all_args.algorithm_name == 'mappg')):
        assert ((all_args.use_recurrent_policy == False) and (all_args.use_naive_recurrent_policy == False)), 'check recurrent policy!'
    else:
        raise NotImplementedError
    if all_args.use_hsp:
        def parse_value(s):
            # 'r[l:r:n]' -> random choice from linspace(l, r, n);
            # 'r<v>'     -> uniform sample from [-v, v];
            # any other string is passed through unchanged.
            if s.startswith('r'):
                if ('[' in s):
                    s = s[2:(- 1)]
                    (l, r, n) = s.split(':')
                    (l, r, n) = (float(l), float(r), int(n))
                    return np.random.choice(np.linspace(l, r, n))
                else:
                    v = float(s[1:])
                    return np.random.uniform((- v), v)
            return s
        # Sample each component of w0/w1 and re-serialize them back into
        # comma-joined strings (downstream code expects string form).
        w0 = []
        for s in all_args.w0.split(','):
            w0.append(parse_value(s))
        all_args.w0 = ''
        for s in w0:
            all_args.w0 += (str(s) + ',')
        all_args.w0 = all_args.w0[:(- 1)]
        w1 = []
        for s in all_args.w1.split(','):
            w1.append(parse_value(s))
        all_args.w1 = ''
        for s in w1:
            all_args.w1 += (str(s) + ',')
        all_args.w1 = all_args.w1[:(- 1)]
    # Device selection (optionally with deterministic cuDNN).
    if (all_args.cuda and torch.cuda.is_available()):
        print('choose to use gpu...')
        device = torch.device('cuda:0')
        torch.set_num_threads(all_args.n_training_threads)
        if all_args.cuda_deterministic:
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
    else:
        print('choose to use cpu...')
        device = torch.device('cpu')
        torch.set_num_threads(all_args.n_training_threads)
    # results/<env>/<layout>/<algo>/<experiment> under the package root.
    run_dir = ((((Path((os.path.split(os.path.dirname(os.path.abspath(__file__)))[0] + '/results')) / all_args.env_name) / all_args.layout_name) / all_args.algorithm_name) / all_args.experiment_name)
    if (not run_dir.exists()):
        os.makedirs(str(run_dir))
    if all_args.use_wandb:
        run = wandb.init(config=all_args, project=all_args.env_name, entity=all_args.wandb_name, notes=socket.gethostname(), name=((((str(all_args.algorithm_name) + '_') + str(all_args.experiment_name)) + '_seed') + str(all_args.seed)), group=all_args.layout_name, dir=str(run_dir), job_type='training', reinit=True, tags=all_args.wandb_tags)
    else:
        # Local logging: pick the next free "runN" folder.
        if (not run_dir.exists()):
            curr_run = 'run1'
        else:
            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')]
            if (len(exst_run_nums) == 0):
                curr_run = 'run1'
            else:
                curr_run = ('run%i' % (max(exst_run_nums) + 1))
        run_dir = (run_dir / curr_run)
        if (not run_dir.exists()):
            os.makedirs(str(run_dir))
    # NOTE(review): the '' between experiment name and user name below looks
    # like a lost separator (plausibly a stripped '@', a corruption seen
    # elsewhere in this file) -- confirm against the original source.
    setproctitle.setproctitle(((((((str(all_args.algorithm_name) + '-') + str(all_args.env_name)) + '-') + str(all_args.experiment_name)) + '') + str(all_args.user_name)))
    # Seed all RNGs.
    torch.manual_seed(all_args.seed)
    torch.cuda.manual_seed_all(all_args.seed)
    np.random.seed(all_args.seed)
    envs = make_train_env(all_args, run_dir)
    eval_envs = (make_eval_env(all_args, run_dir) if all_args.use_eval else None)
    num_agents = all_args.num_agents
    config = {'all_args': all_args, 'envs': envs, 'eval_envs': eval_envs, 'num_agents': num_agents, 'device': device, 'run_dir': run_dir}
    # Shared policy: one network for all agents; otherwise one per agent.
    if all_args.share_policy:
        from hsp.runner.shared.overcooked_runner import OvercookedRunner as Runner
    else:
        from hsp.runner.separated.overcooked_runner import OvercookedRunner as Runner
    runner = Runner(config)
    runner.run()
    # Teardown: close envs and flush loggers.
    envs.close()
    if (all_args.use_eval and (eval_envs is not envs)):
        eval_envs.close()
    if all_args.use_wandb:
        run.finish()
    else:
        runner.writter.export_scalars_to_json(str((runner.log_dir + '/summary.json')))
        runner.writter.close()
def tanh_quantize(input, bits):
    """Quantize ``input`` to ``2**bits - 1`` uniform levels in tanh space.

    The tensor is squashed with tanh into (-1, 1), uniformly quantized on
    [0, 1], mapped back to (-1, 1), and finally passed through arctanh so
    the result lives in the original (pre-tanh) domain.  ``bits == 1``
    degenerates to the sign function.
    """
    assert bits >= 1, bits
    if bits == 1:
        return torch.sign(input)
    squashed = torch.tanh(input)          # (-1, 1)
    unit = (squashed + 1.0) / 2           # (0, 1)
    levels = math.pow(2.0, bits) - 1
    quantized = torch.floor(unit * levels + 0.5) / levels
    symmetric = 2 * quantized - 1         # back to (-1, 1)
    # arctanh expressed via its logarithmic identity (matches the original
    # computation exactly; diverges at +/-1, as before).
    return 0.5 * torch.log((1 + symmetric) / (1 - symmetric))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.