code stringlengths 101 5.91M |
|---|
class Resnet18_3D(nn.Module):
    """Wrap a ``resnet18`` backbone so it emits feature embeddings.

    The classification head (``fc``) is swapped for an identity module, so
    the forward pass returns the penultimate feature vector.
    """

    def __init__(self, embedding_dimension=512):
        # NOTE(review): ``embedding_dimension`` is accepted but unused here;
        # the output width is fixed by the backbone's fc.in_features — confirm.
        super(Resnet18_3D, self).__init__()
        self.model = resnet18()
        self.input_features_fc_layer = self.model.fc.in_features
        self.model.fc = common_functions.Identity()

    def forward(self, images):
        """Return the backbone embedding for a batch of images."""
        return self.model(images)
def phi_on_multiplicative_basis(compo):
    """Image of a composition under phi, expressed on the multiplicative basis.

    A single-part composition ``(n,)`` maps directly to the generator
    ``f(n)``; longer compositions are delegated to ``compute_u_on_compo``.
    """
    f = F_algebra(QQ).gen
    # BUG FIX (cleanup): the original special-cased tuple(compo) == (2,) and
    # returned f(2) — exactly what the generic single-part branch below
    # produces — so the redundant branch is removed.
    if len(compo) == 1:
        (n,) = compo
        return f(n)
    return compute_u_on_compo(compo)
def preprocess_args(fun, varnames):
    """Build a decorator that applies ``fun`` to the arguments named in
    ``varnames`` before the wrapped function runs.

    Both positional arguments (matched against the function's co_varnames)
    and keyword arguments are preprocessed.
    """

    def wrapper(f, *args, **kwargs):
        # Python 2 functions expose ``func_code``; Python 3 uses ``__code__``.
        code = f.func_code if hasattr(f, 'func_code') else f.__code__
        positional_names = code.co_varnames
        processed_args = [
            fun(value) if name in varnames else value
            for value, name in zip(args, positional_names)
        ]
        processed_kwargs = {
            key: (fun(value) if key in varnames else value)
            for key, value in kwargs.items()
        }
        return f(*processed_args, **processed_kwargs)

    return decorator.decorator(wrapper)
def main(argv=None):
    """Command-line entry point: build a vocabulary for a collection.

    Returns 1 (after printing usage) when no collection argument is given,
    otherwise delegates to ``process`` and returns its result.
    """
    if argv is None:
        argv = sys.argv[1:]
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] collection')
    parser.add_option('--overwrite', default=0, type='int', help='overwrite existing file (default: 0)')
    parser.add_option('--rootpath', default=ROOT_PATH, type='string', help=('rootpath (default: %s)' % ROOT_PATH))
    parser.add_option('--caption_name', default='train_collection.caption.txt', type='string', help='caption_name')
    parser.add_option('--language', default=DEFAULT_LANG, type='string', help=('language (default: %s)' % DEFAULT_LANG))
    # BUG FIX: the defaults below hard-coded 'bow' and 5 while the help text
    # advertised DEFAULT_TEXT_ENCODING and MIN_WORD_COUNT; use the named
    # constants so the code and its documentation agree.
    parser.add_option('--encoding', default=DEFAULT_TEXT_ENCODING, type='choice', choices=TEXT_ENCODINGS, help=('text encoding strategy. Valid choices are %s. (default: %s)' % (TEXT_ENCODINGS, DEFAULT_TEXT_ENCODING)))
    parser.add_option('--threshold', default=MIN_WORD_COUNT, type='int', help=('minimum word occurrence (default: %d)' % MIN_WORD_COUNT))
    parser.add_option('--folder_name', default='vocab', type='string', help='The output folder name (default: vocab)')
    (options, args) = parser.parse_args(argv)
    if len(args) < 1:
        parser.print_help()
        return 1
    # Only English and Chinese captions are supported downstream.
    assert options.language in ['en', 'zh'], ('language %s not supported' % options.language)
    return process(options, args[0])
class Clip(core.Clip):
    """A single dataset clip: audio plus its annotation, resolved from the index.

    Attributes:
        audio_path: path to the clip's audio file.
        annotation_path: path to the clip's annotation file.
    """

    def __init__(self, clip_id, data_home, dataset_name, index, metadata):
        super().__init__(clip_id, data_home, dataset_name=dataset_name, index=index, metadata=metadata)
        self.audio_path = self.get_path('audio')
        self.annotation_path = self.get_path('annotation')

    # BUG FIX: the decorator was garbled as a bare ``_property`` statement
    # (a NameError when the class body executes); restored to ``@property``.
    @property
    def annotation(self):
        """Load and return the clip's annotation."""
        return load_annotation(self.annotation_path)

    def audio(self):
        """Load and return the clip's audio.

        NOTE(review): loaders like ``annotation`` are properties; no decorator
        is visible here in the source, so this is left a plain method — confirm.
        """
        return load_audio(self.audio_path)

    def to_jams(self):
        """Convert the clip's audio, annotation and metadata to a JAMS object."""
        return jams_utils.jams_converter(audio_path=self.audio_path, annotation_data=[(self.annotation, None)], metadata=self._metadata)
def mk_lean_code_file(file_names: LeanFileNames, lean_info: LeanProgramInfo, assembly_info: LeanAssemblyInfo):
    """Write the autogenerated Lean code file for the compiled program.

    Emits a header comment, the core import, the field variables, and one
    code definition per function, slicing the flattened code listing by each
    function's [start_pc, end_pc) range.
    """
    # Flatten the per-element code lists into one sequence indexable by pc.
    lean_code = [code_line for code_elt in assembly_info.lean_code for code_line in code_elt.code]
    # BUG FIX: the file was opened without a context manager, so the handle
    # leaked if any write raised; ``with`` guarantees it is closed.
    with open(file_names.code_filename, 'w') as out:
        print('/-', file=out)
        print('File: {}.lean'.format(file_names.code_base_filename), file=out)
        print(file=out)
        print('Autogenerated file.', file=out)
        print('-/', file=out)
        print(('import ' + mk_lean_core_import_path('hoare')), file=out)
        print(file=out)
        print('variables {F : Type} [field F] [decidable_eq F]', file=out)
        print(file=out)
        for func in lean_info.all_funcs:
            print('/- function {} code definition -/'.format(func.name), file=out)
            print(file=out)
            mk_lean_function_code_def(func.name, lean_info.main_scope, lean_code[func.start_pc:func.end_pc], out)
            print(file=out)
class SkewTableaux(UniqueRepresentation, Parent):
    """Parent class for the set of all skew tableaux."""

    def __init__(self, category=None):
        # Default to the category of sets when none is supplied.
        if (category is None):
            Parent.__init__(self, category=Sets())
        else:
            Parent.__init__(self, category=category)

    def _repr_(self):
        # String representation used by Sage's repr machinery.
        return 'Skew tableaux'

    def _element_constructor_(self, st):
        # Coerce ``st`` into an element of this parent.
        return self.element_class(self, st)

    Element = SkewTableau
    options = Tableaux.options

    def __contains__(self, x):
        """Return whether ``x`` is (or can be converted to) a skew tableau."""
        if isinstance(x, SkewTableau):
            return True
        # EAFP: membership means "the element constructor accepts it".
        try:
            self.element_class(self, x)
        except Exception:
            return False
        return True

    def from_expr(self, expr):
        """Build a skew tableau from an expression ``(inner, rows)``.

        ``expr[0]`` is the inner partition; ``expr[1]`` lists the row entries
        (indexed from the end, hence the reversed access below), each padded
        on the left with ``None`` cells covering the inner shape.
        """
        skp = []
        outer = expr[1]
        # Pad the inner partition with zeros up to the number of rows.
        inner = (expr[0] + ([0] * (len(outer) - len(expr[0]))))
        for i in range(len(outer)):
            # Rows in ``expr[1]`` are stored in reverse order.
            skp.append((([None] * inner[i]) + outer[(- (i + 1))]))
        return self.element_class(self, skp)

    def from_chain(self, chain):
        """Build a skew tableau from a chain of partitions.

        Cell ``(r, c)`` receives entry ``i`` when it first appears in the
        ``i``-th partition of the chain.
        """
        shape = chain[(- 1)]
        T = [([None] * r) for r in shape]
        for i in range(1, len(chain)):
            la = chain[i]
            mu = chain[(i - 1)]
            # Pad the smaller partition so rows can be compared directly.
            # NOTE(review): ``+=`` mutates chain[i-1] when it is a list —
            # confirm callers pass copies or tuples.
            mu += ([0] * (len(la) - len(mu)))
            for r in range(len(la)):
                for c in range(mu[r], la[r]):
                    T[r][c] = i
        return self.element_class(self, T)

    def from_shape_and_word(self, shape, word):
        """Build a skew tableau of ``shape = (outer, inner)`` from a reading word.

        The word is consumed over the rows below the inner shape first
        (bottom-up, filled completely), then over the remaining rows,
        skipping the cells covered by the inner shape.
        """
        (outer, inner) = shape
        st = [([None] * row_length) for row_length in outer]
        w_count = 0
        # Rows entirely outside the inner shape, bottom-up.
        for i in reversed(range(len(inner), len(outer))):
            for j in range(outer[i]):
                st[i][j] = word[w_count]
                w_count += 1
        # Rows overlapping the inner shape: only fill columns past inner[i].
        for i in reversed(range(len(inner))):
            for j in range(outer[i]):
                if (j >= inner[i]):
                    st[i][j] = word[w_count]
                    w_count += 1
        return self.element_class(self, st)
class Take_all():
    """Measurement strategy that collects every edge attribute of a network."""

    def take(self, net, RAT=None, LDP=None):
        """Collect edge attributes from ``net``.

        Args:
            net: network object exposing ``RAT``, ``LDP`` and ``SubNet``.
            RAT: radio access technology name(s). ``None`` means all of
                ``net.RAT``; a string is treated as a one-element list; any
                other sequence is used as-is.
            LDP: location-dependent parameter name(s), same convention.

        Returns:
            dict mapping ldp -> rat -> edge-attribute items.
        """
        cd = {}
        if RAT is None:
            rats = net.RAT
        elif isinstance(RAT, str):
            rats = [RAT]
        else:
            # BUG FIX: a list/tuple argument previously left the local name
            # unbound and raised NameError in the loop below; use it directly.
            rats = RAT
        if LDP is None:
            ldps = net.LDP
        elif isinstance(LDP, str):
            ldps = [LDP]
        else:
            # BUG FIX: same unbound-local issue for LDP.
            ldps = LDP
        for ldp in ldps:
            cd[ldp] = {}
            for rat in rats:
                # Best effort: a sub-network may lack this attribute; skip it
                # instead of failing the whole collection (narrowed from a
                # bare ``except:`` so KeyboardInterrupt etc. still propagate).
                try:
                    cd[ldp][rat] = nx.get_edge_attributes(net.SubNet[rat], ldp).items()
                except Exception:
                    pass
        return cd
def mean_kernel_inception_distance():
    """Compute blended FID/KID scores of './fake' images against the
    './real_source' and './real_target' folders and print them.

    Target-domain metrics are weighted by (1 - source_alpha) and
    source-domain metrics by source_alpha, then halved.
    """
    source_alpha = 0.98
    target_alpha = (1 - source_alpha)
    # Load each image folder and move channels first (NHWC -> NCHW).
    filenames = glob(os.path.join('./real_source', '*.*'))
    real_source_images = [get_images(filename) for filename in filenames]
    real_source_images = np.transpose(real_source_images, axes=[0, 3, 1, 2])
    filenames = glob(os.path.join('./real_target', '*.*'))
    real_target_images = [get_images(filename) for filename in filenames]
    real_target_images = np.transpose(real_target_images, axes=[0, 3, 1, 2])
    filenames = glob(os.path.join('./fake', '*.*'))
    fake_images = [get_images(filename) for filename in filenames]
    fake_images = np.transpose(fake_images, axes=[0, 3, 1, 2])
    BATCH_SIZE = 1
    # TF1-style placeholders: images feed the Inception network; the two
    # activation placeholders feed the distance graphs.
    inception_images = tf.placeholder(tf.float32, [BATCH_SIZE, 3, None, None])
    real_activation = tf.placeholder(tf.float32, [None, None], name='activations1')
    fake_activation = tf.placeholder(tf.float32, [None, None], name='activations2')
    fcd = frechet_classifier_distance_from_activations(real_activation, fake_activation)
    (kcd_mean, kcd_stddev) = kernel_classifier_distance_and_std_from_activations(real_activation, fake_activation, max_block_size=10)
    activations = inception_activations(inception_images)
    # Distances of fake vs the target domain ...
    FID = get_fid(fcd, BATCH_SIZE, real_target_images, fake_images, inception_images, real_activation, fake_activation, activations)
    KID_mean = get_kid(kcd_mean, BATCH_SIZE, real_target_images, fake_images, inception_images, real_activation, fake_activation, activations)
    KID_stddev = get_kid(kcd_stddev, BATCH_SIZE, real_target_images, fake_images, inception_images, real_activation, fake_activation, activations)
    # ... and of fake vs the source domain.
    mean_FID = get_fid(fcd, BATCH_SIZE, real_source_images, fake_images, inception_images, real_activation, fake_activation, activations)
    mean_KID_mean = get_kid(kcd_mean, BATCH_SIZE, real_source_images, fake_images, inception_images, real_activation, fake_activation, activations)
    mean_KID_stddev = get_kid(kcd_stddev, BATCH_SIZE, real_source_images, fake_images, inception_images, real_activation, fake_activation, activations)
    # Blend target- and source-domain scores.
    mean_FID = (((target_alpha * FID) + (source_alpha * mean_FID)) / 2.0)
    mean_KID_mean = (((target_alpha * KID_mean) + (source_alpha * mean_KID_mean)) / 2.0)
    mean_KID_stddev = (((target_alpha * KID_stddev) + (source_alpha * mean_KID_stddev)) / 2.0)
    print()
    # NOTE(review): FID is scaled down by 100 while KID is scaled up by 100
    # for display — confirm these scalings are intentional.
    print('mean_FID : ', (mean_FID / 100))
    print('mean_KID_mean : ', (mean_KID_mean * 100))
    print('mean_KID_stddev : ', (mean_KID_stddev * 100))
def init_test_kitti():
    """Populate the module-level ``config`` dict for a KITTI tracking test run."""
    # Mutates the shared ``config`` in place: dataset paths, the pretrained
    # checkpoint to resume from, and runtime parameters.
    config.update({
        'kitti_image_root': '/home/ssm/ssj/dataset/KITTI/tracking/image_2',
        'kitti_detection_root': '/home/ssm/ssj/dataset/KITTI/tracking/det_2_lsvm',
        'type': 'train',
        'dataset_type': 'training',
        'resume': '/home/ssm/ssj/weights/KITTI/weights0406-I60k-M80-G5-C10-All-Continue/ssj300_0712_140000.pth',
        'cuda': True,
        'batch_size': 1,
        'false_constant': 10,
        'max_object': 80,
    })
class TestBackBones(unittest.TestCase):
    """Sanity checks for the ResNet and MobileNet backbone wrappers."""

    def count_layers(self, model):
        """Count weighted layers from a torchvision ResNet's children list.

        Children 4-7 hold the residual stages; each block contributes 2 convs
        (BasicBlock) or 3 (Bottleneck); +2 accounts for the stem conv and fc.
        """
        if isinstance(model[4][0], BasicBlock):
            n_convs = 2
        elif isinstance(model[4][0], Bottleneck):
            n_convs = 3
        else:
            raise ValueError('Backbone layer block not supported!')
        return ((sum([len(model[i]) for i in range(4, 8)]) * n_convs) + 2)

    def test_resnet(self):
        """Feature widths, layer counts and invalid-name handling for ResNet."""
        rn_18 = ResNetBackbone('resnet18')
        rn_34 = ResNetBackbone('resnet34')
        rn_50 = ResNetBackbone('resnet50')
        rn_101 = ResNetBackbone('resnet101')
        rn_152 = ResNetBackbone('resnet152')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(rn_18(tensor).shape[1], 512)
        self.assertEqual(rn_34(tensor).shape[1], 512)
        self.assertEqual(rn_50(tensor).shape[1], 2048)
        self.assertEqual(rn_101(tensor).shape[1], 2048)
        # BUG FIX: this check used assertAlmostEqual on an integer channel
        # count; assertEqual matches its siblings and is exact.
        self.assertEqual(rn_152(tensor).shape[1], 2048)
        self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
        self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
        self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
        self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
        self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
        with self.assertRaises(ValueError):
            ResNetBackbone('resnet51')

    def test_mobilenet(self):
        """MobileNetV2 backbone must emit 1280-channel features."""
        mobilenet = MobileNetBackbone('mobilenet_v2')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(mobilenet(tensor).shape[1], 1280)
class ResNet(nn.Module):
    """Dilated ResNet trunk with a PSP head and an auxiliary DSN branch.

    Stages 3 and 4 trade stride for dilation (2 and 4), keeping a dense
    output for segmentation. ``forward`` returns the criterion's loss when
    both a criterion and labels are given, otherwise [main_out, dsn_out].
    """

    def __init__(self, block, layers, num_classes, criterion):
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep 3-conv stem (3->64->64->128) instead of a single 7x7 conv.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        # NOTE(review): ``self.relu`` is never used in forward; kept so the
        # attribute set stays backward-compatible.
        self.relu = nn.ReLU(inplace=False)
        # BUG FIX: ``self.maxpool`` was assigned twice; the first assignment
        # (without ceil_mode=True) was dead and has been removed.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 1, 1))
        # PSP pooling head producing the main segmentation logits.
        self.head = nn.Sequential(PSPModule(2048, 512), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        # Auxiliary (deep supervision) classifier fed from layer3 features.
        self.dsn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), InPlaceABNSync(512), nn.Dropout2d(0.1), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        self.criterion = criterion

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
        """Stack ``blocks`` residual blocks, downsampling/dilating as requested."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), affine=affine_par))
        layers = []
        # A tuple ``multi_grid`` supplies a per-block dilation multiplier.
        generate_multi_grid = (lambda index, grids: (grids[(index % len(grids))] if isinstance(grids, tuple) else 1))
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid)))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))
        return nn.Sequential(*layers)

    def forward(self, x, labels=None):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # Auxiliary prediction from the layer3 features.
        x_dsn = self.dsn(x)
        x = self.layer4(x)
        x = self.head(x)
        outs = [x, x_dsn]
        if ((self.criterion is not None) and (labels is not None)):
            return self.criterion(outs, labels)
        else:
            return outs
# BUG FIX: the three decorator lines had lost their '@pytest.mark' prefix
# (they read '.parametrize(...)' — a syntax error); restored below.
@pytest.mark.parametrize('action_size', [4])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('observation_shape', [(100,)])
def test_discrete_random_policy(action_size: int, batch_size: int, observation_shape: Sequence[int]) -> None:
    """A random discrete policy must emit one valid action index per row,
    for both deterministic prediction and sampling."""
    algo = DiscreteRandomPolicyConfig().create()
    algo.create_impl(observation_shape, action_size)
    x = np.random.random((batch_size, *observation_shape))
    # Predicted actions: shape (batch,), each in [0, action_size).
    action = algo.predict(x)
    assert (action.shape == (batch_size,))
    assert np.all((action < action_size))
    # Sampled actions obey the same contract.
    action = algo.sample_action(x)
    assert (action.shape == (batch_size,))
    assert np.all((action < action_size))
class TransSfPNet(nn.Module):
    """Three-stream segmentation network fusing an original-image stream,
    a prior stream, and a confidence/attention stream.

    A per-pixel confidence map is derived from the local variability of one
    input channel (AoLP); it drives the attention stream and gates the prior
    features before decoding.
    """

    def __init__(self, backbone='resnet', output_stride=16, num_classes=21, sync_bn=True, freeze_bn=False, device=None):
        super(TransSfPNet, self).__init__()
        # DRN backbones only support output stride 8.
        if (backbone == 'drn'):
            output_stride = 8
        if (sync_bn == True):
            BatchNorm = SynchronizedBatchNorm2d
        else:
            BatchNorm = nn.BatchNorm2d
        # Window size for the local mean / sum filters used in forward.
        self.kernel_size = 9
        self.lamda = 1
        # Exponent applied to the absolute AoLP deviation.
        self.m = 0.5
        # Fixed (non-trainable) averaging kernel over the window.
        self.mean_kernel = (torch.ones([1, 1, self.kernel_size, self.kernel_size]) / (self.kernel_size ** 2))
        self.mean_kernel = self.mean_kernel.to(device)
        self.mean_kernel = nn.Parameter(data=self.mean_kernel, requires_grad=False)
        # Fixed summation kernels (1- and 3-channel variants).
        self.sum_kernel_1 = torch.ones([1, 1, self.kernel_size, self.kernel_size])
        self.sum_kernel_1 = self.sum_kernel_1.to(device)
        self.sum_kernel_1 = nn.Parameter(data=self.sum_kernel_1, requires_grad=False)
        self.sum_kernel_3 = torch.ones([3, 3, self.kernel_size, self.kernel_size])
        self.sum_kernel_3 = self.sum_kernel_3.to(device)
        self.sum_kernel_3 = nn.Parameter(data=self.sum_kernel_3, requires_grad=False)
        # Three parallel backbone+ASPP streams with different input widths.
        self.backbone_orig = build_backbone(in_channels=2, backbone=backbone, output_stride=output_stride, BatchNorm=BatchNorm, Fusion=True)
        self.aspp_orig = build_aspp(backbone, output_stride, BatchNorm)
        self.backbone_prior = build_backbone(in_channels=12, backbone=backbone, output_stride=output_stride, BatchNorm=BatchNorm, Fusion=True)
        self.aspp_prior = build_aspp(backbone, output_stride, BatchNorm)
        self.backbone_atten = build_backbone(in_channels=1, backbone=backbone, output_stride=output_stride, BatchNorm=BatchNorm, Fusion=True)
        self.aspp_atten = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = decoder()
        # One fusion module per backbone stage/scale.
        self.fusion_module_0 = FusionModule(in_channels=64, out_channels=16, output_size=512)
        self.fusion_module_1 = FusionModule(in_channels=64, out_channels=32, output_size=256)
        self.fusion_module_2 = FusionModule(in_channels=256, out_channels=64, output_size=128)
        self.fusion_module_3 = FusionModule(in_channels=512, out_channels=128, output_size=64)
        self.fusion_module_4 = FusionModule(in_channels=1024, out_channels=256, output_size=32)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()  # NOTE(review): unused in forward.
        if freeze_bn:
            self.freeze_bn()
        self.writer = SummaryWriter()

    def forward(self, orig, prior):
        """Run the three streams and fuse them.

        Returns:
            (decoder output, confidence map).
        """
        img = orig
        # Channel 1 of ``orig`` is treated as the AoLP map — TODO confirm.
        img_split = torch.split(img, 1, 1)
        aolp = img_split[1]
        # Local mean of AoLP, then powered absolute deviation, summed over
        # the window: larger values mean more local variability.
        mean_map = nn.functional.conv2d(aolp, self.mean_kernel, padding=(self.kernel_size // 2))
        abs_map = torch.abs((aolp - mean_map))
        abs_map = torch.pow(abs_map, self.m)
        confidence_map = nn.functional.conv2d(abs_map, self.sum_kernel_1, padding=(self.kernel_size // 2))
        # Normalize per sample by its max, then invert so low-variability
        # regions get high confidence.
        shape = confidence_map.shape
        confidence_map = torch.reshape(confidence_map, [shape[0], (- 1)])
        (max_values, indices) = torch.max(confidence_map, dim=1)
        max_values = torch.reshape(max_values, [shape[0], 1])
        confidence_map = torch.div(confidence_map, max_values)
        confidence_map = torch.reshape(confidence_map, [shape[0], shape[1], shape[2], shape[3]])
        confidence_map = (1 - confidence_map)
        # Original-image stream.
        (x_orig, x_orig_0, x_orig_1, x_orig_2, x_orig_3, x_orig_4) = self.backbone_orig(orig)
        x_orig = self.aspp_orig(x_orig)
        # Prior stream.
        (x_prior, x_prior_0, x_prior_1, x_prior_2, x_prior_3, x_prior_4) = self.backbone_prior(prior)
        x_prior = self.aspp_prior(x_prior)
        # Attention stream driven by the confidence map.
        (x_c, x_c_0, x_c_1, x_c_2, x_c_3, x_c_4) = self.backbone_atten(confidence_map)
        x_c = self.aspp_atten(x_c)
        # Gate the prior features with the attention stream and add.
        x_fusion = (x_orig + (self.sigmoid(x_c) * x_prior))
        x_fusion_0 = self.fusion_module_0(x_orig_0, x_c_0, x_prior_0)
        x_fusion_1 = self.fusion_module_1(x_orig_1, x_c_1, x_prior_1)
        x_fusion_2 = self.fusion_module_2(x_orig_2, x_c_2, x_prior_2)
        x_fusion_3 = self.fusion_module_3(x_orig_3, x_c_3, x_prior_3)
        x_fusion_4 = self.fusion_module_4(x_orig_4, x_c_4, x_prior_4)
        x = self.decoder(x_fusion, x_fusion_0, x_fusion_1, x_fusion_2, x_fusion_3, x_fusion_4)
        return (x, confidence_map)

    def freeze_bn(self):
        """Put every batch-norm layer into eval mode (freeze running stats)."""
        for m in self.modules():
            if isinstance(m, SynchronizedBatchNorm2d):
                m.eval()
            elif isinstance(m, nn.BatchNorm2d):
                m.eval()
class FaissIndexer():
    """Thin wrapper around a faiss flat-L2 nearest-neighbour index.

    faiss is imported lazily at construction time so the dependency is
    only needed when an indexer is actually created.
    """

    def __init__(self, index=None):
        import faiss as faiss_module
        self.faiss_module = faiss_module
        self.index = index

    def train_index(self, embeddings):
        """Create a fresh IndexFlatL2 sized to the embedding dim and fill it."""
        dim = embeddings.shape[1]
        self.index = self.faiss_module.IndexFlatL2(dim)
        self.add_to_index(embeddings)

    def add_to_index(self, embeddings):
        """Append embeddings to the current index."""
        self.index.add(embeddings)

    def search_nn(self, query_batch, k):
        """Return (indices, distances) of the k nearest neighbours."""
        distances, indices = self.index.search(query_batch, k)
        return (indices, distances)

    def save(self, filename):
        """Serialize the index to disk."""
        self.faiss_module.write_index(self.index, filename)

    def load(self, filename):
        """Load a serialized index from disk."""
        self.index = self.faiss_module.read_index(filename)
class Closeness(BaseRanking):
    """Closeness centrality ranking, exact or sampled.

    Parameters:
        method: 'exact' (every node is a source) or 'approximate'
            (roughly log(n)/tol**2 randomly sampled sources).
        tol: sampling tolerance for the approximate method.
    """

    def __init__(self, method: str='exact', tol: float=0.1):
        super(Closeness, self).__init__()
        self.method = method
        self.tol = tol

    def fit(self, adjacency: Union[(sparse.csr_matrix, np.ndarray)]) -> 'Closeness':
        """Compute closeness scores and store them in ``self.scores_``.

        Raises:
            ValueError: for an unknown ``method``.
        """
        adjacency = check_format(adjacency)
        check_square(adjacency)
        # Closeness is only well-defined on a connected graph.
        check_connected(adjacency)
        n = adjacency.shape[0]
        if (self.method == 'exact'):
            n_sources = n
            sources = np.arange(n)
        elif (self.method == 'approximate'):
            # Sample enough sources for a tol-accurate estimate, capped at n.
            n_sources = min(int((log(n) / (self.tol ** 2))), n)
            sources = np.random.choice(np.arange(n), n_sources, replace=False)
        else:
            raise ValueError("Method should be either 'exact' or 'approximate'.")
        # distances[s, j] = shortest-path distance from sources[s] to node j.
        distances = np.array([get_distances(adjacency, source=source) for source in sources])
        distances_min = np.min(distances, axis=1)
        # Closeness of a source = ((n-1)/n) / its mean distance to all nodes.
        # NOTE(review): with method='approximate' this yields one score per
        # *sampled source* (length n_sources), not per node — confirm callers
        # expect that shape.
        scores = (((n - 1) / n) / np.mean(distances, axis=1))
        # Presumably get_distances marks unreachable nodes with a negative
        # value; zero out rows containing any — TODO confirm.
        scores[(distances_min < 0)] = 0
        self.scores_ = scores
        return self
def is_chinese(word: str):
    """Return 1 when every character of ``word`` is a Chinese character, else 0.

    Integer flags (not bools) are preserved for callers relying on them;
    an empty string yields 1, matching the original loop's behavior.
    """
    return int(all(_is_chinese_char(ord(ch)) for ch in word))
def generate_h5(model_resnext101, video_ids, outfile):
    """Extract ResNeXt-101 clip features for every video and store them in HDF5.

    Args:
        model_resnext101: feature extractor applied to stacked clips (on CUDA).
        video_ids: sequence of (video_path, video_id) pairs.
        outfile: path of the HDF5 file to create.
    """
    video_total_num = len(video_ids)
    with h5py.File(outfile, 'w') as fd:
        feat_dset_resnext101 = None
        video_ids_dset = None
        for i, (video_path, video_id) in enumerate(video_ids):
            clips, valid = extract_clips_with_consecutive_frames(video_path)
            clip_torch = torch.FloatTensor(np.asarray(clips)).cuda()
            if valid:
                clip_feat_resnext101 = model_resnext101(clip_torch)
                clip_feat_resnext101 = clip_feat_resnext101.squeeze()
                clip_feat_resnext101 = clip_feat_resnext101.detach().cpu().numpy()
            else:
                # Placeholder features for unreadable / too-short videos.
                clip_feat_resnext101 = np.zeros(shape=(8, 2048))
            F = clip_feat_resnext101.shape[0]
            D_101 = clip_feat_resnext101.shape[1]
            if feat_dset_resnext101 is None:
                # Datasets are created lazily so F/D come from the first clip.
                feat_dset_resnext101 = fd.create_dataset('resnext101_features', (video_total_num, F, D_101), dtype=np.float32)
                # BUG FIX: ``np.int`` was removed in NumPy 1.24; use np.int64.
                video_ids_dset = fd.create_dataset('video_ids', shape=(video_total_num,), dtype=np.int64)
            # The original i0/i1 write-cursor bookkeeping always equalled ``i``.
            feat_dset_resnext101[i] = clip_feat_resnext101
            video_ids_dset[i] = video_id
class LinearSeqAttn(nn.Module):
    """Self-attention over a sequence: alpha_j = softmax_j(w^T x_j).

    Positions where ``x_mask`` is True receive -inf before the softmax and
    therefore zero attention weight.
    """

    def __init__(self, input_size):
        super(LinearSeqAttn, self).__init__()
        # One scalar score per timestep.
        self.linear = nn.Linear(input_size, 1)

    def forward(self, x, x_mask):
        """x: (batch, seq, dim); x_mask: (batch, seq) bool mask. Returns (batch, seq)."""
        batch, seq_len = x.size(0), x.size(1)
        flattened = x.view(-1, x.size(-1))
        scores = self.linear(flattened).view(batch, seq_len)
        # In-place fill, matching the original implementation.
        scores.masked_fill_(x_mask, -float('inf'))
        return F.softmax(scores, dim=-1)
def parse_few_shot_qa_single_answer(string, setting_name, language='en'):
    """Parse a single-answer QA response; fall back to the first capital letter.

    Delegates to ``try_parse_few_shot_qa_single_answer`` and, when that
    yields nothing, uses ``find_first_capital_letter`` as a heuristic.
    """
    parsed = try_parse_few_shot_qa_single_answer(string, setting_name, language)
    return parsed if parsed is not None else find_first_capital_letter(string)
class GaussianRasterizationSettings(NamedTuple):
    """Immutable bundle of per-frame settings for Gaussian rasterization."""
    # Output image size in pixels.
    image_height: int
    image_width: int
    # Presumably tangents of half the horizontal/vertical FOV — TODO confirm.
    tanfovx: float
    tanfovy: float
    # Background color tensor.
    bg: torch.Tensor
    # Global multiplier applied to Gaussian scales.
    scale_modifier: float
    # View and projection matrices.
    viewmatrix: torch.Tensor
    projmatrix: torch.Tensor
    # Spherical-harmonics degree used for color evaluation.
    sh_degree: int
    # Camera position.
    campos: torch.Tensor
    # Whether Gaussians were pre-filtered before rasterization.
    prefiltered: bool
    # Enable rasterizer debug output.
    debug: bool
class Setup(object):
    """Abstract base for objects with an explicit setup/shutdown lifecycle.

    Subclasses must override both hooks; the base implementations always
    raise NotImplementedError.
    """

    def setup(self):
        """Acquire resources. Must be overridden by subclasses."""
        raise NotImplementedError()

    def shutdown(self):
        """Release resources. Must be overridden by subclasses."""
        raise NotImplementedError()
def nlte_raw_plasma_w0(tardis_model_config_nlte, nlte_raw_simulation_state, nlte_atom_data):
    """Assemble an NLTE plasma with the dilution factor forced to zero (w = 0).

    Mutates ``nlte_raw_simulation_state`` in place before assembly.
    """
    zero_w = np.zeros_like(nlte_raw_simulation_state.dilution_factor)
    nlte_raw_simulation_state.dilution_factor = zero_w
    return assemble_plasma(tardis_model_config_nlte, nlte_raw_simulation_state, nlte_atom_data)
def copy_checkpoint(src, dst, logger):
    """Copy checkpoint ``src`` to ``dst``, removing any existing destination.

    Progress is reported through ``logger.log`` when the logger supports it;
    any other object (e.g. None) silences logging.
    """
    loggable = hasattr(logger, 'log')
    if osp.isfile(dst):
        if loggable:
            logger.log('Find {:} exist, delete is at first before saving'.format(dst))
        os.remove(dst)
    copyfile(src, dst)
    if loggable:
        logger.log('copy the file from {:} into {:}'.format(src, dst))
def _update_from_config(obj, cfg):
for k in obj.__dict__.keys():
try:
obj.__dict__[k] = cfg[k.upper()]
except KeyError:
raise KeyError("'{}' has not been defined in config file".format(k.upper()))
except Exception as e:
raise Exception(e) |
class Kmer():
    """K-mer frequency feature generator for nucleic-acid sequences."""

    def __init__(self, k=1, normalize=False, upto=False, alphabet='ACGT'):
        """Validate parameters and precompute the k-mer vocabulary.

        Args:
            k: k-mer length.
            normalize: normalize counts into frequencies when True.
            upto: include every length from 1 up to k when True.
            alphabet: allowed sequence characters.
        """
        self.k = k
        self.upto = upto
        self.normalize = normalize
        self.alphabet = alphabet
        check_nac_para(k=self.k, upto=self.upto, normalize=self.normalize, alphabet=self.alphabet)
        self._kmer_list = get_kmer_list(self.k, self.upto, self.alphabet)
        self.feature_name_list = self._kmer_list

    def make_vec(self, data):
        """Return k-mer count/frequency vectors for the sequences in ``data``."""
        sequences = get_data(data)
        # Forward strand only: empty reverse k-mer list, no revcomp collapsing.
        return make_kmer_vector(sequences, self._kmer_list, [], self.k, self.upto, False, self.normalize)
class FederatedFlow(FLSpec):
    """Federated-learning workflow: aggregated validation -> local training
    -> local validation -> weighted FedAvg join, for ``rounds`` rounds."""

    def __init__(self, model=None, optimizer=None, rounds=3, **kwargs):
        super().__init__(**kwargs)
        if (model is not None):
            self.model = model
            self.optimizer = optimizer
        else:
            # Fall back to a fresh network with an SGD optimizer.
            self.model = Net()
            self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=momentum)
        self.rounds = rounds

    def start(self):
        """Entry step: fan out to every collaborator for aggregated validation."""
        print(f'Performing initialization for model')
        self.collaborators = self.runtime.collaborators
        self.private = 10
        self.current_round = 0
        # ``private`` is excluded from the state shipped to collaborators.
        self.next(self.aggregated_model_validation, foreach='collaborators', exclude=['private'])

    def aggregated_model_validation(self):
        """Score the aggregated (global) model on this collaborator's test set."""
        print(f'Performing aggregated model validation for collaborator {self.input}')
        self.agg_validation_score = inference(self.model, self.test_loader)
        print(f'{self.input} value of {self.agg_validation_score}')
        self.next(self.train)

    def train(self):
        """Run one local training pass (NLL loss) and checkpoint periodically."""
        self.model.train()
        # Local dataset size, used later as the FedAvg weight.
        self.weight = len(self.train_loader)
        self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=momentum)
        train_losses = []
        for (batch_idx, (data, target)) in enumerate(self.train_loader):
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            self.optimizer.step()
            if ((batch_idx % log_interval) == 0):
                print('Train Epoch: 1 [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format((batch_idx * len(data)), len(self.train_loader.dataset), ((100.0 * batch_idx) / len(self.train_loader)), loss.item()))
                # Record the latest loss and checkpoint model + optimizer.
                self.loss = loss.item()
                torch.save(self.model.state_dict(), 'model.pth')
                torch.save(self.optimizer.state_dict(), 'optimizer.pth')
        self.training_completed = True
        self.next(self.local_model_validation)

    def local_model_validation(self):
        """Score the freshly trained local model on the local test set."""
        self.local_validation_score = inference(self.model, self.test_loader)
        print(f'Doing local model validation for collaborator {self.input}: {self.local_validation_score}')
        self.next(self.join, exclude=['training_completed'])

    def join(self, inputs):
        """Aggregate collaborator results: weighted averages plus FedAvg model."""
        # Normalize dataset-size weights so they sum to 1.
        weights = np.array([float(input.weight) for input in inputs])
        weights /= sum(weights)
        self.average_loss = np.average([input.loss for input in inputs], axis=0, weights=weights)
        self.aggregated_model_accuracy = np.average([input.agg_validation_score for input in inputs], axis=0, weights=weights)
        self.local_model_accuracy = np.average([input.local_validation_score for input in inputs], axis=0, weights=weights)
        print(f'Average aggregated model validation values = {self.aggregated_model_accuracy}')
        print(f'Average training loss = {self.average_loss}')
        print(f'Average local model validation values = {self.local_model_accuracy}')
        # Weighted federated averaging of collaborator models; the optimizer
        # is simply taken from the first collaborator.
        self.model = FedAvg([input.model for input in inputs], weights=weights)
        self.optimizer = [input.optimizer for input in inputs][0]
        self.current_round += 1
        if (self.current_round < self.rounds):
            self.next(self.aggregated_model_validation, foreach='collaborators', exclude=['private'])
        else:
            self.final_accuracy = self.aggregated_model_accuracy
            self.next(self.end)

    def end(self):
        """Terminal step: report the final accuracy."""
        print(f'''
final model accuracy = {self.final_accuracy}''')
        print(f'This is the end of the flow')
def test_sample_regular_pass_smote_enn():
    """SMOTEENN must accept externally constructed SMOTE/ENN sub-estimators."""
    oversampler = SMOTE(sampling_strategy='auto', random_state=RND_SEED)
    cleaner = EditedNearestNeighbours(sampling_strategy='all')
    smote = SMOTEENN(smote=oversampler, enn=cleaner, random_state=RND_SEED)
    X_resampled, y_resampled = smote.fit_resample(X, Y)
    # Expected resampled data for the fixed RND_SEED.
    X_gt = np.array([[1.0, -0.0], [0.0, -0.0], [0.0, -0.0], [0.0, -0.0], [-0.0, -2.0], [0.0, 1.0], [0.0, 0.0]])
    y_gt = np.array([0, 0, 0, 0, 1, 1, 1])
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
# BUG FIX: the decorator line had lost its '@test' prefix (it read
# '_utils.test(...)' as a bare expression); restored as a proper decorator.
@test_utils.test(arch=get_host_arch_list())
def test_order_must_throw_vector():
    """Invalid ``order`` arguments to ti.Vector.field must raise compilation errors."""
    with pytest.raises(ti.TaichiCompilationError, match='The dimensionality of shape and order must be the same'):
        a = ti.Vector.field(3, dtype=ti.f32, shape=3, order='ij')
    with pytest.raises(ti.TaichiCompilationError, match='shape cannot be None when order is set'):
        b = ti.Vector.field(3, dtype=ti.f32, shape=None, order='i')
    with pytest.raises(ti.TaichiCompilationError, match='The axes in order must be different'):
        c = ti.Vector.field(3, dtype=ti.f32, shape=(3, 4, 3), order='iii')
    with pytest.raises(ti.TaichiCompilationError, match='Invalid axis'):
        d = ti.Vector.field(3, dtype=ti.f32, shape=(3, 4, 3), order='ihj')
class LayerNorm(nn.Module):
    """Layer normalization over every non-batch dimension.

    Unlike ``nn.LayerNorm``, the affine parameters have shape (n_out, 1, 1)
    and are broadcast-expanded to the normalized shape, so a single
    scale/shift pair is shared per channel.

    NOTE: ``eps`` is accepted but unused, matching the original behavior.
    """

    def __init__(self, n_out, eps=1e-05, affine=True):
        super(LayerNorm, self).__init__()
        self.n_out = n_out
        self.affine = affine
        if self.affine:
            # Per-channel scale (ones) and shift (zeros).
            self.weight = nn.Parameter(torch.ones(n_out, 1, 1))
            self.bias = nn.Parameter(torch.zeros(n_out, 1, 1))

    def forward(self, x):
        """Normalize ``x`` over dims 1..end, optionally applying the affine pair."""
        shape = x.size()[1:]
        if not self.affine:
            return F.layer_norm(x, shape)
        return F.layer_norm(x, shape, self.weight.expand(shape), self.bias.expand(shape))
class DocStringSlot(SlotDescriptor):
    """Descriptor for the docstring slot: emits the type's docstring."""

    def slot_code(self, scope):
        """Return the slot's C initializer: a string literal, or '0' (NULL)
        when the scope has no docstring."""
        doc = scope.doc
        if doc is None:
            return '0'
        # Unicode docstrings are stored UTF-8 encoded before emission.
        encoded = doc.as_utf8_string() if doc.is_unicode else doc
        return encoded.as_c_string_literal()
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
    """Create the four 3x3 '_rn' projection convolutions of a scratch module.

    NOTE(review): ``out_shape`` and ``expand`` are accepted but never used —
    every conv maps in_shape[i] -> in_shape[i]. Upstream variants of this
    helper derive the output widths from ``out_shape`` (scaling them when
    ``expand`` is set); confirm whether this identity mapping is intended.
    """
    scratch = nn.Module()
    # Output widths are taken from the input widths (see NOTE above).
    out_shape1 = in_shape[0]
    out_shape2 = in_shape[1]
    out_shape3 = in_shape[2]
    out_shape4 = in_shape[3]
    scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    return scratch
class VGGATest(tf.test.TestCase):
    """Shape/endpoint/variable tests for the slim VGG-A network.

    BUG FIX applied throughout: the deprecated ``assertEquals`` alias
    (removed in Python 3.12) is replaced with ``assertEqual``.
    """

    def testBuild(self):
        """Logits op name and shape for a standard classification build."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = vgg.vgg_a(inputs, num_classes)
            self.assertEqual(logits.op.name, 'vgg_a/fc8/squeezed')
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])

    def testFullyConvolutional(self):
        """Without spatial squeeze, larger inputs yield a spatial logits map."""
        batch_size = 1
        (height, width) = (256, 256)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
            self.assertEqual(logits.op.name, 'vgg_a/fc8/BiasAdd')
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, 2, 2, num_classes])

    def testGlobalPool(self):
        """Global pooling collapses the spatial dimensions to 1x1."""
        batch_size = 1
        (height, width) = (256, 256)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False, global_pool=True)
            self.assertEqual(logits.op.name, 'vgg_a/fc8/BiasAdd')
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, 1, 1, num_classes])

    def testEndPoints(self):
        """The endpoint dict exposes exactly the expected named layers."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (_, end_points) = vgg.vgg_a(inputs, num_classes)
            expected_names = ['vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1', 'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2', 'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2', 'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2', 'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8']
            self.assertSetEqual(set(end_points.keys()), set(expected_names))

    def testNoClasses(self):
        """With num_classes=None the fc8 head is omitted."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = None
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (net, end_points) = vgg.vgg_a(inputs, num_classes)
            expected_names = ['vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1', 'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2', 'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2', 'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2', 'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7']
            self.assertSetEqual(set(end_points.keys()), set(expected_names))
            self.assertTrue(net.op.name.startswith('vgg_a/fc7'))

    def testModelVariables(self):
        """Every conv/fc layer registers its weights and biases."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            vgg.vgg_a(inputs, num_classes)
            expected_names = ['vgg_a/conv1/conv1_1/weights', 'vgg_a/conv1/conv1_1/biases', 'vgg_a/conv2/conv2_1/weights', 'vgg_a/conv2/conv2_1/biases', 'vgg_a/conv3/conv3_1/weights', 'vgg_a/conv3/conv3_1/biases', 'vgg_a/conv3/conv3_2/weights', 'vgg_a/conv3/conv3_2/biases', 'vgg_a/conv4/conv4_1/weights', 'vgg_a/conv4/conv4_1/biases', 'vgg_a/conv4/conv4_2/weights', 'vgg_a/conv4/conv4_2/biases', 'vgg_a/conv5/conv5_1/weights', 'vgg_a/conv5/conv5_1/biases', 'vgg_a/conv5/conv5_2/weights', 'vgg_a/conv5/conv5_2/biases', 'vgg_a/fc6/weights', 'vgg_a/fc6/biases', 'vgg_a/fc7/weights', 'vgg_a/fc7/biases', 'vgg_a/fc8/weights', 'vgg_a/fc8/biases']
            model_variables = [v.op.name for v in slim.get_model_variables()]
            self.assertSetEqual(set(model_variables), set(expected_names))

    def testEvaluation(self):
        """Inference-mode build produces per-example predictions."""
        batch_size = 2
        (height, width) = (224, 224)
        num_classes = 1000
        with self.test_session():
            eval_inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = vgg.vgg_a(eval_inputs, is_training=False)
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            predictions = tf.argmax(logits, 1)
            self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

    def testTrainEvalWithReuse(self):
        """Variables built for training can be reused for larger eval inputs."""
        train_batch_size = 2
        eval_batch_size = 1
        (train_height, train_width) = (224, 224)
        (eval_height, eval_width) = (256, 256)
        num_classes = 1000
        with self.test_session():
            train_inputs = tf.random_uniform((train_batch_size, train_height, train_width, 3))
            (logits, _) = vgg.vgg_a(train_inputs)
            self.assertListEqual(logits.get_shape().as_list(), [train_batch_size, num_classes])
            tf.get_variable_scope().reuse_variables()
            eval_inputs = tf.random_uniform((eval_batch_size, eval_height, eval_width, 3))
            (logits, _) = vgg.vgg_a(eval_inputs, is_training=False, spatial_squeeze=False)
            self.assertListEqual(logits.get_shape().as_list(), [eval_batch_size, 2, 2, num_classes])
            logits = tf.reduce_mean(logits, [1, 2])
            predictions = tf.argmax(logits, 1)
            self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])

    def testForward(self):
        """A full forward pass runs and produces nonzero output."""
        batch_size = 1
        (height, width) = (224, 224)
        with self.test_session() as sess:
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = vgg.vgg_a(inputs)
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits)
            self.assertTrue(output.any())
def save_file(obj, filename, *args, **kwargs):
    """Serialize ``obj`` to ``filename``, dispatching on the file extension.

    Unknown extensions raise ValueError; extra args are forwarded to the
    extension-specific saver registered in ``_ext_table``.
    """
    ext = get_ext(filename)
    if ext not in _ext_table:
        raise ValueError('Unsupported file {} with file extension {}'.format(filename, ext))
    before_save(filename)
    saver = _ext_table[ext][0]
    return saver(obj, filename, *args, **kwargs)
def main():
    """Step-by-step beam-search decoding driven by a precompiled TF graph.

    Loads a graph (either a ``.meta``/``.metatxt`` meta-graph or a binary
    ``GraphDef``) plus a checkpoint, then repeatedly runs the per-step ops
    described in the ``rec_step_by_step_json`` file while expanding a beam
    of ``Hyp`` hypotheses of width ``--beam_size``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--graph', help='compiled TF graph', required=True)
    arg_parser.add_argument('--chkpt', help='TF checkpoint (model params)', required=True)
    arg_parser.add_argument('--beam_size', type=int, default=12)
    arg_parser.add_argument('--rec_step_by_step_json', required=True)
    args = arg_parser.parse_args()

    def make_initial_feed_dict():
        # No model-specific initial feeds yet; kept as an extension point.
        return {}

    info = json.load(open(args.rec_step_by_step_json))
    assert isinstance(info, dict)
    # .meta/.metatxt carries the full meta-graph (including a Saver);
    # otherwise import a plain GraphDef and build a fresh Saver.
    if (os.path.splitext(args.graph)[1] in ['.meta', '.metatxt']):
        saver = tf.compat.v1.train.import_meta_graph(args.graph)
    else:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(open(args.graph, 'rb').read())
        tf.import_graph_def(graph_def)
        saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as session:
        saver.restore(session, args.chkpt)
        initial_feed_dict = make_initial_feed_dict()
        session.run(info['init_op'], feed_dict=initial_feed_dict)
        hyps = [Hyp(idx=0)]
        max_dec_len = 100
        for i in range(max_dec_len):
            for stochastic_var in info['stochastic_var_order']:
                assert isinstance(stochastic_var, str)
                session.run(info['stochastic_vars'][stochastic_var]['calc_scores_op'])
                # scores: (num_hyps, num_labels) matrix of per-label scores.
                scores = session.run(info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)])
                assert (isinstance(scores, numpy.ndarray) and (scores.ndim == 2) and (scores.shape[0] == len(hyps)))
                all_possibilities = [((hyp.score + scores[(i, j)]), j, hyp) for (i, hyp) in enumerate(hyps) for j in range(scores.shape[1])]
                # NOTE(review): sorted() ascending keeps the LOWEST scores —
                # assumes lower is better (e.g. negative log-prob); confirm.
                best_possibilities = sorted(all_possibilities)[:args.beam_size]
                assert (len(best_possibilities) == args.beam_size)
                hyps = [hyp.expand(idx=i, label=label, score=score) for (i, (score, label, hyp)) in enumerate(best_possibilities)]
                # NOTE(review): '/Assign...?' and '/Initial...?' look like
                # unfinished placeholder op names — confirm the real
                # assign/initial-value op naming before running this.
                session.run((info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)] + '/Assign...?'), feed_dict={(info['state_vars'][('stochastic_var_scores_%s' % stochastic_var)] + '/Initial...?'): [[hyp.seq[(- 1)] for hyp in hyps]]})
            # Reorder recurrent state so each hyp follows its source beam.
            session.run(info['select_src_beams']['op'], feed_dict={info['select_src_beams']['src_beams_placeholder']: [[hyp.source_idx] for hyp in hyps]})
            session.run(info['next_step_op'])
    print('Best hypotheses:')
    for hyp in hyps:
        print(('score %.2f: %r' % (hyp.score, hyp.seq)))
class Rule(RuleFactory):
    """One URL routing rule: parsing, matching and URL building.

    Note: the original source carried mangled Python-2 string-compat
    decorator residues (bare ``_to_string`` / ``_string_result`` statements,
    formerly ``@implements_to_string`` / ``@native_string_result``); they
    are no-ops on Python 3 and have been dropped.
    """

    def __init__(self, string, defaults=None, subdomain=None, methods=None, build_only=False, endpoint=None, strict_slashes=None, merge_slashes=None, redirect_to=None, alias=False, host=None, websocket=False):
        if (not string.startswith('/')):
            raise ValueError('urls must start with a leading slash')
        self.rule = string
        # A rule not ending in '/' is a leaf: no implicit branch redirect.
        self.is_leaf = (not string.endswith('/'))
        self.map = None
        self.strict_slashes = strict_slashes
        self.merge_slashes = merge_slashes
        self.subdomain = subdomain
        self.host = host
        self.defaults = defaults
        self.build_only = build_only
        self.alias = alias
        self.websocket = websocket
        if (methods is not None):
            if isinstance(methods, str):
                raise TypeError("'methods' should be a list of strings.")
            methods = {x.upper() for x in methods}
            # GET implies HEAD unless HEAD was listed explicitly.
            if (('HEAD' not in methods) and ('GET' in methods)):
                methods.add('HEAD')
            if (websocket and (methods - {'GET', 'HEAD', 'OPTIONS'})):
                raise ValueError("WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods.")
        self.methods = methods
        self.endpoint = endpoint
        self.redirect_to = redirect_to
        if defaults:
            self.arguments = set(map(str, defaults))
        else:
            self.arguments = set()
        self._trace = self._converters = self._regex = self._argument_weights = None

    def empty(self):
        """Return an unbound copy of this rule."""
        return type(self)(self.rule, **self.get_empty_kwargs())

    def get_empty_kwargs(self):
        """Constructor kwargs reproducing this rule for :meth:`empty`."""
        defaults = None
        if self.defaults:
            defaults = dict(self.defaults)
        # Bugfix: also propagate merge_slashes and websocket, which were
        # previously dropped when copying a rule via empty().
        return dict(defaults=defaults, subdomain=self.subdomain, methods=self.methods, build_only=self.build_only, endpoint=self.endpoint, strict_slashes=self.strict_slashes, merge_slashes=self.merge_slashes, redirect_to=self.redirect_to, alias=self.alias, host=self.host, websocket=self.websocket)

    def get_rules(self, map):
        (yield self)

    def refresh(self):
        """Rebind the rule to its map (e.g. after converters changed)."""
        self.bind(self.map, rebind=True)

    def bind(self, map, rebind=False):
        """Attach this rule to *map*, inherit map-level defaults, compile."""
        if ((self.map is not None) and (not rebind)):
            raise RuntimeError(('url rule %r already bound to map %r' % (self, self.map)))
        self.map = map
        if (self.strict_slashes is None):
            self.strict_slashes = map.strict_slashes
        if (self.merge_slashes is None):
            self.merge_slashes = map.merge_slashes
        if (self.subdomain is None):
            self.subdomain = map.default_subdomain
        self.compile()

    def get_converter(self, variable_name, converter_name, args, kwargs):
        """Instantiate the named converter registered on the bound map."""
        if (converter_name not in self.map.converters):
            raise LookupError(('the converter %r does not exist' % converter_name))
        return self.map.converters[converter_name](self.map, *args, **kwargs)

    def _encode_query_vars(self, query_vars):
        return url_encode(query_vars, charset=self.map.charset, sort=self.map.sort_parameters, key=self.map.sort_key)

    def compile(self):
        """Compile the rule ("domain|path") into a matching regex."""
        assert (self.map is not None), 'rule not bound'
        if self.map.host_matching:
            domain_rule = (self.host or '')
        else:
            domain_rule = (self.subdomain or '')
        self._trace = []
        self._converters = {}
        self._static_weights = []
        self._argument_weights = []
        regex_parts = []

        def _build_regex(rule):
            index = 0
            for (converter, arguments, variable) in parse_rule(rule):
                if (converter is None):
                    # Static part: handle slash runs and literal text separately.
                    for match in re.finditer('/+|[^/]+', variable):
                        part = match.group(0)
                        if part.startswith('/'):
                            if self.merge_slashes:
                                regex_parts.append('/+?')
                                self._trace.append((False, '/'))
                            else:
                                regex_parts.append(part)
                                self._trace.append((False, part))
                            continue
                        self._trace.append((False, part))
                        regex_parts.append(re.escape(part))
                        if part:
                            self._static_weights.append((index, (- len(part))))
                else:
                    # Dynamic part: delegate to the converter's sub-regex.
                    if arguments:
                        (c_args, c_kwargs) = parse_converter_args(arguments)
                    else:
                        c_args = ()
                        c_kwargs = {}
                    convobj = self.get_converter(variable, converter, c_args, c_kwargs)
                    regex_parts.append(('(?P<%s>%s)' % (variable, convobj.regex)))
                    self._converters[variable] = convobj
                    self._trace.append((True, variable))
                    self._argument_weights.append(convobj.weight)
                    self.arguments.add(str(variable))
                index = (index + 1)

        _build_regex(domain_rule)
        # '|' separates the domain part from the path part in match input.
        regex_parts.append('\\|')
        self._trace.append((False, '|'))
        _build_regex((self.rule if self.is_leaf else self.rule.rstrip('/')))
        if (not self.is_leaf):
            self._trace.append((False, '/'))
        self._build = self._compile_builder(False).__get__(self, None)
        self._build_unknown = self._compile_builder(True).__get__(self, None)
        if self.build_only:
            return
        if (not (self.is_leaf and self.strict_slashes)):
            # Optional trailing slash(es) captured as __suffix__.
            reps = (u'*' if self.merge_slashes else u'?')
            tail = (u'(?<!/)(?P<__suffix__>/%s)' % reps)
        else:
            tail = u''
        regex = (u'^%s%s$' % (u''.join(regex_parts), tail))
        self._regex = re.compile(regex, re.UNICODE)

    def match(self, path, method=None):
        """Match ``path`` ("subdomain|/path") against this rule.

        Returns the dict of converted arguments on success, ``None`` on
        mismatch; raises ``RequestPath`` for slash redirects and
        ``RequestAliasRedirect`` for alias rules.
        """
        if (not self.build_only):
            require_redirect = False
            m = self._regex.search(path)
            if (m is not None):
                groups = m.groupdict()
                # Branch URL requested without trailing slash -> redirect.
                if (self.strict_slashes and (not self.is_leaf) and (not groups.pop('__suffix__')) and ((method is None) or (self.methods is None) or (method in self.methods))):
                    path += '/'
                    require_redirect = True
                elif (not self.strict_slashes):
                    del groups['__suffix__']
                result = {}
                for (name, value) in iteritems(groups):
                    try:
                        value = self._converters[name].to_python(value)
                    except ValidationError:
                        return
                    result[str(name)] = value
                if self.defaults:
                    result.update(self.defaults)
                if self.merge_slashes:
                    # Rebuild the canonical path; redirect if slashes collapsed.
                    new_path = '|'.join(self.build(result, False))
                    if (path.endswith('/') and (not new_path.endswith('/'))):
                        new_path += '/'
                    if (new_path.count('/') < path.count('/')):
                        path = new_path
                        require_redirect = True
                if require_redirect:
                    path = path.split('|', 1)[1]
                    raise RequestPath(path)
                if (self.alias and self.map.redirect_defaults):
                    raise RequestAliasRedirect(result)
                return result

    @staticmethod
    def _get_func_code(code, name):
        # Bugfix: restored @staticmethod — this is invoked as
        # self._get_func_code(code, name), which would otherwise pass
        # three arguments to a two-parameter function.
        (globs, locs) = ({}, {})
        exec(code, globs, locs)
        return locs[name]

    def _compile_builder(self, append_unknown=True):
        """Generate (via the ast module) a specialized URL-builder function."""
        defaults = (self.defaults or {})
        dom_ops = []
        url_ops = []
        opl = dom_ops
        for (is_dynamic, data) in self._trace:
            if ((data == '|') and (opl is dom_ops)):
                opl = url_ops
                continue
            # Resolve defaults that appear in the rule to constants ahead of time.
            if (is_dynamic and (data in defaults)):
                data = self._converters[data].to_url(defaults[data])
                opl.append((False, data))
            elif (not is_dynamic):
                opl.append((False, url_quote(to_bytes(data, self.map.charset), safe='/:|+')))
            else:
                opl.append((True, data))

        def _convert(elem):
            ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
            ret.args = [ast.Name(str(elem), ast.Load())]
            return ret

        def _parts(ops):
            parts = [(_convert(elem) if is_dynamic else ast.Str(s=elem)) for (is_dynamic, elem) in ops]
            parts = (parts or [ast.Str('')])
            # Constant-fold adjacent string literals.
            ret = [parts[0]]
            for p in parts[1:]:
                if (isinstance(p, ast.Str) and isinstance(ret[(- 1)], ast.Str)):
                    ret[(- 1)] = ast.Str((ret[(- 1)].s + p.s))
                else:
                    ret.append(p)
            return ret

        dom_parts = _parts(dom_ops)
        url_parts = _parts(url_ops)
        if (not append_unknown):
            body = []
        else:
            body = [_IF_KWARGS_URL_ENCODE_AST]
            url_parts.extend(_URL_ENCODE_AST_NAMES)

        def _join(parts):
            if (len(parts) == 1):
                return parts[0]
            elif hasattr(ast, 'JoinedStr'):
                return ast.JoinedStr(parts)
            else:
                call = _prefix_names('"".join()')
                call.args = [ast.Tuple(parts, ast.Load())]
                return call

        body.append(ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load())))
        pargs = [str(elem) for (is_dynamic, elem) in (dom_ops + url_ops) if (is_dynamic and (elem not in defaults))]
        kargs = [str(k) for k in defaults]
        func_ast = _prefix_names('def _(): pass')
        func_ast.name = '<builder:{!r}>'.format(self.rule)
        if hasattr(ast, 'arg'):
            func_ast.args.args.append(ast.arg('.self', None))
            for arg in (pargs + kargs):
                func_ast.args.args.append(ast.arg(arg, None))
            func_ast.args.kwarg = ast.arg('.kwargs', None)
        else:
            func_ast.args.args.append(ast.Name('.self', ast.Param()))
            for arg in (pargs + kargs):
                func_ast.args.args.append(ast.Name(arg, ast.Param()))
            func_ast.args.kwarg = '.kwargs'
        for _ in kargs:
            func_ast.args.defaults.append(ast.Str(''))
        func_ast.body = body
        module = ast.parse('')
        module.body = [func_ast]
        # The generated AST carries no positions; give every node dummy ones.
        for node in ast.walk(module):
            if ('lineno' in node._attributes):
                node.lineno = 1
            if ('col_offset' in node._attributes):
                node.col_offset = 0
        code = compile(module, '<werkzeug routing>', 'exec')
        return self._get_func_code(code, func_ast.name)

    def build(self, values, append_unknown=True):
        """Return (domain, path) for *values*, or None on validation failure."""
        try:
            if append_unknown:
                return self._build_unknown(**values)
            else:
                return self._build(**values)
        except ValidationError:
            return None

    def provides_defaults_for(self, rule):
        return ((not self.build_only) and self.defaults and (self.endpoint == rule.endpoint) and (self != rule) and (self.arguments == rule.arguments))

    def suitable_for(self, values, method=None):
        """Whether *values* (plus defaults) can build this rule's URL."""
        if ((method is not None) and (self.methods is not None) and (method not in self.methods)):
            return False
        defaults = (self.defaults or ())
        for key in self.arguments:
            if ((key not in defaults) and (key not in values)):
                return False
        if defaults:
            for (key, value) in iteritems(defaults):
                if ((key in values) and (value != values[key])):
                    return False
        return True

    def match_compare_key(self):
        """Sort key: more static / longer rules match first."""
        return (bool(self.arguments), (- len(self._static_weights)), self._static_weights, (- len(self._argument_weights)), self._argument_weights)

    def build_compare_key(self):
        return ((1 if self.alias else 0), (- len(self.arguments)), (- len((self.defaults or ()))))

    def __eq__(self, other):
        return ((self.__class__ is other.__class__) and (self._trace == other._trace))

    __hash__ = None

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __str__(self):
        return self.rule

    def __repr__(self):
        if (self.map is None):
            return (u'<%s (unbound)>' % self.__class__.__name__)
        tmp = []
        for (is_dynamic, data) in self._trace:
            if is_dynamic:
                tmp.append((u'<%s>' % data))
            else:
                tmp.append(data)
        return (u'<%s %s%s -> %s>' % (self.__class__.__name__, repr(u''.join(tmp).lstrip(u'|')).lstrip(u'u'), (((self.methods is not None) and (u' (%s)' % u', '.join(self.methods))) or u''), self.endpoint))
class ImageBindModality(Modality):
    """Modality backed by ImageBind embeddings for text/vision/audio items."""

    def __init__(self, num_projector_layers: int = 2, num_tokens: int = 4, preprocess_device: str = 'cpu'):
        # NOTE(review): no super().__init__() call in the original — confirm
        # Modality's base constructor really requires no setup.
        self.module = ImageBindModule()
        self.dtype = torch.float32
        self.device = 'cpu'
        self.imagebind_device = 'cpu'
        self.preprocess_device = preprocess_device
        self.num_projector_layers = num_projector_layers
        self.num_tokens = num_tokens

    def build_projector(self, lm_hidden_size: int) -> nn.Module:
        """MLP projector from ImageBind embedding space to the LM hidden size."""
        return build_mlp_vector_projector(self.module.embedding_size, lm_hidden_size, num_layers=self.num_projector_layers, num_tokens=self.num_tokens)

    # Restored @property on the accessors below: preprocess_rows indexes
    # row[self.data_key], which only works when data_key is a property
    # (as a plain method the bound-method object would be used as the key).
    @property
    def name(self) -> str:
        return 'imagebind'

    @property
    def token(self) -> str:
        return '<imagebind>'

    @property
    def data_key(self) -> str:
        return 'imagebinds'

    @property
    def token_width(self) -> int:
        return self.num_tokens

    def to(self, dtype: torch.dtype, device: torch.device) -> 'ImageBindModality':
        self.device = device
        self.dtype = dtype
        # ImageBind itself can be pinned to CPU via the env-var escape hatch.
        if (IMAGE_BIND_FORCE_CPU not in os.environ):
            self.module.to(device=device)
            self.imagebind_device = device
        return self

    def preprocess_rows(self, rows: List[Dict]) -> List[List[Dict]]:
        """Load each row's files and transform them per ImageBind modality."""
        from imagebind.models.imagebind_model import ModalityType
        from imagebind import data
        row_values = []
        for row in rows:
            items = []
            with with_local_files(row[self.data_key]) as item_paths:
                for item_path in item_paths:
                    ib_modality = filename_to_imagebind_modality(item_path)
                    if (ib_modality == ModalityType.TEXT):
                        items.append({ModalityType.TEXT: data.load_and_transform_text([item_path], self.preprocess_device)})
                    elif (ib_modality == ModalityType.VISION):
                        items.append({ModalityType.VISION: data.load_and_transform_vision_data([item_path], self.preprocess_device)})
                    elif (ib_modality == ModalityType.AUDIO):
                        items.append({ModalityType.AUDIO: data.load_and_transform_audio_data([item_path], self.preprocess_device)})
                    else:
                        raise ValueError(f'Unknown modality type: {ib_modality}')
            row_values.append(items)
        return row_values

    @torch.no_grad()  # restored from the mangled `_grad()` decorator residue
    def forward(self, encoded_values: List[List[Dict]]) -> List[torch.Tensor]:
        """Embed every item with ImageBind; one stacked tensor per row."""
        item_features = []
        for item_batch in encoded_values:
            item_batch_emb = []
            for item in item_batch:
                item = {k: v.to(device=self.imagebind_device, dtype=torch.float32) for (k, v) in item.items()}
                item_batch_emb.extend(list(self.module.forward(item).values()))
            item_features.append(torch.stack(item_batch_emb).to(device=self.device, dtype=self.dtype))
        return item_features
@pytest.fixture(autouse=True, scope='session')
def add_imports(doctest_namespace: dict[(str, Any)]):
    """Session-wide doctest fixture exposing the full sage.all namespace.

    The decorator line had been mangled into a bare
    ``(autouse=True, scope='session')`` tuple (a syntax error); restored as
    the pytest fixture it clearly was — ``doctest_namespace`` is pytest's
    built-in doctest hook.
    """
    import sage.all
    dict_all = sage.all.__dict__
    # Drop __package__ so the copied namespace behaves like a __main__ script.
    dict_all.pop('__package__', None)
    sage_namespace = dict(dict_all)
    sage_namespace['__name__'] = '__main__'
    doctest_namespace.update(**sage_namespace)
class Node(object):
    """A typed, named graph node keeping raw/normalized/indexable name forms."""

    def __init__(self, node_type, name, n_name=None, caseless=True):
        self.node_type = node_type
        self.name = name
        # Fall back to the raw name when no normalized form is supplied.
        self.normalized_name = n_name if n_name else name
        self.indexable_name = utils.to_indexable(name, caseless)
        self.lexical_features = None

    def compute_lexical_features(self, tokenize=None, normalized=False):
        """Tokenize the (optionally normalized) name into lexical features."""
        source = self.normalized_name if normalized else self.name
        self.lexical_features = source.split(' ') if tokenize is None else tokenize(source)

    def signature(self):
        return self.name

    def indexable_signature(self):
        return self.indexable_name

    def printable_name(self):
        return '{} ({})'.format(self.name, self.normalized_name)
def verify_no_leak(callback: Callable[([], Any)], repeat: int = 10000, fuzzy: int = 10) -> None:
    """Run ``callback`` many times under valgrind and assert it does not leak.

    Raises AssertionError when more than roughly one block per iteration
    (minus ``fuzzy`` slack) is reported leaked.
    """
    # Warm-up call so one-time lazy allocations are not counted as leaks.
    callback()
    initial_blocks = (0, 0, 0, 0)
    valgrind.memcheck_do_leak_check()
    initial_blocks = valgrind.memcheck_count_leak_blocks()
    for _ in range(repeat):
        callback()
    valgrind.memcheck_do_leak_check()
    leaked = valgrind.memcheck_count_leak_blocks()[0] - initial_blocks[0]
    if leaked < repeat - fuzzy:
        return
    blocks = round(leaked / repeat, 2)
    message = f'{callback} leaked {blocks} block on average ({repeat} iterations)'
    raise AssertionError(message)
def example_to_device(example, device, non_blocking=False) -> dict:
    """Move a training-example dict onto *device*.

    Keys in ``list_tensor_keys`` hold lists of tensors, keys in
    ``tensor_keys`` hold single tensors, ``'calib'`` holds a nested dict of
    tensors; anything else is passed through unchanged.
    """
    # Hoisted to frozensets: O(1) membership instead of scanning a list per
    # key; the unused `float_names` local from the original is dropped.
    list_tensor_keys = frozenset(['anchors', 'anchors_mask', 'reg_targets', 'reg_weights', 'labels', 'hm', 'anno_box', 'ind', 'mask', 'cat'])
    tensor_keys = frozenset(['voxels', 'dense_voxels', 'bev_map', 'coordinates', 'dense_coordinates', 'num_points', 'dense_num_points', 'points', 'dense_points', 'num_voxels', 'dense_num_voxels', 'cyv_voxels', 'cyv_num_voxels', 'cyv_coordinates', 'cyv_num_points', 'gt_boxes_and_cls', 'reconstruction_coordinates', 'reconstruction_voxels', 'reconstruction_num_voxels', 'reconstruction_num_points', 'reconstruction_coordinates_4', 'reconstruction_voxels_4', 'reconstruction_num_voxels_4', 'reconstruction_num_points_4', 'reconstruction_coordinates_2', 'reconstruction_voxels_2', 'reconstruction_num_voxels_2', 'reconstruction_num_points_2'])
    example_torch = {}
    for (k, v) in example.items():
        if k in list_tensor_keys:
            example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
        elif k in tensor_keys:
            example_torch[k] = v.to(device, non_blocking=non_blocking)
        elif k == 'calib':
            example_torch[k] = {k1: v1.to(device, non_blocking=non_blocking) for (k1, v1) in v.items()}
        else:
            example_torch[k] = v
    return example_torch
def parse_sim_time(path):
    """Read a JSON timing file and return ``{'simtime': minutes}``.

    Returns an empty dict when the file does not exist. The file must
    contain ``start_time`` and ``end_time`` fields in seconds.
    """
    ret = {}
    if not os.path.exists(path):
        return ret
    # `with` already closes the file; the original's explicit f.close()
    # inside the with-block was redundant and has been removed.
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    # Convert the start/end span from seconds to minutes.
    ret['simtime'] = ((data['end_time'] - data['start_time']) / 60)
    return ret
def get_valid_stats(args, trainer, stats):
    """Augment validation stats with the update count and running-best metric."""
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        metric = args.best_checkpoint_metric
        chooser = max if args.maximize_best_checkpoint_metric else min
        stats['best_{0}'.format(metric)] = chooser(checkpoint_utils.save_checkpoint.best, stats[metric])
    return stats
class GecDataModule(pl.LightningDataModule):
    """Lightning data module bundling the train/valid/test GEC datasets."""

    def __init__(self, args, tokenizer, DatasetModule):
        super().__init__()
        self.args = args
        self.tokenizer = tokenizer
        self.train = DatasetModule(self.args.train_data_path, self.tokenizer, self.args.max_seq_len, data_split_type='train')
        self.valid = DatasetModule(self.args.val_data_path, self.tokenizer, self.args.max_seq_len, data_split_type='test')
        self.test = DatasetModule(self.args.test_data_path, self.tokenizer, self.args.max_seq_len, data_split_type='test')

    def _loader(self, dataset, shuffle):
        # Shared DataLoader construction for every split.
        return DataLoader(dataset, batch_size=self.args.batch_size, shuffle=shuffle, num_workers=self.args.num_workers)

    def train_dataloader(self):
        return self._loader(self.train, True)

    def val_dataloader(self):
        return self._loader(self.valid, False)

    def test_dataloader(self):
        return self._loader(self.test, False)

    def train_noshuffle_dataloader(self):
        return self._loader(self.train, False)
def GetNodeOutDegV_PUndirNet(Graph, NIdOutDegV):
    """Thin wrapper delegating to the SWIG-generated ``_snap`` implementation."""
    return _snap.GetNodeOutDegV_PUndirNet(Graph, NIdOutDegV)
def make_user_schema(**kwargs):
    """Build an object schema with string first_name/last_name fields; extra
    keyword arguments are forwarded to make_object_schema."""
    return make_object_schema(first_name={'type': 'string'}, last_name={'type': 'string'}, **kwargs)
def _shuffle_and_split(data: List, test_ratio=None, test_size=None, seed=0):
random.seed(seed)
size = len(data)
if (test_ratio is not None):
train_ratio = (1 - test_ratio)
train_size = math.floor((size * train_ratio))
elif (test_size is not None):
train_size = (size - test_size)
index = list(range(size))
random.shuffle(index)
train_index = index[:train_size]
test_index = index[train_size:]
train_data = [data[i] for i in train_index]
test_data = [data[i] for i in test_index]
return (train_data, test_data) |
def test_tpfp_openimages():
    """tpfp_openimages: group-of merging, the ioa threshold, and empty GT."""
    det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98], [10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97], [30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
    gt_bboxes = np.array([[10.0, 10.0, 30.0, 30.0], [30.0, 30.0, 50.0, 50.0]])
    gt_groups_of = np.array([True, False], dtype=bool)
    gt_ignore = np.zeros((0, 4))
    # Case 1: use_group_of=True — detections matching the group-of box are
    # merged, so only 4 detections survive in cls_dets.
    result = tpfp_openimages(det_bboxes, gt_bboxes, gt_bboxes_ignore=gt_ignore, gt_bboxes_group_of=gt_groups_of, use_group_of=True, ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert (tp.shape == (1, 4))
    assert (fp.shape == (1, 4))
    assert (cls_dets.shape == (4, 5))
    assert (tp == np.array([[0, 1, 0, 1]])).all()
    assert (fp == np.array([[1, 0, 1, 0]])).all()
    cls_dets_gt = np.array([[28.0, 28.0, 35.0, 35.0, 0.97], [30.0, 30.0, 51.0, 51.0, 0.96], [100.0, 110.0, 120.0, 130.0, 0.15], [10.0, 10.0, 15.0, 15.0, 1.0]])
    assert (cls_dets == cls_dets_gt).all()
    # Case 2: use_group_of=False — all 6 detections are kept as-is.
    result = tpfp_openimages(det_bboxes, gt_bboxes, gt_bboxes_ignore=gt_ignore, gt_bboxes_group_of=gt_groups_of, use_group_of=False, ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert (tp.shape == (1, 6))
    assert (fp.shape == (1, 6))
    assert (cls_dets.shape == (6, 5))
    # Case 3: both GT boxes marked group-of — 3 merged detections remain.
    gt_groups_of = np.array([True, True], dtype=bool)
    result = tpfp_openimages(det_bboxes, gt_bboxes, gt_bboxes_ignore=gt_ignore, gt_bboxes_group_of=gt_groups_of, use_group_of=True, ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert (tp.shape == (1, 3))
    assert (fp.shape == (1, 3))
    assert (cls_dets.shape == (3, 5))
    # Case 4: no ground truth at all — every detection is a false positive.
    gt_bboxes = np.zeros((0, 4))
    gt_groups_of = np.empty(0)
    result = tpfp_openimages(det_bboxes, gt_bboxes, gt_bboxes_ignore=gt_ignore, gt_bboxes_group_of=gt_groups_of, use_group_of=True, ioa_thr=0.5)
    fp = result[1]
    assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()
_mode(matmul=False)
def kid(x, y, max_size=5000):
    """Kernel Inception Distance: mean squared MMD over bounded partitions.

    Splits x and y into the fewest partitions whose per-side size stays at
    or below max_size, accumulates squared_mmd per partition, and averages.
    """
    x_size, y_size = x.shape[0], y.shape[0]
    n_partitions = math.ceil(max(x_size / max_size, y_size / max_size))
    total_mmd = x.new_zeros([])
    for part in range(n_partitions):
        # Proportional slice boundaries so every element is used exactly once.
        x_lo = round(part * x_size / n_partitions)
        x_hi = round((part + 1) * x_size / n_partitions)
        y_lo = round(part * y_size / n_partitions)
        y_hi = round((part + 1) * y_size / n_partitions)
        total_mmd = total_mmd + squared_mmd(x[x_lo:x_hi], y[y_lo:y_hi])
    return total_mmd / n_partitions
def generate_syn_feature(generator, classes, attribute, num):
    """Synthesize ``num`` visual features per class from class attributes.

    Reads feature/attribute/noise sizes and the CUDA flag from the
    module-level ``opt`` namespace. Returns (features, labels), both on CPU.
    """
    nclass = classes.size(0)
    # Preallocated output buffers: one contiguous block of `num` rows per class.
    syn_feature = torch.FloatTensor((nclass * num), opt.resSize)
    syn_label = torch.LongTensor((nclass * num))
    syn_att = torch.FloatTensor(num, opt.attSize)
    syn_noise = torch.FloatTensor(num, opt.nz)
    if opt.cuda:
        syn_att = syn_att.cuda()
        syn_noise = syn_noise.cuda()
    for i in range(nclass):
        iclass = classes[i]
        iclass_att = attribute[iclass]
        # Same class-attribute vector repeated for the whole batch,
        # combined with fresh standard-normal noise.
        syn_att.copy_(iclass_att.repeat(num, 1))
        syn_noise.normal_(0, 1)
        with torch.no_grad():
            syn_noisev = Variable(syn_noise)
            syn_attv = Variable(syn_att)
            fake = generator(syn_noisev, c=syn_attv)
        output = fake
        # Write this class's block of rows into the preallocated buffers.
        syn_feature.narrow(0, (i * num), num).copy_(output.data.cpu())
        syn_label.narrow(0, (i * num), num).fill_(iclass)
    return (syn_feature, syn_label)
class PROBINGEval(object):
    """SentEval-style probing-task evaluator.

    Loads a tab-separated probing dataset, embeds the sentences with a
    user-supplied batcher, and trains/evaluates a SplitClassifier on the
    embeddings.
    """

    def __init__(self, task, task_path, seed=1111):
        self.seed = seed
        self.task = task
        logging.debug('***** (Probing) Transfer task : %s classification *****', self.task.upper())
        self.task_data = {'train': {'X': [], 'y': []}, 'dev': {'X': [], 'y': []}, 'test': {'X': [], 'y': []}}
        self.loadFile(task_path)
        logging.info(('Loaded %s train - %s dev - %s test for %s' % (len(self.task_data['train']['y']), len(self.task_data['dev']['y']), len(self.task_data['test']['y']), self.task)))

    def do_prepare(self, params, prepare):
        # Hand every sentence from all three splits to the caller's hook.
        samples = ((self.task_data['train']['X'] + self.task_data['dev']['X']) + self.task_data['test']['X'])
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse the TSV task file: columns are split-tag, label, ..., sentence."""
        self.tok2split = {'tr': 'train', 'va': 'dev', 'te': 'test'}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip().split('\t')
                self.task_data[self.tok2split[line[0]]]['X'].append(line[(- 1)].split())
                self.task_data[self.tok2split[line[0]]]['y'].append(line[1])
        # Map string labels to contiguous integer ids (sorted by label name).
        labels = sorted(np.unique(self.task_data['train']['y']))
        self.tok2label = dict(zip(labels, range(len(labels))))
        self.nclasses = len(self.tok2label)
        for split in self.task_data:
            for (i, y) in enumerate(self.task_data[split]['y']):
                self.task_data[split]['y'][i] = self.tok2label[y]

    def run(self, params, batcher):
        """Embed all splits with ``batcher``, then fit/evaluate a classifier."""
        task_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size
        logging.info('Computing embeddings for train/dev/test')
        for key in self.task_data:
            # Sort by (length, label) so batches are length-homogeneous.
            sorted_data = sorted(zip(self.task_data[key]['X'], self.task_data[key]['y']), key=(lambda z: (len(z[0]), z[1])))
            (self.task_data[key]['X'], self.task_data[key]['y']) = map(list, zip(*sorted_data))
            task_embed[key]['X'] = []
            for ii in range(0, len(self.task_data[key]['y']), bsize):
                batch = self.task_data[key]['X'][ii:(ii + bsize)]
                embeddings = batcher(params, batch)
                task_embed[key]['X'].append(embeddings)
            task_embed[key]['X'] = np.vstack(task_embed[key]['X'])
            task_embed[key]['y'] = np.array(self.task_data[key]['y'])
        logging.info('Computed embeddings')
        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed, 'usepytorch': params.usepytorch, 'classifier': params.classifier}
        if ((self.task == 'WordContent') and (params.classifier['nhid'] > 0)):
            # WordContent is evaluated with a linear (nhid=0) classifier only;
            # deepcopy so the caller's shared config dict is not mutated.
            config_classifier = copy.deepcopy(config_classifier)
            config_classifier['classifier']['nhid'] = 0
            # NOTE(review): looks like a leftover debug print.
            print(params.classifier['nhid'])
        clf = SplitClassifier(X={'train': task_embed['train']['X'], 'valid': task_embed['dev']['X'], 'test': task_embed['test']['X']}, y={'train': task_embed['train']['y'], 'valid': task_embed['dev']['y'], 'test': task_embed['test']['y']}, config=config_classifier)
        (devacc, testacc) = clf.run()
        logging.debug(('\nDev acc : %.1f Test acc : %.1f for %s classification\n' % (devacc, testacc, self.task.upper())))
        return {'devacc': devacc, 'acc': testacc, 'ndev': len(task_embed['dev']['X']), 'ntest': len(task_embed['test']['X'])}
def gen_lean_struct(struct_def: StructDefinition, namespace: ScopedName, open_namespaces: List[ScopedName]) -> List[List[str]]:
    """Generate Lean definitions mirroring a Cairo struct.

    Emits: a value struct and a pointer struct, per-member offset constants
    plus SIZE, two cast functions reading the struct out of memory, and a
    coercion from the pointer struct to F.

    NOTE(review): several Lean snippets below appear mojibake-damaged —
    'mem : F F' was presumably 'mem : F → F' and the coercion body
    's, s._ptr' presumably 'λ s, s._ptr'; confirm against the original
    generator before relying on the emitted Lean.
    """
    # One '( name : type )' field declaration per Cairo struct member.
    member_defs = [f'( {name} : {get_lean_type(member.cairo_type, namespace, open_namespaces)} )' for (name, member) in struct_def.members.items()]
    member_casts = [(name, get_lean_type_cast(member.cairo_type, namespace, open_namespaces, ' ')) for (name, member) in struct_def.members.items()]
    struct_name = get_name_in_scope(struct_def.full_name, namespace)
    ptr_name = get_name_in_scope(struct_def.full_name, namespace, LEAN_STRUCT_PTR_PREFIX)
    offset_name = get_name_in_scope(struct_def.full_name, namespace, LEAN_STRUCT_OFFSET_PREFIX)
    cast_name = get_name_in_scope(struct_def.full_name, namespace, LEAN_CAST_PREFIX)
    ptr_cast_name = get_name_in_scope(struct_def.full_name, namespace, (LEAN_CAST_PREFIX + LEAN_STRUCT_PTR_PREFIX))
    # Value struct and pointer struct (the latter carries the extra _ptr field).
    struct_defs = [[f'[ext] structure {struct_name} (F : Type) :=', (' ' + ' '.join(member_defs))], [f'[ext] structure {ptr_name} (F : Type) :=', (' ( _ptr : F ) ' + ' '.join(member_defs))]]
    # Per-member offsets and the total struct size.
    struct_defs += [[f'[reducible] def {offset_name}.{name} := {member.offset}'] for (name, member) in struct_def.members.items()]
    struct_defs += [[f'[reducible] def {offset_name}.SIZE := {struct_def.size}']]
    # Cast functions reading the struct (directly / through a pointer) from
    # memory, and the pointer-to-F coercion.
    struct_defs += [[f'[reducible] def {cast_name} (mem : F F) (p : F) : {struct_name} F := {{', ((' ' + ',\n '.join([f'{m} := {m_cast}mem (p + {offset_name}.{m})' for (m, m_cast) in member_casts])) + '\n}')], [f'[reducible] def {ptr_cast_name} (mem : F F) (p : F) : {ptr_name} F := {{', ((' _ptr := mem p,\n ' + ',\n '.join([f'{m} := {m_cast}mem ((mem p) + {offset_name}.{m})' for (m, m_cast) in member_casts])) + '\n}')], [f'instance {ptr_name}_to_F : has_coe ({ptr_name} F) F := s, s._ptr']]
    return struct_defs
def simImportShape(fileformat, pathAndFilename, options, identicalVerticeTolerance, scalingFactor):
    """Import a mesh file as a CoppeliaSim shape via the C API wrapper.

    The path is passed to the C layer ASCII-encoded; _check_return raises on
    a failure code. Returns the handle of the imported shape.
    """
    handle = lib.simImportShape(fileformat, pathAndFilename.encode('ascii'), options, identicalVerticeTolerance, scalingFactor)
    _check_return(handle)
    return handle
# Restored decorator: the original line was mangled to a bare
# `.parametrize(...)`; the body already uses pytest.param / pytest.mark.
@pytest.mark.parametrize('data_dict', [pytest.param('full_spark_dataset', marks=pytest.mark.spark), pytest.param('full_pandas_dataset', marks=pytest.mark.core)])
def test_feature_schema_schema_columns(data_dict, request):
    """The assembled dataset exposes exactly the expected schema columns."""
    dataset = create_dataset(request.getfixturevalue(data_dict))
    assert (dataset.feature_schema.columns == ['user_id', 'item_id', 'timestamp', 'rating', 'gender', 'category_id', 'feature1'])
def test_tmu_tilde(caplog):
    """tmu_tilde on a model whose POI bounds extend below zero logs a warning."""
    mu = 1.0
    model = pyhf.simplemodels.uncorrelated_background([6], [9], [3])
    data = ([9] + model.config.auxdata)
    init_pars = model.config.suggested_init()
    par_bounds = model.config.suggested_bounds()
    fixed_params = model.config.suggested_fixed()
    # Widen the POI bounds to [-10, 10]; pyhf then warns about the choice
    # of test statistic.
    par_bounds[model.config.poi_index] = [(- 10), 10]
    with caplog.at_level(logging.WARNING, 'pyhf.infer.test_statistics'):
        pyhf.infer.test_statistics.tmu_tilde(mu, data, model, init_pars, par_bounds, fixed_params)
        assert ('tmu_tilde test statistic used for fit' in caplog.text)
        caplog.clear()
class TestMinisketch(unittest.TestCase):
    """Round-trip tests: merging two sketches must decode the symmetric
    difference of the underlying random sets."""

    @classmethod
    def construct_data(cls, field_size, num_a_only, num_b_only, num_both):
        """Return two overlapping lists of distinct random field elements.

        ``full_a`` holds num_a_only + num_both elements, ``full_b`` holds
        num_b_only + num_both, and they share exactly num_both elements.
        (Restored @classmethod: the first parameter is ``cls`` and no
        instance state is used — the decorator had been mangled away.)
        """
        sample = []
        for _ in range(((num_a_only + num_b_only) + num_both)):
            # Rejection-sample until a fresh distinct element is found.
            while True:
                r = random.randrange(1, (1 << field_size))
                if (r not in sample):
                    sample.append(r)
                    break
        full_a = sample[:(num_a_only + num_both)]
        full_b = sample[num_a_only:]
        random.shuffle(full_a)
        random.shuffle(full_b)
        return (full_a, full_b)

    def field_size_capacity_test(self, field_size, capacity):
        """One randomized reconciliation round for (field_size, capacity)."""
        used_capacity = random.randrange((capacity + 1))
        num_a = random.randrange((used_capacity + 1))
        num_both = random.randrange((min((2 * capacity), (((1 << field_size) - 1) - used_capacity)) + 1))
        (full_a, full_b) = self.construct_data(field_size, num_a, (used_capacity - num_a), num_both)
        sketch_a = Minisketch(field_size, capacity)
        sketch_b = Minisketch(field_size, capacity)
        for v in full_a:
            sketch_a.add(v)
        for v in full_b:
            sketch_b.add(v)
        sketch_combined = sketch_a.clone()
        # Round-trip B through serialize/deserialize to cover the wire format.
        sketch_b_ser = sketch_b.serialize()
        sketch_b_received = Minisketch(field_size, capacity)
        sketch_b_received.deserialize(sketch_b_ser)
        sketch_combined.merge(sketch_b_received)
        decode = sketch_combined.decode()
        self.assertEqual(decode, sorted((set(full_a) ^ set(full_b))))

    def test(self):
        for field_size in range(2, 65):
            for capacity in [0, 1, 2, 5, 10, field_size]:
                self.field_size_capacity_test(field_size, min(capacity, ((1 << field_size) - 1)))
def main():
    """Training entry point: load data, build or restore the model, train.

    Relies on the module-level ``opt`` options namespace throughout.
    """
    print(("Loading train and validate data from '%s'" % opt.data))
    train = torch.load((opt.data + '.train.pt'))
    valid = torch.load((opt.data + '.valid.pt'))
    print((' * number of training sentences: %d' % len(train)))
    print((' * maximum batch size: %d' % opt.batch_size))
    if opt.train_from:
        # Resume: restore saved options and continue from the next epoch.
        print(('Loading checkpoint from %s' % opt.train_from))
        checkpoint = torch.load(opt.train_from, map_location=(lambda storage, loc: storage))
        model_opt = checkpoint['opt']
        opt.start_epoch = (checkpoint['epoch'] + 1)
    else:
        checkpoint = None
        model_opt = opt
    fields = load_fields(train, valid, checkpoint)
    src_features = collect_features(train, fields)
    for (j, feat) in enumerate(src_features):
        print((' * src feature %d size = %d' % (j, len(fields[feat].vocab))))
    model = build_model(model_opt, opt, fields, checkpoint)
    print(model)
    tally_parameters(model)
    check_save_model_path()
    optim = build_optim(model, checkpoint)
    train_model(model, train, valid, fields, optim, model_opt=model_opt)
class Robot():
    """PyBullet wrapper for a fixed-base robot arm with a two-finger gripper.

    Loads a URDF, exposes reset / observation / action APIs, and resolves
    end-effector targets to joint positions through a MixedIK solver.
    Orientations are reported as Euler angles when `euler_obs` is set,
    quaternions otherwise.
    """

    def __init__(self, filename, base_position, base_orientation, initial_joint_positions, max_joint_force, gripper_force, arm_joint_ids, gripper_joint_ids, gripper_joint_limits, tcp_link_id, end_effector_link_id, cid, use_nullspace, max_velocity, use_ik_fast, euler_obs, lower_joint_limits=((- 2.8973), (- 1.7628), (- 2.8973), (- 3.0718), (- 2.8973), (- 0.0175), (- 2.8973)), upper_joint_limits=(2.8973, 1.7628, 2.8973, (- 0.0698), 2.8973, 3.7525, 2.8973), max_rel_pos=0.02, max_rel_orn=0.05, magic_scaling_factor_pos=1, magic_scaling_factor_orn=1, use_target_pose=True, **kwargs):
        """Store configuration; the simulation body is created in load().

        NOTE(review): the default joint limits look like a 7-DOF Franka
        Panda arm — confirm when using a different robot model.
        """
        log.info('Loading robot')
        # pybullet physics client id; passed to every pybullet call.
        self.cid = cid
        self.filename = filename
        self.use_nullspace = use_nullspace
        self.max_velocity = max_velocity
        self.use_ik_fast = use_ik_fast
        self.base_position = base_position
        self.base_orientation = p.getQuaternionFromEuler(base_orientation)
        self.arm_joint_ids = arm_joint_ids
        self.initial_joint_positions = np.array(initial_joint_positions)
        self.gripper_joint_ids = gripper_joint_ids
        self.max_joint_force = max_joint_force
        self.gripper_force = gripper_force
        self.gripper_joint_limits = gripper_joint_limits
        self.tcp_link_id = tcp_link_id
        self.prev_ee_orn = p.getQuaternionFromEuler([0, 0, 0])
        # Set by load() once the URDF has been created in the simulation.
        self.robot_uid = None
        self.end_effector_link_id = end_effector_link_id
        # Gripper command: 1 = open, -1 = close (see apply_action).
        self.gripper_action = 1
        # Generic IK bounds / rest pose, filled in load().
        self.ll = self.ul = self.jr = self.rp = None
        # Physical joint limits, used by the IK solver.
        self.ll_real = np.array(lower_joint_limits)
        self.ul_real = np.array(upper_joint_limits)
        self.mixed_ik = None
        self.euler_obs = euler_obs
        # Per-step caps for relative position/orientation actions.
        self.max_rel_pos = max_rel_pos
        self.max_rel_orn = max_rel_orn
        self.magic_scaling_factor_pos = magic_scaling_factor_pos
        self.magic_scaling_factor_orn = magic_scaling_factor_orn
        # Accumulated target pose (used when use_target_pose is True).
        self.target_pos = None
        self.target_orn = None
        self.use_target_pose = use_target_pose

    def load(self):
        """Create the robot in the simulation and set up gripper coupling
        and the IK solver."""
        self.robot_uid = p.loadURDF(fileName=self.filename, basePosition=self.base_position, baseOrientation=self.base_orientation, useFixedBase=True, physicsClientId=self.cid)
        self.add_base_cylinder()
        # Gear constraint couples the two gripper fingers so they move
        # symmetrically (gearRatio=-1).
        c = p.createConstraint(self.robot_uid, self.gripper_joint_ids[0], self.robot_uid, self.gripper_joint_ids[1], jointType=p.JOINT_GEAR, jointAxis=[1, 0, 0], parentFramePosition=[0, 0, 0], childFramePosition=[0, 0, 0], physicsClientId=self.cid)
        p.changeConstraint(c, gearRatio=(- 1), erp=0.1, maxForce=50, physicsClientId=self.cid)
        num_dof = p.computeDofCount(self.robot_uid)
        # Loose generic IK bounds (+/- 7 rad) and rest pose for pybullet IK.
        self.ll = ([(- 7)] * num_dof)
        self.ul = ([7] * num_dof)
        self.jr = ([7] * num_dof)
        self.rp = (list(self.initial_joint_positions) + ([self.gripper_joint_limits[1]] * 2))
        self.reset()
        self.mixed_ik = MixedIK(self.robot_uid, self.cid, self.ll_real, self.ul_real, self.base_position, self.base_orientation, self.tcp_link_id, self.ll, self.ul, self.jr, self.rp, self.use_ik_fast, threshold_pos=0.03, threshold_orn=0.1, weights=(10, 8, 6, 6, 2, 2, 1), num_angles=30)

    def add_base_cylinder(self):
        """Add a purely visual cylinder under the robot base (no collision
        shape is created)."""
        pos = self.base_position.copy()
        pos[2] /= 2
        angle = p.getEulerFromQuaternion(self.base_orientation)[2]
        # Small offset along the base yaw direction — presumably to center
        # the cylinder under the robot mount; confirm visually.
        pos[0] -= (np.cos(angle) * 0.05)
        pos[1] -= (np.sin(angle) * 0.05)
        cylinder = p.createVisualShape(shapeType=p.GEOM_CYLINDER, rgbaColor=[1, 1, 1, 1], radius=0.13, length=self.base_position[2], visualFramePosition=pos)
        p.createMultiBody(baseVisualShapeIndex=cylinder)

    def reset(self, robot_state=None):
        """Reset arm and gripper joints.

        With robot_state=None, go to the configured initial pose with an
        open gripper; otherwise restore joints from a stored observation
        vector (see get_observation_labels for its layout).
        """
        if (robot_state is None):
            gripper_state = self.gripper_joint_limits[1]
            joint_states = self.initial_joint_positions
        else:
            joint_indices = [i for (i, x) in enumerate(self.get_observation_labels()) if x.startswith('robot_joint')]
            joint_states = robot_state[joint_indices]
            # Opening width is the sum of both finger joints, so each
            # finger gets half of it.
            gripper_state = (robot_state[self.get_observation_labels().index('gripper_opening_width')] / 2)
        assert (len(joint_states) == len(self.arm_joint_ids))
        for (i, _id) in enumerate(self.arm_joint_ids):
            # resetJointState teleports the joint; the motor control keeps
            # it there during subsequent simulation steps.
            p.resetJointState(self.robot_uid, _id, joint_states[i], physicsClientId=self.cid)
            p.setJointMotorControl2(bodyIndex=self.robot_uid, jointIndex=_id, controlMode=p.POSITION_CONTROL, force=self.max_joint_force, targetPosition=joint_states[i], maxVelocity=self.max_velocity, physicsClientId=self.cid)
        for i in self.gripper_joint_ids:
            p.resetJointState(self.robot_uid, i, gripper_state, physicsClientId=self.cid)
            p.setJointMotorControl2(bodyIndex=self.robot_uid, jointIndex=i, controlMode=p.POSITION_CONTROL, force=self.gripper_force, targetPosition=gripper_state, maxVelocity=1, physicsClientId=self.cid)
        # Re-seed the accumulated action target from the actual TCP pose.
        (tcp_pos, tcp_orn) = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]
        if self.euler_obs:
            tcp_orn = p.getEulerFromQuaternion(tcp_orn)
        self.target_pos = np.array(tcp_pos)
        self.target_orn = np.array(tcp_orn)

    def get_observation(self):
        """Return (robot_state, robot_info).

        robot_state is a flat array: tcp pos (3), tcp orn (3 or 4),
        gripper opening width (1), arm joint positions, gripper action.
        robot_info additionally carries uid and current contact points.
        """
        (tcp_pos, tcp_orn) = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]
        if self.euler_obs:
            tcp_orn = p.getEulerFromQuaternion(tcp_orn)
        # Opening width = sum of the two coupled finger joint positions.
        gripper_opening_width = (p.getJointState(self.robot_uid, self.gripper_joint_ids[0], physicsClientId=self.cid)[0] + p.getJointState(self.robot_uid, self.gripper_joint_ids[1], physicsClientId=self.cid)[0])
        arm_joint_states = []
        for i in self.arm_joint_ids:
            arm_joint_states.append(p.getJointState(self.robot_uid, i, physicsClientId=self.cid)[0])
        robot_state = np.array([*tcp_pos, *tcp_orn, gripper_opening_width, *arm_joint_states, self.gripper_action])
        robot_info = {'tcp_pos': tcp_pos, 'tcp_orn': tcp_orn, 'gripper_opening_width': gripper_opening_width, 'arm_joint_states': arm_joint_states, 'gripper_action': self.gripper_action, 'uid': self.robot_uid, 'contacts': p.getContactPoints(bodyA=self.robot_uid, physicsClientId=self.cid)}
        return (robot_state, robot_info)

    def get_observation_labels(self):
        """Labels matching get_observation()'s robot_state layout."""
        tcp_pos_labels = [f'tcp_pos_{ax}' for ax in ('x', 'y', 'z')]
        if self.euler_obs:
            tcp_orn_labels = [f'tcp_orn_{ax}' for ax in ('x', 'y', 'z')]
        else:
            tcp_orn_labels = [f'tcp_orn_{ax}' for ax in ('x', 'y', 'z', 'w')]
        return [*tcp_pos_labels, *tcp_orn_labels, 'gripper_opening_width', *[f'robot_joint_{i}' for i in self.arm_joint_ids], 'gripper_action']

    def relative_to_absolute(self, action):
        """Convert a 7-dim relative action (dpos 3, dorn 3, gripper 1) to
        an absolute (pos, orn, gripper) target.

        When use_target_pose is set, deltas accumulate into a persistent
        target pose; otherwise they offset the currently measured TCP pose.
        """
        assert (len(action) == 7)
        (rel_pos, rel_orn, gripper) = np.split(action, [3, 6])
        # Scale normalized deltas to physical step sizes.
        rel_pos *= (self.max_rel_pos * self.magic_scaling_factor_pos)
        rel_orn *= (self.max_rel_orn * self.magic_scaling_factor_orn)
        if self.use_target_pose:
            self.target_pos += rel_pos
            self.target_orn += rel_orn
            return (self.target_pos, self.target_orn, gripper)
        else:
            (tcp_pos, tcp_orn) = p.getLinkState(self.robot_uid, self.tcp_link_id, physicsClientId=self.cid)[:2]
            tcp_orn = p.getEulerFromQuaternion(tcp_orn)
            abs_pos = (np.array(tcp_pos) + rel_pos)
            abs_orn = (np.array(tcp_orn) + rel_orn)
            return (abs_pos, abs_orn, gripper)

    def apply_action(self, action):
        """Drive the arm towards an end-effector target.

        Accepts either a 3-tuple (pos, orn, gripper) of absolute targets
        or a 7-dim relative action (converted via relative_to_absolute).
        Gripper command must be 1 (open) or -1 (close).
        """
        if (not (len(action) == 3)):
            action = self.relative_to_absolute(action)
        (target_ee_pos, target_ee_orn, self.gripper_action) = action
        assert (len(target_ee_pos) == 3)
        assert (len(target_ee_orn) in (3, 4))
        if (len(target_ee_orn) == 3):
            target_ee_orn = p.getQuaternionFromEuler(target_ee_orn)
        # Unwrap a single-element array/list gripper command to a scalar.
        if ((not isinstance(self.gripper_action, int)) and (len(self.gripper_action) == 1)):
            self.gripper_action = self.gripper_action[0]
        assert (self.gripper_action in ((- 1), 1))
        jnt_ps = self.mixed_ik.get_ik(target_ee_pos, target_ee_orn)
        # Command every joint up to (excluding) the end-effector link.
        for i in range(self.end_effector_link_id):
            p.setJointMotorControl2(bodyIndex=self.robot_uid, jointIndex=i, controlMode=p.POSITION_CONTROL, force=self.max_joint_force, targetPosition=jnt_ps[i], maxVelocity=self.max_velocity, physicsClientId=self.cid)
        self.control_gripper(self.gripper_action)

    def control_gripper(self, gripper_action):
        """Open (1) or close (anything else) the gripper fingers."""
        if (gripper_action == 1):
            gripper_finger_position = self.gripper_joint_limits[1]
            # Much weaker force when opening — presumably to avoid jerking
            # held objects; confirm.
            gripper_force = (self.gripper_force / 100)
        else:
            gripper_finger_position = self.gripper_joint_limits[0]
            gripper_force = self.gripper_force
        for id in self.gripper_joint_ids:
            p.setJointMotorControl2(bodyIndex=self.robot_uid, jointIndex=id, controlMode=p.POSITION_CONTROL, targetPosition=gripper_finger_position, force=gripper_force, maxVelocity=1, physicsClientId=self.cid)

    def serialize(self):
        """Snapshot base pose, all joint states, and the gripper command
        for later restoration via reset_from_storage."""
        return {'uid': self.robot_uid, 'info': p.getBodyInfo(self.robot_uid, physicsClientId=self.cid), 'pose': p.getBasePositionAndOrientation(self.robot_uid, physicsClientId=self.cid), 'joints': p.getJointStates(self.robot_uid, list(range(p.getNumJoints(self.robot_uid, physicsClientId=self.cid))), physicsClientId=self.cid), 'gripper_action': self.gripper_action}

    def reset_from_storage(self, data):
        """Restore a snapshot produced by serialize()."""
        p.resetBasePositionAndOrientation(bodyUniqueId=self.robot_uid, posObj=data['pose'][0], ornObj=data['pose'][1], physicsClientId=self.cid)
        num_joints = len(data['joints'])
        assert (num_joints == p.getNumJoints(self.robot_uid, physicsClientId=self.cid))
        for (i, (value, velocity, *_)) in enumerate(data['joints']):
            p.resetJointState(bodyUniqueId=self.robot_uid, jointIndex=i, targetValue=value, targetVelocity=velocity, physicsClientId=self.cid)
            p.setJointMotorControl2(bodyIndex=self.robot_uid, jointIndex=i, controlMode=p.POSITION_CONTROL, force=self.max_joint_force, targetPosition=value, maxVelocity=self.max_velocity, physicsClientId=self.cid)
        self.control_gripper(data['gripper_action'])

    def __str__(self):
        return f'{self.filename} : {self.__dict__}'
def _check_valid_values(data: str) -> bool:
    """Return True when `data` is neither a known null marker nor NaN."""
    if data in NULL_VALUES:
        return False
    return not pd.isna(data)
def create_data_module(env: gym.Env, env_name: str, rollout_directory: str, batch_size: int=256, val_frac: float=0.1, unconditional_policy: bool=False, reward_conditioning: bool=False, average_reward_to_go: bool=True, seed: Optional[int]=None) -> AbstractDataModule:
    """Build the data module matching the environment family and the
    requested conditioning mode.

    D4RL environments get one of the D4RL modules (behavior cloning,
    reward-conditioned, or goal-conditioned); everything else uses the
    GCSL module backed by rollouts on disk.
    """
    if unconditional_policy and reward_conditioning:
        raise ValueError('Cannot condition on reward with an unconditional policy.')
    if env_name in step.d4rl_env_names:
        if unconditional_policy:
            return D4RLBCDataModule(env, batch_size=batch_size, val_frac=val_frac, seed=seed)
        if reward_conditioning:
            return D4RLRvSRDataModule(env, batch_size=batch_size, val_frac=val_frac, average_reward_to_go=average_reward_to_go, seed=seed)
        return D4RLRvSGDataModule(env, batch_size=batch_size, seed=seed, val_frac=val_frac)
    if unconditional_policy:
        raise NotImplementedError
    return GCSLDataModule(rollout_directory, batch_size=batch_size, val_frac=val_frac, seed=seed, num_workers=os.cpu_count())
def tf_efficientnet_b5_ap(pretrained=False, **kwargs):
    """EfficientNet-B5 (AdvProp), TensorFlow port: forces TF batch-norm
    epsilon and 'same' padding to match the original TF weights."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet('tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
def _set_components_and_inputs(parser, args):
args.components = []
if (args.translate or args.run_all):
args.components.append('translate')
if (args.preprocess or args.run_all):
args.components.append('preprocess')
if (args.search or args.run_all):
args.components.append('search')
if (not args.components):
_set_components_automatically(parser, args)
if (args.validate or (args.debug and (len(args.components) == 2))):
args.components.append('validate')
args.translate_inputs = []
args.preprocess_input = 'output.sas'
args.search_input = 'output.sas'
assert args.components
first = args.components[0]
num_files = len(args.filenames)
if (first == 'translate'):
if (('--help' in args.translate_options) or ('-h' in args.translate_options)):
args.translate_inputs = []
elif (num_files == 1):
(task_file,) = args.filenames
domain_file = util.find_domain_filename(task_file)
args.translate_inputs = [domain_file, task_file]
elif (num_files == 2):
args.translate_inputs = args.filenames
else:
print_usage_and_exit_with_driver_input_error(parser, 'translator needs one or two input files')
elif (first == 'preprocess'):
if ('--help' in args.preprocess_options):
args.preprocess_input = None
elif (num_files == 1):
(args.preprocess_input,) = args.filenames
else:
parser.error('preprocessor needs exactly one input file')
elif (first == 'search'):
if ('--help' in args.search_options):
args.search_input = None
elif (num_files == 1):
(args.search_input,) = args.filenames
else:
print_usage_and_exit_with_driver_input_error(parser, 'search needs exactly one input file')
else:
assert False, first |
class MarkdownParser(ParserStrategy):
    """Parser strategy that renders a Markdown file to HTML and extracts
    its plain-text content."""

    def read(self, file_path: str) -> str:
        """Return the concatenated text nodes of the rendered Markdown."""
        with open(file_path, 'r') as md_file:
            rendered = markdown.markdown(md_file.read())
        soup = BeautifulSoup(rendered, 'html.parser')
        return ''.join(soup.findAll(string=True))
def performance_fit(y_label, y_output):
    """Compute quality-assessment correlation metrics between predictions
    and ground-truth labels.

    Returns (PLCC, SRCC, KRCC, RMSE): PLCC and RMSE are computed on the
    logistic-fitted predictions; SRCC and KRCC are rank-based and use the
    raw predictions.
    """
    y_output_logistic = fit_function(y_label, y_output)
    PLCC = stats.pearsonr(y_output_logistic, y_label)[0]
    SRCC = stats.spearmanr(y_output, y_label)[0]
    # Fix: use the public scipy.stats.kendalltau — the private
    # scipy.stats.stats submodule was deprecated and later removed.
    KRCC = stats.kendalltau(y_output, y_label)[0]
    RMSE = np.sqrt(((y_output_logistic - y_label) ** 2).mean())
    return (PLCC, SRCC, KRCC, RMSE)
def generate_lemp_decision_rule_table(lemp_decision_rule_df):
    """Write a per-(model, K) CSV comparing LEMP vs blocked matrix-multiply
    timings, then reload it as a DataFrame sorted by model and K.

    Relies on the module-level `blocked_mm_df` and `lemp_df` frames for
    the timing columns.
    """
    csv_fname = 'lemp-decision-rule.csv'
    with open(csv_fname, 'w') as csv_out:
        print('model,K,avg_num_items_visited,num_users,num_items,mm_time,lemp_time', file=csv_out)
        for (_, row) in lemp_decision_rule_df.iterrows():
            model = row['model']
            K = row['K']
            avg_num_items_visited = (row['num_comparisons'] / row['num_users'])
            # Hard-coded user counts keyed off the model name — presumably
            # the Netflix / Yahoo R2 / fallback dataset sizes; confirm.
            num_users = (480189 if ('Netflix' in model) else (1823179 if ('R2' in model) else 100990))
            num_items = row['num_items']
            # Best (minimum) observed runtime for this model/K pair.
            mm_time = blocked_mm_df.query(('model == "%s" and K == %d' % (model, K)))['comp_time'].min()
            lemp_time = lemp_df.query(('model == "%s" and K == %d' % (model, K)))['comp_time'].min()
            print(('%s,%d,%d,%d,%d,%f,%f' % (model, K, avg_num_items_visited, num_users, num_items, mm_time, lemp_time)), file=csv_out)
    return pd.read_csv(csv_fname).sort_values(by=['model', 'K'])
def main():
    """Run the simulation across all SNR points in parallel and plot the
    resulting frame-error-rate curves.

    Uses module-level configuration (snrs, n_frames, pay_len, ...) and the
    FER / Glob_FER result arrays, which run_exp presumably fills under the
    shared lock — confirm against run_exp's implementation.
    """
    start_time = time.time()
    threads = []
    lock = threading.Lock()
    for snr_idx in range(len(snrs)):
        # Per-SNR payload / result files exchanged with the experiment.
        tx_payload_file = './data/tx_payload{}.txt'.format(snr_idx)
        rx_payload_file = './data/rx_payload{}.txt'.format(snr_idx)
        rx_crc_file = './data/rx_crc_valid{}.txt'.format(snr_idx)
        # Generate n_frames random lowercase payloads, comma-separated.
        f = open(tx_payload_file, 'w')
        letters = string.ascii_lowercase
        for i in range(n_frames):
            payload = ''.join((random.choice(letters) for i in range(pay_len)))
            f.write((payload + ','))
        f.close()
        # One worker thread per SNR point.
        t = Thread(target=run_exp, args=(lock, snr_idx, tx_payload_file, rx_payload_file, rx_crc_file))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    print(('--- Simulation time: %s seconds ---' % (time.time() - start_time)))
    plt.figure()
    plt.semilogy(snrs, FER, '-d', label='FER')
    plt.semilogy(snrs, Glob_FER, '-d', label='FER including frame miss')
    plt.xlabel('SNR [dB]')
    plt.ylabel('Error rate')
    # Lower y-limit: rates below 10/n_frames are not statistically
    # meaningful with this many frames.
    plt.ylim([(10 / n_frames), 1.05])
    plt.xlim([min(snrs), max(snrs)])
    plt.grid('minor')
    plt.legend(loc='upper right')
    curve_name = 'samp{}_bw{}_sf{}_cr{}_payLen{}_clk_offset_ppm{}_soft{}_ldro{}'.format(samp_rate, bw, sf, cr, pay_len, clk_offset_ppm, soft_decoding, ldro)
    plt.savefig((('results/' + curve_name) + '.png'))
    # Persist the raw curves for later re-plotting.
    with open((('results/' + curve_name) + '.pkl'), 'wb') as f:
        pickle.dump([snrs, FER, Glob_FER], f)
    plt.show()
@_task('speech_recognition')
class SpeechRecognitionTask(LegacyFairseqTask):
    """Task for training speech recognition models from JSON manifests.

    Fixes: the registration call was a bare statement instead of a class
    decorator; `setup_task` uses `cls` and must be a classmethod;
    `add_args` takes no instance and is a staticmethod; `target_dictionary`
    and `source_dictionary` are read as attributes in build_generator, so
    they must be properties.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument parser."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--silence-token', default='', help='token for silence (used by w2l)')
        parser.add_argument('--max-source-positions', default=sys.maxsize, type=int, metavar='N', help='max number of frames in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')

    def __init__(self, args, tgt_dict):
        super().__init__(args)
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Build the task: load dict.txt from the data directory and add
        criterion-specific symbols (CTC blank / ASG replabels)."""
        dict_path = os.path.join(args.data, 'dict.txt')
        if not os.path.isfile(dict_path):
            raise FileNotFoundError('Dict not found: {}'.format(dict_path))
        tgt_dict = Dictionary.load(dict_path)
        if args.criterion == 'ctc_loss':
            tgt_dict.add_symbol('<ctc_blank>')
        elif args.criterion == 'asg_loss':
            for i in range(1, (args.max_replabel + 1)):
                tgt_dict.add_symbol(replabel_symbol(i))
        print('| dictionary: {} types'.format(len(tgt_dict)))
        return cls(args, tgt_dict)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load the ASR dataset described by `<data>/<split>.json`."""
        data_json_path = os.path.join(self.args.data, '{}.json'.format(split))
        self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)

    def build_generator(self, models, args):
        """Return a wav2letter decoder when requested, otherwise fall back
        to the default generator."""
        w2l_decoder = getattr(args, 'w2l_decoder', None)
        if w2l_decoder == 'viterbi':
            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
            return W2lViterbiDecoder(args, self.target_dictionary)
        elif w2l_decoder == 'kenlm':
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
            return W2lKenLMDecoder(args, self.target_dictionary)
        else:
            return super().build_generator(models, args)

    @property
    def target_dictionary(self):
        """Target-side Dictionary (read as an attribute elsewhere)."""
        return self.tgt_dict

    @property
    def source_dictionary(self):
        """No source-side dictionary for raw-audio input."""
        return None

    def max_positions(self):
        """(max source frames, max target tokens)."""
        return (self.args.max_source_positions, self.args.max_target_positions)
class DefectInputFeatures(object):
    """A single preprocessed defect-detection example.

    Attributes:
        example_id: identifier of the originating example.
        source_ids: token ids of the source sequence.
        label: ground-truth defect label.
    """

    def __init__(self, example_id, source_ids, label):
        self.label = label
        self.source_ids = source_ids
        self.example_id = example_id
class SpeciesWrapper(CombinatorialClass):
    """Combinatorial class of the structures of a species on a fixed
    label set.

    Thin view object: iteration and counting are delegated to the wrapped
    species through the attribute *names* given at construction time.
    """

    def __init__(self, species, labels, iterator, generating_series, name, structure_class):
        # `iterator` and `generating_series` are names of attributes on
        # `species`, resolved lazily via getattr in __iter__/cardinality.
        self._species = species
        self._labels = labels
        self._iterator = iterator
        self._generating_series = generating_series
        self._name = ('%s for %s with labels %s' % (name, species, labels))
        # Fall back to the species' default structure class when none given.
        self._structure_class = (structure_class if (structure_class is not None) else species._default_structure_class)

    def labels(self):
        """Return a copy of the label set (copy, so callers cannot mutate
        internal state)."""
        return copy(self._labels)

    def __iter__(self):
        """Iterate over the structures on our labels.

        Yields nothing when the label count lies outside the species'
        [min, max) restriction or when the generating series counts zero
        structures of this size.
        """
        if ((self._species._min is not None) and (len(self._labels) < self._species._min)):
            return iter([])
        if ((self._species._max is not None) and (len(self._labels) >= self._species._max)):
            return iter([])
        try:
            if (self.cardinality() == 0):
                return iter([])
        except TypeError:
            # cardinality() may raise TypeError when the series cannot
            # count this size; surface that as NotImplementedError.
            raise NotImplementedError
        return getattr(self._species, self._iterator)(self._structure_class, self._labels)

    def cardinality(self):
        """Number of structures: the generating series' count at
        len(labels)."""
        return getattr(self._species, self._generating_series)().count(len(self._labels))
class qcdevice():
    """Description of a quantum device: qubit count, coupling graph, SWAP
    duration, and per-qubit / per-edge fidelities.

    A device is built either from explicit arguments or loaded from a
    bundled JSON file when `name` starts with 'default_'; explicit
    arguments override values from the file.
    """

    def __init__(self, name: str, nqubits: int=None, connection: list=None, swap_duration: int=None, fmeas: list=None, fsingle: list=None, ftwo: list=None):
        """Validate inputs and populate the device attributes.

        Raises:
            TypeError: when an argument has the wrong type.
            ValueError: when an edge endpoint or a fidelity-list length is
                invalid.
            AttributeError: when the qubit count or edge set ends up
                unspecified.
        """
        # ---- type validation, before any attribute is set ----
        if not isinstance(name, str):
            raise TypeError('name should be a string.')
        if nqubits is not None:
            if not isinstance(nqubits, int):
                raise TypeError('nqubits should be an integer.')
        if swap_duration is not None:
            if not isinstance(swap_duration, int):
                raise TypeError('swap_duration should be an integer.')
        if connection is not None:
            if not isinstance(connection, (list, tuple)):
                raise TypeError('connection should be a list or tuple.')
            for edge in connection:
                if not isinstance(edge, (list, tuple)):
                    raise TypeError(f'{edge} is not a list or tuple.')
                elif len(edge) != 2:
                    raise TypeError(f'{edge} does not connect two qubits.')
                if not isinstance(edge[0], int):
                    raise TypeError(f'{edge[0]} is not an integer.')
                if not isinstance(edge[1], int):
                    raise TypeError(f'{edge[1]} is not an integer.')
        if fmeas is not None:
            if not isinstance(fmeas, (list, tuple)):
                raise TypeError('fmeas should be a list or tuple.')
            for fmeas_i in fmeas:
                if not isinstance(fmeas_i, (int, float)):
                    raise TypeError(f'{fmeas_i} is not a number.')
        if fsingle is not None:
            if not isinstance(fsingle, (list, tuple)):
                raise TypeError('fsingle should be a list or tuple.')
            for fsingle_i in fsingle:
                if not isinstance(fsingle_i, (int, float)):
                    raise TypeError(f'{fsingle_i} is not a number.')
        if ftwo is not None:
            if not isinstance(ftwo, (list, tuple)):
                raise TypeError('ftwo should be a list or tuple.')
            for ftwo_i in ftwo:
                if not isinstance(ftwo_i, (int, float)):
                    raise TypeError(f'{ftwo_i} is not a number.')
        # ---- optionally load a bundled device description ----
        if name.startswith('default_'):
            f = pkgutil.get_data(__name__, (('devices/' + name) + '.json'))
            data = json.loads(f)
            self.name = data['name']
            self.count_physical_qubit = data['count_physical_qubit']
            self.list_qubit_edge = tuple(tuple(edge) for edge in data['list_qubit_edge'])
            self.swap_duration = data['swap_duration']
            if 'list_fidelity_measure' in data:
                self.list_fidelity_measure = tuple(data['list_fidelity_measure'])
            if 'list_fidelity_single' in data:
                self.list_fidelity_single = tuple(data['list_fidelity_single'])
            if 'list_fidelity_two' in data:
                self.list_fidelity_two = tuple(data['list_fidelity_two'])
        else:
            self.name = name
        # ---- explicit arguments override / complete the file values ----
        if nqubits is not None:
            self.count_physical_qubit = nqubits
        if 'count_physical_qubit' not in self.__dict__:
            raise AttributeError('No physical qubit count specified.')
        if connection is not None:
            for edge in connection:
                if (edge[0] < 0) or (edge[0] >= self.count_physical_qubit):
                    raise ValueError(f'{edge[0]} is outside of range [0, {self.count_physical_qubit}).')
                if (edge[1] < 0) or (edge[1] >= self.count_physical_qubit):
                    raise ValueError(f'{edge[1]} is outside of range [0, {self.count_physical_qubit}).')
            self.list_qubit_edge = tuple(tuple(edge) for edge in connection)
        if 'list_qubit_edge' not in self.__dict__:
            raise AttributeError('No edge set is specified.')
        if swap_duration is not None:
            self.swap_duration = swap_duration
        elif 'swap_duration' not in self.__dict__:
            # BUG FIX: an unconditional `else` here used to reset
            # swap_duration to 3 even when it had just been loaded from a
            # default device file.  Only default to 3 when no value was
            # provided by either source (mirrors the fidelity handling).
            self.swap_duration = 3
        if fmeas is not None:
            if len(fmeas) != self.count_physical_qubit:
                raise ValueError(f'fmeas should have {self.count_physical_qubit} data.')
            self.list_fidelity_measure = tuple(fmeas)
        if 'list_fidelity_measure' not in self.__dict__:
            # Perfect measurement fidelity by default.
            self.list_fidelity_measure = tuple(1 for _ in range(self.count_physical_qubit))
        if fsingle is not None:
            if len(fsingle) != self.count_physical_qubit:
                raise ValueError(f'fsingle should have {self.count_physical_qubit} data.')
            self.list_fidelity_single = tuple(fsingle)
        if 'list_fidelity_single' not in self.__dict__:
            self.list_fidelity_single = tuple(1 for _ in range(self.count_physical_qubit))
        if ftwo is not None:
            if len(ftwo) != len(self.list_qubit_edge):
                raise ValueError(f'ftwo should have {len(self.list_qubit_edge)} data.')
            self.list_fidelity_two = tuple(ftwo)
        if 'list_fidelity_two' not in self.__dict__:
            self.list_fidelity_two = tuple(1 for _ in range(len(self.list_qubit_edge)))
def symbolic_expression(x):
    """Convert `x` into a symbolic expression (an element of SR).

    Dispatches in order on: already-symbolic values, objects with a
    `_symbolic_` hook, sequences / free-module elements (becoming symbolic
    vectors, or a matrix when the entries are themselves vectors),
    matrices, callables (turned into symbolic functions of their
    positional parameters), and finally anything SR() can coerce.
    Relies on Sage's global `vector` and `matrix` constructors being in
    scope.
    """
    from sage.symbolic.expression import Expression
    from sage.symbolic.ring import SR
    from sage.modules.free_module_element import is_FreeModuleElement
    from sage.structure.element import is_Matrix
    if isinstance(x, Expression):
        return x
    elif hasattr(x, '_symbolic_'):
        return x._symbolic_(SR)
    elif (isinstance(x, (tuple, list)) or is_FreeModuleElement(x)):
        expressions = [symbolic_expression(item) for item in x]
        if (not expressions):
            # Empty input: zero-length vector over SR.
            return vector(SR, 0)
        if is_FreeModuleElement(expressions[0]):
            # A sequence of vectors becomes a matrix, one row per entry.
            return matrix(expressions)
        return vector(expressions)
    elif is_Matrix(x):
        if ((not x.nrows()) or (not x.ncols())):
            # Degenerate matrix: keep the shape, just switch the ring to SR.
            return matrix(SR, x.nrows(), x.ncols())
        rows = [symbolic_expression(row) for row in x.rows()]
        return matrix(rows)
    elif callable(x):
        from inspect import signature, Parameter
        try:
            s = signature(x)
        except ValueError:
            # No introspectable signature (e.g. some builtins): fall
            # through to plain SR coercion below.
            pass
        else:
            if all(((param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)) for param in s.parameters.values())):
                # Build symbolic variables named after the parameters and
                # evaluate the callable symbolically.
                vars = [SR.var(name) for name in s.parameters.keys()]
                result = x(*vars)
                if isinstance(result, (tuple, list)):
                    return vector(SR, result).function(*vars)
                else:
                    return SR(result).function(*vars)
    return SR(x)
class OPRA(Dataset):
    """OPRA video-affordance dataset: video clips paired with per-action
    interaction heatmaps on a target image.

    Fix: `to_heatmaps`, `make_gt` and `collate_fn` take no `self`/`cls`
    but were declared as plain methods; they are invoked as
    `self.make_gt(anno)` / `OPRA.to_heatmaps(...)`, so without
    @staticmethod the instance call would raise TypeError (an extra
    positional argument).  Restored the decorators.
    """
    root = 'datasets/opra'
    num_actions = 7
    actions = ['hold', 'touch', 'rotate', 'push', 'pull', 'pick up', 'put down']

    def __init__(self, split, clip_length_in_frames, frames_between_clips, frame_rate, resize):
        """Load annotations, build (and cache) ground-truth heatmaps for
        evaluation splits, and index the videos into clips."""
        super().__init__()
        self.resize = resize
        self.training = (split == 'train')
        annotation_path = os.path.join(self.root, f'annotations/{split}.json')
        self.annos = json.load(open(annotation_path))
        if not self.training:
            # Bind split-specific evaluation hooks (e.g. collect_test).
            self.collect = getattr(self, f'collect_{split}')
            self.accumulate = getattr(self, f'accumulate_{split}')
            self.evaluate = getattr(self, f'evaluate_{split}')
            eval_gt_heatmaps_path = os.path.join(self.root, f'annotations/{split}_gt_heatmaps.pt')
            eval_gt_actions_path = os.path.join(self.root, f'annotations/{split}_gt_actions.pt')
            if os.path.exists(eval_gt_heatmaps_path):
                self.eval_gt_heatmaps = torch.load(eval_gt_heatmaps_path)
                self.eval_gt_actions = torch.load(eval_gt_actions_path)
                print(f'load cached {split}_gt_heatmaps.pt and {split}_gt_actions.pt ...')
            else:
                self.eval_gt_heatmaps = {}
                self.eval_gt_actions = {}
                print(f'compute and cache {split}_gt_heatmaps.pt and {split}_gt_actions.pt ...')
                for (video_idx, anno) in tqdm.tqdm(enumerate(self.annos)):
                    (heatmap, action) = self.make_gt(anno)
                    self.eval_gt_heatmaps[video_idx] = heatmap
                    self.eval_gt_actions[video_idx] = action
                # Only rank 0 writes the cache to avoid concurrent writes.
                if (torch.cuda.current_device() == 0):
                    torch.save(self.eval_gt_heatmaps, eval_gt_heatmaps_path)
                    torch.save(self.eval_gt_actions, eval_gt_actions_path)
        video_paths = [anno['video_path'] for anno in self.annos]
        meta_path = os.path.join(self.root, f'annotations/{split}_meta.pt')
        if os.path.exists(meta_path):
            self.video_clips = UnevenVideoClips(video_paths, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, frame_rate=frame_rate, _precomputed_metadata=torch.load(meta_path), num_workers=4, output_format='TCHW')
            print('load cached _precomputed_metadata')
        else:
            self.video_clips = UnevenVideoClips(video_paths, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, frame_rate=frame_rate, num_workers=4, output_format='TCHW')
            if (torch.cuda.current_device() == 0):
                torch.save(self.video_clips.metadata, meta_path)

    @staticmethod
    def to_heatmaps(pointmaps, k_ratio=3.0, offset=1e-10):
        """Blur sparse point maps into dense heatmaps with an odd Gaussian
        kernel sized relative to the image area."""
        (c, h, w) = pointmaps.shape
        k = int((math.sqrt((h * w)) / k_ratio))
        if ((k % 2) == 0):
            k += 1
        heatmaps = gaussian_blur((pointmaps + offset), (k, k))
        return heatmaps

    @staticmethod
    def make_gt(anno):
        """Build (heatmaps, actions) ground truth from one annotation:
        one blurred heatmap per distinct annotated action."""
        # Annotation actions are 1-based; shift to 0-based indices.
        actions = (torch.tensor(anno['actions']) - 1)
        pointmaps = torch.zeros(OPRA.num_actions, anno['height'], anno['width'])
        for (points, action) in zip(anno['heatmaps'], actions):
            (x, y) = torch.tensor(points).long().hsplit(2)
            pointmaps[(action, y, x)] = 1
        actions = actions.unique(sorted=False)
        pointmaps = pointmaps[actions]
        heatmaps = OPRA.to_heatmaps(pointmaps)
        return (heatmaps, actions)

    def __getitem__(self, idx):
        (frames, video_idx) = self.video_clips.get_clip(idx)
        anno = self.annos[video_idx]
        image = read_image(anno['image_path'], ImageReadMode.RGB)
        image = self.resize(image)
        frames = self.resize(frames)
        if self.training:
            (heatmaps, actions) = OPRA.make_gt(anno)
            # Random horizontal flip applied consistently to image,
            # clip frames, and heatmaps.
            if (torch.rand(1) < 0.5):
                (image, frames, heatmaps) = (hflip(image), hflip(frames), hflip(heatmaps))
            return (image, frames, len(frames), video_idx, heatmaps, actions)
        else:
            return (image, frames, len(frames), video_idx, None, None)

    @staticmethod
    def collate_fn(x):
        """Batch samples: stack images, concatenate variable-length clips,
        and keep per-sample metadata as tuples."""
        (images, videos, num_frames_list, indices, heatmaps, actions) = zip(*x)
        return (torch.stack(images), torch.cat(videos), num_frames_list, indices, heatmaps, actions)

    def collect_test(self, predictions, batch):
        """Move one batch of predictions to CPU, keeping only the heatmap
        channels of the ground-truth action set per video."""
        (heatmaps, actions) = predictions
        video_idxs = batch[(- 3)]
        heatmaps = heatmaps.cpu()
        actions = actions.cpu()
        predictions = [{'video_idx': video_idx, 'heatmap': heatmap[self.eval_gt_actions[video_idx]], 'action': action} for (video_idx, heatmap, action) in zip(video_idxs, heatmaps, actions)]
        return predictions

    def accumulate_test(self, predictions):
        """Flatten per-batch prediction lists and all-gather them across
        distributed workers."""
        predictions = sum(predictions, [])
        accumulated_predictions = ([None] * torch.distributed.get_world_size())
        torch.distributed.all_gather_object(accumulated_predictions, predictions)
        accumulated_predictions = sum(accumulated_predictions, [])
        return accumulated_predictions

    def evaluate_test(self, predictions):
        """Aggregate per-clip predictions per video and compute KLD, SIM,
        AUC-Judd and top-1 action accuracy against the cached ground truth."""
        (heatmaps, actions) = ({}, {})
        print('merge predictions with same video_idx')
        for prediction in tqdm.tqdm(predictions):
            video_idx = prediction['video_idx']
            if (video_idx not in heatmaps):
                heatmaps[video_idx] = prediction['heatmap']
                actions[video_idx] = prediction['action']
            else:
                # Sum predictions from multiple clips of the same video.
                heatmaps[video_idx] += prediction['heatmap']
                actions[video_idx] += prediction['action']
        video_idxs = heatmaps.keys()
        heatmaps = torch.cat(list(heatmaps.values()))
        gt_heatmaps = torch.cat([self.eval_gt_heatmaps[video_idx] for video_idx in video_idxs]).view_as(heatmaps)
        actions = list(actions.values())
        actions_gt = [self.eval_gt_actions[video_idx] for video_idx in video_idxs]
        count = len(heatmaps)
        heatmaps = heatmaps.cuda()
        gt_heatmaps = gt_heatmaps.cuda()
        from .metrics import KLD, SIM, AUC_Judd
        kld = (KLD(heatmaps, gt_heatmaps).item() / count)
        sim = (SIM(heatmaps, gt_heatmaps).item() / count)
        auc_judd = []
        for (p, g) in zip(heatmaps, gt_heatmaps):
            _auc_judd = AUC_Judd(p, g)
            # Negative values flag degenerate cases; exclude them from
            # the average.
            if (_auc_judd >= 0):
                auc_judd.append(_auc_judd)
        auc_judd = (sum(auc_judd).item() / len(auc_judd))
        (count, top1_acc) = (0, 0)
        for (action, action_gt) in zip(actions, actions_gt):
            for a in action.float().topk(len(action_gt)).indices.tolist():
                if (a in action_gt):
                    top1_acc += 1
            count += len(action_gt)
        top1_acc /= count
        return dict(kld=kld, sim=sim, auc_judd=auc_judd, top1_acc=top1_acc)

    def __len__(self):
        return self.video_clips.num_clips()
class LALR_Parser(Serialize):
    """LALR(1) parser facade: computes the parse table from a grammar
    analysis and drives `_Parser`, with optional error recovery."""

    def __init__(self, parser_conf, debug=False):
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        analysis.compute_lalr()
        callbacks = parser_conf.callbacks
        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks, debug)

    @classmethod
    def deserialize(cls, data, memo, callbacks, debug=False):
        """Alternate constructor: rebuild a parser from a serialized parse
        table without redoing the grammar analysis.

        Fix: this uses `cls` but was missing @classmethod, so calling
        LALR_Parser.deserialize(data, ...) would have bound `data` to
        `cls`.
        """
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks, debug)
        return inst

    def serialize(self, memo):
        return self._parse_table.serialize(memo)

    def parse_interactive(self, lexer, start):
        return self.parser.parse(lexer, start, start_interactive=True)

    def parse(self, lexer, start, on_error=None):
        """Parse the token stream.

        When `on_error` is given, each UnexpectedInput is passed to it and
        parsing resumes while it returns truthy; otherwise errors
        propagate.
        """
        try:
            return self.parser.parse(lexer, start)
        except UnexpectedInput as e:
            if (on_error is None):
                raise
            while True:
                if isinstance(e, UnexpectedCharacters):
                    # Remember the lexer position so we can force progress
                    # by one character if the handler did not advance it.
                    s = e.interactive_parser.lexer_state.state
                    p = s.line_ctr.char_pos
                if (not on_error(e)):
                    raise e
                if isinstance(e, UnexpectedCharacters):
                    if (p == s.line_ctr.char_pos):
                        s.line_ctr.feed(s.text[p:(p + 1)])
                try:
                    return e.interactive_parser.resume_parse()
                except UnexpectedToken as e2:
                    if (isinstance(e, UnexpectedToken) and (e.token.type == e2.token.type == '$END') and (e.interactive_parser == e2.interactive_parser)):
                        # Stuck at end-of-input with no progress: re-raise
                        # to avoid an infinite recovery loop.
                        raise e2
                    e = e2
                except UnexpectedCharacters as e2:
                    e = e2
class UnaryOpTest(serial.SerializedTestCase):
def _test_unary_op(self, opname, X, rtol=1e-05, atol=1e-08):
    """Run `opname` through Onnxifi/Glow and compare against the NNPI
    fp16-emulation reference op; returns the Glow output.

    On divergence beyond the tolerances, dumps debug artifacts to /tmp
    and fails the test.
    """
    workspace.ResetWorkspace()
    # Net under test: the plain operator, lowered through Onnxifi.
    pred_net = caffe2_pb2.NetDef()
    pred_net.name = 'pred'
    pred_net.external_input.append('X')
    pred_net.external_output.append('Y')
    pred_net.op.add().CopyFrom(core.CreateOperator(opname, ['X'], ['Y']))
    # Reference net: fp16 NNPI emulation of the same operator.
    ref_net = caffe2_pb2.NetDef()
    ref_net.name = 'ref'
    ref_net.external_input.append('X')
    ref_net.external_output.append('Y')
    ref_net.op.add().CopyFrom(core.CreateOperator((opname + 'FakeFp16NNPI'), ['X'], ['Y']))
    print('REF NET = {}'.format(ref_net))
    shape_hints = {'X': X.shape}
    pred_net_onnxified = onnxifi_caffe2_net(pred_net, shape_hints, debug=True, adjust_batch=False, use_onnx=False)
    # The whole net must have been swallowed into a single Onnxifi op.
    num_onnxified_ops = sum(((1 if (o.type == 'Onnxifi') else 0) for o in pred_net_onnxified.op))
    np.testing.assert_equal(num_onnxified_ops, 1)
    workspace.SwitchWorkspace('glow_test_ws', True)
    workspace.FeedBlob('X', X)
    workspace.CreateNet(ref_net)
    workspace.CreateNet(pred_net_onnxified)
    workspace.RunNet(pred_net_onnxified.name)
    Y_glow = workspace.FetchBlob('Y')
    workspace.RunNet(ref_net.name)
    Y_c2 = workspace.FetchBlob('Y')
    # Fix: `rtol` was previously passed as `rtol=atol`, silently ignoring
    # the relative-tolerance parameter declared in the signature.
    if not np.allclose(Y_c2, Y_glow, rtol=rtol, atol=atol):
        diff = np.abs((Y_c2 - Y_glow))
        np.save((('/tmp/' + opname) + 'diff'), diff)
        np.save((('/tmp/' + opname) + 'result'), Y_c2)
        print_test_debug_info(opname, {'X': X, 'Y_c2': Y_c2, 'Y_glow': Y_glow, 'diff': diff})
        assert 0
    return Y_glow
def _test_op_w_ulp_error(self, seed, opname, regions, atol=0, err_threshold=2):
    """Run `opname` over each [lo, hi] input region and fail when the
    worst ULP error across all regions exceeds `err_threshold`.

    NOTE: `seed` is unused here; callers seed numpy themselves.
    """
    worst = 0
    for (lo, hi) in regions:
        # Sample the region on an fp16 grid, then widen to fp32 inputs.
        X = np.linspace(lo, hi, num=1025, dtype=np.float16).astype(np.float32)
        Y_glow = self._test_unary_op(opname, X, atol=atol)
        errs = compute_ulp_error(opname, X, Y_glow)
        worst = max(np.max(np.abs(errs)), worst)
    if worst > err_threshold:
        print('{} Op detected ulp_err={}'.format(opname, worst))
        assert 0
@given(seed=st.integers(0, 65534))
@settings(deadline=None)
def test_sigmoid(self, seed):
    """ULP-accuracy check for the fp16 Sigmoid op across sign-symmetric
    input regions.

    Fix: the hypothesis decorator names were stripped, leaving the bare
    `(seed=st.integers(0, 65534))` / `(deadline=None)` expressions, which
    are syntax errors; restored `@given` / `@settings`.
    """
    np.random.seed(seed)
    opname = 'Sigmoid'
    regions = [[(- 8.0), (- 4.0)], [(- 4.0), (- 2.0)], [(- 2.0), (- 1.0)], [(- 1.0), (- 0.5)], [(- 0.5), (- 0.25)], [(- 0.25), 0.25], [0.25, 0.5], [0.5, 1.0], [1.0, 2.0], [2.0, 4.0], [4.0, 8.0]]
    self._test_op_w_ulp_error(seed, opname, regions, atol=0, err_threshold=2.5)
(seed=st.integers(0, 65534))
(deadline=None)
def test_tanh(self, seed):
np.random.seed(seed)
opname = 'Tanh'
regions = [[(2.0 ** (- 9)), (2.0 ** (- 8))], [(2.0 ** (- 8)), (2.0 ** (- 7))], [(2.0 ** (- 7)), (2.0 ** (- 6))], [(2.0 ** (- 6)), (2.0 ** (- 5))], [(2.0 ** (- 5)), (2.0 ** (- 4))], [(2.0 ** (- 4)), (2.0 ** (- 3))], [(2.0 ** (- 3)), (2.0 ** (- 2))], [(2.0 ** (- 2)), (2.0 ** (- 1))], [(2.0 ** (- 1)), 1.0], [1.0, 2.0], [2.0, 4.0], [4.0, 8.0]]
self._test_op_w_ulp_error(seed, opname, regions, atol=0, err_threshold=2)
(seed=st.integers(0, 65534))
(deadline=None)
def test_swish(self, seed):
np.random.seed(seed)
opname = 'Swish'
regions = [[(- 20.5), (- 11.0)], [(- 11.0), (- 8.0)], [(- 8.0), (- 1.0)], [(- 1.0), (- 0.1)], [((- 1.0) / 8.0), (1.0 / 8.0)], [(1.0 / 8), 5.0], [5.0, 8.0]]
self._test_op_w_ulp_error(seed, opname, regions, atol=0.008, err_threshold=384)
(seed=st.integers(0, 65534))
(deadline=None)
def test_logit(self, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
n = 1
m = 15361
X = np.linspace(0, 1, num=m, dtype=np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = 'pred'
pred_net.external_input.append('X')
pred_net.external_output.append('Y')
pred_net.op.add().CopyFrom(core.CreateOperator('Logit', ['X'], ['Y'], eps=1e-06))
ref_net = caffe2_pb2.NetDef()
ref_net.name = 'ref'
ref_net.external_input.append('X')
ref_net.external_output.append('Y')
ref_net.op.add().CopyFrom(core.CreateOperator('LogitFakeFp16NNPI', ['X'], ['Y'], eps=1e-06))
print('REF NET = {}'.format(ref_net))
shape_hints = {'X': (n, m)}
pred_net_onnxified = onnxifi_caffe2_net(pred_net, shape_hints, debug=True, adjust_batch=False, use_onnx=False)
num_onnxified_ops = sum(((1 if (o.type == 'Onnxifi') else 0) for o in pred_net_onnxified.op))
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.SwitchWorkspace('glow_test_ws', True)
workspace.FeedBlob('X', X)
workspace.CreateNet(ref_net)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow))
if (np.nanmax(diff) > 0.009):
np.save('/tmp/logit_diff', diff)
np.save('/tmp/logit_result', Y_c2)
print_test_debug_info('Logit', {'X': X, 'Y_c2': Y_c2, 'Y_glow': Y_glow, 'diff': diff})
assert 0 |
class TestWeighting():
    """Tests for the persistence-image weighting function of ``PersImage``."""

    def test_zero_on_xaxis(self):
        """Points with zero persistence (on the birth axis) get zero weight."""
        weight = PersImage().weighting()
        assert weight([1, 0]) == 0
        assert weight([100, 0]) == 0
        assert weight([99, 1.4]) == 1.4

    def test_scales(self):
        """Weights scale linearly with persistence relative to the fitted data."""
        diagram = np.array([[0, 1], [1, 2], [3, 4]])
        weight = PersImage().weighting(diagram)
        assert weight([1, 0]) == 0
        assert weight([1, 4]) == 1
        assert weight([1, 2]) == 0.5
def draw_bounding_boxes_on_image(image, boxes, color='red', thickness=4, display_str_list_list=()):
    """Draw every bounding box in `boxes` onto `image`.

    Args:
        image: the image to draw on (modified in place by the per-box helper).
        boxes: an [N, 4] array of box coordinates.
        color: outline color passed through to the per-box helper.
        thickness: outline thickness in pixels.
        display_str_list_list: optional per-box sequences of label strings.

    Raises:
        ValueError: if `boxes` is non-empty but not of shape [N, 4].
    """
    shape = boxes.shape
    if not shape:
        # Empty/scalar input: nothing to draw.
        return
    if len(shape) != 2 or shape[1] != 4:
        raise ValueError('Input must be of size [N, 4]')
    for idx in range(shape[0]):
        labels = display_str_list_list[idx] if display_str_list_list else ()
        draw_bounding_box_on_image(image, boxes[idx, 0], boxes[idx, 1],
                                   boxes[idx, 2], boxes[idx, 3],
                                   color, thickness, labels)
.unit
.convert
def test_imread_default():
helpers.setup(with_data=True)
test_file = os.path.join(helpers.TEST_PATH, 'test_tiling_image.jpg')
expected_array = np.flipud(Image.open(test_file))
empty_array = np.zeros([256, 256])
actual_array = convert.imread_default(test_file, empty_array)
helpers.tear_down()
np.testing.assert_equal(expected_array, actual_array) |
def cauchy(v, z, w, conj=False):
    """Evaluate the Cauchy kernel sum_j v_j / (z_i - w_j) with a KeOps reduction.

    Args:
        v, z, w: complex tensors (converted to real pairs for KeOps).
        conj: if True, conjugate v and w before the reduction.

    Returns:
        The complex-valued reduction result (sum over the j axis).
    """
    kernel = Genred('ComplexDivide(v, z-w)',
                    ['v = Vj(2)', 'z = Vi(2)', 'w = Vj(2)'],
                    reduction_op='Sum', axis=1)
    if conj:
        v = _conj(v)
        w = _conj(w)
    # Align batch dimensions, then view complex tensors as real pairs for KeOps.
    (v, z, w) = _broadcast_dims(v, z, w)
    out = kernel(_c2r(v), _c2r(z), _c2r(w), backend='GPU')
    return _r2c(out)
def sample_from_model(sess):
    """Draw one batch of samples from the generator graph.

    NOTE: relies on module-level globals (`args`, `obs_shape`, `new_x_gen`,
    `x_sample`) defined elsewhere in this file.
    """
    noise = np.random.normal(0.0, 1.0, (args.sample_batch_size,) + obs_shape)
    return sess.run(new_x_gen, {x_sample: noise})
def _assert_n_smooth(x, n):
x_orig = x
if (n < 2):
assert False
while True:
(q, r) = divmod(x, 2)
if (r != 0):
break
x = q
for d in range(3, (n + 1), 2):
while True:
(q, r) = divmod(x, d)
if (r != 0):
break
x = q
assert (x == 1), f'x={x_orig} is not {n}-smooth, remainder={x}' |
class BiGRU(BaseBiRNN):
    """Bidirectional GRU: builds one GRUCell per direction for the base bi-RNN."""

    def __init__(self, hidden_units, name='BiGRU'):
        super(BiGRU, self).__init__(name)
        # One cell per direction; distinct names keep their variable scopes apart.
        self.fw_cell = tf.nn.rnn_cell.GRUCell(hidden_units, name=(name + '_fw_cell'))
        self.bw_cell = tf.nn.rnn_cell.GRUCell(hidden_units, name=(name + '_bw_cell'))
class VariationalAutoencoder(Autoencoder):
    """VAE wrapper combining an encoder, a reparameterized latent, and a decoder.

    The mean/log-var heads map encoder output to the latent Gaussian's
    parameters; padding-aware masking can be applied to both the latent and
    the reconstruction along `len_dim`.
    """

    def __init__(self, encoder, decoder, mean, log_var, len_dim=1, latent_padding=None, mask_latent=True, mask_out=True, out_mask_value=0.0, latent_mask_value=0.0, latent_stochastic=True):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        # Heads producing the Gaussian parameters from encoder output.
        self.mean = mean
        self.log_var = log_var
        # Dimension along which sequences vary in length (used for masking).
        self.len_dim = len_dim
        # Optional callable that pads the latent and returns (latent, new_length).
        self.latent_padding = latent_padding
        self.mask_latent = mask_latent
        self.mask_out = mask_out
        self.out_mask_value = out_mask_value
        self.latent_mask_value = latent_mask_value
        # If False, train_sample exposes the (padded) mean instead of the sample.
        self.latent_stochastic = latent_stochastic

    def encode(self, x, length=None):
        """Return the latent mean for input `x`.

        NOTE(review): `length` is accepted but unused here — confirm whether
        masking was intended during encoding.
        """
        encoder_out = self.encoder(x)
        return self.mean(encoder_out)

    def decode(self, latent):
        """Decode a latent tensor back to input space."""
        return self.decoder(latent)

    def reparameterize(self, mean, log_var):
        """Sample latent = mean + eps * std using the reparameterization trick."""
        epsilon = torch.randn_like(log_var)
        return (mean + (epsilon * torch.exp((0.5 * log_var))))

    def train_sample(self, x, length=None, out_mask_value=None, latent_mask_value=None):
        """Full training pass: encode, sample, (pad/mask), decode, (trim/mask).

        Returns a VariationalAutoencoderOutput with the reconstruction, the
        exposed latent, the Gaussian parameters, the raw sample, and lengths.
        """
        if (out_mask_value is None):
            out_mask_value = self.out_mask_value
        if (latent_mask_value is None):
            latent_mask_value = self.latent_mask_value
        encoder_out = self.encoder(x)
        mean = self.mean(encoder_out)
        log_var = self.log_var(encoder_out)
        latent_sample = self.reparameterize(mean, log_var)
        if (self.latent_padding is not None):
            (latent_sample, latent_length) = self.latent_padding(latent_sample, length=length)
        else:
            latent_length = length
        if (self.mask_latent and (length is not None)):
            latent_sample = clean_padding(latent_sample, latent_length, self.len_dim, latent_mask_value)
        x_rec = self.decode(latent_sample)
        # Decoder output may overshoot the input length; trim before masking.
        x_rec = trim_as(x_rec, x)
        if (self.mask_out and (length is not None)):
            x_rec = clean_padding(x_rec, length, self.len_dim, out_mask_value)
        if self.latent_stochastic:
            latent = latent_sample
        else:
            # Deterministic latent: expose the padded mean instead of the sample.
            # NOTE(review): this branch assumes latent_padding is not None —
            # it would raise if latent_stochastic=False without a padding fn.
            (latent, latent_length) = self.latent_padding(mean, length=length)
        return VariationalAutoencoderOutput(x_rec, latent, mean, log_var, latent_sample, latent_length)
class DeviceTypeTestBase(TestCase):
    """Base class for device-generic test classes (one subclass per device).

    Fix: the `@property`, setter, `@classmethod` and `@wraps` decorators had
    been stripped/mangled during extraction (e.g. `_tol.setter` and a bare
    `(test)` line); restored so the tolerance properties and class-level
    instantiation machinery work as written.
    """
    device_type: str = 'generic_device_type'

    # Set when a RuntimeError suggests the whole suite should stop early.
    _stop_test_suite = False

    # Thread-local tolerance state so concurrently running tests don't
    # clobber each other's precision overrides.
    _tls = threading.local()
    _tls.precision = TestCase._precision
    _tls.rel_tol = TestCase._rel_tol

    @property
    def precision(self):
        return self._tls.precision

    @precision.setter
    def precision(self, prec):
        self._tls.precision = prec

    @property
    def rel_tol(self):
        return self._tls.rel_tol

    @rel_tol.setter
    def rel_tol(self, prec):
        self._tls.rel_tol = prec

    @classmethod
    def get_primary_device(cls):
        """Return the device string tests run on by default."""
        return cls.device_type

    @classmethod
    def get_all_devices(cls):
        """Return all devices of this type (base class knows only the primary)."""
        return [cls.get_primary_device()]

    @classmethod
    def _get_dtypes(cls, test):
        """Return the dtypes a test was decorated with for this device, if any."""
        if (not hasattr(test, 'dtypes')):
            return None
        return test.dtypes.get(cls.device_type, test.dtypes.get('all', None))

    def _get_precision_override(self, test, dtype):
        """Per-dtype absolute-tolerance override, falling back to the default."""
        if (not hasattr(test, 'precision_overrides')):
            return self.precision
        return test.precision_overrides.get(dtype, self.precision)

    def _get_tolerance_override(self, test, dtype):
        """Per-dtype (atol, rtol) override, falling back to the defaults."""
        if (not hasattr(test, 'tolerance_overrides')):
            return (self.precision, self.rel_tol)
        return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))

    def _apply_precision_override_for_test(self, test, param_kwargs):
        """Install tolerance overrides for the dtype(s) a test instance uses."""
        dtype = (param_kwargs['dtype'] if ('dtype' in param_kwargs) else None)
        dtype = (param_kwargs['dtypes'] if ('dtypes' in param_kwargs) else dtype)
        if dtype:
            self.precision = self._get_precision_override(test, dtype)
            (self.precision, self.rel_tol) = self._get_tolerance_override(test, dtype)

    @classmethod
    def instantiate_test(cls, name, test, *, generic_cls=None):
        """Attach device/dtype-specific instantiations of `test` onto this class."""

        def instantiate_test_helper(cls, name, *, test, param_kwargs=None):
            # Local import keeps this block self-contained; `wraps` preserves
            # the original test's metadata on the instantiated closure.
            from functools import wraps

            @wraps(test)
            def instantiated_test(self, param_kwargs=param_kwargs):
                param_kwargs = ({} if (param_kwargs is None) else param_kwargs)
                # Supply the device argument if the test's signature wants it.
                test_sig_params = inspect.signature(test).parameters
                if (('device' in test_sig_params) or ('devices' in test_sig_params)):
                    device_arg: str = cls.get_primary_device()
                    if hasattr(test, 'num_required_devices'):
                        device_arg = cls.get_all_devices()
                    _update_param_kwargs(param_kwargs, 'device', device_arg)
                # Save/restore tolerances so per-test overrides never leak.
                guard_precision = self.precision
                guard_rel_tol = self.rel_tol
                try:
                    self._apply_precision_override_for_test(test, param_kwargs)
                    result = test(self, **param_kwargs)
                except RuntimeError as rte:
                    # A RuntimeError may mean the device is unusable; record
                    # whether the whole suite should stop.
                    self._stop_test_suite = self._should_stop_test_suite()
                    raise rte
                finally:
                    self.precision = guard_precision
                    self.rel_tol = guard_rel_tol
                return result

            assert (not hasattr(cls, name)), 'Redefinition of test {0}'.format(name)
            setattr(cls, name, instantiated_test)

        def default_parametrize_fn(test, generic_cls, cls):
            # Trivial parametrization: one instance, suffixed with the device type.
            test_suffix = cls.device_type
            (yield (test, test_suffix, {}))

        parametrize_fn = (test.parametrize_fn if hasattr(test, 'parametrize_fn') else default_parametrize_fn)
        for (test, test_suffix, param_kwargs) in parametrize_fn(test, generic_cls, cls):
            if (hasattr(test, 'handles_dtypes') and test.handles_dtypes):
                # Test manages dtypes itself; instantiate once per parametrization.
                full_name = '{}_{}'.format(name, test_suffix)
                instantiate_test_helper(cls=cls, name=full_name, test=test, param_kwargs=param_kwargs)
            else:
                # Expand one instantiation per declared dtype (or one without).
                dtypes = cls._get_dtypes(test)
                dtypes = (tuple(dtypes) if (dtypes is not None) else (None,))
                for dtype in dtypes:
                    all_param_kwargs = dict(param_kwargs)
                    _update_param_kwargs(all_param_kwargs, 'dtype', dtype)
                    full_name = '{}_{}{}'.format(name, test_suffix, _dtype_test_suffix(dtype))
                    instantiate_test_helper(cls=cls, name=full_name, test=test, param_kwargs=all_param_kwargs)

    def run(self, result=None):
        """Run the test, then stop the whole result stream if flagged fatal."""
        super().run(result=result)
        if self._stop_test_suite:
            result.stop()
def register_Ns3LoopbackNetDevice_methods(root_module, cls):
    """Register constructors and methods of ns3::LoopbackNetDevice on the
    PyBindGen class wrapper `cls`.

    This is generated binding code: each add_method call mirrors one virtual
    method of the ns-3 NetDevice interface (addresses, MTU, link state,
    send/receive callbacks, disposal).
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::LoopbackNetDevice const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    # GetMulticast is overloaded for IPv4 and IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    # DoDispose is protected in the C++ class; visibility must match.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
class MagicModule(nn.Module):
    """A shadow copy of an nn.Module whose parameters live as buffers, so
    they can be updated by arbitrary deltas (e.g. for meta-learning /
    differentiable inner-loop updates) without touching the original.

    The original module's forward is reused via `self._type.forward`, and a
    handful of type-specific helpers (RNN, ModuleList, BertSelfAttention,
    Conv2d, BatchNorm2d) are re-implemented here because those base-class
    methods reference attributes this shadow stores differently.
    """

    def __init__(self, module):
        nn.Module.__init__(self)
        self._type = type(module)
        for (key, value) in module._parameters.items():
            if (value is not None):
                # Keep a handle on the original parameter, but expose the
                # (mutable) data tensor under the original name as a buffer.
                self.register_parameter(('_origin_' + key), value)
                self.register_buffer(key, value.data)
            else:
                self.register_buffer(key, None)
        for (key, value) in module._buffers.items():
            self.register_buffer(key, copy.deepcopy(value))
        for (key, value) in module._modules.items():
            # Recursively shadow submodules.
            self.add_module(key, MagicModule(value))
        for (key, value) in module.__dict__.items():
            # Copy remaining plain attributes that aren't already handled above.
            if ((not (key in self.__dict__)) and (not (key in self._buffers)) and (not (key in self._modules))):
                self.__setattr__(key, value)

    def forward(self, *args, **kwargs):
        # Dispatch to the original class's forward with this shadow as `self`.
        return self._type.forward(self, *args, **kwargs)

    def update_params(self, deltas):
        """Apply additive deltas keyed by (possibly dotted) parameter names."""
        sub_params = {}
        for (key, delta) in deltas.items():
            if (not ('.' in key)):
                self._buffers[key] = (self._buffers[key] + delta)
            else:
                # Group dotted keys by their first component for recursion.
                attr = key.split('.')[0]
                if (not (attr in sub_params)):
                    sub_params[attr] = {}
                sub_params[attr]['.'.join(key.split('.')[1:])] = delta
        for (key, value) in sub_params.items():
            self._modules[key].update_params(value)

    def check_forward_args(self, *args, **kwargs):
        assert issubclass(self._type, nn.RNNBase)
        return nn.RNNBase.check_forward_args(self, *args, **kwargs)

    def _flat_weights(self):
        # RNNBase expects a flat list of weights; rebuilt from buffers here.
        assert issubclass(self._type, nn.RNNBase)
        return [p for layerparams in self.all_weights for p in layerparams]

    def all_weights(self):
        assert issubclass(self._type, nn.RNNBase)
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]

    def _get_abs_string_index(self, idx):
        # ModuleList helper: normalize a possibly-negative index to a str key.
        assert issubclass(self._type, nn.ModuleList)
        idx = operator.index(idx)
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError('index {} is out of range'.format(idx))
        if (idx < 0):
            idx += len(self)
        return str(idx)

    def __getitem__(self, idx):
        assert issubclass(self._type, nn.ModuleList)
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]

    def __len__(self):
        assert issubclass(self._type, nn.ModuleList)
        return len(self._modules)

    def transpose_for_scores(self, x):
        # BertSelfAttention helper: reshape to (batch, heads, seq, head_size).
        assert issubclass(self._type, BertSelfAttention)
        new_x_shape = (x.size()[:(- 1)] + (self.num_attention_heads, self.attention_head_size))
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def conv2d_forward(self, input, weight):
        assert issubclass(self._type, nn.Conv2d)
        if (self.padding_mode == 'circular'):
            # Circular padding must be applied manually before conv2d.
            expanded_padding = (((self.padding[1] + 1) // 2), (self.padding[1] // 2), ((self.padding[0] + 1) // 2), (self.padding[0] // 2))
            return F.conv2d(F.pad(input, expanded_padding, mode='circular'), weight, self.bias, self.stride, _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def _check_input_dim(self, input):
        assert issubclass(self._type, nn.BatchNorm2d)
        if (input.dim() != 4):
            raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
def train_model(space):
    """Hyperopt objective: train a MultiVAE with the hyperparameters in
    `space` and return the validation loss in hyperopt's result format.

    NOTE: relies on module-level globals (`save_to_path_parent`,
    `products_data_torch`, `products_data_np`, `data_handler`, `MultiVAE`,
    `Trainer`, loss/metric classes) defined elsewhere in this file.
    """
    # Fixed architecture defaults; only dropout is taken from `space` below.
    params_model = {'dropout': 0.5, 'no_latent_features': 200, 'norm_mean': 0.0, 'norm_std': 0.001, 'input_size': 8936, 'output_size': 8936, 'enc1_out': 600, 'enc2_in': 600, 'enc2_out': 400, 'dec1_in': 200, 'dec1_out': 600, 'dec2_in': 600}
    # Fixed training defaults; only learning_rate is taken from `space` below.
    params_trainer = {'seed': 42, 'normalize_gradients': True, 'learning_rate': 0.001, 'batch_size_training': 500, 'shuffle_training': True, 'drop_last_batch_training': True, 'batch_size_validation': 500, 'shuffle_validation': True, 'drop_last_batch_validation': False, 'batch_size_testing': 500, 'shuffle_testing': True, 'drop_last_batch_testing': True, 'number_of_epochs': 10, 'frank_wolfe_max_iter': 100, 'anneal': True, 'beta_start': 0, 'beta_cap': 0.3, 'beta_step': '0.3/10000'}
    params_model['dropout'] = space['drop_out']
    params_trainer['learning_rate'] = space['learning_rate']
    # Encode the hyperparameter point into the checkpoint directory name.
    space_str = ''
    for key in space:
        space_str += 'KEY{},VALUE{}-'.format(key, space[key])
    space_str = space_str[:(- 1)]
    save_to_path = os.path.join(save_to_path_parent, space_str)
    model = MultiVAE(params=params_model)
    # Two objectives: plain reconstruction and revenue-weighted reconstruction.
    correctness_loss = VAELoss()
    revenue_loss = VAELoss(weighted_vector=products_data_torch)
    losses = [correctness_loss, revenue_loss]
    recallAtK = RecallAtK(k=10)
    revenueAtK = RevenueAtK(k=10, revenue=products_data_np)
    validation_metrics = [recallAtK, revenueAtK]
    trainer = Trainer(data_handler, model, losses, validation_metrics, save_to_path, params=params_trainer)
    val_loss = trainer.train()
    # Hyperopt expects a dict with 'loss' and 'status' keys.
    result = {'loss': val_loss, 'space': space, 'status': STATUS_OK}
    return result
# Fix: the decorators had their `@pytest.mark` prefix stripped during
# extraction, leaving bare attribute expressions (syntax errors); restored.
@pytest.mark.filterwarnings('ignore:Ignoring n_components with whiten=False.')
@pytest.mark.parametrize('whiten, n_components, expected_mixing_shape', [('arbitrary-variance', 5, (10, 5)), ('arbitrary-variance', 10, (10, 10)), ('unit-variance', 5, (10, 5)), ('unit-variance', 10, (10, 10)), (False, 5, (10, 10)), (False, 10, (10, 10))])
def test_inverse_transform(whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype):
    """FastICA.inverse_transform should invert fit_transform (up to tolerance)
    and the mixing matrix should have the expected shape per whiten mode."""
    n_samples = 100
    rng = np.random.RandomState(global_random_seed)
    X = rng.random_sample((n_samples, 10)).astype(global_dtype)
    ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)
    with warnings.catch_warnings():
        # Convergence is not the point of this test.
        warnings.simplefilter('ignore', ConvergenceWarning)
        Xt = ica.fit_transform(X)
    assert (ica.mixing_.shape == expected_mixing_shape)
    X2 = ica.inverse_transform(Xt)
    assert (X.shape == X2.shape)
    # Reconstruction is only exact when no dimensionality reduction happened.
    if (n_components == X.shape[1]):
        if global_dtype:
            # Scale the tolerance with the data's magnitude for low precision.
            atol = (np.abs(X2).mean() / 100000.0)
        else:
            atol = 0.0
        assert_allclose(X, X2, atol=atol)
def _verify_range(msg, x, vmin, vmax, dtype):
assert_equal(x[0], vmin)
assert_equal(x[(- 1)], vmax)
assert (x.dtype == dtype) |
def read_json(filename):
    """Load and return the JSON document stored at `filename`."""
    with open(filename) as fp:
        return json.load(fp)
class TFAutoModelWithLMHead():
    """Import-guard placeholder: every entry point just raises the
    "TensorFlow required" error via `requires_tf`."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    # NOTE(review): upstream dummy objects usually declare from_pretrained as
    # a classmethod; here it is an instance method — confirm intent.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def retokenize(sent, tokenizer, subword='##'):
    """Re-tokenize a sentence with a subword tokenizer, keeping absolute
    character offsets aligned with the resulting pieces.

    Args:
        sent: object with `.words` and `.abs_char_offsets` sequences.
        tokenizer: object with a `tokenize(word) -> list[str]` method.
        subword: continuation-piece prefix (stripped when measuring lengths).

    Returns:
        (tokens, abs_char_offsets): flat piece list and one offset per piece.
    """
    all_tokens = []
    all_offsets = []
    marker_len = len(subword)
    for i, word in enumerate(sent.words):
        pieces = tokenizer.tokenize(word)
        # First piece starts at the word's own offset; each later piece starts
        # after the previous piece's visible (marker-stripped) characters.
        offsets = [sent.abs_char_offsets[i]]
        for piece in pieces[0:-1]:
            visible = piece[marker_len:] if piece[:marker_len] == subword else piece
            offsets.append(offsets[-1] + len(visible))
        all_offsets.extend(offsets)
        all_tokens.extend(pieces)
    return (all_tokens, all_offsets)
class SelectedAtoms(ProcessingPlasmaProperty):
    """Plasma property exposing which atoms are present in the abundance data."""
    outputs = ('selected_atoms',)

    def calculate(self, abundance):
        # The abundance frame is indexed by atom, so its index *is* the
        # selection of atoms.
        return abundance.index
class MultiDatasetFromFolder(data.Dataset):
    """Multi-task video-restoration dataset drawing, per index, one sample
    from each of several directories (rain / haze / snow by position).

    NOTE(review): the `file_list` parameter is accepted but never used, and
    the `transform` parameter is ignored (self.transform is unconditionally
    set to ToTensor) — confirm whether that is intentional.
    """

    def __init__(self, image_dir, nFrames, upscale_factor, data_augmentation, file_list, other_dataset, patch_size, future_frame, transform=None):
        super(MultiDatasetFromFolder, self).__init__()
        self.nFrames = nFrames
        self.upscale_factor = upscale_factor
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.image_dir = image_dir
        self.data_augmentation = data_augmentation
        self.other_dataset = other_dataset
        self.patch_size = patch_size
        self.future_frame = future_frame
        # Per-dataset bookkeeping: total usable samples and cumulative counts.
        self.image_num = []
        self.index_compute = []
        self.image_filenames = []
        for i in range(len(image_dir)):
            alist = os.listdir(image_dir[i])
            image_num = 0
            index_compute = []
            image_filenames = [join(image_dir[i], x) for x in alist]
            for j in range(len(image_filenames)):
                image_list = os.listdir(image_filenames[j])
                # Count frames per clip, skipping rain-streak ('rs') images.
                for img in image_list:
                    if (img.endswith('jpg') and ('rs' not in img)):
                        image_num += 1
                # Each clip contributes (frames - nFrames + 1) start positions;
                # index_compute accumulates these counts across clips.
                image_num = ((image_num - self.nFrames) + 1)
                index_compute.append(image_num)
            self.image_filenames.append(image_filenames)
            self.image_num.append(index_compute[(- 1)])
            self.index_compute.append(index_compute)

    def __getitem__(self, index):
        """Return one sample per dataset (a list of dicts with target,
        neighbor frames and the dataset key)."""
        samples = []
        for key in range(len(self.image_dir)):
            # Wrap the global index into this dataset's sample range.
            idx = (index % self.image_num[key])
            index_compute = self.index_compute[key]
            image_filenames = self.image_filenames[key]
            file_id = 0
            idx = (idx + 1)
            # Locate which clip the (1-based) sample index falls into.
            for i in range(len(index_compute)):
                if (index_compute[i] >= idx):
                    file_id = i
                    break
            # Convert to a clip-local frame index.
            img_id = (idx if (file_id == 0) else (idx - int(index_compute[(file_id - 1)])))
            # Dataset position selects the restoration task / loader.
            if (key == 0):
                (target, neigbor) = load_img_future_de_rain_flow(image_filenames[file_id], self.nFrames, img_id, phase='train')
            elif (key == 1):
                (target, neigbor) = load_img_future_de_haze_revide(image_filenames[file_id], self.nFrames, img_id, phase='train')
            elif (key == 2):
                (target, neigbor) = load_img_future_de_snow_kitti(image_filenames[file_id], self.nFrames, img_id, phase='train')
            if (self.patch_size != 0):
                (target, neigbor, _) = get_patch(target, neigbor, self.patch_size, 1, self.nFrames)
            if self.data_augmentation:
                (target, neigbor, _) = augment(target, neigbor)
            if self.transform:
                target = [self.transform(j) for j in target]
                neigbor = [self.transform(j) for j in neigbor]
            targets = torch.stack(target, 0)
            neigbors = torch.stack(neigbor, 0)
            samples.append({'target': targets, 'neigbor': neigbors, 'dc': key})
        return samples

    def __len__(self):
        # Epoch length is governed by the largest constituent dataset;
        # smaller ones wrap around via the modulo in __getitem__.
        return max(self.image_num)
def exact_match(anaphor, antecedent):
    """Coreference feature: True iff both mentions have identical
    lowercase token strings. Returns ('exact_match', bool)."""
    key = 'tokens_as_lowercase_string'
    same = anaphor.attributes[key] == antecedent.attributes[key]
    return ('exact_match', same)
# Fix: the @dataclass decorator was stripped during extraction — the class
# body is unmistakably a dataclass (annotated fields with defaults,
# __post_init__, dataclasses.asdict(self)); likewise load_from_json takes
# `cls` and must be a classmethod.
@dataclasses.dataclass
class TrainerState():
    """Serializable training state (checkpointed alongside the model)."""
    epoch: Optional[float] = None
    global_step: int = 0
    max_steps: int = 0
    num_train_epochs: int = 0
    total_flos: float = 0
    log_history: List[Dict[(str, float)]] = None
    best_metric: Optional[float] = None
    best_model_checkpoint: Optional[str] = None
    is_local_process_zero: bool = True
    is_world_process_zero: bool = True
    is_hyper_param_search: bool = False
    trial_name: str = None
    trial_params: Dict[(str, Union[(str, float, int, bool)])] = None

    def __post_init__(self):
        # Mutable default: create a fresh list per instance.
        if (self.log_history is None):
            self.log_history = []

    def save_to_json(self, json_path: str):
        """Write this state as pretty-printed JSON to `json_path`."""
        json_string = (json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + '\n')
        with open(json_path, 'w', encoding='utf-8') as f:
            f.write(json_string)

    @classmethod
    def load_from_json(cls, json_path: str):
        """Construct a TrainerState from the JSON file at `json_path`."""
        with open(json_path, 'r', encoding='utf-8') as f:
            text = f.read()
        return cls(**json.loads(text))
def test_initialize_base_classifier():
    """Defaults of BaseBoostedRelationalModel: target is the literal string
    'None' (not Python None) and 10 estimators."""
    _bm = BaseBoostedRelationalModel()
    assert (_bm.target == 'None')
    assert (_bm.n_estimators == 10)
def mean_pool(input_tensor, sequence_length=None):
    """Average `input_tensor` over its second-to-last axis.

    If `sequence_length` is given, it is used as the divisor (so padded
    positions are averaged out of per-sequence lengths); otherwise the full
    axis size is used.
    """
    with tf.name_scope('mean_pool'):
        summed = tf.reduce_sum(input_tensor, axis=(- 2))
        if sequence_length is None:
            sequence_length = tf.shape(input_tensor)[(- 2)]
        # Epsilon guards against division by zero for zero-length sequences.
        denominator = tf.cast(tf.expand_dims(sequence_length, (- 1)), 'float32') + 1e-08
        return summed / denominator
def fullname(o):
    """Return the qualified class name of `o`, omitting the module prefix
    for builtins (and classes with no module)."""
    klass = o.__class__
    module = klass.__module__
    # str.__class__.__module__ is 'builtins' — builtins get a bare name.
    if module is None or module == str.__class__.__module__:
        return klass.__name__
    return f'{module}.{klass.__name__}'
def test_1d_ok():
    """Gaussian-filtering a 1-D integer ramp yields strictly positive output."""
    ramp = np.arange(7)
    smoothed = gaussian(ramp, preserve_range=True)
    assert (smoothed > 0.1).all()
# Fix: the decorators had their `@pytest.mark` prefix stripped during
# extraction (bare `.service(...)` / `.openapi_version(...)` expressions are
# syntax errors); restored as pytest markers.
@pytest.mark.service(data={'title': 'Forbidden', 'status': 403, 'detail': 'FORBIDDEN!'}, status=403, method='GET', path=re.compile('/cli/projects/.*/'))
@pytest.mark.openapi_version('3.0')
def test_forbidden(cli, schema_url, service):
    """A 403 from the service should fail the CLI run with its detail message."""
    result = cli.run('my-api', f'--schemathesis-io-token={service.token}', f'--schemathesis-io-url={service.base_url}')
    assert (result.exit_code == ExitCode.TESTS_FAILED), result.stdout
    # NOTE(review): `.strip()` removes leading whitespace, so equality with a
    # leading-space literal can never hold — the original likely compared
    # against a non-ASCII-prefixed message lost in extraction; confirm.
    assert (result.stdout.strip() == ' FORBIDDEN!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.