code stringlengths 101 5.91M |
|---|
class GMDataset(Dataset):
    """Graph-matching dataset wrapper.

    Samples annotated keypoint pairs from an underlying dataset (resolved by
    class name), builds graph connectivity/incidence structures for both
    images, and returns tensors ready for graph-matching training.
    """

    def __init__(self, name, length, cls=None, **args):
        self.name = name
        # NOTE(review): eval() resolves the dataset class by name from this
        # module's namespace — never pass untrusted strings here.
        self.ds = eval(self.name)(**args)
        self.length = length  # virtual epoch length; pairs are sampled, not indexed
        self.obj_size = self.ds.obj_resize
        self.classes = self.ds.classes
        self.cls = None if cls == 'none' else cls

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        anno_pair, perm_mat = self.ds.get_pair(self.cls)
        if perm_mat.size <= 2 * 2:
            # Too few correspondences to form a meaningful pair — resample.
            return self.__getitem__(idx)
        P1_gt = np.array([(kp['x'], kp['y']) for kp in anno_pair[0]['keypoints']])
        P2_gt = np.array([(kp['x'], kp['y']) for kp in anno_pair[1]['keypoints']])
        n1_gt, n2_gt = len(P1_gt), len(P2_gt)
        G1_gt, H1_gt, e1_gt, edge_indices1, edge_feat1 = build_graphs(P1_gt, n1_gt, stg=cfg.PAIR.GT_GRAPH_CONSTRUCT)
        if cfg.PAIR.REF_GRAPH_CONSTRUCT == 'same':
            # Mirror the source graph through the ground-truth permutation.
            G2_gt = perm_mat.transpose().dot(G1_gt)
            H2_gt = perm_mat.transpose().dot(H1_gt)
            e2_gt = e1_gt
            # BUGFIX: edge_indices2/edge_feat2 were undefined on this branch
            # and raised NameError when building ret_dict below; reuse the
            # source graph's edge data, consistent with e2_gt = e1_gt.
            edge_indices2 = edge_indices1
            edge_feat2 = edge_feat1
        else:
            G2_gt, H2_gt, e2_gt, edge_indices2, edge_feat2 = build_graphs(P2_gt, n2_gt, stg=cfg.PAIR.REF_GRAPH_CONSTRUCT)
        ret_dict = {
            'Ps': [torch.Tensor(x) for x in [P1_gt, P2_gt]],
            'ns': [torch.tensor(x) for x in [n1_gt, n2_gt]],
            'es': [torch.tensor(x) for x in [e1_gt, e2_gt]],
            'gt_perm_mat': perm_mat,
            'Gs': [torch.Tensor(x) for x in [G1_gt, G2_gt]],
            'Hs': [torch.Tensor(x) for x in [H1_gt, H2_gt]],
            'edge_src': [torch.Tensor(x) for x in [edge_indices1]],
            'edge_tgt': [torch.Tensor(x) for x in [edge_indices2]],
            'edge_feat1': [torch.Tensor(x) for x in [edge_feat1]],
            'edge_feat2': [torch.Tensor(x) for x in [edge_feat2]],
        }
        imgs = [anno['image'] for anno in anno_pair]
        if imgs[0] is not None:
            trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize(cfg.NORM_MEANS, cfg.NORM_STD)])
            ret_dict['images'] = [trans(img) for img in imgs]
        elif 'feat' in anno_pair[0]['keypoints'][0]:
            # No raw images: fall back to precomputed per-keypoint features.
            feat1 = np.stack([kp['feat'] for kp in anno_pair[0]['keypoints']], axis=-1)
            feat2 = np.stack([kp['feat'] for kp in anno_pair[1]['keypoints']], axis=-1)
            ret_dict['features'] = [torch.Tensor(x) for x in [feat1, feat2]]
        return ret_dict
class WordTextEncoder(CharacterTextEncoder):
    """Text encoder operating on whitespace-separated words instead of characters."""

    def encode(self, s):
        # Trim surrounding CR/LF/space, split on single spaces, map each word
        # to its vocabulary index and terminate with <eos>.
        tokens = s.strip('\r\n ').split(' ')
        ids = [self.vocab_to_idx(tok) for tok in tokens]
        ids.append(self.eos_idx)
        return ids

    def decode(self, idxs, ignore_repeat=False):
        # Stop at <eos>; drop padding and (optionally) immediate repeats.
        words = []
        for pos, idx in enumerate(idxs):
            v = self.idx_to_vocab(idx)
            if idx == self.eos_idx:
                break
            skip = (idx == self.pad_idx) or (ignore_repeat and pos > 0 and idx == idxs[pos - 1])
            if not skip:
                words.append(v)
        return ' '.join(words)

    def token_type(self):
        return 'word'
def getFlying3dMetas(root, Type, data_type='clean'):
    """Collect left/right image and disparity path metadata for a
    FlyingThings3D split.

    Only the disparity tree is listed on disk; image filenames are derived
    from the disparity file stems. Paths in each meta are relative to `root`.
    """
    img_root = 'flyingthings3d/frames_{}pass'.format(data_type)
    disp_root = 'flyingthings3d/disparity'
    metas = []
    for part in ('A', 'B', 'C'):
        part_dir = osp.join(root, disp_root, Type, part)
        for scene in os.listdir(part_dir):
            for disp_name in os.listdir(osp.join(part_dir, scene, 'left')):
                img_name = '{}.png'.format(disp_name.split('.')[0])
                metas.append(dict(
                    left_image_path=osp.join(img_root, Type, part, scene, 'left', img_name),
                    right_image_path=osp.join(img_root, Type, part, scene, 'right', img_name),
                    left_disp_map_path=osp.join(disp_root, Type, part, scene, 'left', disp_name),
                    right_disp_map_path=osp.join(disp_root, Type, part, scene, 'right', disp_name),
                ))
    return metas
def build_and_train(game='pong', run_ID=0, cuda_idx=None, mid_batch_reset=False, n_parallel=2):
    """Assemble an A2C agent with a GPU parallel Atari sampler and train it.

    `mid_batch_reset` selects the collector: reset environments mid-batch
    versus waiting for the batch boundary.
    """
    collector_cls = GpuResetCollector if mid_batch_reset else GpuWaitResetCollector
    print(f'To satisfy mid_batch_reset=={mid_batch_reset}, using {collector_cls}.')
    sampler = GpuParallelSampler(
        EnvCls=AtariEnv,
        env_kwargs=dict(game=game, num_img_obs=1),
        CollectorCls=collector_cls,
        batch_T=20,
        batch_B=16,
        max_decorrelation_steps=400,
    )
    runner = MinibatchRl(
        algo=A2C(),
        agent=AtariLstmAgent(),
        sampler=sampler,
        n_steps=.0,
        log_interval_steps=100000.0,
        affinity=dict(cuda_idx=cuda_idx, workers_cpus=list(range(n_parallel))),
    )
    # Log under [log_dir]/run_[run_ID] with the experiment name and config.
    with logger_context('example_4', run_ID, 'a2c_' + game, dict(game=game)):
        runner.train()
def print_layers_dims(model):
    """Print each layer of a Keras-style model followed by its input and
    output shapes (one layer per pair of lines). Returns None.
    """
    # Iterate layers directly instead of indexing via range(len(...)).
    for layer in model.layers:
        print(layer)
        print('Input Shape: ', layer.input_shape, 'Output Shape: ', layer.output_shape)
class RewardModel(nn.Module):
    """Three-layer MLP mapping a (belief, state) pair to a scalar reward."""

    def __init__(self, belief_size, state_size, hidden_size, activation_function='relu'):
        super().__init__()
        # Resolve the activation by name from torch.nn.functional (e.g. F.relu).
        self.act_fn = getattr(F, activation_function)
        self.fc1 = nn.Linear(belief_size + state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)

    def forward(self, belief, state):
        x = torch.cat([belief, state], dim=1)
        x = self.act_fn(self.fc1(x))
        x = self.act_fn(self.fc2(x))
        # Final layer yields one unit per sample; squeeze to shape (batch,).
        return self.fc3(x).squeeze(dim=1)
class CorrectMetric():
    """Accumulates samples; result() is a stub that always reports 0."""

    def __init__(self):
        # Collected samples, in insertion order.
        self.item = list()

    def update(self, samples):
        self.item.append(samples)

    def result(self):
        # Placeholder: the metric computation is not implemented.
        return 0

    def reset(self):
        # Drop all accumulated samples.
        self.item = list()
class ResNet(nn.Module):
    """3D (spatio-temporal) ResNet with an optional '2stream' prediction head.

    Stem and layer4 downsample spatially only (stride (1, 2, 2)); the first
    maxpool, layer2 and layer3 also halve the temporal axis (stride (2, 2, 2)),
    for a total temporal reduction of 8x and spatial reduction of 32x.
    """

    def __init__(self, block, layers, opt):
        # Strides are (T, H, W): down_stride_1 keeps time, down_stride_2 halves it.
        down_stride_1 = (1, 2, 2)
        down_stride_2 = (2, 2, 2)
        self.inplanes = 64
        self.learning_policy = opt.learning_policy
        self.num_classes = opt.n_classes
        num_classes = opt.n_classes
        shortcut_type = opt.resnet_shortcut
        sample_size = opt.sample_size
        sample_duration = opt.basic_duration
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=down_stride_1, padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=down_stride_2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=down_stride_2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=down_stride_2)
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=down_stride_1)
        # Final pooling collapses the remaining (1, H/32, W/32) feature map.
        last_duration = int(1)
        last_size = int(math.ceil((sample_size / 32.0)))
        self.maxpool_final = nn.MaxPool3d((last_duration, last_size, last_size), stride=1)
        # Temporal positions left after the 8x reduction; flattened feature size.
        self.t_all = int((sample_duration / 8.0))
        self.dims = int((512 * block.expansion))
        self.fc_emd = (self.dims * self.t_all)
        if (self.learning_policy == '2stream'):
            # Two (classification, box) head pairs; cls heads emit 2 logits per class.
            # NOTE(review): .cuda() hard-codes GPU placement inside the model — verify.
            self.fc_cls_1 = nn.Linear(self.fc_emd, (2 * num_classes)).cuda()
            self.fc_box_1 = nn.Linear(self.fc_emd, num_classes).cuda()
            self.fc_cls_2 = nn.Linear(self.fc_emd, (2 * num_classes)).cuda()
            self.fc_box_2 = nn.Linear(self.fc_emd, num_classes).cuda()
        # Weight init: Kaiming for convs (kaiming_normal_ is in-place and returns
        # its input Parameter, so the re-assignment is effectively a no-op),
        # constant 1/0 for BatchNorm weight/bias.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        """Build one residual stage of `blocks` blocks, adding a shortcut
        downsample when stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if (shortcut_type == 'A'):
                # Parameter-free shortcut (zero-padded identity).
                downsample = partial(downsample_basic_block, planes=(planes * block.expansion), stride=stride)
            else:
                # Projection shortcut: 1x1x1 conv + BN.
                downsample = nn.Sequential(nn.Conv3d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm3d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Backbone forward; with the '2stream' policy returns two (cls, box)
        prediction pairs.

        NOTE(review): any other learning_policy makes this return None —
        confirm that is intentional.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.maxpool_final(x)
        if (self.learning_policy == '2stream'):
            # Flatten to (batch, fc_emd); cls logits reshaped to (batch, 2, n_classes).
            new_x = x.reshape((- 1), self.fc_emd)
            pred_cls_1 = self.fc_cls_1(new_x)
            pred_cls_1 = pred_cls_1.reshape((- 1), 2, self.num_classes)
            pred_box_1 = self.fc_box_1(new_x)
            pred_cls_2 = self.fc_cls_2(new_x)
            pred_cls_2 = pred_cls_2.reshape((- 1), 2, self.num_classes)
            pred_box_2 = self.fc_box_2(new_x)
            return (pred_cls_1, pred_box_1, pred_cls_2, pred_box_2)
def find_pretrained_model(src_lang: str, tgt_lang: str) -> List[str]:
    """Return Helsinki-NLP opus-mt model ids whose source language contains
    `src_lang` and target language contains `tgt_lang`.

    Multilingual model ids (containing '+') are skipped.
    """
    prefix = 'Helsinki-NLP/opus-mt-'
    candidates = [m.modelId for m in list_models() if m.modelId.startswith('Helsinki-NLP')]
    matching = []
    for model_id in candidates:
        if '+' in model_id:
            continue
        src, tgt = remove_prefix(model_id, prefix).lower().split('-')
        if src_lang in src and tgt_lang in tgt:
            matching.append(f'{prefix}{src}-{tgt}')
    return matching
def export_mannequin(mode: str, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export ground-truth depth maps and resized intrinsics for a Mannequin
    split into one compressed .npz file next to the split file.

    Raises FileExistsError when the target exists and `overwrite` is False.
    """
    print(f"-> Exporting ground truth depths for Mannequin '{mode}'...")
    dataset = MannequinDataset(mode, datum='image depth K', shape=None, as_torch=False)
    target = dataset.split_file.parent / f'{save_stem}.npz'
    if (not overwrite) and target.is_file():
        raise FileExistsError(f"Target file '{target}' already exists. Set flag `--overwrite 1` to overwrite")
    depths = []
    intrinsics = []
    for _, y, m in tqdm(dataset):
        depths.append(y['depth'].squeeze())
        # Intrinsics are rescaled from the stored depth resolution to the
        # canonical dataset shape.
        intrinsics.append(geo.resize_K(y['K'], y['depth'].shape[-2:], shape=MannequinDataset.SHAPE))
    # `depth` is stored as an object array to allow per-frame shapes.
    save(target, depth=np.array(depths, dtype=object), K=np.array(intrinsics))
def generate_python_wrapper(header_directories, include_paths, library_name, cpp_filename, declarations, ignore_declarations=None, ignore_files=None):
    """Generate Boost.Python wrapper code for the given C++ headers.

    Parses all headers under `header_directories` with pygccxml/py++,
    excludes declarations whose names contain any entry of
    `ignore_declarations`, re-exposes class member `length` variables, and
    writes the generated module source to `cpp_filename`.

    Args:
        header_directories: directories whose headers are wrapped (also
            added to the include path).
        include_paths: extra include directories for the parser.
        library_name: name of the generated Python module.
        cpp_filename: output path for the generated C++ source.
        declarations: declaration names parsing starts from.
        ignore_declarations: name substrings of declarations to exclude.
        ignore_files: files to skip when collecting headers.
    """
    # BUGFIX: defaults were mutable `{}` literals (shared across calls);
    # use None sentinels and fresh empty containers per call instead.
    if ignore_declarations is None:
        ignore_declarations = set()
    if ignore_files is None:
        ignore_files = set()
    warnings.filterwarnings(action='once', category=DeprecationWarning)
    generator_path, generator_name = utils.find_xml_generator()
    compiler = 'g++'
    compiler_path = '/usr/bin/g++'
    xml_generator_config = parser.xml_generator_configuration_t(
        xml_generator_path=generator_path,
        xml_generator=generator_name,
        compiler=compiler,
        compiler_path=compiler_path,
        start_with_declarations=declarations,
    )
    xml_generator_config.append_cflags('-std=c++11')
    for inc_dir in include_paths:
        xml_generator_config.include_paths.append(inc_dir)
    for header_dir in header_directories:
        xml_generator_config.include_paths.append(header_dir)
    header_list = []
    for header_dir in header_directories:
        header_list = header_list + get_list_of_files(header_dir, ignore_files=ignore_files)
    builder = module_builder.module_builder_t(
        header_list,
        xml_generator_path=generator_path,
        compilation_mode=parser.COMPILATION_MODE.ALL_AT_ONCE,
        xml_generator_config=xml_generator_config,
        indexing_suite_version=2,
    )
    for ignore_declaration in ignore_declarations:
        # Bind the loop variable as a default argument to avoid late-binding
        # surprises should the predicate ever be evaluated lazily.
        builder.decls(lambda decl, name=ignore_declaration: name in decl.name).exclude()
    for decl in builder.decls('length'):
        # Re-expose class member `length` variables with a location just
        # after their enclosing class so they are emitted with it.
        if isinstance(decl, decl_wrappers.variable_wrapper.variable_t):
            if isinstance(decl.parent, decl_wrappers.class_wrapper.class_t):
                decl.location.file_name = decl.parent.location.file_name
                decl.location.line = decl.parent.location.line + 1
                decl.ignore = False
    builder.classes().add_properties(exclude_accessors=True)
    builder.build_code_creator(module_name=library_name)
    builder.write_module(cpp_filename)
def parse_report(report):
    """Parse an embedded-AI analysis report into a metrics dict.

    Extracts 'cpu_mhz' and 'macc' from the "<f>MHz/<fmax>MHz ... complexity:
    <n> MACC" header line, and 'stack', 'duration_avg' (seconds),
    'cycles_avg' and 'cycles_macc' from "key : value" lines.

    Raises IndexError if the header line is absent.
    """
    out = {}
    header_re = r'(\d*)MHz/(\d*)MHz.*complexity:\s(\d*)\sMACC'
    matches = list(re.finditer(header_re, report, re.MULTILINE))
    cpu_freq, cpu_freq_max, macc = matches[0].groups()
    out['cpu_mhz'] = int(cpu_freq)
    out['macc'] = int(macc)
    key_value_re = r'(.*)\s:\s(.*)'
    for match in re.finditer(key_value_re, report, re.MULTILINE):
        key, value = (s.strip() for s in match.groups())
        if key == 'used stack':
            # BUGFIX: the old value.rstrip(' bytes') stripped a *character
            # set*, not the literal suffix; take the leading numeric token.
            out['stack'] = int(value.split()[0])
        if key == 'duration':
            # Value looks like "<x> ms (average)"; convert ms -> seconds.
            out['duration_avg'] = float(value.split()[0]) / 1000
        if key == 'CPU cycles':
            out['cycles_avg'] = int(value.split()[0])
            out['cycles_macc'] = out['cycles_avg'] / out['macc']
    return out
def uniform_noise(Cifar10_Y, noise_ratio):
    """Inject symmetric (uniform) label noise into CIFAR-10 labels.

    For each of the 10 classes, 50 * noise_ratio samples are chosen at random
    and relabelled from a shuffled balanced pool of all 10 labels (so a flip
    may keep the true label).

    Args:
        Cifar10_Y: array-like of int class labels in [0, 10); each class must
            contain at least 50 * noise_ratio samples.
        noise_ratio: integer noise level.

    Returns:
        New np.ndarray of int labels; the input is not modified.
    """
    labels = Cifar10_Y.tolist()
    noisy_labels = list(labels)  # work on a copy
    per_class_pool = 5 * noise_ratio           # copies of each label in the pool
    num_noisy_per_class = per_class_pool * 10  # samples corrupted per class
    for class_number in range(10):
        # Indices of all samples of this class, in order (replaces the old
        # repeated-index()/slicing search, same result).
        class_indices = [i for i, y in enumerate(labels) if y == class_number]
        chosen = random.sample(class_indices, num_noisy_per_class)
        pool = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] * per_class_pool
        random.shuffle(pool)
        for slot, index in enumerate(chosen):
            noisy_labels[index] = pool[slot]
    return np.array(noisy_labels, dtype=int)
class dependencyGraph(object):
    """Bidirectional dependency-parse graph over token concepts.

    Built from 1-based head indices (0 marks the root) and per-token
    dependency relations; every edge is inserted in both directions, the
    reverse edge's label suffixed with '_r_'.
    """

    def __init__(self, dep, head, tok, tgt):
        self.graph = nx.DiGraph()
        self.name2concept = tok  # token/concept string per node index
        self.root = None
        for (i, x) in enumerate(head):
            if (x == 0):
                # Exactly one token may have head 0 (the root).
                assert (self.root is None)
                self.root = i
            self.graph.add_node(i)
        for (src, (des1, rel)) in enumerate(zip(head, dep)):
            des = (des1 - 1)  # convert 1-based head index to 0-based node id
            if (des < 0):
                continue  # root token: no incoming head edge
            self._add_edge(rel, src, des)
        self.target = tgt

    def __len__(self):
        # Size proxy (e.g. for batching): squared node count plus target length.
        return ((len(self.name2concept) ** 2) + len(self.target))

    def _add_edge(self, rel, src, des):
        # Insert the edge and its reverse (label suffixed with '_r_').
        self.graph.add_node(src)
        self.graph.add_node(des)
        self.graph.add_edge(src, des, label=rel)
        self.graph.add_edge(des, src, label=(rel + '_r_'))

    def bfs(self):
        """Breadth-first traversal from the root.

        Returns (visit order, depth per visited node, is_connected) where
        is_connected is True iff every node was reached.
        """
        g = self.graph
        queue = [self.root]
        depths = [0]
        visited = set(queue)
        step = 0
        while (step < len(queue)):
            u = queue[step]
            depth = depths[step]
            step += 1
            for v in g.neighbors(u):
                if (v not in visited):
                    queue.append(v)
                    depths.append((depth + 1))
                    visited.add(v)
        is_connected = (len(queue) == g.number_of_nodes())
        return (queue, depths, is_connected)

    def collect_concepts_and_relations(self):
        """Return BFS-ordered concepts plus, for every ordered node pair, the
        edge-label sequence of a shortest path between them."""
        g = self.graph
        (nodes, depths, is_connected) = self.bfs()
        concepts = [self.name2concept[n] for n in nodes]
        relations = dict()
        for (i, src) in enumerate(nodes):
            relations[i] = dict()
            paths = nx.single_source_shortest_path(g, src)
            for (j, tgt) in enumerate(nodes):
                relations[i][j] = list()
                # Every BFS-visited node must be reachable from src.
                assert (tgt in paths)
                path = paths[tgt]
                info = dict()
                info['edge'] = [g[path[i]][path[(i + 1)]]['label'] for i in range((len(path) - 1))]
                info['length'] = len(info['edge'])
                relations[i][j].append(info)
        # NOTE(review): this replaces the computed BFS depths with the node
        # ids — confirm downstream consumers expect ids in the depths slot.
        depths = nodes
        return (concepts, depths, relations, is_connected)
def generate_encrypted_file(kms, primary_key_path, data_key_path, input_path, output_path):
    # Thin wrapper over the BigDL JVM bridge: encrypts `input_path` into
    # `output_path` using the given KMS client and key files. 'float' is the
    # bigdl_type tag passed to callBigDlFunc; the JVM return value is discarded.
    callBigDlFunc('float', 'generateEncryptedFile', kms, primary_key_path, data_key_path, input_path, output_path)
# NOTE(review): this bare call is almost certainly a mangled decorator —
# `@_registry(pattern_type='RmsNorm')` on the class below. As written its
# return value is discarded and the pattern may never be registered; verify
# against the original source.
_registry(pattern_type='RmsNorm')
class RmsNorm(Pattern):
    """Graph-fusion pattern collapsing the
    Pow -> ReduceMean -> Add -> Rsqrt -> Mul -> Mul subgraph into one
    RmsNorm op."""

    def __call__(self, model):
        # Pattern description: matched input op chain, fused output op, and
        # tensor wiring; 'returns': [2] hands back the Add node (epsilon source).
        pattern_mapping_config = {'RmsNorm': [{'patterns': {'in': [[(0, 'Pow'), (1, 'ReduceMean'), (2, 'Add'), (3, 'Rsqrt'), (4, 'Mul'), (5, 'Mul')]], 'out': [[(0, 'RmsNorm')]]}, 'search_mode': 'op_type', 'node_names': {0: 5}, 'input_tensors': {0: [[{0: [0]}, {5: [0]}], [[0, 1], 2]]}, 'output_tensors': {0: [[{5: [0]}], [[0], 1]]}, 'returns': [2]}]}

        def _set_attr(epsilon, node_names, model):
            # Copy epsilon from the matched Add node onto the fused RmsNorm
            # node; record hidden_size when a weight tensor is attached.
            attr = OrderedDict()
            attr['epsilon'] = float(epsilon.input_tensors[1].data)
            ln_node_idx = model.get_node_id(node_names[0])
            model.nodes[ln_node_idx].attr = attr
            if (len(model.nodes[ln_node_idx].input_tensors) == 2):
                hidden_size = model.nodes[ln_node_idx].input_tensors[1].data.shape[0]
                model.add_config_item('hidden_size', hidden_size)
        pattern_dict = pattern_mapping_config['RmsNorm'][0]
        (model, new_node_names, ret_old_nodes) = util.pattern_mapping('RmsNorm', pattern_dict, model)
        if (len(new_node_names) != 0):
            for i in range(len(new_node_names)):
                epsilon = ret_old_nodes[i][0]
                _set_attr(epsilon, new_node_names[i], model)
            # Both returns yield the same model; kept as in the original.
            return model
        return model
def logTrendFn(**kwargs):
    """Return a logarithmic trend: log(t) + displacement for
    t = tStart+1 .. timeSteps.

    Required kwargs: 'dampening' (required for interface compatibility but
    currently unused), 'displacement', 'timeSteps'. Optional: 'tStart'
    (default 0).
    """
    kwargs['dampening']  # enforce the required key; value intentionally unused
    displacement = kwargs['displacement']
    timeSteps = kwargs['timeSteps']
    # Idiomatic default lookup instead of an if/else block.
    tStart = kwargs.get('tStart', 0)
    steps = range(1 + tStart, timeSteps + 1)
    return np.log(steps) + displacement
def AddConvMaxpLayer(config_lines, name, input, args):
    """Append a 3D convolution layer, and optionally a max-pooling layer, to
    the network config.

    `input` must carry '3d-dim' ([x, y, z] dims) and 'vectorization'
    metadata; returns the descriptor of the last layer added.
    """
    # Validate that 3D dimension metadata is present before building layers.
    if ('3d-dim' not in input):
        raise Exception("The input to AddConvMaxpLayer() needs '3d-dim' parameters.")
    input = nodes.AddConvolutionLayer(config_lines, name, input, input['3d-dim'][0], input['3d-dim'][1], input['3d-dim'][2], args.filt_x_dim, args.filt_y_dim, args.filt_x_step, args.filt_y_step, args.num_filters, input['vectorization'])
    # Only add pooling when at least one pooling dimension actually reduces.
    if ((args.pool_x_size > 1) or (args.pool_y_size > 1) or (args.pool_z_size > 1)):
        input = nodes.AddMaxpoolingLayer(config_lines, name, input, input['3d-dim'][0], input['3d-dim'][1], input['3d-dim'][2], args.pool_x_size, args.pool_y_size, args.pool_z_size, args.pool_x_step, args.pool_y_step, args.pool_z_step)
    return input
def get_division_counts_by_season(season: Optional[int]) -> int:
    """Number of MLB divisions for a season (None = last completed season).

    1994 onward: 6 divisions; 1969-1993: 4; earlier seasons: 1.
    """
    if season is None:
        season = most_recent_season() - 1
    # Walk era boundaries from newest to oldest.
    for first_year, divisions in ((1994, 6), (1969, 4)):
        if season >= first_year:
            return divisions
    return 1
class ShapleyModule(ShapleyNetwork, ABC):
    """Exact Shapley-value wrapper around an inner module.

    Evaluates the inner function under all 2**features binary feature masks
    (masked-out features replaced by reference values) and combines the
    per-mask outputs with precomputed Shapley weights.
    """

    def __init__(self, inner_function: nn.Module, dimensions: ModuleDimensions=None, reference_values: torch.Tensor=None) -> None:
        super(ShapleyModule, self).__init__(dimensions=dimensions, reference_values=reference_values)
        self.inner_function = inner_function
        # One forward pass per feature subset.
        self.passes = (2 ** self.dimensions.features)
        self.masks = generate_binary_sequence(dimensions.features)
        self.masks = self.masks.refine_names(NAME_NUM_PASSES, NAME_FEATURES, NAME_META_CHANNELS)
        self.subtraction_matrix = self._get_subtraction_matrix()

    @staticmethod
    def _preprocess(inputs: torch.Tensor) -> torch.Tensor:
        # BUGFIX: this helper lacked a `self` parameter but was invoked as
        # self._preprocess(inputs), raising TypeError; it uses no instance
        # state, so it is now a @staticmethod.
        return named_tensor_add_dim(inputs, 0, NAME_NUM_PASSES)

    def explained_forward(self, inputs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Return (full-input output, Shapley values) for a batch."""
        inputs_reshaped = self._preprocess(inputs)
        masks = self.masks.align_as(inputs_reshaped)
        # Masked-out feature positions are replaced by the reference values.
        inputs_extended = ((masks * inputs_reshaped) + ((1 - masks) * self.reference_values))
        inputs_reshaped = inputs_extended.flatten([NAME_FEATURES, NAME_META_CHANNELS], NAME_FEATURES_META_CHANNEL)
        names = inputs_reshaped.names
        function_outputs = self.inner_function(inputs_reshaped.rename(None)).refine_names(*names)
        function_outputs = function_outputs.rename(feature_times_channels=NAME_META_CHANNELS)
        shapley_values = self.compute_shapley(function_outputs)
        shapley_values = shapley_values.align_to(NAME_BATCH_SIZE, ..., NAME_META_CHANNELS)
        # The last pass corresponds to the all-ones mask, i.e. the unmasked input.
        return (function_outputs[(- 1)], shapley_values)

    def unexplained_forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Plain forward through the inner function (no Shapley computation)."""
        return self.inner_function(inputs.flatten([NAME_FEATURES, NAME_META_CHANNELS], NAME_FEATURES_META_CHANNEL))

    def compute_shapley(self, function_outputs: torch.Tensor) -> torch.Tensor:
        """Weighted inclusion/exclusion combination of per-mask outputs."""
        shapley_values = torch.matmul(function_outputs.align_to(..., NAME_NUM_PASSES), self.subtraction_matrix).align_to(NAME_BATCH_SIZE, ..., NAME_FEATURES, NAME_META_CHANNELS)
        return shapley_values

    def _get_subtraction_matrix(self) -> torch.Tensor:
        """Build the (2**features, features) matrix of +/- Shapley weights."""
        shapley_weights = self._weight_compute()
        num_features = self.dimensions.features
        matrix = torch.zeros((2 ** num_features), num_features)
        for i in range(num_features):
            # Pairs of masks differing only in feature i: with-feature
            # (minuend) minus without-feature (subtrahend).
            (minuend, subtrahend) = self._generate_subtraction_sequence(i)
            weights = named_tensor_vector_indexing_single_dim(shapley_weights, NAME_NUM_PASSES, indices=subtrahend).rename(None)
            matrix[(minuend, i)] = weights
            matrix[(subtrahend, i)] = (- weights)
        matrix = matrix.refine_names(NAME_NUM_PASSES, NAME_FEATURES)
        return matrix

    def _generate_subtraction_sequence(self, index: int) -> torch.LongTensor:
        """Indices of mask pairs that differ only in feature `index`."""
        self.passes = (2 ** self.dimensions.features)
        sequence = torch.empty(2, (self.passes // 2)).long()
        for i in range((self.passes // 2)):
            (sequence[(0, i)], sequence[(1, i)]) = get_indices(total_num=self.passes, index_of_interest=index, index_in_index=i)
        return sequence

    def _weight_compute(self) -> torch.Tensor:
        """Shapley kernel weights |z|! * (m - |z| - 1)! / m! per mask."""
        z_size = self.masks.sum((- 2)).squeeze().numpy()
        m_factorial = factorial(self.dimensions.features)
        z_factorial = factorial(z_size)
        mz_factorial = factorial(((self.dimensions.features - z_size) - 1))
        weight = ((z_factorial * mz_factorial) / m_factorial)
        return torch.from_numpy(weight).float().refine_names(NAME_NUM_PASSES)
def diapreresnet1202_cifar100(num_classes=100, **kwargs):
    """DIA-PreResNet-1202 model for CIFAR-100.

    Builds the 1202-layer pre-activation variant (basic blocks, no
    bottleneck) via the shared get_diapreresnet_cifar factory; extra kwargs
    are forwarded unchanged.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name='diapreresnet1202_cifar100', **kwargs)
class IBasicBlock(nn.Module):
    """Improved residual basic block (BN-first with PReLU), as used in
    ArcFace-style backbones: BN -> conv3x3 -> BN -> PReLU -> conv3x3(stride)
    -> BN, plus identity/projection shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1):
        super(IBasicBlock, self).__init__()
        # Guard clauses: only the vanilla configuration is implemented.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05)
        self.conv1 = conv3x3(inplanes, planes)
        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05)
        self.prelu = nn.PReLU(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main branch: BN -> conv -> BN -> PReLU -> strided conv -> BN.
        out = self.bn3(self.conv2(self.prelu(self.bn2(self.conv1(self.bn1(x))))))
        # Shortcut: projection when shapes change, identity otherwise.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return out
def determine_thresholds(confidence, resolution=100):
    """Pick `resolution` increasing thresholds spanning the confidence
    scores: -inf, then `resolution - 2` evenly spaced samples of the sorted
    NaN-free scores, then +inf.
    """
    scores = np.array(confidence) if isinstance(confidence, list) else confidence
    scores = scores.flatten()
    scores = scores[~np.isnan(scores)]  # drop NaNs before sorting
    scores.sort()
    assert (len(scores) > resolution) and (resolution > 2)
    # Stay `margin` samples away from both extremes when sampling.
    margin = np.floor(len(scores) / (resolution - 2))
    sample_at = np.linspace(margin, len(scores) - margin, resolution - 2, dtype=np.int32)
    thresholds = np.ones(resolution)
    thresholds[0] = -np.inf
    thresholds[-1] = np.inf
    thresholds[1:-1] = scores[sample_at]
    return thresholds
class QuantizationLayer(tf.keras.layers.Layer):
    """Keras layer dividing its input element-wise by a projected weight."""

    def __init__(self):
        super(QuantizationLayer, self).__init__()

    def call(self, x, weight):
        # Fixed projection constant ([[1,0],[0,1],[0,1]] transposed): maps the
        # 2-column weight into 3 columns, duplicating the second column.
        i = tf.transpose(tf.constant([[1, 0], [0, 1], [0, 1]], dtype='float32'))
        w = tf.matmul(weight, i)
        # BUGFIX: tf.div was removed in TensorFlow 2.x; tf.math.divide is the
        # supported equivalent (available since TF 1.4 as well).
        return tf.math.divide(x, w)
class CelebA(data.Dataset):
    """CelebA face-attribute dataset.

    Reads the attribute annotation file, keeps the columns listed in
    `selected_attrs`, and serves (transformed image, attribute vector) pairs.
    """

    def __init__(self, image_dir, attr_path, selected_attrs, transform, mode):
        self.image_dir = image_dir
        self.attr_path = attr_path
        self.selected_attrs = selected_attrs
        self.transform = transform
        self.mode = mode
        self.train_dataset = []
        self.test_dataset = []
        self.attr2idx = {}
        self.idx2attr = {}
        self.preprocess()
        # NOTE(review): any non-empty string mode (including 'test') is truthy,
        # so this always selects train_dataset; test_dataset is also never
        # populated by preprocess() below. Both look like regressions from the
        # original StarGAN implementation — verify.
        if mode:
            self.num_images = len(self.train_dataset)
        else:
            self.num_images = len(self.test_dataset)

    def preprocess(self):
        """Parse the attribute file: line 1 is the header of attribute names;
        subsequent lines are '<filename> <+1/-1 per attribute>'."""
        lines = [line.rstrip() for line in open(self.attr_path, 'r')]
        all_attr_names = lines[1].split()
        for (i, attr_name) in enumerate(all_attr_names):
            self.attr2idx[attr_name] = i
            self.idx2attr[i] = attr_name
        lines = lines[2:]
        for (i, line) in enumerate(lines):
            split = line.split()
            filename = split[0]
            values = split[1:]
            label = []
            # Keep only the selected attribute columns, in selection order.
            for attr_name in self.selected_attrs:
                idx = self.attr2idx[attr_name]
                label.append(int(values[idx]))
            # NOTE(review): every sample lands in train_dataset; nothing is
            # ever appended to test_dataset — confirm intended split logic.
            self.train_dataset.append([filename, label])
        print('Finished preprocessing the CelebA dataset...')

    def __getitem__(self, index):
        """Return (transformed image, float attribute tensor) for one sample.

        Always reads from train_dataset regardless of self.mode (see the
        notes in __init__/preprocess).
        """
        dataset = self.train_dataset
        (filename, label) = dataset[index]
        image = Image.open(os.path.join(self.image_dir, filename))
        return (self.transform(image), torch.FloatTensor(label))

    def __len__(self):
        return self.num_images
def update_context(context, sentence, entities):
    """Mark entity slots present in `sentence`.

    For every entity (in dict insertion order), set context[slot] = 1 when
    any word of the sentence is one of that entity's values. Mutates
    `context` in place; returns None.
    """
    for slot, (_, entity_values) in enumerate(entities.items()):
        if any(word in entity_values for word in sentence):
            context[slot] = 1
def analytical_leg_jacobian(leg_angles, leg_id):
    """Analytical Jacobian of a quadruped leg's foot position with respect to
    its three joint angles (hip roll t1, hip pitch t2, knee t3).

    `leg_id` selects the hip-offset sign (even ids: -0.08505, odd: +0.08505).
    Returns a 3x3 numpy array.
    """
    l_up = 0.2
    l_low = 0.2
    l_hip = 0.08505 * (-1) ** (leg_id + 1)
    t1, t2, t3 = leg_angles[0], leg_angles[1], leg_angles[2]
    # Effective length and angle of the two-link (upper + lower) chain.
    l_eff = np.sqrt(l_up ** 2 + l_low ** 2 + 2 * l_up * l_low * np.cos(t3))
    t_eff = t2 + t3 / 2
    # Hoist repeated trigonometric terms.
    s1, c1 = np.sin(t1), np.cos(t1)
    s3 = np.sin(t3)
    se, ce = np.sin(t_eff), np.cos(t_eff)
    J = np.zeros((3, 3))
    J[0, 0] = 0
    J[0, 1] = -l_eff * ce
    J[0, 2] = l_low * l_up * s3 * se / l_eff - l_eff * ce / 2
    J[1, 0] = -l_hip * s1 + l_eff * c1 * ce
    J[1, 1] = -l_eff * s1 * se
    J[1, 2] = -l_low * l_up * s1 * s3 * ce / l_eff - l_eff * s1 * se / 2
    J[2, 0] = l_hip * c1 + l_eff * s1 * ce
    J[2, 1] = l_eff * se * c1
    J[2, 2] = l_low * l_up * s3 * c1 * ce / l_eff + l_eff * se * c1 / 2
    return J
class TrainOptions(BaseOptions):
    """Training-time command-line options, extending BaseOptions.

    Adds display/logging frequencies, checkpointing, learning-rate schedule
    and loss-weight flags (CycleGAN-style option set). Sets isTrain = True.
    """

    def initialize(self):
        BaseOptions.initialize(self)
        # --- visualization / logging frequencies ---
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        self.parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        # --- checkpointing / resuming ---
        self.parser.add_argument('--save_latest_freq', type=int, default=2000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # --- optimizer / learning-rate schedule ---
        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # --- loss configuration ---
        self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
        self.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.parser.add_argument('--identity', type=float, default=0.5, help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set optidentity = 0.1')
        self.parser.add_argument('--adversarial_loss_p', action='store_true', help='also train the prediction model with an adversarial loss')
        # Mark this option set as the training configuration.
        self.isTrain = True
def include_pip(tile_type, p):
    """Filter for programmable interconnect points (PIPs).

    Returns False for PIPs that must be excluded (route-through pseudo-pips
    on logic tiles / CE inputs / TFB nets, plus various clock-network
    special cases), True otherwise. Wire-name getters are assumed to be
    side-effect-free.
    """
    src = p.src_wire().name()
    dst = p.dst_wire().name()
    # Route-through pseudo-pips excluded for CE inputs, xc7 logic tiles, TFB nets.
    if p.is_route_thru():
        if src.endswith('_CE_INT'):
            return False
        if tile_type in ('CLBLL_L', 'CLBLL_R', 'CLBLM_L', 'CLBLM_R'):
            return False
        if 'TFB' in dst:
            return False
    # Clock-network sources that are never emitted.
    if src.startswith('CLK_BUFG_R_FBG_OUT') or 'CLK_HROW_CK_INT' in src:
        return False
    if tile_type.startswith('HCLK_CMT') and 'FREQ_REF' in dst:
        return False
    if tile_type.startswith('CMT_TOP_L_LOWER'):
        return False
    if tile_type.startswith('CLK_HROW_TOP') and 'CK_BUFG_CASCO' in dst and 'CK_BUFG_CASCIN' in src:
        return False
    if tile_type.startswith('HCLK_IOI') and 'RCLK_BEFORE_DIV' in dst and 'IMUX' in src:
        return False
    # IOI clock-mux special cases.
    if 'IOI' in tile_type:
        if (('CLKB' in dst and 'IMUX22' in src)
                or ('OCLKB' in dst and 'IOI_OCLK_' in src)
                or ('OCLKM' in dst and 'IMUX31' in src)):
            return False
    # CMT_TOP_R frequency backbone nets.
    if 'CMT_TOP_R' in tile_type:
        if 'PLLOUT_CLK_FREQ_BB_REBUFOUT' in dst or 'MMCM_CLK_FREQ_BB' in dst:
            return False
    return True
class MostVisitedExtract(AbstractExtract):
    """Extracts the root-to-leaf path obtained by repeatedly descending into
    the child with the highest visit count."""

    def __call__(self, node):
        path = [node]
        while not node.terminal and len(node.children) > 0:
            # Index of the child with the maximum n_visits (ties: first one).
            best = max(range(len(node.children)), key=lambda i: node.children[i].n_visits)
            node = node.children[best]
            path.append(node)
        return path
def train_step(epoch, loss_save):
    """Run one training epoch over dataloader_train.

    Relies on module-level globals: model, mlp, dataloader_train, optimizer,
    creterion (triplet loss), CE, andloss, orloss, and_or, args, loss_list,
    loss_list_CE, loss_by_iter, acc_list, cuda_input. Appends epoch averages
    into `loss_save` and returns (triplet avg, CE avg, accuracy avg).
    """
    for (_, data_train_group) in tqdm(enumerate(dataloader_train), desc='Training', total=len(dataloader_train)):
        model.train()
        mlp.train()
        for data_train in data_train_group:
            # One triplet: three graph samples whose embeddings become
            # (anchor, positive, negative) in vector3.
            vector3 = []
            regularization = 0
            for data_train_item in data_train:
                (adj, features, labels, idx_train, idx_val, idx_test) = cuda_input(*data_train_item[:(- 2)])
                (and_children, or_children) = data_train_item[(- 2):]
                t = time.time()
                output = model(features.squeeze(0), adj.squeeze(0), labels.squeeze(0))
                vector3.append(output[0])
                if and_or:
                    # AND/OR structural regularization over child embeddings.
                    if (len(and_children) != 0):
                        for addgate in range(len(and_children)):
                            # Concatenate the embeddings of this AND gate's children.
                            add_child_tensor = None
                            for childidx in range(len(and_children[addgate])):
                                if (add_child_tensor is None):
                                    add_child_tensor = output[and_children[addgate][childidx]]
                                else:
                                    add_child_tensor = torch.cat((add_child_tensor, output[and_children[addgate][childidx]]))
                            regularization += andloss(add_child_tensor)
                    if (len(or_children) != 0):
                        for orgate in range(len(or_children)):
                            # Concatenate the embeddings of this OR gate's children.
                            or_child_tensor = None
                            for childidx in range(len(or_children[orgate])):
                                if (or_child_tensor is None):
                                    or_child_tensor = output[or_children[orgate][childidx]]
                                else:
                                    or_child_tensor = torch.cat((or_child_tensor, output[or_children[orgate][childidx]]))
                            regularization += orloss(or_child_tensor)
            # Triplet loss over (anchor, positive, negative) plus regularization.
            loss_train = creterion(vector3[0].unsqueeze(0), vector3[1].unsqueeze(0), vector3[2].unsqueeze(0))
            loss_train += (args.w_reg * regularization)
            loss_list.add(float(loss_train.cpu()))
            optimizer.zero_grad()
            mlp.train()
            # Classifier head: (anchor|positive) labeled 1, (anchor|negative) labeled 0.
            input = torch.cat((torch.cat((vector3[0], vector3[1])).unsqueeze(0), torch.cat((vector3[0], vector3[2])).unsqueeze(0)))
            pred = mlp(input)
            target = torch.LongTensor([1, 0]).cuda()
            mlp_loss = CE(pred, target)
            loss_by_iter.append(float(mlp_loss.cpu()))
            # Joint backward over triplet loss and weighted CE loss.
            (loss_train + (args.cls_reg * mlp_loss)).backward()
            optimizer.step()
            loss_list_CE.add(float(mlp_loss.cpu()))
            (_, predicted) = torch.max(pred.data, 1)
            acc = ((predicted == target).sum().item() / target.size(0))
            acc_list.add(float(acc))
    loss_avg = loss_list.avg()
    CE_loss_avg = loss_list_CE.avg()
    acc_avg = acc_list.avg()
    # NOTE(review): `t` is the timestamp of the last processed item, so the
    # reported time covers only the tail of the epoch — verify intent.
    print('Epoch: {:04d}'.format((epoch + 1)), 'Avg loss: {:.4f}'.format(loss_avg), 'Avg CE loss: {:.4f}'.format(CE_loss_avg), 'Avg Acc: {:.4f}'.format(acc_avg), 'time: {:.4f}s'.format((time.time() - t)))
    loss_save['triplet_loss'].append(loss_avg)
    loss_save['CE_loss'].append(CE_loss_avg)
    loss_save['acc'].append(acc_avg)
    return (loss_avg, CE_loss_avg, acc_avg)
class AgentIDWrapper(Wrapper):
    """Environment wrapper that prepends a one-hot agent identity to each
    agent's observation (and, optionally, to the global state)."""

    def __init__(self, env: Environment, has_global_state: bool=False):
        super().__init__(env)
        self.has_global_state = has_global_state

    def _add_agent_ids(self, timestep: TimeStep, num_agents: int) -> Union[(Observation, ObservationGlobalState)]:
        # One-hot identity per agent, concatenated along the feature axis.
        agent_ids = jnp.eye(num_agents)
        new_agents_view = jnp.concatenate([agent_ids, timestep.observation.agents_view], axis=(- 1))
        if self.has_global_state:
            new_global_state = jnp.concatenate([agent_ids, timestep.observation.global_state], axis=(- 1))
            return ObservationGlobalState(agents_view=new_agents_view, action_mask=timestep.observation.action_mask, step_count=timestep.observation.step_count, global_state=new_global_state)
        else:
            return Observation(agents_view=new_agents_view, action_mask=timestep.observation.action_mask, step_count=timestep.observation.step_count)

    def reset(self, key: chex.PRNGKey) -> Tuple[(State, TimeStep)]:
        """Reset the wrapped env and augment the first observation."""
        (state, timestep) = self._env.reset(key)
        # NOTE(review): in-place assignment assumes TimeStep is mutable —
        # dm_env-style TimeSteps are often immutable namedtuples; verify.
        timestep.observation = self._add_agent_ids(timestep, self._env.num_agents)
        return (state, timestep)

    def step(self, state: State, action: chex.Array) -> Tuple[(State, TimeStep)]:
        """Step the wrapped env and augment the resulting observation."""
        (state, timestep) = self._env.step(state, action)
        timestep.observation = self._add_agent_ids(timestep, self._env.num_agents)
        return (state, timestep)

    def observation_spec(self) -> Union[(specs.Spec[Observation], specs.Spec[ObservationGlobalState])]:
        """Spec with the agent-id features added to agents_view (and to
        global_state when has_global_state is set)."""
        obs_spec = self._env.observation_spec()
        num_obs_features = (obs_spec.agents_view.shape[(- 1)] + self._env.num_agents)
        # NOTE(review): dtype jnp.int32 is kept from the original, but
        # jnp.eye yields floats — confirm the intended observation dtype.
        agents_view = specs.Array((self._env.num_agents, num_obs_features), jnp.int32, 'agents_view')
        global_state = specs.Array((self._env.num_agents, ((num_obs_features * self._env.num_agents) + self._env.num_agents)), jnp.int32, 'global_state')
        if self.has_global_state:
            return obs_spec.replace(agents_view=agents_view, global_state=global_state)
        return obs_spec.replace(agents_view=agents_view)
def get_score(x):
    """Score a (classification, embedding) candidate against the query.

    Combines an exact class-match bonus with the embedding dot product.
    Relies on module-level ``query_classification`` / ``query_embedding``.
    """
    candidate_cls = x[0]
    candidate_emb = x[1]
    cls_score = 1.0 if candidate_cls == query_classification else 0.0
    sim_score = np.dot(query_embedding, candidate_emb)
    return cls_score + sim_score
def has_labels(dataset_dir, filename=LABELS_FILENAME):
    """Return True when the labels file is present in ``dataset_dir``."""
    labels_path = os.path.join(dataset_dir, filename)
    return tf.gfile.Exists(labels_path)
def parse_response(response):
    """Parse a ReAct-style model response into a ChatCompletionResponseChoice.

    Looks for the LAST ``Action:`` / ``Action Input:`` pair (a tool call),
    otherwise for a ``Final Answer:`` marker. Returns None implicitly when
    neither pattern is present.
    """
    (func_name, func_args) = ('', '')
    # rfind: only the last occurrence of each marker matters.
    i = response.rfind('\nAction:')
    j = response.rfind('\nAction Input:')
    k = response.rfind('\nObservation:')
    if (0 <= i < j):
        # An Action followed by an Action Input: treat as a function call.
        if (k < j):
            # No Observation after the Action Input — append one so the
            # argument slice below has a terminator.
            response = (response.rstrip() + '\nObservation:')
            k = response.rfind('\nObservation:')
        func_name = response[(i + len('\nAction:')):j].strip()
        func_args = response[(j + len('\nAction Input:')):k].strip()
    if func_name:
        # Function-call choice: content is everything before the Action marker.
        choice_data = ChatCompletionResponseChoice(index=0, message=ChatMessage(role='assistant', content=response[:i], function_call={'name': func_name, 'arguments': func_args}), finish_reason='function_call')
        return choice_data
    z = response.rfind('\nFinal Answer: ')
    if (z >= 0):
        # Plain completion: keep only the text after the marker.
        response = response[(z + len('\nFinal Answer: ')):]
        choice_data = ChatCompletionResponseChoice(index=0, message=ChatMessage(role='assistant', content=response), finish_reason='stop')
        return choice_data
class VideoWriter():
    """Thin convenience wrapper around ``cv2.VideoWriter``."""

    def __init__(self, path, frame_size, codec='FFV1', fps=30.0, color=True):
        # Translate the four-character codec string into an OpenCV fourcc int.
        fourcc = cv2.VideoWriter_fourcc(*codec)
        self.stream = cv2.VideoWriter(path, fourcc, fps, frame_size, color)

    def write(self, frame):
        """Append a single frame to the video stream."""
        self.stream.write(frame)

    def write_batch(self, batch):
        """Append every frame in ``batch``, in order."""
        for single_frame in batch:
            self.write(single_frame)

    def close(self):
        """Release the stream; return True when it closed successfully."""
        self.stream.release()
        return not self.stream.isOpened()
class FlashDistillationConfig(object):
    """Configuration container for flash distillation.

    Holds per-block settings: layer mappings for knowledge transfer, loss
    types and weights, whether to add the origin loss, and train steps.
    """
    def __init__(self, block_names: list=None, layer_mappings_for_knowledge_transfer: list=None, loss_types: list=None, loss_weights: list=None, add_origin_loss: list=None, train_steps: list=None):
        super().__init__()
        # Use None sentinels instead of `[]` defaults: mutable default
        # arguments are shared across instances (classic Python bug), so
        # mutating one config's list would leak into every other config.
        self.block_names = block_names if block_names is not None else []
        self.layer_mappings_for_knowledge_transfer = (layer_mappings_for_knowledge_transfer
                                                     if layer_mappings_for_knowledge_transfer is not None else [])
        self.loss_types = loss_types if loss_types is not None else []
        self.loss_weights = loss_weights if loss_weights is not None else []
        self.add_origin_loss = add_origin_loss if add_origin_loss is not None else []
        self.train_steps = train_steps if train_steps is not None else []
def register_model_architecture(model_name, arch_name):
    """Decorator factory that registers an architecture config function.

    The decorated function is stored in ARCH_CONFIG_REGISTRY under
    ``arch_name`` and the architecture is linked back to its model class.
    """
    def _decorator(fn):
        # Validation order matters: unknown model, then duplicate arch, then callability.
        if model_name not in MODEL_REGISTRY:
            raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
        if not callable(fn):
            raise ValueError('Model architecture must be callable ({})'.format(arch_name))
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn
    return _decorator
class CheckingTestCases(unittest.TestCase):
    """Unit tests for the Check validation helpers."""
    def test_should_is_not_null_raise_an_exception(self) -> None:
        """is_not_none must reject None."""
        with self.assertRaises(NoneParameterException):
            Check.is_not_none(None)
    def test_should_is_valid_probability_raise_an_exception_if_the_value_is_negative(self) -> None:
        """probability_is_valid must reject values below 0."""
        with self.assertRaises(InvalidProbabilityValueException):
            Check.probability_is_valid((- 1.0))
    def test_should_is_valid_probability_raise_an_exception_if_the_value_is_higher_than_one(self) -> None:
        """probability_is_valid must reject values above 1."""
        with self.assertRaises(InvalidProbabilityValueException):
            Check.probability_is_valid(1.1)
    def test_should_is_value_in_range_raise_an_exception_if_the_value_is_lower_than_the_lower_bound(self) -> None:
        """value_is_in_range must reject values below the lower bound (2 < [3, 5])."""
        with self.assertRaises(ValueOutOfRangeException):
            Check.value_is_in_range(2, 3, 5)
    def test_should_is_value_in_range_raise_an_exception_if_the_value_is_higher_than_the_upper_bound(self) -> None:
        """value_is_in_range must reject values above the upper bound (7 > [3, 5])."""
        with self.assertRaises(ValueOutOfRangeException):
            Check.value_is_in_range(7, 3, 5)
    def test_should_that_raise_an_exception_if_the_expression_is_false(self) -> None:
        """that must raise when the checked expression is False."""
        with self.assertRaises(InvalidConditionException):
            Check.that(False, 'The expression is false')
def get_dataset_mt(dataset_args: Dict[(str, str)], model: str) -> DatasetDict:
    """Load one split per configured dataset config into a DatasetDict.

    Each entry of ``dataset_args['dataset_configs']`` is used both as the
    split name to load and as the key in the returned DatasetDict.
    """
    splits = DatasetDict()
    for config_name in dataset_args['dataset_configs']:
        splits[config_name] = load_dataset(dataset_args['dataset_mt'], model, split=config_name)
    return splits
class _DenseLayer(nn.Module):
    """Two-branch dense layer (PeleeNet-style): a 1x1->3x3 branch and a
    1x1->3x3->3x3 branch, concatenated with the input along channels."""

    def __init__(self, num_input_features, growth_rate, bottleneck_width, drop_rate):
        super(_DenseLayer, self).__init__()
        # Each branch contributes half of the requested growth rate.
        growth_rate = int(growth_rate / 2)
        # Bottleneck width, rounded down to a multiple of 4.
        inter_channel = int(growth_rate * bottleneck_width / 4) * 4
        if inter_channel > num_input_features / 2:
            inter_channel = int(num_input_features / 8) * 4
            print('adjust inter_channel to ', inter_channel)
        # Branch 1: bottleneck + one 3x3 conv.
        self.branch1a = BasicConv2d(num_input_features, inter_channel, kernel_size=1)
        self.branch1b = BasicConv2d(inter_channel, growth_rate, kernel_size=3, padding=1)
        # Branch 2: bottleneck + two stacked 3x3 convs (larger receptive field).
        self.branch2a = BasicConv2d(num_input_features, inter_channel, kernel_size=1)
        self.branch2b = BasicConv2d(inter_channel, growth_rate, kernel_size=3, padding=1)
        self.branch2c = BasicConv2d(growth_rate, growth_rate, kernel_size=3, padding=1)

    def forward(self, x):
        """Concatenate the input with both branch outputs along channels."""
        left = self.branch1b(self.branch1a(x))
        right = self.branch2c(self.branch2b(self.branch2a(x)))
        if x.dtype == torch.quint8:
            # Quantized inputs must be dequantized before the float concat.
            x = x.dequantize()
        return torch.cat([x, left, right], 1)
class FairseqDecoder(nn.Module):
    """Base class for decoders: subclasses implement extract_features and output_layer."""
    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary
        # Flag toggled by prepare_for_onnx_export_; threaded into softmax calls.
        self.onnx_trace = False
    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
        """Extract features then project them to vocabulary logits.

        Returns:
            tuple: (logits, extra state dict from extract_features).
        """
        (x, extra) = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
        x = self.output_layer(x)
        return (x, extra)
    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        """Compute decoder features; must be implemented by subclasses."""
        raise NotImplementedError
    def output_layer(self, features, **kwargs):
        """Project features to the vocabulary; must be implemented by subclasses."""
        raise NotImplementedError
    def get_normalized_probs(self, net_output: Tuple[(Tensor, Dict[(str, List[Optional[Tensor]])])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]):
        """Get normalized (log-)probabilities from the network's output."""
        if (hasattr(self, 'adaptive_softmax') and (self.adaptive_softmax is not None)):
            # Adaptive softmax path: get_log_prob returns log-probs directly.
            if (sample is not None):
                assert ('target' in sample)
                target = sample['target']
            else:
                target = None
            out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
            # exp_() converts log-probs to probs IN PLACE when probs are requested.
            return (out.exp_() if (not log_probs) else out)
        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
        else:
            return utils.softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
    def max_positions(self):
        """Maximum input length supported by the decoder (effectively unlimited)."""
        return 1000000.0
    def upgrade_state_dict(self, state_dict):
        """Upgrade old state dicts to work with newer code (no-op by default)."""
        return state_dict
    def prepare_for_onnx_export_(self):
        """Switch softmax calls to their ONNX-traceable variants."""
        self.onnx_trace = True
def train_models_alpha_num_blocks_sweep(params, alpha, num_blocks, run_ctr_start=1, name_prefix=None):
    """Train one model per (alpha, num_blocks) combination on CIFAR-10.

    Mutates ``params`` in place for each combination and numbers the runs
    starting at ``run_ctr_start``; ``name_prefix`` optionally prefixes run names.
    """
    p = params
    x_train, y_train, x_test, y_test = get_dataset(cifar10, p)
    run_ctr = run_ctr_start
    for alpha_val in alpha:
        p.alpha = alpha_val
        for block_count in num_blocks:
            p.num_blocks = block_count
            run_name = p.to_string()
            if name_prefix is not None:
                run_name = name_prefix + '_' + run_name
            checkpoint_dir = gen_model_checkpoint_dir(run_ctr, run_name, params.working_dir)
            train_model(params, x_train, y_train, x_test, y_test, checkpoint_dir)
            run_ctr += 1
def batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):
    """Batch normalization via tf.contrib.layers; decay defaults to 0.9.

    ``moments_dims_unused`` is kept only for signature compatibility.
    """
    decay = 0.9 if bn_decay is None else bn_decay
    return tf.contrib.layers.batch_norm(inputs, center=True, scale=True, is_training=is_training, decay=decay, updates_collections=None, scope=scope, data_format=data_format)
def setup_train(args):
    """Prepare a training run: GPU setup, export folder, and config dump.

    Returns the experiment export root directory.
    """
    set_up_gpu(args)
    export_root = create_experiment_export_folder(args)
    export_experiments_config_as_json(args, export_root)
    # Pretty-print only the args that were actually set.
    non_null_args = {name: value for name, value in vars(args).items() if value is not None}
    pp.pprint(non_null_args, width=1)
    return export_root
class CondenseInitBlock(nn.Module):
    """CondenseNet stem: a single 3x3 stride-2 convolution without bias,
    halving the spatial resolution."""

    def __init__(self, in_channels, out_channels):
        super(CondenseInitBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=False)

    def forward(self, x):
        """Apply the downsampling convolution."""
        return self.conv(x)
def set_seed(seed):
    """Seed every RNG (python, numpy, torch CPU/CUDA) and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic algorithms only; disable benchmark-driven autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class GhostConv(nn.Module):
    """Ghost convolution: a primary conv producing half the output channels,
    plus a cheap depthwise 5x5 conv generating the other half."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
        super(GhostConv, self).__init__()
        hidden = c2 // 2  # each path yields half of the output channels
        self.cv1 = Conv(c1, hidden, k, s, None, g, act)
        # Depthwise (groups == channels) 5x5 "ghost" conv over the primary features.
        self.cv2 = Conv(hidden, hidden, 5, 1, None, hidden, act)

    def forward(self, x):
        """Concatenate primary features with their cheap ghost features."""
        primary = self.cv1(x)
        return torch.cat([primary, self.cv2(primary)], 1)
def accuracy(pr, gt, threshold=None, ignore_channels=None):
    """Pixel accuracy between prediction and ground truth tensors.

    Prediction is optionally thresholded and both tensors are filtered by
    ``ignore_channels`` before comparing element-wise.
    """
    pr = _threshold(pr, threshold=threshold)
    pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
    matching = torch.sum(gt == pr, dtype=pr.dtype) * 1.0
    total = gt.view(-1).shape[0]
    return matching / total
def _is_float(num: str) -> bool:
try:
float(num)
return True
except ValueError:
return False |
def plot_shelf_freqz(treble, fs):
    """Plot the magnitude response of a treble-shelf filter discretized with
    the bilinear transform.

    Args:
        treble: control position in [0, 1] splitting the pot between branches.
        fs: sample rate in Hz.
    """
    # Component values — presumably from the analog circuit being modeled;
    # TODO confirm against the schematic.
    Rpot = 10000.0
    C = 3.9e-09
    # Branch conductances; the pot value Rpot is split between G2 and G3
    # according to the treble position.
    G1 = (1.0 / 100000.0)
    G2 = (1.0 / (1800.0 + ((1 - treble) * Rpot)))
    G3 = (1.0 / (4700.0 + (treble * Rpot)))
    G4 = (1.0 / 100000.0)
    # First-order analog transfer function coefficients: (b0s*s + b1s) / (a0s*s + a1s).
    b0s = (C * (G1 + G2))
    b1s = (G1 * (G2 + G3))
    a0s = (C * (G3 - G4))
    a1s = ((- G4) * (G2 + G3))
    T = (1.0 / fs)
    # Bilinear transform with frequency pre-warping at wc = G1/C.
    wc = (G1 / C)
    c = (wc / np.tan(((wc * T) / 2.0)))
    aU = np.zeros(2)
    bU = np.zeros(2)
    # Normalize all digital coefficients by a0; note aU[0] = a0/a0 is always 1.0.
    a0 = ((a0s * c) + a1s)
    aU[0] = (a0 / a0)
    aU[1] = ((((- a0s) * c) + a1s) / a0)
    bU[0] = (((b0s * c) + b1s) / a0)
    bU[1] = ((((- b0s) * c) + b1s) / a0)
    # Re-normalized copies used only for the root-magnitude printout below —
    # NOTE(review): dividing by aU[1] here looks intentional but is unusual; confirm.
    a = np.array([1.0, (1.0 / aU[1])])
    b = np.array([(bU[0] / aU[1]), (bU[1] / aU[1])])
    # Print pole/zero magnitudes (stability / minimum-phase check by eye).
    print(np.abs(np.roots(a)))
    print(np.abs(np.roots(b)))
    # Log-spaced frequency response from 1 Hz to ~20 kHz.
    (w, h) = signal.freqz(bU, aU, worN=np.logspace(0, 4.3, 1000), fs=fs)
    plt.semilogx(w, (20 * np.log10(abs(h))), '--')
class Pix2PixHDGenerator(BaseNetwork):
    """Pix2PixHD-style global generator: reflection-padded stem, strided
    downsampling convs, a stack of residual blocks, and transposed-conv
    upsampling back to the input resolution, finishing with Tanh."""
    # NOTE(review): defined without `self` and without @staticmethod — callers
    # presumably invoke it on the class; confirm a decorator wasn't dropped.
    def modify_commandline_options(parser, is_train):
        """Register generator-specific command line options and defaults."""
        parser.add_argument('--resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
        parser.add_argument('--resnet_n_blocks', type=int, default=9, help='number of residual blocks in the global generator network')
        parser.add_argument('--resnet_kernel_size', type=int, default=3, help='kernel size of the resnet block')
        parser.add_argument('--resnet_initial_kernel_size', type=int, default=7, help='kernel size of the first convolution')
        parser.set_defaults(norm_G='instance')
        return parser
    def __init__(self, opt):
        super().__init__()
        # Input channels: label map + optional dontcare label + optional instance map.
        input_nc = ((opt.label_nc + (1 if opt.contain_dontcare_label else 0)) + (0 if opt.no_instance else 1))
        norm_layer = get_nonspade_norm_layer(opt, opt.norm_G)
        activation = nn.ReLU(False)
        model = []
        # Stem: reflection padding + large-kernel conv to ngf channels.
        model += [nn.ReflectionPad2d((opt.resnet_initial_kernel_size // 2)), norm_layer(nn.Conv2d(input_nc, opt.ngf, kernel_size=opt.resnet_initial_kernel_size, padding=0)), activation]
        mult = 1
        # Downsampling: each stride-2 conv doubles the channel multiplier.
        for i in range(opt.resnet_n_downsample):
            model += [norm_layer(nn.Conv2d((opt.ngf * mult), ((opt.ngf * mult) * 2), kernel_size=3, stride=2, padding=1)), activation]
            mult *= 2
        # Residual bottleneck at the lowest resolution.
        for i in range(opt.resnet_n_blocks):
            model += [ResnetBlock((opt.ngf * mult), norm_layer=norm_layer, activation=activation, kernel_size=opt.resnet_kernel_size)]
        # Upsampling: mirror of the downsampling path, halving channels each step.
        for i in range(opt.resnet_n_downsample):
            nc_in = int((opt.ngf * mult))
            nc_out = int(((opt.ngf * mult) / 2))
            model += [norm_layer(nn.ConvTranspose2d(nc_in, nc_out, kernel_size=3, stride=2, padding=1, output_padding=1)), activation]
            mult = (mult // 2)
        # Output head: 7x7 conv to output_nc channels, Tanh into [-1, 1].
        model += [nn.ReflectionPad2d(3), nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input, z=None):
        """Run the generator; z is accepted for interface parity but unused."""
        return self.model(input)
def shufflenet_v2_mpncov_x0_5(pretrained=False, progress=True, **kwargs):
    """Construct a ShuffleNetV2-MPNCOV model at the x0.5 width setting."""
    stage_repeats = [4, 8, 4]
    stage_out_channels = [24, 48, 96, 192, 1024]
    return _shufflenetv2_mpncov('shufflenetv2_mpncov_x0.5', pretrained, progress, stage_repeats, stage_out_channels, **kwargs)
def timm_rn101(**kwargs):
    """Factory for Myrn101; forwards any keyword overrides unchanged."""
    config = dict(kwargs)
    return Myrn101(**config)
class TestToArray(unittest.TestCase):
    """Tests the ToArray preprocessing transform for onnxrt and mxnet backends."""

    # Bug fix: this line was a bare tuple `(condition, message)` with no
    # effect — the intended `@unittest.skipIf` decorator was missing, so the
    # test ran (and failed) on Windows instead of being skipped.
    @unittest.skipIf(platform.system().lower() == 'windows', 'not support mxnet on windows yet')
    def testParse(self):
        """ToArray should turn a PIL image / mx ndarray into np.ndarray and reject lists."""
        random_array = (np.random.random_sample([10, 10, 3]) * 255)
        random_array = random_array.astype(np.uint8)
        img1 = Image.fromarray(random_array)
        onnx_transforms = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
        onnx_parse = onnx_transforms['ToArray']()
        (img, _) = onnx_parse((img1, None))
        self.assertTrue(isinstance(img, np.ndarray))
        mxnet_transforms = TRANSFORMS('mxnet', 'preprocess')
        mxnet_parse = mxnet_transforms['ToArray']()
        (img, _) = mxnet_parse((mx.nd.array(random_array), None))
        self.assertTrue(isinstance(img, np.ndarray))
        # Plain Python lists are not a supported input type.
        self.assertRaises(ValueError, mxnet_parse, ([1, 2], None))
_registry  # NOTE(review): bare expression with no effect — likely a mangled decorator (e.g. `@strategy_registry`) intended for the class below; confirm against upstream and restore.
class AutoMixedPrecisionTuneStrategy(TuneStrategy):
    """Tuning strategy for automatic mixed precision: converts supported ops
    to lower precisions (e.g. bf16) and progressively falls ops back to fp32
    when accuracy requires it."""

    def _initialize_config(self, conf):
        """Read the mixed_precision section of *conf*, filling missing fields with defaults."""
        config = conf.mixed_precision
        config.approach = getattr(config, 'approach', None)
        config.recipes = getattr(config, 'recipes', {})
        config.calibration_sampling_size = getattr(config, 'calibration_sampling_size', [0])
        config.op_type_dict = getattr(config, 'op_type_dict', None)
        config.op_name_dict = getattr(config, 'op_name_dict', None)
        config.quant_format = getattr(config, 'quant_format', '')
        config.domain = getattr(config, 'domain', None)
        config.reduce_range = getattr(config, 'reduce_range', None)
        config.example_inputs = getattr(config, 'example_inputs', None)
        config.quant_level = getattr(config, 'quant_level', 'auto')
        return config

    def next_tune_cfg(self):
        """Yield candidate tuning configs.

        First yields one config per target low precision with every supported
        op converted, then yields fallback configs (op-type-wise, then
        op-wise) that return ops to fp32.
        """
        from copy import deepcopy
        target_dtypes = self.config.precisions
        # fp32 is the fallback dtype, never a conversion target.
        target_dtypes = list((set(target_dtypes) - set(['fp32'])))
        tuning_space = self.tuning_space
        initial_op_tuning_cfg = {}
        for item in tuning_space.root_item.options:
            if (item.item_type == 'op'):
                (op_name, op_type) = item.name
                initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, 'fp32', tuning_space)
        if (not target_dtypes):
            target_dtypes = ['bf16']
        lower_precision_items_name = []
        op_tuning_cfg = {}
        for (idx, target_dtype) in enumerate(target_dtypes):
            lower_precision_items = tuning_space.query_items_by_quant_mode(target_dtype)
            # Skip dtypes that no op supports — unless this is the last dtype
            # and nothing has been converted yet.
            if ((len(lower_precision_items) == 0) and (not ((idx == (len(target_dtypes) - 1)) and (len(lower_precision_items_name) == 0)))):
                continue
            lower_precision_items_name = [item.name for item in lower_precision_items]
            op_tuning_cfg = deepcopy(initial_op_tuning_cfg)
            for op_name_type in lower_precision_items_name:
                op_tuning_cfg[op_name_type] = OpTuningConfig(op_name_type[0], op_name_type[1], target_dtype, tuning_space)
            calib_sampling_size = 1
            op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
            (yield op_tuning_cfg)
        target_dtype = 'fp32'
        # Fall back most-recently-converted ops first.
        fallback_items_name_lst = lower_precision_items_name[::(- 1)]
        if fallback_items_name_lst:
            logger.info('[Strategy] start fallback op into fp32.')
        initial_op_tuning_cfg = deepcopy(op_tuning_cfg)
        if (self.config.quant_level in ['auto', 0]):
            logger.info(f'[Strategy] fallback op into fp32 in op type wise, as quant level is {self.config.quant_level}')
            for op_tuning_cfg in self.fallback_in_op_type_wise(tuning_space, fallback_items_name_lst, deepcopy(initial_op_tuning_cfg), target_dtype):
                (yield op_tuning_cfg)
        if (self.config.quant_level in ['auto', 1]):
            logger.info(f'[Strategy] fallback op into fp32 in op wise, as quant level is {self.config.quant_level}')
            for op_tuning_cfg in self.fallback_in_op_wise(tuning_space, fallback_items_name_lst, deepcopy(initial_op_tuning_cfg), target_dtype):
                (yield op_tuning_cfg)

    def fallback_in_op_type_wise(self, tuning_space, fallback_items_name_lst, initial_op_tuning_cfg, target_dtype):
        """Yield configs that fall whole op types back to *target_dtype*, one type at a time."""
        # groupby requires input sorted by the grouping key (the op type).
        fallback_items_name_lst.sort(key=(lambda x: x[1]))
        op_type_groups = groupby(fallback_items_name_lst, key=(lambda x: x[1]))
        ops_dtypes = OrderedDict()
        for (op_type, op_lst) in op_type_groups:
            ops_dtypes[tuple(op_lst)] = target_dtype
        fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], initial_op_tuning_cfg=initial_op_tuning_cfg, op_dtypes=ops_dtypes, accumulate=False)
        op_fallback_acc_impact = OrderedDict()
        for (op_index, op_tuning_cfg) in enumerate(fallback_sampler):
            op_tuning_cfg['calib_sampling_size'] = (- 1)
            (yield op_tuning_cfg)
            # Record the accuracy observed after this fallback (set by the caller
            # between yields) so impact can be ranked later.
            (acc, _) = self.last_tune_result
            op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc

    def fallback_in_op_wise(self, tuning_space, fallback_items_name_lst, initial_op_tuning_cfg, target_dtype):
        """Yield configs falling ops back one at a time, then accumulate fallbacks
        in order of measured accuracy impact."""
        op_dtypes = OrderedDict(zip(fallback_items_name_lst, ([target_dtype] * len(fallback_items_name_lst))))
        fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], initial_op_tuning_cfg=initial_op_tuning_cfg, op_dtypes=op_dtypes, accumulate=False)
        op_fallback_acc_impact = OrderedDict()
        for (op_index, op_tuning_cfg) in enumerate(fallback_sampler):
            op_tuning_cfg['calib_sampling_size'] = (- 1)
            (yield op_tuning_cfg)
            (acc, _) = self.last_tune_result
            op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc
        if (len(op_fallback_acc_impact) > 0):
            ordered_ops = sorted(op_fallback_acc_impact.keys(), key=(lambda key: op_fallback_acc_impact[key]), reverse=self.higher_is_better)
            op_dtypes = OrderedDict(zip(ordered_ops, ([target_dtype] * len(fallback_items_name_lst))))
            # Bug fix: this log line was missing its f-prefix and printed the
            # literal text '{target_dtype}' instead of the dtype value.
            logger.info(f'Start to accumulate fallback to {target_dtype}.')
            initial_op_tuning_cfg = copy.deepcopy(op_tuning_cfg)
            fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], initial_op_tuning_cfg=initial_op_tuning_cfg, op_dtypes=op_dtypes, accumulate=True)
            for op_tuning_cfg in fallback_sampler:
                op_tuning_cfg['calib_sampling_size'] = (- 1)
                (yield op_tuning_cfg)

    def traverse(self):
        """Drive the tuning loop: generate configs, quantize, evaluate, and stop
        when the tuning criterion is met."""
        if (self.config.backend == 'ipex'):
            # IPEX has its own one-shot mixed-precision conversion path.
            self.best_qmodel = ipex_mixed_precision(self.model, self.config.example_inputs, self.config.device)
            if (self.eval_dataloader or self.eval_func):
                self._evaluate(self.best_qmodel)
            return
        self._prepare_tuning()
        for op_tuning_cfg in self.next_tune_cfg():
            tune_cfg = self._tune_cfg_converter(op_tuning_cfg)
            self.trials_count += 1
            # Reuse results from an identical, previously evaluated config.
            tuning_history = self._find_tuning_history(tune_cfg)
            if (tuning_history and (self.trials_count < self.config.tuning_criterion.max_trials)):
                self.last_tune_result = tuning_history['last_tune_result']
                self.best_tune_result = tuning_history['best_tune_result']
                logger.warn('Find evaluated tuning config, skip.')
                continue
            logger.debug('Dump current mixed precision configuration:')
            logger.debug(tune_cfg)
            self.last_qmodel = self.adaptor.quantize(tune_cfg, self.model, self.calib_dataloader, self.q_func)
            assert self.last_qmodel
            if self._not_tuning:
                # No evaluation requested: accept the first converted model.
                self.best_qmodel = self.last_qmodel
                self._add_tuning_history(copy.deepcopy(tune_cfg), ((- 1), [0]), q_config=self.last_qmodel.q_config)
                return
            self.last_tune_cfg = copy.deepcopy(tune_cfg)
            if (self.eval_dataloader or self.eval_func):
                q_config = copy.deepcopy(self.last_qmodel.q_config)
                self.last_tune_result = self._evaluate(self.last_qmodel)
                (self.cur_best_acc, self.cur_best_tuning_cfg) = self.update_best_op_tuning_cfg(op_tuning_cfg)
                need_stop = self.stop(self.config.tuning_criterion.timeout, self.trials_count)
                saved_tune_cfg = copy.deepcopy(tune_cfg)
                saved_last_tune_result = copy.deepcopy(self.last_tune_result)
                self._add_tuning_history(saved_tune_cfg, saved_last_tune_result, q_config=q_config)
            else:
                self.best_qmodel = self.last_qmodel
                need_stop = True
            if need_stop:
                break
def main():
    """Evaluate the best trained model per fold on UrbanSound8K and export
    model statistics and test results to the run's output directory."""
    args = parse(sys.argv[1:])
    out_dir = os.path.join(args.results_dir, args.run)
    common.ensure_directories(out_dir)
    urbansound8k.maybe_download_dataset(args.datasets_dir)
    data = urbansound8k.load_dataset()
    folds = urbansound8k.folds(data)
    exsettings = common.load_settings_path(args.settings_path)
    frames = exsettings['frames']
    voting = exsettings['voting']
    overlap = exsettings['voting_overlap']
    settings = features.settings(exsettings)

    def load_sample(sample):
        """Load one sample's features using the experiment settings."""
        return features.load_sample(sample, settings, start_time=sample.start, window_frames=frames, feature_dir=args.features_dir, normalize=exsettings['normalize'])

    def predict(model, data):
        """Predict with overlapping-window voting."""
        return features.predict_voted(exsettings, model, data, loader=load_sample, method=voting, overlap=overlap)

    history = load_train_history(args.models_dir, args.run)
    n_folds = len(history.fold.unique())
    n_experiments = len(history.experiment.unique())
    # Bug fix: the original passed the counts as extra print() arguments (so
    # the '{}' placeholders were never substituted) and in swapped order
    # relative to the message text.
    print(f'Found {n_experiments} experiments across {n_folds} folds')
    best = pick_best(history)
    print('Best models\n', best[['epoch', 'fold', 'voted_val_acc']])
    print('Computing model info')

    def get_stats(row):
        """Compute model statistics and dump per-layer info to a CSV file."""
        ex = row.iloc[0]
        model = ex['model_path']
        (model_stats, layer_info) = stats.model_info(model)
        layer_info_path = os.path.join(out_dir, '{}.layers.csv'.format(ex['experiment']))
        layer_info.to_csv(layer_info_path)
        return pandas.Series(model_stats)

    if (not args.skip_stats):
        model_stats = best.groupby(level='experiment').apply(get_stats)
        print('Model stats\n', model_stats)
        model_stats.to_csv(os.path.join(out_dir, 'stm32stats.csv'))
    print('Testing models...')
    results = evaluate(best, folds, predictor=predict, out_dir=out_dir, dry_run=args.check)
class Dumper():
    """Small helper to serialize a dict to an indented JSON file and read it back."""

    def __init__(self, dumping_path=None, dic=None):
        self.dumping_path = dumping_path
        # Bug fix: the original used `dic={}` — a mutable default shared by
        # every instance, so one Dumper's mutations leaked into all others.
        self.dic = {} if dic is None else dic

    def dump(self, dict_to_dump=None, dumping_path=None):
        """Write ``dict_to_dump`` (default: self.dic) as indented JSON.

        Args:
            dict_to_dump: dict to serialize; falls back to self.dic.
            dumping_path: target file; falls back to self.dumping_path.

        Raises:
            ValueError: when no dumping path is available from either source.
        """
        if (dumping_path is None):
            dumping_path = self.dumping_path
        if (dumping_path is None):
            raise ValueError('Where should I dump ? No dump_path provided')
        if (dict_to_dump is None):
            dict_to_dump = self.dic
        j = json.dumps(dict_to_dump, indent=4)
        with open(dumping_path, 'w') as f:
            print(j, file=f)

    def load(self, file_to_read, update=False):
        """Read a JSON file; optionally merge its contents into self.dic."""
        with open(file_to_read, 'r') as f:
            json_dict = json.load(f)
        if update:
            self.dic.update(json_dict)
        return json_dict
def add_panoptic_deeplab_config(cfg):
    """Add Panoptic-DeepLab default options on top of the DeepLab config."""
    add_deeplab_config(cfg)
    # --- Target generation / input options ---
    cfg.INPUT.GAUSSIAN_SIGMA = 10
    cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True
    cfg.INPUT.SMALL_INSTANCE_AREA = 4096
    cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3
    cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False
    cfg.SOLVER.OPTIMIZER = 'ADAM'
    # --- Semantic segmentation head ---
    cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256
    cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2
    # --- Instance embedding head (center + offset predictions) ---
    cfg.MODEL.INS_EMBED_HEAD = CN()
    cfg.MODEL.INS_EMBED_HEAD.NAME = 'PanopticDeepLabInsEmbedHead'
    cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ['res2', 'res3', 'res5']
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ['res2', 'res3']
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1
    cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32
    cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128
    cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4
    cfg.MODEL.INS_EMBED_HEAD.NORM = 'SyncBN'
    cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0
    cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01
    # --- Panoptic post-processing / fusion parameters ---
    cfg.MODEL.PANOPTIC_DEEPLAB = CN()
    cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048
    cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1
    cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7
    cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200
    cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True
    cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False
    cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY = (- 1)
    cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED = False
def get_non_dominated_solutions(solutions: List[Solution]) -> List[Solution]:
    """Filter ``solutions`` down to the Pareto-optimal (non-dominated) subset."""
    pareto_archive: Archive = NonDominatedSolutionsArchive()
    for candidate in solutions:
        # The archive drops dominated entries as candidates are added.
        pareto_archive.add(candidate)
    return pareto_archive.solution_list
def parallel(func, arr: Collection, max_workers: int=None, leave=False):
    """Apply ``func(item, index)`` to every element of ``arr``, optionally in
    a process pool, with a progress bar.

    Returns the result list only when at least one call produced a non-None
    value; otherwise returns None (matching the original behavior).
    """
    max_workers = ifnone(max_workers, defaults.cpus)
    if max_workers < 2:
        # Serial path: no pool overhead, but keep the progress bar.
        results = [func(item, idx) for idx, item in progress_bar(enumerate(arr), total=len(arr), leave=leave)]
    else:
        with ProcessPoolExecutor(max_workers=max_workers) as pool:
            pending = [pool.submit(func, item, idx) for idx, item in enumerate(arr)]
            results = [done.result() for done in progress_bar(concurrent.futures.as_completed(pending), total=len(arr), leave=leave)]
    if any(res is not None for res in results):
        return results
class DiscourseUnit():
    """An elementary discourse unit (EDU): a word span inside a sentence,
    plus its discourse dependency info, BERT word pieces, mentions and corefs."""

    def __init__(self, unq_idx, sent_idx, rel_start, rel_end):
        self.unq_idx = unq_idx  # global (document-level) index of this unit
        self.sent_idx = sent_idx  # index of the containing sentence
        self.original_start_in_sent = rel_start  # span start (word index in sentence)
        self.original_end_in_sent = rel_end  # span end (inclusive)
        self.raw_words = []
        self.bert_word_pieces = []
        self.mentions = []
        self.corefs = []

    def add_dep_info(self, tree_node):
        """Record discourse tree info; tree_node is (1-based id, type, relation, 1-based head)."""
        self.disco_id = (tree_node[0] - 1)
        assert (self.disco_id == self.unq_idx)
        self.disco_type = tree_node[1]
        self.disco_rel = tree_node[2]
        self.disco_dep = (tree_node[3] - 1)

    def add_dep(self, all_deps):
        """Collect 0-based dependency targets whose source is this unit."""
        avail_deps = [x for x in all_deps if ((x[0] - 1) == self.unq_idx)]
        self.dep = [(y[1] - 1) for y in avail_deps]

    def get_readable_words_as_list(self):
        """Return the unit's words as a list."""
        return self.raw_words

    def get_readable_words_as_str(self):
        """Return the unit's words joined into a single string."""
        return ' '.join(self.raw_words)

    def get_bpe_only(self):
        """Return only the BERT word pieces."""
        return self.bert_word_pieces

    def add_word(self, word_to_add, tokenizer=glob_bert_tokenizer):
        """Append a word and its BERT word pieces."""
        self.raw_words.append(word_to_add)
        self.bert_word_pieces += tokenizer.tokenize(word_to_add)

    def add_mention(self, word_index):
        """Register a mention at (sentence, word) position."""
        self.mentions.append('{}_{}'.format(self.sent_idx, word_index))

    def add_coref(self, coref_list):
        """Register a coref (sentence, word) target unless it is already a mention."""
        s = coref_list[0]
        w = coref_list[1]
        _t = '{}_{}'.format(s, w)
        if (_t not in self.mentions):
            self.corefs.append(_t)

    def get_original_length(self):
        """Number of raw words in the unit."""
        return len(self.raw_words)

    def get_bert_wp_length(self):
        """Number of BERT word pieces in the unit."""
        return len(self.bert_word_pieces)

    def get_original_location_sent(self):
        """Index of the sentence containing the unit."""
        return self.sent_idx

    def respond_broadcast(self, sent_idx, word_idx) -> bool:
        """Return True iff (sent_idx, word_idx) falls inside this unit's span.

        Simplified from the original redundant `if cond: return True else:
        return False` to a direct boolean expression (same truth table).
        """
        return ((self.sent_idx == sent_idx) and (self.original_start_in_sent <= word_idx <= self.original_end_in_sent))
def get_results(output_dir, split='eval'):
    """Load ``{split}_results.json`` from ``output_dir``.

    Raises:
        ValueError: when the results file does not exist.
    """
    results_path = os.path.join(output_dir, f'{split}_results.json')
    if not os.path.exists(results_path):
        raise ValueError(f"can't find {results_path}")
    with open(results_path, 'r') as f:
        return json.load(f)
def vis_keypoints(img, kps, alpha=1):
    """Draw keypoints as filled circles with rainbow colors, alpha-blended onto img."""
    cmap = plt.get_cmap('rainbow')
    rgba = [cmap(v) for v in np.linspace(0, 1, len(kps) + 2)]
    # Matplotlib gives RGBA in [0, 1]; convert to OpenCV's BGR in [0, 255].
    bgr_colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in rgba]
    overlay = np.copy(img)
    for idx, kp in enumerate(kps):
        center = (kp[0].astype(np.int32), kp[1].astype(np.int32))
        cv2.circle(overlay, center, radius=3, color=bgr_colors[idx], thickness=(- 1), lineType=cv2.LINE_AA)
    return cv2.addWeighted(img, 1.0 - alpha, overlay, alpha, 0)
class WindFieldClass(ABC):
    """Abstract base class for wind fields mapping (time, positions) -> velocities."""
    def __init__(self, np_random: (None | np.random.RandomState)=None):
        # Fall back to an unseeded RNG when none is supplied.
        self.np_random = (np.random.RandomState() if (np_random is None) else np_random)
    def __call__(self, time: float, position: np.ndarray) -> np.ndarray:
        # NOTE(review): annotated to return an ndarray but the body is `pass`
        # (returns None) — presumably subclasses override this; consider
        # marking it @abstractmethod.
        pass
    def _check_wind_field_validity(wind_field):
        # NOTE(review): defined without `self` — behaves like a static helper
        # taking a wind-field callable; confirm whether @staticmethod was intended.
        # Probe with 5 identical positions and validate type, dtype, and shape.
        test_velocity = wind_field(0.0, np.array(([[0.0, 0.0, 1.0]] * 5)))
        assert isinstance(test_velocity, np.ndarray), f'Returned wind velocity must be a np.ndarray, got {type(test_velocity)}.'
        assert np.issubdtype(test_velocity.dtype, np.floating), f'Returned wind velocity must be type float, got {test_velocity.dtype}.'
        assert (test_velocity.shape == (5, 3)), f'Returned wind velocity must be array of shape (n, 3), got (n+({(test_velocity.shape[0] - 5)}), {test_velocity.shape[1:]}).'
class StratifiedBootstrap(BaseShuffleSplit):
    """Bootstrap cross-validator that resamples within each class label,
    preserving class proportions in every split."""

    def __init__(self, n_splits: int=5, test_size: float=0.5, train_size: Optional[float]=None, random_state: Optional[Union[(int, RandomState)]]=None):
        super().__init__(n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state)

    def _iter_indices(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """Yield (train, test) index lists, bootstrapping each class independently.

        Bug fix: the original sampled from the GLOBAL numpy RNG, so the
        ``random_state`` accepted by the constructor was silently ignored and
        splits were irreproducible. Sampling now honors ``self.random_state``.
        """
        rng = self.random_state if isinstance(self.random_state, RandomState) else RandomState(self.random_state)
        y_labels = np.unique(y)
        y_inds = [np.where((y == t_y))[0] for t_y in y_labels]
        n_samples = [_validate_shuffle_split(len(t_inds), self.test_size, self.train_size, default_test_size=self._default_test_size) for t_inds in y_inds]
        for _ in range(self.n_splits):
            train = []
            test = []
            for (t_inds, (n_train, _)) in zip(y_inds, n_samples):
                # Sample with replacement: the first n_train draws form the
                # train indices for this class, the remainder the test indices.
                bs_inds = rng.choice(t_inds, len(t_inds), replace=True)
                train.extend(bs_inds[:n_train])
                test.extend(bs_inds[n_train:])
            (yield (train, test))

    def split(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """See BaseShuffleSplit.split; ``y`` is required for stratification."""
        return super().split(X, y, groups)
def _logssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=(window_size // 2), groups=channel)
mu2 = F.conv2d(img2, window, padding=(window_size // 2), groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = (mu1 * mu2)
sigma1_sq = (F.conv2d((img1 * img1), window, padding=(window_size // 2), groups=channel) - mu1_sq)
sigma2_sq = (F.conv2d((img2 * img2), window, padding=(window_size // 2), groups=channel) - mu2_sq)
sigma12 = (F.conv2d((img1 * img2), window, padding=(window_size // 2), groups=channel) - mu1_mu2)
C1 = (0.01 ** 2)
C2 = (0.03 ** 2)
ssim_map = ((((2 * mu1_mu2) + C1) * ((2 * sigma12) + C2)) / (((mu1_sq + mu2_sq) + C1) * ((sigma1_sq + sigma2_sq) + C2)))
ssim_map = ((ssim_map - torch.min(ssim_map)) / (torch.max(ssim_map) - torch.min(ssim_map)))
ssim_map = (- torch.log((ssim_map + 1e-08)))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1) |
def return_sets(list_of_neighbors, k, stats, u_rels):
    """Group neighbors by relation type, sample up to k (end, start) pairs per
    type, and accumulate counts into ``stats`` and pairs into ``u_rels``.

    Both ``stats`` and ``u_rels`` are mutated in place.
    """
    grouped = defaultdict(set)
    for neighbor in list_of_neighbors:
        grouped[neighbor['rts']].add((neighbor['e'], neighbor['s']))
    for rel_type, pair_set in grouped.items():
        shuffled = list(pair_set)
        # Shuffle so the k retained pairs are a random sample.
        np.random.shuffle(shuffled)
        for end, start in shuffled[:k]:
            stats[end][rel_type] += 1
            u_rels[end].append((rel_type, start))
class TestTransaction(unittest.TestCase):
    """Unit tests for Transaction and UniqueTransaction."""

    def test_init(self):
        """Smoke test: construction from a row and header must not raise."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        transaction1 = Transaction(row1, header1, ('Class', 0))
        transaction2 = UniqueTransaction(row1, header1, ('Class', 0))

    def test_getclass(self):
        """getclass must return the class tuple passed at construction."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        transaction1 = Transaction(row1, header1, ('Class', 0))
        assert (transaction1.getclass() == ('Class', 0))

    def test_unique_hash(self):
        """A UniqueTransaction's hash must be its tid's hash."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        transaction2 = UniqueTransaction(row1, header1, ('Class', 0))
        # Bug fix: the original line was a bare comparison expression with no
        # `assert`, so this test could never fail.
        assert (hash(transaction2) == hash(transaction2.tid))

    def test_getitem(self):
        """Indexing must return (header, value) Items."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        transaction1 = Transaction(row1, header1, ('Class', 0))
        assert (transaction1[0] == Item('A', 1))
        assert (transaction1[1] == Item('B', 1))
        assert (transaction1[2] == Item('C', 0))

    def test_hash(self):
        """Equal content implies equal transactions; differing content differs."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        row2 = [1, 1, 0]
        header2 = ['A', 'B', 'C']
        row3 = [1, 1, 1]
        header3 = 'cde'
        transaction1 = Transaction(row1, header1, ('Class', 0))
        transaction2 = Transaction(row2, header2, ('Class', 0))
        transaction3 = Transaction(row3, header3, ('Class', 2))
        assert (transaction1 == transaction2)
        assert (transaction1 != transaction3)
        assert (transaction2 != transaction3)

    def test_string_items(self):
        """string_items must include the class label rendered like the items."""
        row1 = [1, 1, 0]
        header1 = ['A', 'B', 'C']
        transaction1 = Transaction(row1, header1, ('Y', 0))
        assert (transaction1.string_items == ['A:=:1', 'B:=:1', 'C:=:0', 'Y:=:0'])
class TransformerDecoderLayer(nn.Module):
    """Fairseq-style Transformer decoder layer.

    Composed of self-attention, optional encoder-decoder cross-attention,
    and a position-wise feed-forward block; each sub-block is wrapped in a
    residual connection plus LayerNorm.  ``args.decoder_normalize_before``
    selects pre-norm vs post-norm placement.
    """

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        # Quantization-noise settings (PQ) used when building linear layers.
        self.quant_noise = getattr(args, 'quant_noise_pq', 0)
        self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8)
        self.cross_self_attention = getattr(args, 'cross_self_attention', False)
        self.self_attn = self.build_self_attention(self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn)
        self.activation_fn = utils.get_activation_fn(activation=(str(args.activation_fn) if (getattr(args, 'activation_fn', None) is not None) else 'relu'))
        # Fall back to the legacy option name 'relu_dropout' when
        # 'activation_dropout' is unset or zero.
        activation_dropout_p = (getattr(args, 'activation_dropout', 0) or 0)
        if (activation_dropout_p == 0):
            activation_dropout_p = (getattr(args, 'relu_dropout', 0) or 0)
        self.activation_dropout_module = FairseqDropout(float(activation_dropout_p), module_name=self.__class__.__name__)
        self.normalize_before = args.decoder_normalize_before
        # 'char_inputs' historically selects the ONNX-exportable LayerNorm.
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            # Decoder-only configuration: no cross-attention sub-block.
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = self.build_fc1(self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.fc2 = self.build_fc2(args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        """First FFN projection, optionally wrapped in quantization noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        """Second FFN projection, optionally wrapped in quantization noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
        """Decoder self-attention; disabled self_attention flag when using cross-self-attention."""
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not getattr(args, 'cross_self_attention', False)), q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size)

    def build_encoder_attention(self, embed_dim, args):
        """Encoder-decoder attention; key/value dims follow the encoder's embed dim."""
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size)

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def residual_connection(self, x, residual):
        return (residual + x)

    def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False):
        """Run one decoder layer.

        Returns (x, attn, self_attn_state); self_attn_state is only
        populated when tracing for ONNX with an incremental state.
        """
        # Per-head weights imply attention weights are needed at all.
        if need_head_weights:
            need_attn = True
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # Restore cached self-attention keys/values from a previous step.
        if (prev_self_attn_state is not None):
            (prev_key, prev_value) = prev_self_attn_state[:2]
            saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
            if (len(prev_self_attn_state) >= 3):
                saved_state['prev_key_padding_mask'] = prev_self_attn_state[2]
            assert (incremental_state is not None)
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
        # Cross-self-attention: prepend encoder states to the self-attention
        # keys/values (only on the first step, before a key cache exists).
        if (self.cross_self_attention and (not ((incremental_state is not None) and (_self_attn_input_buffer is not None) and ('prev_key' in _self_attn_input_buffer)))):
            if (self_attn_mask is not None):
                assert (encoder_out is not None)
                self_attn_mask = torch.cat((x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1)
            if (self_attn_padding_mask is not None):
                if (encoder_padding_mask is None):
                    assert (encoder_out is not None)
                    encoder_padding_mask = self_attn_padding_mask.new_zeros(encoder_out.size(1), encoder_out.size(0))
                self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1)
            assert (encoder_out is not None)
            y = torch.cat((encoder_out, x), dim=0)
        else:
            y = x
        (x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if (not self.normalize_before):
            x = self.self_attn_layer_norm(x)
        # Encoder-decoder cross-attention sub-block.
        if ((self.encoder_attn is not None) and (encoder_out is not None)):
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            # Restore cached cross-attention keys/values from a previous step.
            if (prev_attn_state is not None):
                (prev_key, prev_value) = prev_attn_state[:2]
                saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
                if (len(prev_attn_state) >= 3):
                    saved_state['prev_key_padding_mask'] = prev_attn_state[2]
                assert (incremental_state is not None)
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            (x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights)
            x = self.dropout_module(x)
            x = self.residual_connection(x, residual)
            if (not self.normalize_before):
                x = self.encoder_attn_layer_norm(x)
        # Position-wise feed-forward sub-block.
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if (not self.normalize_before):
            x = self.final_layer_norm(x)
        # For ONNX tracing, surface the self-attention cache explicitly.
        if (self.onnx_trace and (incremental_state is not None)):
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            assert (saved_state is not None)
            if (self_attn_padding_mask is not None):
                self_attn_state = [saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask']]
            else:
                self_attn_state = [saved_state['prev_key'], saved_state['prev_value']]
            return (x, attn, self_attn_state)
        return (x, attn, None)

    def make_generation_fast_(self, need_attn: bool=False, **kwargs):
        self.need_attn = need_attn
def load_images(image_paths, image_size, image_names):
    """Load images from disk, resize them and scale pixel values to [0, 1].

    Images that fail to load are logged (with traceback) and skipped.

    Returns:
        Tuple of (np.ndarray of loaded images, list of the corresponding
        entries from `image_names`).
    """
    images = []
    kept_names = []
    for idx, path in enumerate(image_paths):
        try:
            img = load_img(path, target_size=image_size)
            arr = keras.preprocessing.image.img_to_array(img)
            arr /= 255
            images.append(arr)
            kept_names.append(image_names[idx])
        except Exception as ex:
            # Best-effort loading: record the failure and continue.
            logging.exception('Error reading {} {}'.format(path, ex), exc_info=True)
    return (np.asarray(images), kept_names)
class StochasticDepth(layers.Layer):
    """Stochastic depth (DropPath) layer.

    During training, each sample's branch output is dropped with probability
    `drop_path_rate`; survivors are rescaled by 1/keep_prob so the expected
    value is unchanged.  At inference the input passes through untouched.
    """

    def __init__(self, drop_path_rate, **kwargs):
        super().__init__(**kwargs)
        # Probability of zeroing a sample during training.
        self.drop_path_rate = drop_path_rate

    def call(self, x, training=None):
        if training:
            keep_prob = (1 - self.drop_path_rate)
            # Mask shape (batch, 1, 1, ...): one Bernoulli draw per sample,
            # broadcast over the remaining axes.
            # NOTE(review): len(tf.shape(x)) derives the rank from the shape
            # tensor's static length -- confirm this holds under graph tracing
            # for the intended inputs.
            shape = ((tf.shape(x)[0],) + ((1,) * (len(tf.shape(x)) - 1)))
            random_tensor = (keep_prob + tf.random.uniform(shape, 0, 1))
            # floor() yields 1 with probability keep_prob, else 0.
            random_tensor = tf.floor(random_tensor)
            return ((x / keep_prob) * random_tensor)
        return x

    def get_config(self):
        """Serialize the layer config so models using it can be saved/restored."""
        config = super().get_config()
        config.update({'drop_path_rate': self.drop_path_rate})
        return config
def reverse(tensor):
    """Return a new tensor with the leading dimension of `tensor` reversed."""
    order = list(range(tensor.size(0) - 1, -1, -1))
    return tensor[order]
# NOTE(review): the line below looks like a decorator invocation that lost its
# leading '@' and callee name during extraction (it carries provider metadata:
# name, emoji, models, rate limit, settings schema).  As written it is a bare
# keyword-argument expression and a syntax error -- confirm against the
# original source before running.
(name='Cohere', emoji='', models=['command', 'command-nightly', 'command-light', 'command-light-nightly'], rate_limit='sequential', settings_schema=COHERE_SETTINGS_SCHEMA)
def CohereCompletion(prompt: str, model: str, temperature: float=0.75, **kwargs) -> str:
    """Generate a completion for `prompt` with the given Cohere `model`.

    Returns the text of the first generation.  Relies on a module-level
    Cohere client `co`.
    """
    print(f"Calling Cohere model {model} with prompt '{prompt}'...")
    response = co.generate(model=model, prompt=prompt, temperature=temperature, **kwargs)
    return response.generations[0].text
class BarlowTwins(DCCA):
    """Barlow Twins objective on top of DCCA encoders.

    Each view is encoded and batch-normalized (affine=False), then the loss
    drives the cross-correlation matrix of the two embeddings towards the
    identity: diagonal terms to 1 (invariance) and off-diagonal terms to 0
    (redundancy reduction), weighted by `lamb`.
    """

    def __init__(self, *args, lamb=0.005, **kwargs):
        super().__init__(*args, **kwargs)
        # Weight of the off-diagonal (redundancy reduction) term.
        self.lamb = lamb
        self.bns = torch.nn.ModuleList([torch.nn.BatchNorm1d(self.latent_dimensions, affine=False) for _ in self.encoders])

    def forward(self, views, **kwargs):
        """Encode and normalize each view; returns a list of embeddings."""
        z = []
        for (i, (encoder, bn)) in enumerate(zip(self.encoders, self.bns)):
            z.append(bn(encoder(views[i])))
        return z

    def loss(self, batch, **kwargs):
        """Compute the Barlow Twins loss for a batch of two views."""
        z = self(batch['views'])
        # Bug fix: the cross-correlation is the matrix product z1^T z2
        # (normalized by batch size); the original was missing the '@'.
        cross_cov = ((z[0].T @ z[1]) / z[0].shape[0])
        invariance = torch.sum(torch.pow((1 - torch.diag(cross_cov)), 2))
        covariance = (torch.sum(torch.triu(torch.pow(cross_cov, 2), diagonal=1)) + torch.sum(torch.tril(torch.pow(cross_cov, 2), diagonal=(- 1))))
        return {'objective': (invariance + (self.lamb * covariance)), 'invariance': invariance, 'covariance': covariance}
def make_sub_graph(node, inits, input_data, output_data, reduce_range, opset, ir_version):
    """Wrap a single ONNX `node` in a minimal one-node model.

    Builds value infos from the sample `input_data`/`output_data` arrays
    (using the module-level `dtype_map`), assembles a graph with the given
    initializers, and stamps the requested opset imports and IR version.

    `reduce_range` is accepted for signature compatibility but unused here.
    """
    from onnx import helper

    # Renamed locals so the builtin `input` is not shadowed.
    input_vi = helper.make_tensor_value_info(node.input[0], dtype_map[input_data.dtype], input_data.shape)
    output_vi = helper.make_tensor_value_info(node.output[0], dtype_map[output_data.dtype], output_data.shape)
    graph = helper.make_graph([node], 'sub_graph', [input_vi], [output_vi], inits)
    model = helper.make_model(graph, opset_imports=opset)
    model.ir_version = ir_version
    return model
def _shuffle_split(path, output_path, dataset_info, split, seed):
    """Globally shuffle one dataset split and repack it into new tfrecord shards.

    Reads the split-level index (one "shard seq_len" line per record),
    shuffles it with `seed`, assigns records to output shards bounded by the
    split's max images/sequences per shard, copies the raw record bytes into
    the new shards, and finally writes the (possibly updated) dataset_info
    to `output_path`/info.json.
    """
    assert os.path.exists(os.path.join(path, f"{dataset_info['name']}-{split}.index"))
    _cache()

    def _get_shard_index(idx):
        # Per-shard byte index [(start_offset, record_len), ...]; built
        # lazily from the tfrecord file if the .index file is missing.
        index_file = os.path.join(path, f"{dataset_info['name']}-{split}-{idx:06d}-of-{dataset_info[f'{split}_size']:06d}.index")
        tfrecord_file = os.path.join(path, f"{dataset_info['name']}-{split}-{idx:06d}-of-{dataset_info[f'{split}_size']:06d}.tfrecord")
        if (not os.path.exists(index_file)):
            build_shard_index(tfrecord_file, index_file)
        return [tuple(map(int, x.rstrip('\n').split(' '))) for x in open(index_file, 'r')]

    # Build rows (global_id, input_shard_id, seq_len, local_id_within_shard).
    # NOTE(review): the comprehension variable `split` below shadows the
    # function parameter inside the comprehension scope only (Python 3).
    index = (x.rstrip('\n').split(' ') for x in open(os.path.join(path, f"{dataset_info['name']}-{split}.index"), 'r'))
    index = ((i, int(shard.lstrip('0')), int(seq_len)) for (i, (shard, seq_len)) in enumerate(index))
    index = (list(x) for (_, x) in groupby(index, key=(lambda x: x[1])))
    index = [(x + (i,)) for split in index for (i, x) in enumerate(split)]
    os.makedirs(output_path, exist_ok=True)
    rng = Random(seed)
    rng.shuffle(index)
    max_images_per_shard = dataset_info[f'{split}_max_images_per_shard']
    max_sequences_per_shard = dataset_info[f'{split}_max_sequences_per_shard']
    # Assign each shuffled record a 1-based output shard id, rolling over to
    # a new shard once either capacity limit is reached.
    output_index = []
    current_shard_id = 0
    current_shard_len = 0
    current_shard_seq = 0
    for (_, _, seq_len, shard_local_id) in index:
        output_index.append(((current_shard_id + 1), seq_len))
        current_shard_len += seq_len
        current_shard_seq += 1
        if (((max_sequences_per_shard is not None) and (current_shard_seq >= max_sequences_per_shard)) or ((max_images_per_shard is not None) and (current_shard_len >= max_images_per_shard))):
            current_shard_id += 1
            current_shard_len = 0
            current_shard_seq = 0
    output_stream = tqdm(zip(output_index, index), desc=f'shuffling {split}', total=len(index))
    output_offset = 0
    # Group consecutive records by their assigned output shard and copy the
    # raw bytes from the input shards.
    for (shard_id, sequences) in groupby(output_stream, key=(lambda x: x[0][0])):
        tfrecord_file = os.path.join(output_path, f"{dataset_info['name']}-{split}-{shard_id:06d}-of-{dataset_info[f'{split}_size']:06d}.tfrecord")
        index_file = os.path.join(output_path, f"{dataset_info['name']}-{split}-{shard_id:06d}-of-{dataset_info[f'{split}_size']:06d}.index")
        with open(tfrecord_file, 'wb+') as tf_f, open(index_file, 'w+') as index_f:
            for ((_, seq_len), (_, i_shard_id, _, i_shard_local_id)) in sequences:
                # NOTE(review): two lines are written to the index per record
                # ("shard_id seq_len" here and "offset length" below) --
                # confirm the reader expects this interleaved format.
                index_f.write(f'''{shard_id:06d} {seq_len}
''')
                with open(os.path.join(path, f"{dataset_info['name']}-{split}-{i_shard_id:06d}-of-{dataset_info[f'{split}_size']:06d}.tfrecord"), 'rb') as input_f:
                    (start, record_len) = _get_shard_index(i_shard_id)[i_shard_local_id]
                    input_f.seek(start)
                    record_bytes = input_f.read(record_len)
                    index_f.write(f'''{output_offset} {record_len}
''')
                    tf_f.write(record_bytes)
                    output_offset += record_len
    # If repacking changed the number of shards, record the new count.
    if (shard_id != dataset_info[f'{split}_size']):
        dataset_info[f'{split}_size'] = shard_id
    with open(os.path.join(output_path, 'info.json'), 'w') as info_f:
        json.dump(dataset_info, info_f)
class PickleWidget():
    """Imgui widget for selecting and loading a network pickle.

    Tracks the current/typed pickle path, a recent-files list, and a cached
    directory-browse tree; loading is delegated to the owning `viz` object's
    renderer via `viz.args.pkl`.
    """

    def __init__(self, viz):
        self.viz = viz
        self.search_dirs = []        # roots scanned by the Browse... popup
        self.cur_pkl = None          # last successfully resolved pickle path
        self.user_pkl = ''           # text currently in the input box
        self.recent_pkls = []        # most-recent-first list of resolved paths
        self.browse_cache = dict()   # dir-listing cache keyed by parent tuple
        self.browse_refocus = False
        self.load('', ignore_errors=True)

    def add_recent(self, pkl, ignore_errors=False):
        """Resolve `pkl` and append it to the recents list if new."""
        try:
            resolved = self.resolve_pkl(pkl)
            if (resolved not in self.recent_pkls):
                self.recent_pkls.append(resolved)
        except:
            if (not ignore_errors):
                raise

    def load(self, pkl, ignore_errors=False):
        """Resolve `pkl`, make it current, and move it to the top of recents.

        On failure the current pickle is cleared and the error (or an empty
        message for '') is surfaced through `viz.result`.
        """
        viz = self.viz
        viz.clear_result()
        viz.skip_frame()
        try:
            resolved = self.resolve_pkl(pkl)
            name = resolved.replace('\\', '/').split('/')[(- 1)]
            self.cur_pkl = resolved
            self.user_pkl = resolved
            viz.result.message = f'Loading {name}...'
            viz.defer_rendering()
            # Move (or insert) the resolved path to the front of recents.
            if (resolved in self.recent_pkls):
                self.recent_pkls.remove(resolved)
            self.recent_pkls.insert(0, resolved)
        except:
            self.cur_pkl = None
            self.user_pkl = pkl
            if (pkl == ''):
                viz.result = dnnlib.EasyDict(message='No network pickle loaded')
            else:
                viz.result = dnnlib.EasyDict(error=renderer.CapturedException())
            if (not ignore_errors):
                raise

    # NOTE(review): the line below appears to be a decorator that lost its
    # leading '@' during extraction ('@_utils.scoped_by_object_id' in the
    # original viz code) -- confirm; as written it is a no-op expression.
    _utils.scoped_by_object_id
    def __call__(self, show=True):
        """Draw the widget for one frame and publish the current pickle."""
        viz = self.viz
        recent_pkls = [pkl for pkl in self.recent_pkls if (pkl != self.user_pkl)]
        if show:
            imgui.text('Pickle')
            imgui.same_line(viz.label_w)
            (changed, self.user_pkl) = imgui_utils.input_text('##pkl', self.user_pkl, 1024, flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), width=(((- 1) - (viz.button_w * 2)) - (viz.spacing * 2)), help_text='<PATH> | <URL> | <RUN_DIR> | <RUN_ID> | <RUN_ID>/<KIMG>.pkl')
            if changed:
                self.load(self.user_pkl, ignore_errors=True)
            if (imgui.is_item_hovered() and (not imgui.is_item_active()) and (self.user_pkl != '')):
                imgui.set_tooltip(self.user_pkl)
            imgui.same_line()
            if imgui_utils.button('Recent...', width=viz.button_w, enabled=(len(recent_pkls) != 0)):
                imgui.open_popup('recent_pkls_popup')
            imgui.same_line()
            if imgui_utils.button('Browse...', enabled=(len(self.search_dirs) > 0), width=(- 1)):
                imgui.open_popup('browse_pkls_popup')
                # Invalidate the directory cache and refocus the popup.
                self.browse_cache.clear()
                self.browse_refocus = True
        if imgui.begin_popup('recent_pkls_popup'):
            for pkl in recent_pkls:
                (clicked, _state) = imgui.menu_item(pkl)
                if clicked:
                    self.load(pkl, ignore_errors=True)
            imgui.end_popup()
        if imgui.begin_popup('browse_pkls_popup'):
            # Recursive menu over run directories and their pickles.
            def recurse(parents):
                key = tuple(parents)
                items = self.browse_cache.get(key, None)
                if (items is None):
                    items = self.list_runs_and_pkls(parents)
                    self.browse_cache[key] = items
                for item in items:
                    if ((item.type == 'run') and imgui.begin_menu(item.name)):
                        recurse([item.path])
                        imgui.end_menu()
                    if (item.type == 'pkl'):
                        (clicked, _state) = imgui.menu_item(item.name)
                        if clicked:
                            self.load(item.path, ignore_errors=True)
                if (len(items) == 0):
                    with imgui_utils.grayed_out():
                        imgui.menu_item('No results found')
            recurse(self.search_dirs)
            if self.browse_refocus:
                imgui.set_scroll_here()
                viz.skip_frame()
                self.browse_refocus = False
            imgui.end_popup()
        # Drag-and-drop: load the first dropped path.
        paths = viz.pop_drag_and_drop_paths()
        if ((paths is not None) and (len(paths) >= 1)):
            self.load(paths[0], ignore_errors=True)
        viz.args.pkl = self.cur_pkl

    def list_runs_and_pkls(self, parents):
        """List run directories (\\d+-*) and network-snapshot pickles under `parents`."""
        items = []
        run_regex = re.compile('\\d+-.*')
        pkl_regex = re.compile('network-snapshot-\\d+\\.pkl')
        for parent in set(parents):
            if os.path.isdir(parent):
                for entry in os.scandir(parent):
                    if (entry.is_dir() and run_regex.fullmatch(entry.name)):
                        items.append(dnnlib.EasyDict(type='run', name=entry.name, path=os.path.join(parent, entry.name)))
                    if (entry.is_file() and pkl_regex.fullmatch(entry.name)):
                        items.append(dnnlib.EasyDict(type='pkl', name=entry.name, path=os.path.join(parent, entry.name)))
        items = sorted(items, key=(lambda item: (item.name.replace('_', ' '), item.path)))
        return items

    def resolve_pkl(self, pattern):
        """Resolve a URL / path / run-dir / run-id `pattern` to a concrete pickle path.

        For a run directory, the latest network-snapshot pickle is chosen.
        Raises IOError when a directory contains no pickles.
        """
        assert isinstance(pattern, str)
        assert (pattern != '')
        if dnnlib.util.is_url(pattern):
            return pattern
        path = _locate_results(pattern)
        if os.path.isdir(path):
            pkl_files = sorted(glob.glob(os.path.join(path, 'network-snapshot-*.pkl')))
            if (len(pkl_files) == 0):
                raise IOError(f'No network pickle found in "{path}"')
            path = pkl_files[(- 1)]
        path = os.path.abspath(path)
        return path
def unwrap_(*args: Any) -> Any:
    """Map each wrapped Tensor in *args to its raw value; pass others through unchanged."""
    unwrapped = []
    for item in args:
        unwrapped.append(item.raw if isinstance(item, Tensor) else item)
    return tuple(unwrapped)
class LockedDropout(nn.Module):
    """Variational ("locked") dropout: one mask per batch, shared across the
    first (e.g. time) dimension, rescaled by 1/keep_prob."""

    def __init__(self, dropout=None):
        super().__init__()
        # Drop probability; None or 0 disables dropout entirely.
        self.dropout = dropout

    def forward(self, x):
        # Identity in eval mode or when no dropout rate is configured.
        if not self.training or not self.dropout:
            return x
        keep = 1 - self.dropout
        # Sample one Bernoulli mask of shape (1, *x.shape[1:]) and broadcast
        # it over the first dimension so every step sees the same mask.
        noise = x.data.new(1, *x.size()[1:]).bernoulli_(keep)
        mask = Variable(noise, requires_grad=False) / keep
        return mask.expand_as(x) * x
def main():
    """Launch one training process per local worker and wait for all of them.

    Sets the usual torch.distributed rendezvous environment variables
    (MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK, LOCAL_RANK) for each child
    and raises CalledProcessError if any child exits non-zero.
    """
    args = parse_args()
    world_size = args.nproc_per_node * args.nnodes

    env = os.environ.copy()
    env['MASTER_ADDR'] = args.master_addr
    env['MASTER_PORT'] = str(args.master_port)
    env['WORLD_SIZE'] = str(world_size)

    children = []
    for local_rank in range(args.nproc_per_node):
        # Global rank = node offset + local rank on this node.
        global_rank = args.nproc_per_node * args.node_rank + local_rank
        env['RANK'] = str(global_rank)
        env['LOCAL_RANK'] = str(local_rank)
        cmd = [args.training_script] + args.training_script_args
        children.append(subprocess.Popen(cmd, env=env))

    for child in children:
        child.wait()
        if child.returncode != 0:
            raise subprocess.CalledProcessError(returncode=child.returncode, cmd=child.args)
def get_parser():
    """Build the command-line argument parser for GRA Transformer train/test runs.

    Returns an argparse.ArgumentParser; most options are normally overridden
    by the YAML file given via --config.
    """
    parser = argparse.ArgumentParser(description='GRA Transformer')
    parser.add_argument('--work-dir', default='./work_dir/temp', help='the work folder for storing results')
    # NOTE(review): single-dash long option is unconventional for argparse --
    # kept as-is because callers/configs may rely on the exact flag.
    parser.add_argument('-model_saved_name', default='')
    parser.add_argument('--config', default='./config/nturgbd-cross-view/test_bone.yaml', help='path to the configuration file')
    parser.add_argument('--phase', default='train', help='must be train or test')
    parser.add_argument('--save-score', type=str2bool, default=False, help='if ture, the classification score will be stored')
    # NOTE(review): type=list splits a CLI string into individual characters;
    # presumably this option is only ever set through the YAML config -- confirm.
    parser.add_argument('--joint-label', type=list, default=[], help='tells which group each joint belongs to')
    parser.add_argument('--seed', type=int, default=2, help='random seed for pytorch')
    parser.add_argument('--log-interval', type=int, default=100, help='the interval for printing messages (#iteration)')
    parser.add_argument('--save-interval', type=int, default=1, help='the interval for storing models (#iteration)')
    parser.add_argument('--save-epoch', type=int, default=10, help='the start epoch to save model (#iteration)')
    parser.add_argument('--eval-interval', type=int, default=5, help='the interval for evaluating models (#iteration)')
    parser.add_argument('--ema', action='store_true', default=False, help='ema weight for eval')
    parser.add_argument('--lambda_1', type=float, default=0.0001)
    parser.add_argument('--lambda_2', type=float, default=0.1)
    parser.add_argument('--print-log', type=str2bool, default=True, help='print logging or not')
    parser.add_argument('--show-topk', type=int, default=[1, 5], nargs='+', help='which Top K accuracy will be shown')
    # Data loading.
    parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used')
    parser.add_argument('--num-worker', type=int, default=48, help='the number of worker for data loader')
    parser.add_argument('--train-feeder-args', action=DictAction, default=dict(), help='the arguments of data loader for training')
    parser.add_argument('--test-feeder-args', action=DictAction, default=dict(), help='the arguments of data loader for test')
    # Model.
    parser.add_argument('--model', default=None, help='the model will be used')
    parser.add_argument('--model-args', action=DictAction, default=dict(), help='the arguments of model')
    parser.add_argument('--weights', default=None, help='the weights for network initialization')
    parser.add_argument('--ignore-weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
    # Optimization.
    parser.add_argument('--base-lr', type=float, default=0.025, help='initial learning rate')
    parser.add_argument('--step', type=int, default=[110, 120], nargs='+', help='the epoch where optimizer reduce the learning rate')
    parser.add_argument('--device', type=int, default=[0, 1], nargs='+', help='the indexes of GPUs for training or testing')
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not')
    parser.add_argument('--momentum', type=float, default=0.9, help='nesterov momentum')
    parser.add_argument('--batch-size', type=int, default=256, help='training batch size')
    parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size')
    parser.add_argument('--start-epoch', type=int, default=0, help='start training from which epoch')
    parser.add_argument('--num-epoch', type=int, default=80, help='stop training in which epoch')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay for optimizer')
    parser.add_argument('--lr-decay-rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--warm_up_epoch', type=int, default=0)
    return parser
def test_summarize(model, X):
    """summarize(X) must accumulate the expected sufficient statistics."""
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X)
    assert_array_almost_equal(model._xw_sum, [0., 1.895243, 2.635099, 3.469392], 4)
    assert_array_almost_equal(model._xw_starts_sum, [0.136405, 1.863595], 4)
    assert_array_almost_equal(model._xw_ends_sum, [0.876264, 1.123736], 4)
    # Per-distribution accumulators, checked pairwise.
    expectations = [
        (d1._w_sum, [2.771771, 2.771771, 2.771771]),
        (d1._xw_sum, [5.403805, 2.901283, 0.006904]),
        (d2._w_sum, [7.228225, 7.228225, 7.228225]),
        (d2._xw_sum, [10.596191, 8.098713, 11.993094]),
    ]
    for actual, wanted in expectations:
        assert_array_almost_equal(actual, wanted, 4)
def get_example_inputs(model):
    """Build dummy inputs for exporting a seq2seq model's three ONNX parts.

    Uses optimum's TasksManager to derive the text2text-generation ONNX
    config for `model`, then generates dummy inputs for the encoder, the
    decoder (first step, no past), and the decoder with past key/values.

    Returns:
        (encoder_dummy_inputs, decoder_dummy_inputs, decoder_with_past_dummy_inputs)
    """
    onnx_config_class = TasksManager.get_exporter_config_constructor(model_type=model.config.model_type, exporter='onnx', task='text2text-generation')
    onnx_config = onnx_config_class(model.config, use_past=model.config.use_cache, use_past_in_inputs=model.config.use_cache)
    encoder_onnx_config = onnx_config.with_behavior('encoder')
    decoder_onnx_config = onnx_config.with_behavior('decoder', use_past=False)
    decoder_with_past_onnx_config = onnx_config.with_behavior('decoder', use_past=True, use_past_in_inputs=model.config.use_cache)
    encoder_dummy_inputs = encoder_onnx_config.generate_dummy_inputs(framework='pt')
    decoder_dummy_inputs = decoder_onnx_config.generate_dummy_inputs(framework='pt')
    # Keep only the last hidden state of the encoder outputs, as a tuple.
    decoder_dummy_inputs['encoder_outputs'] = tuple(decoder_dummy_inputs['encoder_outputs'][0:1])
    # The exported decoder graphs take no attention_mask input.
    del decoder_dummy_inputs['attention_mask']
    decoder_with_past_dummy_inputs = decoder_with_past_onnx_config.generate_dummy_inputs(framework='pt')
    decoder_with_past_dummy_inputs['encoder_outputs'] = tuple(decoder_with_past_dummy_inputs['encoder_outputs'][0:1])
    decoder_with_past_dummy_inputs['past_key_values'] = tuple(decoder_with_past_dummy_inputs['past_key_values'])
    del decoder_with_past_dummy_inputs['attention_mask']
    return (encoder_dummy_inputs, decoder_dummy_inputs, decoder_with_past_dummy_inputs)
def shutdown():
    """Log a shutdown message and stop the global simulation world."""
    global world
    Logger.print('Shutting down...')
    world.shutdown()
def get_map(image_id, save_dir=None, coco_class=None, dataset='pascal'):
    """Plot predicted boxes next to colored ground-truth masks for `image_id`.

    Loads the raw image (PASCAL or COCO file naming), the saved box
    prediction render, overlays each mask with its own colormap on a
    grayed-out copy of the image, and optionally saves the figure as
    '<image_id>.jpg' under `save_dir`.
    """
    if (dataset == 'pascal'):
        img_name = ((image_id.split('_')[0] + '_') + image_id.split('_')[1])
        image = imread(f'{image_path}/{img_name}.jpg')
    elif (dataset == 'coco'):
        image = imread(f'{image_path}/{int(image_id):012d}.jpg')
    masks = get_masks(image_id, image.shape[:2], coco_class)
    pred_image = imread(f'{root_path}/{image_id}/box_prediction.jpg')
    # NOTE(review): fig/ax are created only for 3-channel images; a grayscale
    # input would hit a NameError below -- confirm inputs are always RGB.
    if (len(image.shape) == 3):
        fig = plt.figure()
        aximg = fig.add_subplot(1, 2, 1)
        ax = fig.add_subplot(1, 2, 2)
        aximg.imshow(pred_image)
    cmaps = [LinearSegmentedColormap('myreds', myreddict), LinearSegmentedColormap('myblues', mybluedict), LinearSegmentedColormap('mygreens', mygreendict), LinearSegmentedColormap('mypurples', mypurpledict), LinearSegmentedColormap('myoranges', myorangedict), LinearSegmentedColormap('myyellows', myyellowdict), LinearSegmentedColormap('mygrays', mygraydict), LinearSegmentedColormap('myrealorange', myorangerealdict), LinearSegmentedColormap('mypink', mypinkdict)]
    ax.imshow(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), cmap='gray', alpha=1)
    ax.imshow((np.ones_like(image) * 255), alpha=0.8)
    ax.set_axis_off()
    # NOTE(review): masks are drawn twice (enumerate loop cycling cmaps, then
    # a zip loop pairing them) -- presumably redundant; kept to preserve the
    # rendered output exactly.
    for (m_idx, mask) in enumerate(masks):
        ax.imshow(mask, cmap=cmaps[(m_idx % len(cmaps))])
    for (cmap, mask) in zip(cmaps, masks):
        ax.imshow(mask, cmap=cmap)
    plt.tight_layout()
    if (save_dir is not None):
        # Bug fix: the extension was written as ',jpg' instead of '.jpg'.
        plt.savefig((os.path.join(save_dir, '%s.jpg') % image_id))
    plt.show()
class AverageMeter(object):
    """Track the most recent value plus a running sum, count and average."""

    def __init__(self):
        # All statistics start at zero; reset() defines the canonical state.
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.value = 0
        self.average = 0
        self.sum = 0
        self.count = 0

    def update(self, value, n=1):
        """Record `value` observed `n` times and refresh the running average."""
        self.value = value
        self.sum += value * n
        self.count += n
        self.average = self.sum / self.count
class DiffTransformerEncoder(nn.TransformerEncoder):
    """TransformerEncoder variant whose layers additionally consume a
    positional encoding `pe` and optional node `degree` information."""

    def forward(self, src, pe, degree=None, mask=None, src_key_padding_mask=None):
        out = src
        # Thread pe/degree through every layer alongside the usual masks.
        for layer in self.layers:
            out = layer(out, pe=pe, degree=degree, src_mask=mask,
                        src_key_padding_mask=src_key_padding_mask)
        if self.norm is not None:
            out = self.norm(out)
        return out
class DataSetCSVagentActPred(DataSetCSVslotTagging):
    """Slot-tagging dataset extended with windowed agent-action prediction targets."""

    def __init__(self, csv_file, window_size=5, train_data=None, flag='train'):
        if flag == 'train':
            self.window_size = window_size
        elif flag == 'test':
            # Reuse the window size the training set was built with.
            self.window_size = train_data.window_size
        else:
            raise Exception('Unknown flag: {}'.format(flag))
        super(DataSetCSVagentActPred, self).__init__(csv_file, train_data, flag)

    def transform_data(self, maxlen):
        """Vectorize inputs, then build windowed tag+intent features and
        binary agent-act targets; both must cover the same number of turns."""
        super(DataSetCSVagentActPred, self).transform_data(maxlen)
        # Concatenate per-turn tag presence (max over tokens) with intents.
        combined = np.hstack((self.userTag_1hotPad.max(axis=1), self.userIntent_vecBin))
        self.userTagIntent_vecBin = get_windowedVec(combined, self.window_size)
        (self.agentAct_vecBin, self.agentAct_txt) = vectorizing_binaryVec(
            self.agentAct_txt, self.agentAct_vocab_size, self.agentAct2id, prefix='act-')
        assert self.userTagIntent_vecBin.shape[0] == self.agentAct_vecBin.shape[0]
def _get_images_opts():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type=str, required=True)
parser.add_argument('--dataset_path', type=str, required=True)
return parser.parse_args() |
# NOTE(review): the bare '_model' below appears to be a registration decorator
# that lost its leading '@' and part of its name during extraction (timm uses
# '@register_model' here) -- confirm against the original source; as written
# it is a no-op name expression.
_model
def tv_resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 with torchvision's original layer configuration."""
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
    return _create_resnet('tv_resnet152', pretrained, **model_args)
def _get_config(num_ghosts, maze_size):
    """Build the environment config for a Pac-Man-like maze task.

    Args:
        num_ghosts: callable returning the number of ghosts per episode.
        maze_size: side length of the generated maze.

    Returns:
        Dict with state initializer, physics, task, action space, observers
        and game rules for the environment constructor.
    """
    # HSV-ish sprite factors for each entity type.
    agent_factors = dict(shape='circle', scale=0.05, c0=0.33, c1=1.0, c2=0.66)
    prey_factors = dict(shape='circle', scale=0.025, c0=0.2, c1=1.0, c2=1.0)
    # Ghosts start with infinite mass (glued in place until the agent moves).
    ghost_factors = dict(shape='circle', scale=0.05, mass=np.inf, c0=0.0, c1=1.0, c2=0.8)

    def state_initializer():
        # Fresh random maze each episode; flipped so row 0 is at the bottom.
        maze = maze_lib.generate_random_maze_matrix(size=maze_size, ambient_size=12)
        maze = maze_lib.Maze(np.flip(maze, axis=0))
        walls = maze.to_sprites(c0=0.0, c1=0.0, c2=0.8)
        n_ghosts = num_ghosts()
        # One open cell for the agent plus one per ghost; convert grid
        # points to cell-center coordinates.
        points = maze.sample_distinct_open_points((1 + n_ghosts))
        positions = [(maze.grid_side * (0.5 + np.array(x))) for x in points]
        agent_position = positions[0]
        agent = [sprite.Sprite(x=agent_position[1], y=agent_position[0], **agent_factors)]
        ghosts = []
        for i in range(n_ghosts):
            position = positions[(i + 1)]
            ghosts.append(sprite.Sprite(x=position[1], y=position[0], **ghost_factors))
        # Up to 5 prey placed on open maze cells.
        prey = []
        open_maze_points = np.argwhere((maze.maze == 0))
        for p in open_maze_points[:5]:
            pos = (maze.grid_side * (0.5 + np.array(p)))
            prey.append(sprite.Sprite(x=pos[1], y=pos[0], **prey_factors))
        state = collections.OrderedDict([('walls', walls), ('prey', prey), ('ghosts', ghosts), ('agent', agent)])
        return state

    maze_physics = physics_lib.MazePhysics(maze_layer='walls', avatar_layers=('agent', 'prey', 'ghosts'), constant_speed=0.003)
    physics = physics_lib.Physics((physics_lib.RandomMazeWalk(speed=0.003), ['ghosts']), updates_per_env_step=1, corrective_physics=[maze_physics])
    # Reward structure: -5 for touching a ghost (episode resets), +1 per prey.
    ghost_task = tasks.ContactReward((- 5), layers_0='agent', layers_1='ghosts', reset_steps_after_contact=0)
    prey_task = tasks.ContactReward(1, layers_0='agent', layers_1='prey')
    reset_task = tasks.Reset(condition=(lambda state: (len(state['prey']) == 0)), steps_after_condition=5)
    task = tasks.CompositeTask(ghost_task, prey_task, reset_task, timeout_steps=3000)
    action_space = action_spaces.Grid(scaling_factor=0.015, action_layers='agent', control_velocity=True, momentum=0.5)
    observer = observers.PILRenderer(image_size=(256, 256), anti_aliasing=1, color_to_rgb='hsv_to_rgb')

    def _unglue(s):
        # Give the sprite finite mass so maze physics can move it.
        s.mass = 1.0

    def _unglue_condition(state):
        # Trigger once the agent has started moving.
        return (not np.all((state['agent'][0].velocity == 0)))

    unglue = game_rules.ConditionalRule(condition=_unglue_condition, rules=game_rules.ModifySprites(('prey', 'ghosts'), _unglue))
    vanish_on_contact = game_rules.VanishOnContact(vanishing_layer='prey', contacting_layer='agent')
    rules = (vanish_on_contact, unglue)
    config = {'state_initializer': state_initializer, 'physics': physics, 'task': task, 'action_space': action_space, 'observers': {'image': observer}, 'game_rules': rules}
    return config
def _read_embedding_file(file_enc, skip_lines=0):
    """Parse one embedding file ('token v1 v2 ...' per line) into a dict.

    Skips the first `skip_lines` lines, stops at an empty read, and ignores
    two-field header lines (e.g. "<vocab_size> <dim>").
    """
    embs = dict()
    # Bug fix: use a context manager so the file handle is always closed.
    with open(file_enc, 'rb') as f:
        for (i, l) in enumerate(f):
            if (i < skip_lines):
                continue
            if (not l):
                break
            if (len(l) == 0):
                continue
            l_split = l.decode('utf8').strip().split(' ')
            if (len(l_split) == 2):
                # Header line such as "<vocab_size> <dim>" -- skip it.
                continue
            embs[l_split[0]] = [float(em) for em in l_split[1:]]
    return embs


def get_embeddings(file_enc, opt, flag):
    """Load token embeddings from `file_enc`.

    For flag == 'enc' the first `opt.skip_lines` lines are skipped; any other
    flag reads the whole file.  Returns {token: [float, ...]}.

    Refactor: the two nearly-identical parsing loops were collapsed into
    `_read_embedding_file`; log output is unchanged.
    """
    if (flag == 'enc'):
        embs = _read_embedding_file(file_enc, skip_lines=opt.skip_lines)
        logger.info('Got {} encryption embeddings from {}'.format(len(embs), file_enc))
    else:
        embs = _read_embedding_file(file_enc)
        logger.info('Got {} decryption embeddings from {}'.format(len(embs), file_enc))
    return embs
class Base(object, metaclass=abc.ABCMeta):
    """Abstract base for training/testing drivers: timers, a logger, and
    overridable factory hooks for the batch generator and the model.

    Bug fix: the original set the Python-2 `__metaclass__` attribute, which
    is inert in Python 3 (this codebase uses f-strings elsewhere, so it runs
    on Python 3); the metaclass is now declared with the class keyword.
    """

    def __init__(self, log_name='logs.txt'):
        self.cur_epoch = 0
        # Timers for total, GPU, and data-reading time per iteration.
        self.tot_timer = Timer()
        self.gpu_timer = Timer()
        self.read_timer = Timer()
        self.logger = colorlogger(cfg.log_dir, log_name=log_name)

    def _make_batch_generator(self):
        """Hook: build the data loader; subclasses override."""
        return

    def _make_model(self):
        """Hook: build the model/optimizer; subclasses override."""
        return
def test_linacc_constantacc_x_2d():
    """Check that a 2D orbit integrated in a constantly accelerating frame
    matches the equivalent orbit in the inertial frame.

    The non-inertial frame has constant acceleration a0 = (ax, 0, 0); the
    equivalent inertial setup wraps the disk potential with the frame's
    displacement x0(t) = ax*t^2/2 and adds the fictitious frame force.
    """
    lp = potential.LogarithmicHaloPotential(normalize=1.0)
    dp = potential.DehnenBarPotential(omegab=1.8, rb=0.5, Af=0.03)
    diskpot = (lp + dp)
    ax = 0.02
    # Displacement of the frame: integral of the constant acceleration twice.
    intax = (lambda t: ((ax * (t ** 2.0)) / 2.0))
    framepot = potential.NonInertialFrameForce(a0=[ax, 0.0, 0.0])
    diskframepot = (AcceleratingPotentialWrapperPotential(pot=diskpot, x0=[intax, (lambda t: 0.0), (lambda t: 0.0)]) + framepot)

    def check_orbit(method='odeint', tol=1e-09):
        # Integrate the same initial conditions in both frames and compare
        # positions after mapping the accelerated frame back with intax(t).
        o = Orbit().toPlanar()
        o.turn_physical_off()
        ts = numpy.linspace(0.0, 20.0, 1001)
        o.integrate(ts, diskpot, method=method)
        op = o()
        op.integrate(ts, diskframepot, method=method)
        o_xs = o.x(ts)
        o_ys = o.y(ts)
        op_xs = (op.x(ts) + intax(ts))
        op_ys = op.y(ts)
        assert (numpy.amax(numpy.fabs((o_xs - op_xs))) < tol), f'Integrating an orbit in a linearly-accelerating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_ys - op_ys))) < tol), f'Integrating an orbit in a linearly-accelerating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'

    # Looser tolerances for the less accurate integrators.
    check_orbit(method='odeint', tol=1e-05)
    check_orbit(method='dop853', tol=1e-09)
    check_orbit(method='dop853_c', tol=1e-05)
    return None
class Warmup(Callback):
    """Keras callback that linearly anneals the model's `beta` weight from 0
    to 1 over the first `max_epochs` epochs (KL warm-up)."""

    def __init__(self, max_epochs):
        super(Warmup, self).__init__()
        # Guard against non-positive spans so the division below is safe.
        if (max_epochs <= 0):
            self.max_epochs = 1.0
        else:
            self.max_epochs = max_epochs

    def on_epoch_begin(self, epoch, logs=None):
        # Bug fix: `logs={}` was a mutable default argument; Keras always
        # passes `logs`, so `None` is a safe, equivalent default.
        beta = np.minimum(1.0, ((epoch * 1.0) / (self.max_epochs * 1.0)))
        if (not isinstance(beta, (float, np.float32, np.float64))):
            raise ValueError('The output of the "beta" paramshould be float.')
        K.set_value(self.model.beta, beta)
        print('Beta (warm-up with {} epochs): {:.3f}'.format(self.max_epochs, beta))
class MNLI(PreprocessedTextDataset):
    """MultiNLI dataset: yields (gold_label, premise, hypothesis) triples from TSV."""

    labels = ('contradiction', 'entailment', 'neutral')

    def __init__(self, *args, **kwags):
        super().__init__(*args, **kwags)

    def get_instances(self, text_file):
        """Yield (label, sentence1, sentence2) for each data row, skipping the header."""
        with open(text_file, 'r') as f:
            rows = csv.reader(f, delimiter='\t', quotechar=None)
            for row in itertools.islice(rows, 1, None):
                # Columns 8/9 hold the sentence pair; the label is last.
                yield (row[-1], row[8], row[9])
class Estimator(BaseEstimator):
    """Abstract Orca estimator interface.

    Every operation raises a 'not implemented' error via invalidInputError
    until overridden by a concrete backend; only the TensorBoard setters and
    shutdown have trivial default behavior.
    """

    def fit(self, data, epochs, batch_size=32, feature_cols=None, label_cols=None, validation_data=None, checkpoint_trigger=None):
        invalidInputError(False, 'not implemented')

    def predict(self, data, batch_size=4, feature_cols=None):
        invalidInputError(False, 'not implemented')

    def evaluate(self, data, batch_size=32, feature_cols=None, label_cols=None):
        invalidInputError(False, 'not implemented')

    def get_model(self):
        invalidInputError(False, 'not implemented')

    def save(self, model_path):
        invalidInputError(False, 'not implemented')

    def load(self, model_path):
        invalidInputError(False, 'not implemented')

    def set_tensorboard(self, log_dir, app_name):
        """Record where backends should write TensorBoard summaries."""
        self.log_dir = log_dir
        self.app_name = app_name

    def clear_gradient_clipping(self):
        invalidInputError(False, 'not implemented')

    def set_constant_gradient_clipping(self, min, max):
        invalidInputError(False, 'not implemented')

    def set_l2_norm_gradient_clipping(self, clip_norm):
        invalidInputError(False, 'not implemented')

    def get_train_summary(self, tag=None):
        invalidInputError(False, 'not implemented')

    def get_validation_summary(self, tag=None):
        invalidInputError(False, 'not implemented')

    def load_orca_checkpoint(self, path, version):
        invalidInputError(False, 'not implemented')

    def shutdown(self):
        """Release backend resources; no-op by default."""
        pass
class MMSeq2SeqModel(nn.Module):
    """Multimodal encoder-decoder dialogue model.

    Encodes a question/input, dialogue history, audio features and four
    spatio-temporal visual streams, fuses them with a joint `Atten` module,
    and decodes a response. `loss` computes the training loss; `generate`
    performs beam-search decoding. All tensors are moved to CUDA, so a GPU
    is required.
    """
    def __init__(self, mm_encoder, history_encoder, input_encoder, response_decoder):
        super(MMSeq2SeqModel, self).__init__()
        # Sub-modules supplied by the caller.
        self.history_encoder = history_encoder
        self.mm_encoder = mm_encoder
        self.input_encoder = input_encoder
        self.response_decoder = response_decoder
        # Embedding widths for question (q), visual/spatial (s) and audio (a)
        # streams. `q_embed` is intentionally local: only used to size `atten`.
        q_embed = 256
        self.s_embed = 256
        self.a_embed = 256
        high_order_utils = []
        # 1x1 convolutions projecting raw channels (512 visual, 128 audio)
        # down to the shared embedding sizes.
        self.emb_s = nn.Conv1d(512, self.s_embed, 1)
        self.emb_a = nn.Conv1d(128, self.a_embed, 1)
        # LSTM fusing the attended audio + four visual summaries into a single
        # temporal state used to initialise the decoder.
        self.emb_temporal_sp = nn.LSTM(self.s_embed, self.s_embed, dropout=0.5, batch_first=True)
        # Joint attention over [question, 4 visual streams, audio].
        # NOTE(review): `sizes` look like per-utility lengths -- confirm against Atten.
        self.atten = Atten(util_e=[q_embed, self.s_embed, self.s_embed, self.s_embed, self.s_embed, self.a_embed], high_order_utils=high_order_utils, prior_flag=True, sizes=[10, 49, 49, 49, 49, 10], size_flag=False, pairwise_flag=True, unary_flag=True, self_flag=True)
    def loss(self, mx, hx, x, y, t, s):
        """Encode all modalities, decode, and return (None, states[, loss]).

        Assumed argument roles (confirm against callers): mx[0] is the audio
        batch, hx the history batch, x the input/question batch, y the decoder
        inputs, t a list of target index tensors (or None), s the stacked
        visual streams.
        """
        (ei, ei_len) = self.input_encoder(None, x)
        # Attention prior: all mass on the last valid token of each sequence.
        q_prior = torch.zeros(ei.size(0), ei.size(1)).cuda()
        idx = torch.from_numpy((ei_len - 1)).long().cuda()
        batch_index = torch.arange(0, ei_len.shape[0]).long().cuda()
        q_prior[(batch_index, idx)] = 1
        # Project the visual streams: flatten streams into the batch dim,
        # apply the 1x1 conv, then restore the per-stream layout.
        num_samples = s.shape[0]
        s = s.view((- 1), s.size(2), s.size(3)).transpose(1, 2)
        s = self.emb_s(s)
        s = s.view(num_samples, (- 1), s.size(1), s.size(2)).transpose(2, 3)
        # Project the audio features the same way.
        a = mx[0].cuda().permute(1, 2, 0)
        a = self.emb_a(a)
        a = a.transpose(1, 2)
        # Jointly attend over question, the four visual streams, and audio;
        # only the question utility gets a (one-hot) prior.
        ei = self.atten(utils=[ei, s[0], s[1], s[2], s[3], a], priors=[q_prior, None, None, None, None, None])
        a_q = ei[0]
        a_s = [ei[1], ei[2], ei[3], ei[4]]
        a_a = ei[5]
        # Stack [audio, visual x4] along a pseudo-time axis and fuse via LSTM.
        a_a_s = torch.cat(([a_a.unsqueeze(1)] + [u.unsqueeze(1) for u in a_s]), dim=1)
        (_, hidden_temporal_state) = self.emb_temporal_sp(a_a_s)
        # Concatenate attended question with the last history-encoder state.
        eh = self.history_encoder(None, hx)
        es = torch.cat((a_q, eh[(- 1)]), dim=1)
        # Some decoders consume the context vector directly instead of the
        # fused temporal hidden state.
        if (hasattr(self.response_decoder, 'context_to_state') and (self.response_decoder.context_to_state == True)):
            (ds, dy) = self.response_decoder(es, None, y)
        else:
            (ds, dy) = self.response_decoder(hidden_temporal_state, es, y)
        if (t is not None):
            tt = torch.cat(t, dim=0)
            # NOTE(review): `tt` is already a tensor; `torch.tensor(tt, ...)` copies
            # it and warns on newer PyTorch -- presumably `tt.long()` was intended.
            loss = F.cross_entropy(dy, torch.tensor(tt, dtype=torch.long).cuda())
            return (None, ds, loss)
        else:
            return (None, ds)
    def generate(self, mx, hx, x, s, sos=2, eos=2, unk=0, minlen=1, maxlen=100, beam=5, penalty=1.0, nbest=1):
        """Beam-search decode a response; returns (nbest hypotheses, best state).

        The encoding pipeline below intentionally mirrors `loss` (same shapes,
        same attention call); only the decoding differs.
        """
        (ei, ei_len) = self.input_encoder(None, x)
        q_prior = torch.zeros(ei.size(0), ei.size(1)).cuda()
        idx = torch.from_numpy((ei_len - 1)).long().cuda()
        batch_index = torch.arange(0, ei_len.shape[0]).long().cuda()
        q_prior[(batch_index, idx)] = 1
        num_samples = s.shape[0]
        s = s.view((- 1), s.size(2), s.size(3)).transpose(1, 2)
        s = self.emb_s(s)
        s = s.view(num_samples, (- 1), s.size(1), s.size(2)).transpose(2, 3)
        a = mx[0].cuda().permute(1, 2, 0)
        a = self.emb_a(a)
        a = a.transpose(1, 2)
        ei = self.atten(utils=[ei, s[0], s[1], s[2], s[3], a], priors=[q_prior, None, None, None, None, None])
        a_q = ei[0]
        a_s = [ei[1], ei[2], ei[3], ei[4]]
        a_a = ei[5]
        a_a_s = torch.cat(([a_a.unsqueeze(1)] + [u.unsqueeze(1) for u in a_s]), dim=1)
        (_, hidden_temporal_state) = self.emb_temporal_sp(a_a_s)
        eh = self.history_encoder(None, hx)
        es = torch.cat((a_q, eh[(- 1)]), dim=1)
        # Seed the decoder with the start-of-sequence token.
        ds = self.response_decoder.initialize(hidden_temporal_state, es, torch.from_numpy(np.asarray([sos])).cuda())
        # Each hypothesis is (token list, cumulative log-prob, decoder state).
        hyplist = [([], 0.0, ds)]
        best_state = None
        comp_hyplist = []  # completed hypotheses (ended with eos)
        for l in six.moves.range(maxlen):
            new_hyplist = []
            argmin = 0  # index of the worst hypothesis currently in the beam
            for (out, lp, st) in hyplist:
                logp = self.response_decoder.predict(st)
                lp_vec = (logp.cpu().data.numpy() + lp)
                lp_vec = np.squeeze(lp_vec)
                if (l >= minlen):
                    # Option 1: terminate this hypothesis with eos.
                    # `penalty` rewards longer outputs (length bonus).
                    new_lp = (lp_vec[eos] + (penalty * (len(out) + 1)))
                    new_st = self.response_decoder.update(st, torch.from_numpy(np.asarray([eos])).cuda())
                    comp_hyplist.append((out, new_lp))
                    if ((best_state is None) or (best_state[0] < new_lp)):
                        best_state = (new_lp, new_st)
                # Option 2: extend with the highest-scoring tokens.
                for o in np.argsort(lp_vec)[::(- 1)]:
                    if ((o == unk) or (o == eos)):
                        continue
                    new_lp = lp_vec[o]
                    if (len(new_hyplist) == beam):
                        # Beam full: replace the current worst if this is better;
                        # otherwise stop (tokens are in descending score order).
                        if (new_hyplist[argmin][1] < new_lp):
                            new_st = self.response_decoder.update(st, torch.from_numpy(np.asarray([o])).cuda())
                            new_hyplist[argmin] = ((out + [o]), new_lp, new_st)
                            argmin = min(enumerate(new_hyplist), key=(lambda h: h[1][1]))[0]
                        else:
                            break
                    else:
                        new_st = self.response_decoder.update(st, torch.from_numpy(np.asarray([o])).cuda())
                        new_hyplist.append(((out + [o]), new_lp, new_st))
                        if (len(new_hyplist) == beam):
                            argmin = min(enumerate(new_hyplist), key=(lambda h: h[1][1]))[0]
            hyplist = new_hyplist
        if (len(comp_hyplist) > 0):
            # Return the nbest completed hypotheses, best score first.
            maxhyps = sorted(comp_hyplist, key=(lambda h: (- h[1])))[:nbest]
            return (maxhyps, best_state[1])
        else:
            return ([([], 0)], None)
def test(binary_name, tests, logfile, logfile_name):
    """Run every benchmark in `tests` against `binary_name` and report results.

    Each benchmark runs the binary (optionally under mpiexec) on its input
    files, then checks each expectation: a listed output file must exist and,
    unless it is a .png (existence-only check), contain the expected text as a
    whitespace-normalized substring. Per-expectation details go to `logfile`;
    a summary goes to both screen and log. `logfile_name` is only used in the
    final "see ... for details" message.
    """
    binary = make_binary(binary_name)
    if (not os.path.exists(binary)):
        fail_with(('Binary %s does not exist' % binary))
    print_to_screen('\n{c.BOLD}# TESTING BINARY {name}{c.NORMAL}'.format(c=bcolors, name=binary_name))
    # Counters: a "benchmark" is one entry of `tests`; a "test case" is one
    # expectation within a benchmark. Ids start at 1 for human-readable output.
    benchmark_id = 1
    passed_benchmarks = 0
    test_case_id = 1
    passed_test_cases = 0
    stopwatch_start = time.time()
    for test_case in tests:
        passed = True  # becomes False if any expectation of this benchmark fails
        print_to_log(header_benchmark.format(benchmark_id=benchmark_id, purpose=test_case['purpose']))
        print_to_screen('  Benchmark {}: {} {}..'.format(benchmark_id, test_case['purpose'], ('- tags: [{}]'.format(test_case['tags']) if ('tags' in test_case) else '')))
        # Build the command line: binary, input files, then extra options.
        cmd_inputs = [make_file(each_file) for each_file in test_case['input_files']]
        cmd = (([binary] + cmd_inputs) + test_case['options'].split())
        # Multi-node benchmarks are wrapped in mpiexec.
        if (('nb_nodes' in test_case) and (test_case['nb_nodes'] > 1)):
            cmd = (['mpiexec', '-n', str(test_case['nb_nodes'])] + cmd)
        print_to_log(('  command : ' + ' '.join(cmd)))
        # Flush around the subprocess so its output lands in order in the log.
        logfile.flush()
        subprocess.call(cmd, stdout=logfile, stderr=logfile)
        logfile.flush()
        files_to_remove = set()  # output files to clean up after this benchmark
        for (expectation_id, expectation) in enumerate(test_case['expectations']):
            output_file = make_output_file(expectation['file'])
            test_expectation_id = '{}.{}'.format(benchmark_id, expectation_id)
            if (not os.path.exists(output_file)):
                print_to_log('  File {} does not exist! Test {} failed.'.format(output_file, test_expectation_id))
                passed = False
            else:
                files_to_remove.add(output_file)
                (_, file_extension) = os.path.splitext(output_file)
                # PNG outputs are only checked for existence, not content.
                if (file_extension == '.png'):
                    print_to_log(('  Test %s passed.' % test_expectation_id))
                    passed_test_cases += 1
                else:
                    with open(output_file, 'r') as my_file:
                        original_content = my_file.read()
                    # Normalize whitespace on both sides, then look for the
                    # expected text as a substring of the actual output.
                    content = ' '.join(original_content.split())
                    expected_content = ' '.join(expectation['content'].split())
                    position = content.find(expected_content)
                    if (position >= 0):
                        print_to_log(('  Test %s passed.' % test_expectation_id))
                        passed_test_cases += 1
                    else:
                        passed = False
                        print_to_log(test_fmt.format(expectation_id=test_expectation_id, expected_content=expectation['content'], original_content=original_content))
        test_case_id += len(test_case['expectations'])
        for my_file in files_to_remove:
            os.remove(my_file)
        if passed:
            passed_benchmarks += 1
        else:
            print_to_screen(((bcolors.ERROR + 'FAILED!') + bcolors.NORMAL))
        benchmark_id += 1
    stopwatch_end = time.time()
    print_to_log('\n\n')
    print_to_screen_and_log('Total time: {}'.format(str((stopwatch_end - stopwatch_start))))
    # Ids were 1-based and post-incremented, so totals are id - 1.
    total_benchmarks = (benchmark_id - 1)
    total_test_cases = (test_case_id - 1)
    if ((total_benchmarks == passed_benchmarks) and (total_test_cases == passed_test_cases)):
        print_to_screen_and_log('All benchmarks ({}/{}) passed successfully.'.format(passed_benchmarks, total_benchmarks))
        print_to_screen_and_log('All test cases ({}/{}) passed successfully.'.format(passed_test_cases, total_test_cases))
    else:
        print_to_screen(((bcolors.WARNING + 'WARNING! Some tests failed.') + bcolors.NORMAL))
        print_to_log('WARNING! Some tests failed.')
        # Color the screen summary by outcome; the log gets plain text.
        if (passed_benchmarks == total_benchmarks):
            print_to_screen('{2.GOOD}{0}/{1} benchmarks passed successfully.{2.NORMAL}'.format(passed_benchmarks, total_benchmarks, bcolors))
        else:
            print_to_screen('{2.WARNING}{0}/{1} benchmarks passed successfully.{2.NORMAL}'.format(passed_benchmarks, total_benchmarks, bcolors))
        print_to_log('{}/{} benchmarks passed successfully.'.format(passed_benchmarks, total_benchmarks))
        if (passed_benchmarks < total_benchmarks):
            print_to_screen('{2.ERROR}{0}/{1} benchmarks failed.{2.NORMAL}'.format((total_benchmarks - passed_benchmarks), total_benchmarks, bcolors))
        else:
            print_to_screen('{}/{} benchmarks failed.'.format((total_benchmarks - passed_benchmarks), total_benchmarks))
        print_to_log('{}/{} benchmarks failed.'.format((total_benchmarks - passed_benchmarks), total_benchmarks))
        if (passed_test_cases == total_test_cases):
            print_to_screen('{2.GOOD}{0}/{1} test cases passed successfully.{2.NORMAL}'.format(passed_test_cases, total_test_cases, bcolors))
        else:
            print_to_screen('{2.WARNING}{0}/{1} test cases passed successfully.{2.NORMAL}'.format(passed_test_cases, total_test_cases, bcolors))
        print_to_log('{}/{} test cases passed successfully.'.format(passed_test_cases, total_test_cases))
        if (passed_test_cases < total_test_cases):
            print_to_screen('{2.ERROR}{0}/{1} test cases failed.{2.NORMAL}'.format((total_test_cases - passed_test_cases), total_test_cases, bcolors))
        else:
            print_to_screen('{}/{} test cases failed.'.format((total_test_cases - passed_test_cases), total_test_cases))
        print_to_log('{}/{} test cases failed.'.format((total_test_cases - passed_test_cases), total_test_cases))
    print_to_screen(('(See %s for details.)' % logfile_name))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.