code stringlengths 101 5.91M |
|---|
class Optimizer(abc.ABC):
    """Base class for optimizers that operate on a model's tensors as a
    name -> ndarray dictionary, independent of the underlying ML framework."""

    def step(self, gradients: Dict[(str, ndarray)]) -> None:
        """Apply one update step given per-tensor gradients (no-op in the base class)."""
        pass

    def _set_params_from_model(self, model_interface):
        """Populate self.params from the model via its framework adapter plugin.

        `model_interface.framework_plugin` is a dotted path of the form
        "pkg.module.ClassName": splitext splits on the last dot, so the
        extension (minus its leading dot) is the adapter class name and the
        root is the module path to import.
        """
        class_name = splitext(model_interface.framework_plugin)[1].strip('.')
        module_path = splitext(model_interface.framework_plugin)[0]
        framework_adapter = import_module(module_path)
        # NOTE(review): getattr(..., None) means a missing adapter class only
        # surfaces as an AttributeError on the next line -- confirm acceptable.
        framework_adapter_plugin: FrameworkAdapterPluginInterface = getattr(framework_adapter, class_name, None)
        self.params: Dict[(str, ndarray)] = framework_adapter_plugin.get_tensor_dict(model_interface.provide_model())
# NOTE(review): the lines below look like click decorators whose
# "@click.command()" / "@click.option(" prefixes were lost during extraction --
# confirm against the original source before running.
()
('--images', 'image_path', help='Path to the images', metavar='PATH|ZIP', type=str, required=True)
('--ref', 'ref_path', help='Dataset reference statistics ', metavar='NPZ|URL', type=str, required=True)
('--num', 'num_expected', help='Number of images to use', metavar='INT', type=click.IntRange(min=2), default=50000, show_default=True)
('--seed', help='Random seed for selecting the images', metavar='INT', type=int, default=0, show_default=True)
('--batch', help='Maximum batch size', metavar='INT', type=click.IntRange(min=1), default=64, show_default=True)
def calc(image_path, ref_path, num_expected, seed, batch):
    """Calculate FID for a set of images against precomputed reference
    Inception statistics; rank 0 prints the score and writes fid.txt."""
    torch.multiprocessing.set_start_method('spawn')
    dist.init()
    dist.print0(f'Loading dataset reference statistics from "{ref_path}"...')
    ref = None
    # Only rank 0 loads the reference stats; other ranks keep ref = None.
    if (dist.get_rank() == 0):
        with dnnlib.util.open_url(ref_path) as f:
            ref = dict(np.load(f))
    # Inception statistics are computed collectively across all ranks.
    (mu, sigma) = calculate_inception_stats(image_path=image_path, num_expected=num_expected, seed=seed, max_batch_size=batch)
    dist.print0('Calculating FID...')
    if (dist.get_rank() == 0):
        fid = calculate_fid_from_inception_stats(mu, sigma, ref['mu'], ref['sigma'])
        print(f'{fid:g}')
        with open(os.path.join(image_path, 'fid.txt'), 'w') as f:
            f.write(str(fid))
    # Keep non-zero ranks alive until rank 0 has written the result.
    torch.distributed.barrier()
def _compare_gpt2_checkpoint_gradients(model_id, revision, config: Optional[Gpt2Config]=None):
    """Cross-check gradients and one optimizer step between the HF PyTorch
    GPT-2 and the JAX port loaded from the same checkpoint.

    Loads both models from `model_id`@`revision`, computes the LM loss and its
    gradients on an identical random token sequence, asserts per-parameter
    gradients match, then applies one AdamW step on each side with matched
    hyperparameters and asserts the updated parameters match.
    """
    import torch
    converter = Gpt2Config.default_hf_checkpoint_converter
    torch_model: HfGpt2LMHeadModel = AutoModelForCausalLM.from_pretrained(model_id, revision=revision)
    torch_model.eval()
    # NOTE(review): the fallback `(config or Gpt2LMHeadModel)` passes a model
    # class where a config is expected -- confirm this is intentional.
    model = cast(Gpt2LMHeadModel, converter.load_pretrained((config or Gpt2LMHeadModel), RepoRef(model_id, revision)))
    model = inference_mode(model, True)
    input = hax.random.randint(PRNGKey(0), model.Pos, 0, model.Vocab.size)
    def torch_loss(model, input_ids) -> torch.Tensor:
        # HF models return (loss, logits, ...) when labels are supplied.
        return model(input_ids, labels=input_ids)[0]
    torch_out = torch_loss(torch_model, torch.from_numpy(onp.array(input.array)).to(torch.int64).unsqueeze(0))
    causal_mask = hax.nn.attention.causal_mask(model.config.Pos, model.config.KeyPos)
    def compute_loss(model, input_ids):
        pred_y = model(input_ids, key=None, attn_mask=causal_mask)
        return next_token_loss(model.Pos, model.Vocab, pred_y, input_ids).scalar()
    jax_compute_grad = equinox.filter_value_and_grad(compute_loss, has_aux=False)
    jax_grad: Gpt2LMHeadModel
    (jax_loss, jax_grad) = jax_compute_grad(model, input)
    # Backprop on the torch side, then compare per-parameter gradients.
    torch_out.backward()
    state_dict = torch_model.transformer.state_dict(keep_vars=True)
    state_dict = {k: v.grad for (k, v) in state_dict.items()}
    jax_grad_dict = jax_grad.to_state_dict()
    for (jax_key, jax_g) in jax_grad_dict.items():
        if (jax_key not in state_dict):
            # Only the (untied) output embedding may exist solely on the JAX side.
            assert (jax_key == 'token_out_embeddings')
            continue
        torch_g = state_dict[jax_key]
        assert onp.isclose(jax_g, torch_g.detach().cpu().numpy(), rtol=0.01, atol=0.01).all(), f'{jax_g} != {torch_g}'
    # One optimizer step on each side with matched hyperparameters.
    optimizer_config = OptimizerConfig(weight_decay=0.0, learning_rate=0.001, warmup_ratio=0.0, lr_schedule='constant')
    if (optimizer_config.max_grad_norm is not None):
        torch.nn.utils.clip_grad_norm_(torch_model.parameters(), optimizer_config.max_grad_norm)
    torch_optimizer = torch.optim.AdamW(torch_model.parameters(), lr=optimizer_config.learning_rate, weight_decay=optimizer_config.weight_decay, betas=(optimizer_config.beta1, optimizer_config.beta2), eps=optimizer_config.epsilon)
    torch_optimizer.step()
    jax_optimizer = optimizer_config.build(1000)
    state = jax_optimizer.init(model)
    (updates, state) = jax_optimizer.update(updates=jax_grad, state=state, params=model)
    new_model = equinox.apply_updates(model, updates)
    new_model_dict = new_model.to_state_dict()
    state_dict = torch_model.transformer.state_dict(keep_vars=True)
    # Compare updated parameters (slightly different tolerances than gradients).
    for (key, jax_p) in new_model_dict.items():
        if (key not in state_dict):
            assert (key == 'token_out_embeddings')
            continue
        torch_p = state_dict[key]
        assert onp.isclose(jax_p, torch_p.detach().cpu().numpy(), rtol=0.001, atol=0.002).all(), f'{key}: {onp.linalg.norm((jax_p - torch_p.detach().cpu().numpy()), ord=onp.inf)}'
class KirillovReshetikhinCrystalFromPromotion(KirillovReshetikhinGenericCrystal, AffineCrystalFromClassicalAndPromotion):
    """Kirillov-Reshetikhin crystal whose affine structure is induced from its
    classical decomposition via a promotion operator (the Dynkin diagram
    automorphism applied at node 0)."""

    def __init__(self, cartan_type, r, s):
        """Initialize the KR crystal of the given Cartan type and shape (r, s);
        the classical decomposition, promotion (and inverse) and the
        automorphism image of node 0 are supplied by subclass hooks."""
        KirillovReshetikhinGenericCrystal.__init__(self, cartan_type, r, s)
        AffineCrystalFromClassicalAndPromotion.__init__(self, cartan_type, self.classical_decomposition(), self.promotion(), self.promotion_inverse(), self.dynkin_diagram_automorphism(0), KirillovReshetikhinCrystals())
class TestRelationNetsCanProcessSupportSetFolder():
    # NOTE(review): the line below looks like a pytest parametrize decorator
    # whose "@pytest.mark" prefix was lost during extraction -- confirm
    # against the original source.
    .parametrize('support_set_path', ['easyfsl/tests/datasets/resources/balanced_support_set', 'easyfsl/tests/datasets/resources/unbalanced_support_set'])
    def test_relation_nets_can_process_support_set_from_balanced_folder(support_set_path):
        """Smoke test: RelationNetworks can ingest a SupportSetFolder (balanced
        or unbalanced) and then run a forward pass on query images."""
        support_set = SupportSetFolder(support_set_path)
        support_images = support_set.get_images()
        support_labels = support_set.get_labels()
        # Identity backbone + pooling relation module keeps the test lightweight.
        model = RelationNetworks(nn.Identity(), relation_module=nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)), nn.Flatten()), feature_dimension=3)
        model.process_support_set(support_images, support_labels)
        query_images = torch.randn((4, 3, 84, 84))
        # Forward pass on queries must not raise.
        model(query_images)
# NOTE(review): ".pure" looks like a truncated decorator (its prefix was lost
# during extraction) -- confirm against the original source.
.pure
def test_parse_forward_simple(gpu):
    """Check that a simple two-layer torch module parsed into a DaceModule
    produces the same forward output as the torch module, and that the output
    can be consumed as a plain array inside another program."""
    torch_module = copy_to_gpu(gpu, torch.nn.Sequential(torch.nn.Linear(12, 24), torch.nn.Linear(24, 2)))
    dace_module = DaceModule(torch_module)
    x = copy_to_gpu(gpu, torch.randn(2, 12))
    expected = torch_module(x)
    result = dace_module(x)
    torch_tensors_close('output', expected, result)
    def train_step(y):
        output = dace_module(y)
        # Copy to host memory before reducing.
        cpu = np.empty_like(output)
        cpu[:] = output
        return cpu.sum()
    result = train_step(x)
    tensors_close('parsed', expected.sum(), result)
# NOTE(review): "_args('v', 'i', 'i')" looks like a truncated
# "@parse_args('v', 'i', 'i')" ONNX-symbolic decorator -- confirm against
# the original source.
_args('v', 'i', 'i')
def transpose(g, self, dim0, dim1):
    """ONNX symbolic for aten::transpose: swap axes dim0 and dim1.

    Emits a Transpose node when the tensor rank is known; otherwise falls
    back to an ATen op (if the export type allows) or raises.
    """
    if (dim0 == dim1):
        # Swapping an axis with itself is the identity.
        return self
    if self.isCompleteTensor():
        # Rank is known: build an explicit permutation with the two axes swapped.
        axes = list(range(self.type().dim()))
        (axes[dim0], axes[dim1]) = (axes[dim1], axes[dim0])
        return g.op('Transpose', self, perm_i=axes)
    elif (sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK):
        # Unknown rank: defer to the ATen runtime operator.
        return g.op('ATen', self, operator_s='transpose', dim0_i=dim0, dim1_i=dim1)
    else:
        raise RuntimeError('Unsupported: ONNX export of transpose for tensor of unknown rank.')
class TrackObjective(Callback):
    """Callback that snapshots objective-related data from the algorithm at
    every iteration, at three granularities: per-edge, per-node, and for the
    whole model. Records can be exported as pandas DataFrames."""

    def __init__(self):
        self.edge_records = []
        self.node_records = []
        self.model_records = []

    def __call__(self, algo, i, max_iter):
        # Reset the flat record list at the start of a run.
        if i == 0:
            self.records = []
        algo.update_objective()
        self.model_records.append({'A': algo.A_model, 'n_iter': algo.n_iter})
        self.edge_records.extend(algo.get_edges_data(['A', 'n_iter', 'direction']))
        self.node_records.extend(algo.get_nodes_data(['A', 'n_iter']))

    def get_dataframe(self):
        """Return (edge_df, node_df, model_df) built from the accumulated records."""
        edge_frame = pd.DataFrame(self.edge_records)
        node_frame = pd.DataFrame(self.node_records)
        model_frame = pd.DataFrame(self.model_records)
        return (edge_frame, node_frame, model_frame)
def orient_circuit(circuit, convex=False, precision=53, verbose=False):
    """Return the vertex tuple of `circuit` oriented counterclockwise.

    Args:
        circuit: sequence of edges, each an ordered pair of vertices exposing
            a .vector() position.
        convex: if True, try to decide orientation from the sign of a single
            2x2 determinant of the first two edge vectors; falls through to
            the general method when that determinant is zero.
        precision: starting bit precision for the interval arithmetic.
        verbose: print the precision each time it has to be doubled.

    Returns:
        Tuple of vertices in counterclockwise order (reversed when the total
        turning angle is negative).
    """
    vectors = [(v[1].vector() - v[0].vector()) for v in circuit]
    # First vertex followed by the head of every edge.
    circuit_vertex = ((circuit[0][0],) + tuple((e[1] for e in circuit)))
    circuit_vertex = tuple(circuit_vertex)
    if convex:
        pr = matrix([vectors[0], vectors[1]]).determinant()
        if (pr > 0):
            return circuit_vertex
        elif (pr < 0):
            return tuple(reversed(circuit_vertex))
    prec = precision
    while True:
        # Total turning angle via complex interval arithmetic; its sign decides
        # the orientation once the interval excludes zero.
        CIF = ComplexIntervalField(prec)
        totalangle = sum(((CIF(*vectors[i]) / CIF(*vectors[(i - 1)])).argument() for i in range(len(vectors))))
        if (totalangle < 0):
            return tuple(reversed(circuit_vertex))
        if (totalangle > 0):
            return circuit_vertex
        # Interval too wide to determine the sign: double the precision and retry.
        prec *= 2
        if verbose:
            print(prec)
def compute_predicted_aligned_error(logits: torch.Tensor, max_bin: int=31, no_bins: int=64, **kwargs) -> Dict[(str, torch.Tensor)]:
    """Turn per-bin PAE logits into expected aligned-error estimates.

    Softmaxes the logits over the last (bin) dimension, then reduces the
    per-bin probabilities to an expected error via the distance-bin
    boundaries. Extra kwargs are accepted and ignored.

    Returns a dict with 'aligned_confidence_probs',
    'predicted_aligned_error' and 'max_predicted_aligned_error'.
    """
    bin_edges = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device)
    confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
    pae, max_pae = _calculate_expected_aligned_error(
        alignment_confidence_breaks=bin_edges,
        aligned_distance_error_probs=confidence_probs,
    )
    return {
        'aligned_confidence_probs': confidence_probs,
        'predicted_aligned_error': pae,
        'max_predicted_aligned_error': max_pae,
    }
def test_string():
    """Round-trip check: reading the string Avro sample file yields the
    expected list of strings."""
    filename = os.path.join(SAMPLES_DIR, 'string_test_data.avro')
    data = ['Hello', 'what', 'should', 'we', 'do', 'for', 'this', 'period', 'of', 'time']
    assert (ak.from_avro_file(file=filename).to_list() == data)
class C(nn.Module):
    """Plain 2D convolution (no batch norm / activation) with padding chosen
    so that, for odd kernel sizes at stride 1, the spatial size is preserved."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        Args:
            nIn: number of input channels.
            nOut: number of output channels.
            kSize: square kernel size (odd values give 'same'-style padding).
            stride: convolution stride.
        """
        super().__init__()
        # Integer floor division instead of int(float division): identical for
        # non-negative kernel sizes, clearer intent, no float round-trip.
        padding = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)

    def forward(self, input):
        """Apply the convolution to `input` (N, nIn, H, W) -> (N, nOut, H', W')."""
        return self.conv(input)
class RCNNLogLossMetric(BufferedEvalMetric):
    """Cross-entropy (log loss) metric for the RCNN classification head."""

    def __init__(self):
        super(RCNNLogLossMetric, self).__init__('RCNNLogLoss')
        # In end-to-end mode labels are emitted by the network itself.
        self.e2e = config.TRAIN.END2END
        (self.pred, self.label) = get_rcnn_names()

    def update(self, labels, preds):
        """Accumulate the summed log loss and sample count from one batch."""
        pred = preds[self.pred.index('rcnn_cls_prob')]
        if self.e2e:
            label = preds[self.pred.index('rcnn_label')]
        else:
            label = labels[self.label.index('rcnn_label')]
        # Flatten to (num_samples, num_classes) and (num_samples,).
        last_dim = pred.shape[(- 1)]
        pred = pred.asnumpy().reshape((- 1), last_dim)
        label = label.asnumpy().reshape((- 1)).astype('int32')
        # Probability each sample assigns to its true class.
        cls = pred[(np.arange(label.shape[0]), label)]
        # Epsilon guards against log(0).
        cls += 1e-14
        cls_loss = ((- 1) * np.log(cls))
        cls_loss = np.sum(cls_loss)
        self.addval(cls_loss, label.shape[0])
class Fold(Module):
    """Combine an array of sliding local blocks into one containing tensor
    (the inverse of Unfold); a thin module wrapper around F.fold that stores
    the fold hyperparameters."""

    def __init__(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
        super(Fold, self).__init__()
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.padding = padding
        self.stride = stride

    def forward(self, input):
        """Fold (N, C*prod(kernel_size), L) blocks back into the output tensor."""
        return F.fold(
            input,
            self.output_size,
            self.kernel_size,
            self.dilation,
            self.padding,
            self.stride,
        )

    def extra_repr(self):
        # Keep the exact field order/format used by the original repr.
        return (
            f'output_size={self.output_size}, kernel_size={self.kernel_size}, '
            f'dilation={self.dilation}, padding={self.padding}, stride={self.stride}'
        )
def send_geth_rpc(url, method, params):
    """POST a JSON-RPC 2.0 request to a geth node and return its 'result'.

    Raises KeyError if the response carries no 'result' field (e.g. an error
    reply), matching the original behavior.
    """
    payload = {'jsonrpc': '2.0', 'id': 1, 'method': method, 'params': params}
    response = requests.post(url, json=payload)
    body = json.loads(response.text)
    return body['result']
def neighbors_and_flows(flow_list, edge_idx, node_set=None):
    """Collect (neighbor, flow) pairs for edges whose endpoint at `edge_idx`
    lies in `node_set`.

    Args:
        flow_list: iterable of (edge, flow) pairs; `edge` is indexable.
        edge_idx: index of the edge endpoint that must be in `node_set`.
        node_set: collection of node ids to match. Defaults to empty.
            (Was a mutable `{}` default; replaced with the None-sentinel
            idiom -- behavior is unchanged since it was never mutated.)

    Returns:
        List of (edge[1 + edge_idx], flow) for every matching edge.
    """
    if node_set is None:
        node_set = frozenset()
    # NOTE(review): the neighbor is read from index `1 + edge_idx`; for
    # edge_idx == 1 that is edge[2] -- confirm edges are not plain pairs,
    # otherwise `1 - edge_idx` may have been intended.
    return [(edge[1 + edge_idx], flow)
            for (edge, flow) in flow_list
            if edge[edge_idx] in node_set]
def test(epoch, loader, model, criterion, postloader):
    """Evaluate a slimmable model at every (resolution, width multiplier)
    combination, recalibrating batch-norm statistics for each setting.

    Logs per-setting average loss and accuracy; returns nothing.
    """
    t_start = time.time()
    model.eval()
    with torch.no_grad():
        for resolution in FLAGS.resolution_list:
            for width_mult in sorted(FLAGS.width_mult_list, reverse=True):
                # Switch every slimmable layer to this width multiplier.
                model.apply((lambda m: setattr(m, 'width_mult', width_mult)))
                # Recompute BN statistics for this width/resolution on postloader.
                model = ComputePostBN.ComputeBN(model, postloader, resolution)
                (loss, acc, cnt) = (0, 0, 0)
                for (batch_idx, (input, target)) in enumerate(loader):
                    (input, target) = (input.cuda(non_blocking=True), target.cuda(non_blocking=True))
                    # Resize inputs to the evaluation resolution.
                    output = model(F.interpolate(input, (resolution, resolution), mode='bilinear', align_corners=True))
                    loss += (criterion(output, target).cpu().numpy() * target.size()[0])
                    indices = torch.max(output, dim=1)[1]
                    acc += (indices == target).sum().cpu().numpy()
                    cnt += target.size()[0]
                logger.info('VAL {:.1f}s {}x-{} Epoch:{}/{} Loss:{:.4f} Acc:{:.3f}'.format((time.time() - t_start), str(width_mult), str(resolution), epoch, FLAGS.num_epochs, (loss / cnt), (acc / cnt)))
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
    """Convert a depth tensor into a colormapped 3xHxW tensor for logging.

    NaNs become 0, values are min-max normalized to [0, 1], quantized to
    uint8 and passed through the OpenCV colormap.
    """
    arr = np.nan_to_num(depth.cpu().numpy())  # replace NaNs before normalizing
    lo, hi = np.min(arr), np.max(arr)
    # The 1e-8 floor guards against division by zero on a constant depth map.
    arr = (arr - lo) / max(hi - lo, 1e-08)
    arr = (255 * arr).astype(np.uint8)
    colored = Image.fromarray(cv2.applyColorMap(arr, cmap))
    return T.ToTensor()(colored)
def test_measure_overlap():
    """Exercise Measure.measure_overlap for the 'max' and 'sum' strategies
    against reference annotations spanning [5, 14) and [2, 3)."""
    def Ann(start, end):
        # Shorthand for an anonymous annotation with no payload.
        return Annotation('', start, end, [])
    ref = Ann(5, 14)
    ref2 = Ann(2, 3)
    # No candidates -> zero overlap either way.
    assert_almost_equal(0.0, Measure.measure_overlap({ref: []}, 'max'))
    assert_almost_equal(0.0, Measure.measure_overlap({ref: []}, 'sum'))
    # Single partially-overlapping candidate.
    assert_almost_equal(0.3, Measure.measure_overlap({ref: [Ann(1, 7)]}, 'max'))
    assert_almost_equal(0.3, Measure.measure_overlap({ref: [Ann(1, 7)]}, 'sum'))
    # Two disjoint candidates: 'max' keeps the best one, 'sum' adds them up.
    assert_almost_equal(0.4, Measure.measure_overlap({ref: [Ann(1, 7), Ann(11, 15)]}, 'max'))
    assert_almost_equal(0.7, Measure.measure_overlap({ref: [Ann(1, 7), Ann(11, 15)]}, 'sum'))
    assert_almost_equal(0.4, Measure.measure_overlap({ref: [Ann(1, 8), Ann(12, 15)]}, 'max'))
    assert_almost_equal(0.7, Measure.measure_overlap({ref: [Ann(1, 8), Ann(12, 15)]}, 'sum'))
    # Exact match yields full overlap.
    assert_almost_equal(1.0, Measure.measure_overlap({ref: [Ann(5, 14)]}, 'max'))
    assert_almost_equal(1.0, Measure.measure_overlap({ref: [Ann(5, 14)]}, 'sum'))
    # Multiple references accumulate across keys.
    assert_almost_equal(1.4, Measure.measure_overlap({ref: [Ann(1, 8), Ann(12, 15)], ref2: [Ann(1, 8)]}, 'max'))
    assert_almost_equal(1.7, Measure.measure_overlap({ref: [Ann(1, 8), Ann(12, 15)], ref2: [Ann(1, 8)]}, 'sum'))
    # Overlapping candidates: 'sum' is capped at full coverage.
    assert_almost_equal(0.9, Measure.measure_overlap({ref: [Ann(1, 7), Ann(6, 15)]}, 'max'))
    assert_almost_equal(1.0, Measure.measure_overlap({ref: [Ann(1, 7), Ann(6, 15)]}, 'sum'))
def init_dataset(name, *args, **kwargs):
    """Instantiate the dataset registered under `name` in `__factory`,
    forwarding all extra positional/keyword arguments to its constructor.

    Raises:
        KeyError: if `name` is not a registered dataset.
    """
    # Membership test directly on the dict -- `.keys()` was redundant.
    if name not in __factory:
        raise KeyError('Unknown datasets: {}'.format(name))
    return __factory[name](*args, **kwargs)
class CNN_exp(FNN_exp):
    """Experiment wrapper that swaps the FNN model for a CNN; training and
    evaluation logic is inherited from FNN_exp."""

    def __init__(self, data_path, param_dict, config):
        # No CNN-specific setup; delegates entirely to the base experiment.
        super().__init__(data_path, param_dict, config)

    def load_model(self):
        """Build the CNNNet from the experiment's hyperparameter dict and
        print its architecture; returns the model."""
        model = CNNNet(dropout=self.param_dict['dropout'], hidden_layers=self.param_dict['hidden_layers'], kernel_size=self.param_dict['kernel_size'], stride=self.param_dict['stride'], pooling=self.param_dict['pooling'], num_target_label=len(self.dataloader.new_true_label_mapping))
        print_network(model)
        return model
def write_db_path(orig_path: str, new_db_path: str, table2column2elements: Dict[(str, Dict[(str, List)])], overwrite: bool=False) -> None:
    """Create a new SQLite database at `new_db_path` with the schema of
    `orig_path` and the provided table contents.

    Args:
        orig_path: path to the original database whose schema is cloned.
        new_db_path: destination path for the new database.
        table2column2elements: table name -> (column name -> column values).
            Column names must match the original schema exactly (asserted).
        overwrite: if False and `new_db_path` already exists, do nothing.
    """
    if (os.path.exists(new_db_path) and (not overwrite)):
        print('new database already exists.')
        return
    # Materialize an empty-schema clone of the original DB, copy it into
    # place, and discard the temporary file.
    empty_db_path = init_empty_db_from_orig_(orig_path)
    copyfile(empty_db_path, new_db_path)
    os.unlink(empty_db_path)
    cursor = get_cursor_path(new_db_path)
    (table_name2column_properties, _) = extract_table_column_properties_path(orig_path)
    for (table_name, column2elements) in table2column2elements.items():
        columns = list(column2elements.keys())
        orig_columns = list(table_name2column_properties[table_name].keys())
        # Guard against schema drift between caller data and the original DB.
        assert (columns == orig_columns), (columns, orig_columns)
        insert_table(cursor, table_name, column2elements)
    cursor.connection.commit()
    cursor.connection.close()
def RewriteContext():
    """Rewrite the task context proto: for every input resource created by
    this stage, replace its parts with a single file pattern under the
    output path, then write the updated context to OutputPath('context')."""
    context = task_spec_pb2.TaskSpec()
    with gfile.FastGFile(FLAGS.task_context) as fin:
        text_format.Merge(fin.read(), context)
    for resource in context.input:
        if (resource.creator == StageName()):
            # Replace any existing parts with one canonical output pattern.
            del resource.part[:]
            part = resource.part.add()
            part.file_pattern = os.path.join(OutputPath(resource.name))
    with gfile.FastGFile(OutputPath('context'), 'w') as fout:
        fout.write(str(context))
def _init_parser():
    """Build the module-level ArgumentParser for SEPP and return it.

    Argument groups (decomposition / output / input / other) are also exposed
    via `_parser.groups` so downstream code can extend them.
    """
    global _parser
    _parser = ArgumentParser(description='This script runs the SEPP algorithm on an input tree, alignment, fragment file, and RAxML info file.', conflict_handler='resolve')
    _parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + version))
    # --- Decomposition options: subset sizes and stopping criteria. ---
    decompGroup = _parser.add_argument_group('Decomposition Options'.upper(), ' '.join(['These options determine the alignment decomposition size and', 'taxon insertion size. If None is given, then the default', 'is to align/place at 10% of total taxa. The alignment decomosition size must be', 'less than the taxon insertion size.']))
    _parser.groups = dict()
    _parser.groups['decompGroup'] = decompGroup
    decompGroup.add_argument('-A', '--alignmentSize', type=int, dest='alignment_size', metavar='N', default=None, help='max alignment subset size of N [default: 10%% of the total number of taxa or the placement subset size if given]')
    decompGroup.add_argument('-P', '--placementSize', type=int, dest='placement_size', metavar='N', default=None, help='max placement subset size of N [default: 10%% of the total number of taxa or the alignment length (whichever bigger)]')
    decompGroup.add_argument('-F', '--fragmentChunkSize', type=int, dest='max_chunk_size', metavar='N', default=20000, help='maximum fragment chunk size of N. Helps controlling memory. [default: 20000]')
    decompGroup.add_argument('-D', '--distance', type=float, dest='distance', metavar='DISTANCE', default=1, help='minimum p-distance before stopping the decomposition[default: 1]')
    decompGroup.add_argument('-M', '--diameter', type=float, dest='maxDiam', metavar='DIAMETER', default=None, help='maximum tree diameter before stopping the decomposition[default: None]')
    decompGroup.add_argument('-S', '--decomp_strategy', type=valid_decomp_strategy, dest='decomp_strategy', metavar='DECOMP', default='normal', help='decomposition strategy [default: using tree branch length]')
    # --- Output options: temp dir, output prefix and directory. ---
    outputGroup = _parser.add_argument_group('Output Options'.upper(), 'These options control output.')
    _parser.groups['outputGroup'] = outputGroup
    outputGroup.add_argument('-p', '--tempdir', dest='tempdir', metavar='DIR', type=valid_dir_path, default=get_default_temp_dir(), help='Tempfile files will be written to DIR. Full-path required. [default: %(default)s]')
    outputGroup.add_argument('-rt', '--remtemp', dest='remtemp', action='store_true', help='Remove tempfile directory. [default: disabled]')
    outputGroup.set_defaults(remtemp=False)
    outputGroup.add_argument('-o', '--output', dest='output', metavar='OUTPUT', default='output', type=valid_file_prefix, help='output files with prefix OUTPUT. [default: %(default)s]')
    outputGroup.add_argument('-d', '--outdir', dest='outdir', metavar='OUTPUT_DIR', default=os.path.curdir, type=valid_dir_path, help='output to OUTPUT_DIR directory. full-path required. [default: %(default)s]')
    # --- Input options: tree, RAxML info, alignment, fragments, molecule. ---
    inputGroup = _parser.add_argument_group('Input Options'.upper(), ' '.join(['These options control input. To run SEPP the following is required. A backbone tree (in newick format), a RAxML_info file (this is the file generated by RAxML during estimation of the backbone tree. Pplacer uses this info file to set model parameters), a backbone alignment file (in fasta format), and a fasta file including fragments. The input sequences are assumed to be DNA unless specified otherwise.']))
    _parser.groups['inputGroup'] = inputGroup
    inputGroup.add_argument('-c', '--config', dest='config_file', metavar='CONFIG', type=argparse.FileType('r'), help='A config file, including options used to run SEPP. Options provided as command line arguments overwrite config file values for those options. [default: %(default)s]')
    inputGroup.add_argument('-t', '--tree', dest='tree_file', metavar='TREE', type=argparse.FileType('r'), help='Input tree file (newick format) [default: %(default)s]')
    inputGroup.add_argument('-r', '--raxml', dest='info_file', metavar='RAXML', type=argparse.FileType('r'), help='RAxML_info file including model parameters, generated by RAxML.[default: %(default)s]')
    inputGroup.add_argument('-a', '--alignment', dest='alignment_file', metavar='ALIGN', type=argparse.FileType('r'), help='Aligned fasta file [default: %(default)s]')
    inputGroup.add_argument('-f', '--fragment', dest='fragment_file', metavar='FRAG', type=argparse.FileType('r'), help='fragment file [default: %(default)s]')
    inputGroup.add_argument('-m', '--molecule', dest='molecule', metavar='MOLECULE', type=valid_molecule, default='dna', help='Molecule type of sequences. Can be amino, dna, or rna [default: %(default)s]')
    inputGroup.add_argument('--ignore-overlap', dest='ignore_overlap', default=False, action='store_true', help='When a query sequence has the same name as a backbone sequence, ignore the query sequences and keep the backbone sequence[default: %(default)s]')
    # --- Other options: CPUs, checkpointing, RNG seed. ---
    otherGroup = _parser.add_argument_group('Other options'.upper(), 'These options control how SEPP is run')
    _parser.groups['otherGroup'] = otherGroup
    otherGroup.add_argument('-x', '--cpu', type=set_cpu, dest='cpu', metavar='N', default=set_cpu(cpu_count()), help='Use N cpus [default: number of cpus available on the machine]')
    otherGroup.add_argument('-cp', '--checkpoint', type=set_checkpoint, dest='checkpoint', metavar='CHCK_FILE', default=set_checkpoint(None), help='checkpoint file [default: no checkpointing]')
    otherGroup.add_argument('-cpi', '--interval', type=int, dest='checkpoint_interval', metavar='N', default=3600, help='Interval (in seconds) between checkpoint writes. Has effect only with -cp provided. [default: 3600]')
    otherGroup.add_argument('-seed', '--randomseed', type=int, dest='seed', metavar='N', default=297834, help='random seed number. [default: 297834]')
    return _parser
def tabulate(tabular_data, headers=[], tablefmt='simple', floatfmt='g', numalign='decimal', stralign='left', missingval=''):
    """Format `tabular_data` as a text table.

    Args:
        tabular_data: rows in any shape `_normalize_tabular_data` accepts.
        headers: column headers. NOTE(review): mutable default `[]` -- safe
            here because it is only rebound, never mutated, but a `()` or
            None-sentinel default would be cleaner.
        tablefmt: a TableFormat instance or the name of a registered format.
        floatfmt / numalign / stralign / missingval: per-cell formatting knobs.

    Returns:
        The formatted table as a single string.
    """
    (list_of_lists, headers) = _normalize_tabular_data(tabular_data, headers)
    # Plain rendering used only to detect ANSI escape codes, which change the
    # visible (vs. raw) width of cells.
    plain_text = '\n'.join((['\t'.join(map(_text_type, headers))] + ['\t'.join(map(_text_type, row)) for row in list_of_lists]))
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    # Work column-wise: infer a type per column, then format and align cells.
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c] for (c, ct) in zip(cols, coltypes)]
    aligns = [(numalign if (ct in [int, float]) else stralign) for ct in coltypes]
    # Minimum width: header width plus padding, or 0 when there are no headers.
    minwidths = ([(width_fn(h) + 2) for h in headers] if headers else ([0] * len(cols)))
    cols = [_align_column(c, a, minw, has_invisible) for (c, a, minw) in zip(cols, aligns, minwidths)]
    if headers:
        # Widen to the widest cell and align headers to the final widths.
        minwidths = [max(minw, width_fn(c[0])) for (minw, c) in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw) for (h, a, minw) in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))
    if (not isinstance(tablefmt, TableFormat)):
        # Unknown format names silently fall back to 'simple'.
        tablefmt = _table_formats.get(tablefmt, _table_formats['simple'])
    return _format_table(tablefmt, headers, rows, minwidths, aligns)
class Adafactor(torch.optim.Optimizer):
    """Adafactor optimizer: memory-efficient second-moment estimation via
    factored row/column statistics for matrices, with optional relative step
    sizing and update clipping.

    Args:
        params: iterable of parameters or parameter groups.
        lr: external learning rate; must be None when relative_step is True.
        eps: (eps1, eps2) regularization constants for the squared gradient
            and the parameter-scale floor respectively.
        clip_threshold: threshold on the RMS of the final update.
        decay_rate: exponent controlling the running-average decay schedule.
        beta1: if not None, enables first-moment (momentum) accumulation.
        weight_decay: decoupled weight decay coefficient.
        scale_parameter: scale the step size by the parameter RMS.
        relative_step: derive the step size from the step count instead of lr.
        warmup_init: use a linear warmup for the relative step size.
    """

    def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
        if ((lr is not None) and relative_step):
            raise ValueError('Cannot combine manual lr and relative_step options')
        if (warmup_init and (not relative_step)):
            raise ValueError('warmup_init requires relative_step=True')
        defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    def supports_memory_efficient_fp16(self):
        # Safe with memory-efficient fp16 training wrappers.
        return True

    def supports_flat_params(self):
        return False

    def _get_lr(self, param_group, param_state):
        """Compute the effective step size for one parameter."""
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            # Warmup ramps linearly at 1e-6/step; otherwise cap at 0.01.
            min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
            rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step'])))
        param_scale = 1.0
        if param_group['scale_parameter']:
            # Scale by the parameter RMS, floored at eps2.
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return (param_scale * rel_step_sz)

    def _get_options(self, param_group, param_shape):
        """Decide factored second moments (rank >= 2) and first-moment use."""
        factored = (len(param_shape) >= 2)
        use_first_moment = (param_group['beta1'] is not None)
        return (factored, use_first_moment)

    def _rms(self, tensor):
        # Root-mean-square of a tensor.
        return (tensor.norm(2) / (tensor.numel() ** 0.5))

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):
        """Reconstruct 1/sqrt(second moment) from the factored row/col stats,
        writing the result into `output`."""
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1)).unsqueeze((- 1))).rsqrt_().unsqueeze((- 1))
        c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt()
        torch.mul(r_factor, c_factor, out=output)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                # All internal math is done in fp32 regardless of param dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                (factored, use_first_moment) = self._get_options(group, grad_shape)
                if (len(state) == 0):
                    # Lazy state initialization on the first step.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Row stats drop the last dim; col stats drop the
                        # second-to-last dim.
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).type_as(grad)
                        state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).type_as(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Ensure state tensors match the gradient dtype/device.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].type_as(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].type_as(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].type_as(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].type_as(grad)
                p_data_fp32 = p.data.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                group['lr'] = self._get_lr(group, state)
                # Decay approaches 1 as step grows (decay_rate is negative).
                beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
                update = ((grad ** 2) + group['eps'][0])
                # NOTE(review): add_(scalar, tensor) is the legacy positional
                # torch API (newer torch expects add_(tensor, alpha=scalar)) --
                # confirm the targeted torch version still accepts it.
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    exp_avg_sq_row.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 1)))
                    exp_avg_sq_col.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 2)))
                    # update <- grad / sqrt(approx second moment)
                    self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_((1.0 - beta2t), update)
                    torch.rsqrt(exp_avg_sq, out=update).mul_(grad)
                # Clip the update RMS at clip_threshold, then apply step size.
                update.div_(max(1.0, (self._rms(update) / group['clip_threshold'])))
                update.mul_(group['lr'])
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_((1 - group['beta1']), update)
                    update = exp_avg
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay (applied directly to params).
                    p_data_fp32.add_(((- group['weight_decay']) * group['lr']), p_data_fp32)
                p_data_fp32.add_((- update))
                # Copy back only when p.data was not already fp32.
                if (p.data_ptr() != p_data_fp32.data_ptr()):
                    p.data.copy_(p_data_fp32)
        return loss
class InitLoader(PTInitializingDataLoader):
    """Adapter over a DataLoader yielding {'image': ...} dicts: iteration
    produces only the image tensor, and the model is invoked as model(image)
    with no target."""

    def __init__(self, data_loader: DataLoader):
        super().__init__(data_loader)
        self._data_loader_iter: Iterator

    def __iter__(self):
        self._data_loader_iter = iter(self._data_loader)
        return self

    def __next__(self) -> Any:
        batch = next(self._data_loader_iter)
        # Strip the dict wrapper; downstream only needs the image tensor.
        return batch['image']

    def get_inputs(self, dataloader_output) -> tuple[(tuple, dict)]:
        # Single positional argument, no keyword arguments.
        return ((dataloader_output,), {})

    def get_target(self, _):
        # Initialization requires no ground-truth target.
        return None
class BertLayer(nn.Module):
    """One transformer encoder layer: self-attention followed by the
    intermediate (feed-forward) sublayer and the output projection."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertSelfattLayer(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        inter_out = self.intermediate(attn_out)
        # Output block combines the FFN result with the attention output.
        return self.output(inter_out, attn_out)
def train(model, data_loader, optimizer, epoch, device, config):
    """Run one training epoch over paired-image batches.

    Returns:
        Dict mapping meter name -> averaged value formatted as a string.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    # NOTE(review): step_size is never used below -- confirm before removing.
    step_size = 10
    for (i, (image0, image1, text, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # The two images of each pair are stacked along the batch dimension.
        images = torch.cat([image0, image1], dim=0)
        (images, targets) = (images.to(device), targets.to(device))
        loss = model(images, text, targets=targets, train=True)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(loss=loss.item())
    # Sync meters across distributed workers before reporting averages.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger.global_avg())
    return {k: '{:.4f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType:
    """Render the C++ timeit template with the given code snippets and compile
    it into a timing extension module."""
    template_path = os.path.join(SOURCE_ROOT, 'timeit_template.cpp')
    with open(template_path, 'rt') as template_file:
        template_src = template_file.read()
    module = _compile_template(
        stmt=stmt,
        setup=setup,
        global_setup=global_setup,
        src=template_src,
        is_standalone=False,
    )
    # Sanity-check that the non-standalone path produced a timeit module.
    assert isinstance(module, TimeitModuleType)
    return module
class DatetimeRole(ColumnRole):
    """Column role describing a datetime feature: parsing parameters
    (format, unit, origin) plus feature-generation flags (seasonality,
    base-date handling, holiday country/province/state)."""
    _name = 'Datetime'

    def __init__(self, dtype: Dtype=np.datetime64, seasonality: Optional[Sequence[str]]=('y', 'm', 'wd'), base_date: bool=False, date_format: Optional[str]=None, unit: Optional[str]=None, origin: Union[(str, datetime)]='unix', force_input: bool=False, base_feats: bool=True, country: Optional[str]=None, prov: Optional[str]=None, state: Optional[str]=None):
        self.dtype = dtype
        # Explicit None means "no seasonality features" -> empty list.
        self.seasonality = seasonality if seasonality is not None else []
        self.base_date = base_date
        self.format = date_format
        self.unit = unit
        self.origin = origin
        # Base-date columns are always forced into the feature set.
        self.force_input = True if base_date else force_input
        self.base_feats = base_feats
        self.country = country
        self.prov = prov
        self.state = state
class _DenseBlock(nn.ModuleDict):
    """DenseNet block: each layer receives all previously produced feature
    maps and contributes `growth_rate` new channels; the block output is the
    channel-wise concatenation of everything."""
    _version = 2

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
        super(_DenseBlock, self).__init__()
        for idx in range(num_layers):
            in_channels = num_input_features + idx * growth_rate
            dense_layer = _DenseLayer(
                in_channels,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module('denselayer%d' % (idx + 1), dense_layer)

    def forward(self, init_features):
        collected = [init_features]
        for _, layer in self.items():
            # Each layer consumes the list of all prior feature maps.
            collected.append(layer(collected))
        return torch.cat(collected, 1)
class TestSuiteBranchCoverageFunction(TestSuiteCoverageFunction):
    """Coverage function that scores a test-suite chromosome by the branch
    coverage its merged execution trace achieves."""

    def compute_coverage(self, individual) -> float:
        execution_results = self._run_test_suite_chromosome(individual)
        combined_trace = analyze_results(execution_results)
        # Branch goals come from the tracer's knowledge of the subject.
        subject_properties = self._executor.tracer.get_subject_properties()
        return compute_branch_coverage(combined_trace, subject_properties)
def serve_command_factory(args: Namespace):
    """Build a ServeCommand hosting a transformers pipeline configured from
    the parsed CLI arguments."""
    # Empty/falsy model names mean "use the task's default model".
    model_name = args.model or None
    nlp = pipeline(
        task=args.task,
        model=model_name,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
def async_copy_to(obj, dev, main_stream=None):
    """Recursively copy (nests of) tensors to CUDA device `dev` asynchronously.

    Args:
        obj: a tensor, or a Mapping/Sequence of (possibly nested) tensors;
            anything else is returned unchanged. Sequences come back as lists.
        dev: target CUDA device index.
        main_stream: if given, record each copied tensor on this stream so
            its memory is not reused before the stream consumes it.
    """
    if torch.is_tensor(obj):
        v = obj.cuda(dev, non_blocking=True)
        if main_stream is not None:
            v.data.record_stream(main_stream)
        return v
    # Fix: the container ABCs moved to collections.abc (the collections.*
    # aliases were removed in Python 3.10).
    if isinstance(obj, collections.abc.Mapping):
        return {k: async_copy_to(o, dev, main_stream) for (k, o) in obj.items()}
    if isinstance(obj, collections.abc.Sequence):
        # NOTE: str/bytes are Sequences too and would recurse per character
        # (as in the original) -- callers are expected not to pass them.
        return [async_copy_to(o, dev, main_stream) for o in obj]
    return obj
class ZeroBaseline(Baseline):
    """Trivial baseline that predicts zero for every timestep; it keeps no
    parameters and never needs fitting."""

    def __init__(self, env_spec):
        # Stateless: the environment spec is ignored.
        pass

    def get_param_values(self, **kwargs):
        # No parameters to report.
        return None

    def set_param_values(self, val, **kwargs):
        # No parameters to set.
        pass

    def fit(self, paths):
        # Nothing to fit.
        pass

    def predict(self, path):
        """Return a zero baseline with one entry per reward in `path`."""
        return np.zeros_like(path['rewards'])
def create_argument_parser():
    """Build the CLI parser for the COCO category-pair dataset extraction
    script (paths, image size, and the two categories)."""
    parser = argparse.ArgumentParser()
    # (flag, type, default, help) -- help is omitted when None.
    option_specs = [
        ('--data_root', str, 'datasets/COCO', None),
        ('--save_root', str, 'datasets/shp2gir_coco', None),
        ('--image_size', int, 256, 'image size'),
        ('--cat1', str, 'sheep', 'category 1'),
        ('--cat2', str, 'giraffe', 'category 2'),
    ]
    for flag, opt_type, default, help_text in option_specs:
        kwargs = {'type': opt_type, 'default': default}
        if help_text is not None:
            kwargs['help'] = help_text
        parser.add_argument(flag, **kwargs)
    return parser
def _glue_convert_examples_to_features(examples: List[InputExample], tokenizer: PreTrainedTokenizer, max_length: Optional[int]=None, task=None, label_list=None, output_mode=None):
    """Convert GLUE InputExamples into tokenized InputFeatures.

    Args:
        examples: examples to convert.
        tokenizer: tokenizer used to encode (text_a, text_b) pairs.
        max_length: pad/truncate length; defaults to the tokenizer maximum.
        task: GLUE task name, used to fill in label_list/output_mode defaults.
        label_list: label names; mapped to integer ids by position.
        output_mode: 'classification' or 'regression'.

    Returns:
        List of InputFeatures with padded encodings and converted labels.
    """
    if (max_length is None):
        max_length = tokenizer.model_max_length
    if (task is not None):
        # Derive label list and output mode from the task registry if absent.
        processor = glue_processors[task]()
        if (label_list is None):
            label_list = processor.get_labels()
            logger.info(f'Using label list {label_list} for task {task}')
        if (output_mode is None):
            output_mode = glue_output_modes[task]
            logger.info(f'Using output mode {output_mode} for task {task}')
    label_map = {label: i for (i, label) in enumerate(label_list)}
    def label_from_example(example: InputExample) -> Union[(int, float, None)]:
        # Unlabeled example (e.g. test split).
        if (example.label is None):
            return None
        if (output_mode == 'classification'):
            return label_map[example.label]
        elif (output_mode == 'regression'):
            return float(example.label)
        raise KeyError(output_mode)
    labels = [label_from_example(example) for example in examples]
    # Batch-encode all pairs at once with fixed-length padding.
    batch_encoding = tokenizer([(example.text_a, example.text_b) for example in examples], max_length=max_length, padding='max_length', truncation=True)
    features = []
    for i in range(len(examples)):
        inputs = {k: batch_encoding[k][i] for k in batch_encoding}
        feature = InputFeatures(**inputs, label=labels[i])
        features.append(feature)
    # Log the first few examples for debugging.
    for (i, example) in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(f'guid: {example.guid}')
        logger.info(f'features: {features[i]}')
    return features
def trace(title: str):
    """Generator measuring wall time and RSS delta around its ``yield``.

    Presumably wrapped by ``contextlib.contextmanager`` at the definition or
    call site (decorator not visible here — confirm).  After the wrapped
    body runs, prints current RSS in GiB, the signed RSS delta, and elapsed
    seconds to stderr, followed by ``title``.
    """
    started = time()
    proc = psutil.Process(os.getpid())
    # memory_info()[0] is the resident set size in bytes; convert to GiB.
    rss_before = proc.memory_info()[0] / (2.0 ** 30)
    yield
    rss_after = proc.memory_info()[0] / (2.0 ** 30)
    diff = rss_after - rss_before
    sign = '+' if diff >= 0 else '-'
    diff = math.fabs(diff)
    print(f'[{rss_after:.1f}GB ({sign}{diff:.3f}GB): {(time() - started):.2f}sec] {title} ', file=sys.stderr)
class LeanSpecGenerator():
    """Generates and maintains the Lean specification file for a Cairo program.

    Reads any existing spec file, splices freshly generated automatic
    specifications into it while preserving user-written specifications and
    soundness theorems (commenting out stale ones), and writes the result
    back to disk via mk_lean_spec_file().

    NOTE(review): the field defaults use ``dataclasses.field`` — presumably
    the class carries a ``@dataclasses.dataclass`` decorator outside this
    view; confirm.
    """
    # Paths of the Cairo/Lean files involved.
    file_names: LeanFileNames
    # Parsed information about the Lean program being specified.
    lean_info: LeanProgramInfo
    # Simplifier applied to generated Lean expressions.
    simplifier: LeanExprSimplifier
    # True once an existing spec file has been successfully read.
    spec_file_exists: bool = False
    # Lines of the (existing or generated) spec file.
    specs: List[str] = dataclasses.field(default_factory=(lambda : []))
    # Function currently being processed.
    func: Optional[LeanFunctionInfo] = None
    def main_scope(self) -> ScopedName:
        """Main scope of the program being specified.

        NOTE(review): referenced as an attribute (``self.main_scope``)
        throughout this class — presumably ``@property`` in the original;
        confirm.
        """
        return self.lean_info.main_scope
    def get_existing_specs(self):
        """Load the existing spec file (if any) into ``self.specs``.

        Best effort: any failure reading the file is treated as "no
        existing spec file".
        """
        try:
            codes = get_codes([self.file_names.spec_filename])
            if (len(codes) == 0):
                return
            self.spec_file_exists = True
            self.specs = codes[0][0].splitlines()
        except:
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt — consider narrowing to Exception.
            pass
    def find_auto_spec_in_existing(self, func: LeanFunctionInfo) -> Optional[Tuple[(int, int)]]:
        """Locate the existing auto-spec of ``func`` in ``self.specs``.

        Returns (start, end) line indices covering the main auto-spec def
        and any contiguous ``_block<N>`` defs, or None when the spec file
        does not exist or the definition is not found.
        """
        if (not self.spec_file_exists):
            return None
        # Matches the auto-spec def line, optionally a block variant.
        start_exp = re.compile((('\\s*def ' + mk_lean_auto_spec_name(func.name, [self.main_scope])) + '(_block\\d*)?\\s*\\('))
        for (start, line) in enumerate(self.specs):
            if (start_exp.match(line) is not None):
                break
        else:
            # for/else: no matching 'def' line anywhere in the file.
            return None
        end = start
        def_end: Optional[int] = start
        # Extend over the definition plus any immediately following defs.
        while (def_end is not None):
            end = def_end
            def_end = self.check_auto_spec_def(func=func, start=def_end, skip_ws=True)
        return (start, end)
    def check_auto_spec_def(self, func: LeanFunctionInfo, start: int, skip_ws: bool) -> Optional[int]:
        """Check whether an auto-spec def of ``func`` begins at ``start``.

        When ``skip_ws`` is set, leading blank lines are skipped first.
        Returns the index one past the def (ending at the first blank line),
        or None when no matching def starts there.
        """
        if (not self.spec_file_exists):
            return None
        if skip_ws:
            while (start < len(self.specs)):
                if (self.specs[start] and (not self.specs[start].isspace())):
                    break
                start += 1
        start_str = ('def ' + mk_lean_auto_spec_name(func.name, [self.main_scope]))
        if ((start == len(self.specs)) or (not self.specs[start].startswith(start_str))):
            return None
        # The definition runs until the first blank line after it.
        for end in range((start + 1), len(self.specs)):
            if ((not self.specs[end]) or self.specs[end].isspace()):
                end += 1
                break
        return end
    def find_prelude_comment_in_existing(self, before: int) -> int:
        """Return the start line of the comment block directly above ``before``.

        Scans upward over '--' line comments, '/- ... -/' block comments,
        and blank lines; returns ``before`` itself when there is no prelude.
        """
        in_comment = False
        start_prelude = before
        for pos in reversed(range(0, before)):
            line = self.specs[pos]
            if in_comment:
                # Inside a block comment (scanning upward): look for its open.
                if line.lstrip().startswith('/-'):
                    start_prelude = pos
                    in_comment = False
            elif ((line == '') or line.isspace()):
                continue
            elif line.lstrip().startswith('--'):
                start_prelude = pos
            elif line.rstrip().endswith('-/'):
                in_comment = True
            else:
                break
        return start_prelude
    def find_first_auto_spec_in_existing(self, funcs: List[LeanFunctionInfo]) -> Optional[Tuple[(int, int, int, LeanFunctionInfo)]]:
        """Find the first function in ``funcs`` with an existing auto-spec.

        Returns (comment_start, spec_start, spec_end, func) or None.

        NOTE(review): the ``else: return None`` is attached to the ``if``,
        so the loop returns on its first iteration and only ``funcs[0]`` is
        ever examined.  Given that replace_specs/insert_specs_before compare
        the found func against funcs[0], a for/else scanning all funcs was
        likely intended — confirm.
        """
        if (not self.spec_file_exists):
            return None
        for func in funcs:
            lines = self.find_auto_spec_in_existing(func)
            if (lines is not None):
                comment_start = self.find_prelude_comment_in_existing(lines[0])
                return (comment_start, lines[0], lines[1], func)
            else:
                return None
    def find_user_spec_in_existing(self, func: LeanFunctionInfo, with_prelude: bool=True) -> Optional[Tuple[(int, int)]]:
        """Locate the user spec of ``func``; optionally include its prelude.

        Returns (start, end) indices into ``self.specs`` or None.
        """
        if (not self.spec_file_exists):
            return None
        start_str = (('def ' + mk_lean_user_spec_name(func.name, [self.main_scope])) + ' (')
        for (line_num, line) in enumerate(self.specs):
            if line.strip().startswith(start_str):
                # The definition runs until the first blank line.
                for end in range((line_num + 1), len(self.specs)):
                    if ((not self.specs[end]) or self.specs[end].isspace()):
                        break
                return ((self.find_prelude_comment_in_existing(line_num) if with_prelude else line_num), end)
        return None
    def has_user_spec_in_existing(self, func: LeanFunctionInfo) -> bool:
        """True when the spec file already contains a user spec for ``func``."""
        return (self.find_user_spec_in_existing(func) is not None)
    def find_user_soundness_in_existing(self, func: LeanFunctionInfo, with_prelude: bool=True) -> Optional[int]:
        """Locate the user soundness theorem of ``func``.

        Returns its start line (optionally the start of its prelude comment)
        or None.
        """
        if (not self.spec_file_exists):
            return None
        start_line = ('theorem ' + mk_lean_user_soundness_name(func.name, [self.main_scope]))
        for (line_num, line) in enumerate(self.specs):
            if (line.strip() == start_line):
                return (self.find_prelude_comment_in_existing(line_num) if with_prelude else line_num)
        return None
    def has_user_soundness_in_existing(self, func: LeanFunctionInfo) -> bool:
        """True when the spec file already has a soundness theorem for ``func``."""
        return (self.find_user_soundness_in_existing(func) is not None)
    def get_func_spec_arg_list(self, func: LeanFunctionInfo) -> List[str]:
        """Lean argument definitions for ``func`` (including return values)."""
        arg_types = func.get_args_with_type(with_ret=True)
        return create_arg_defs(arg_types)
    def get_block_spec_args(self, func: LeanFunctionInfo, block_desc_num: int) -> Dict[(str, str)]:
        """Name -> type mapping for a block spec's arguments plus returns."""
        assert (block_desc_num in func.block_list)
        arg_types = func.block_list[block_desc_num].get_args_with_type()
        arg_types.update(func.get_ret_args_with_type())
        return arg_types
    def get_block_spec_arg_list(self, func: LeanFunctionInfo, block_desc_num: int) -> List[str]:
        """Lean argument definitions for the given block spec."""
        return create_arg_defs(self.get_block_spec_args(func, block_desc_num))
    def get_block_spec_arg_names(self, func: LeanFunctionInfo, block_desc_num: int, name_sub: Optional[Dict[(str, int)]]) -> List[str]:
        """Block spec argument names, with optional subscript substitution."""
        base_names = list(self.get_block_spec_args(func, block_desc_num))
        return ([name_with_sub(name, name_sub) for name in base_names] if (name_sub is not None) else base_names)
    def mk_lean_function_auto_spec(self, func: LeanFunctionInfo) -> str:
        """Generate the automatic Lean specification text for ``func``.

        Emits a block spec for each join point first, then the function's
        main spec.  Functions with loops get a trivial ``true`` body.
        """
        specs = ''
        for block_desc_num in func.join_points:
            specs += (self.mk_lean_block_auto_spec(func, block_desc_num) + '\n\n')
        self.func = func
        name_sub = {name: 0 for name in func.arg_names}
        auto_spec_name = mk_lean_auto_spec_name(func.name, [self.main_scope])
        arg_defs = self.get_func_spec_arg_list(func)
        specs += ((f'def {auto_spec_name} (mem : F F) ( : N)' + ((' ' + ' '.join(arg_defs)) if arg_defs else '')) + ' : Prop :=\n')
        if self.func.has_loop:
            specs += ((' ' * LEAN_CODE_INDENT) + 'true\n')
            return specs
        block_ctx = LeanSpecBlockGenerator(func=func, lean_info=self.lean_info, simplifier=self.simplifier, spec_start_lean_desc=0, lean_desc_num=0, name_sub=name_sub, trace_count=LeanTraceCount(), rc_steps=(SpecRCSteps(rc_builtin=func.rc) if (func.rc is not None) else None), num_func_calls=0)
        block_ctx.indent()
        asserts = block_ctx.mk_block_specs()
        specs += ((' ' * LEAN_CODE_INDENT) + ''.join(asserts))
        return specs
    def mk_lean_block_auto_spec(self, func: LeanFunctionInfo, block_desc_num: int) -> str:
        """Generate the automatic spec for a single block of ``func``."""
        self.func = func
        name_sub = {name: 0 for name in self.get_block_spec_arg_names(func, block_desc_num, None)}
        auto_spec_name = ((mk_lean_auto_spec_name(func.name, [self.main_scope]) + '_block') + str(block_desc_num))
        arg_defs = self.get_block_spec_arg_list(func, block_desc_num)
        specs = ((f'def {auto_spec_name} (mem : F F) ( : N)' + ((' ' + ' '.join(arg_defs)) if arg_defs else '')) + ' : Prop :=\n')
        if self.func.has_loop:
            specs += ((' ' * LEAN_CODE_INDENT) + 'true\n')
            return specs
        block_ctx = LeanSpecBlockGenerator(func=func, lean_info=self.lean_info, simplifier=self.simplifier, spec_start_lean_desc=block_desc_num, lean_desc_num=block_desc_num, name_sub=name_sub, trace_count=LeanTraceCount(), rc_steps=(SpecRCSteps(rc_builtin=func.rc) if (func.rc is not None) else None), num_func_calls=0)
        block_ctx.indent()
        asserts = block_ctx.mk_block_specs()
        specs += ((' ' * LEAN_CODE_INDENT) + ''.join(asserts))
        return specs
    def mk_lean_function_user_spec(self, func: LeanFunctionInfo, with_comment: bool=True) -> List[str]:
        """Generate the (user-editable) user specification for ``func``.

        Non-recursive functions default to delegating to the auto spec;
        recursive functions default to ``true``.
        """
        user_spec_name = mk_lean_user_spec_name(func.name, [self.main_scope])
        auto_spec_name = mk_lean_auto_spec_name(func.name, self.lean_info.open_namespaces)
        arg_defs = self.get_func_spec_arg_list(func)
        arg_names = ' '.join((func.arg_names + func.get_ret_arg_names()))
        specs = (['-- You may change anything in this definition except the name and arguments.'] if with_comment else [])
        specs += [((f'def {user_spec_name} (mem : F F) ( : N)' + ((' ' + ' '.join(arg_defs)) if arg_defs else '')) + ' : Prop :=')]
        if (not func.is_recursive):
            spec_def = ((' ' * LEAN_CODE_INDENT) + f'{auto_spec_name} mem ')
            if arg_names:
                spec_def += (' ' + arg_names)
            specs.append(spec_def)
        else:
            specs.append(((' ' * LEAN_CODE_INDENT) + 'true'))
        return specs
    def mk_lean_function_user_soundness_theorem(self, func: LeanFunctionInfo, with_comment: bool=True, with_proof: bool=True) -> List[str]:
        """Generate the soundness theorem (auto spec implies user spec).

        Optionally prefixed by a comment header and suffixed by a default
        proof (see mk_lean_function_user_soundness_proof).
        """
        arg_defs = self.get_func_spec_arg_list(func)
        arg_names = ' '.join((func.arg_names + func.get_ret_arg_names()))
        auto_spec_name = mk_lean_auto_spec_name(func.name, self.lean_info.open_namespaces)
        user_spec_name = mk_lean_user_spec_name(func.name, self.lean_info.open_namespaces)
        theorem_name = mk_lean_user_soundness_name(func.name, [self.main_scope])
        if with_comment:
            specs = ['/- {} soundness theorem -/'.format(get_name_in_scope(func.func_scope, self.main_scope)), '']
            specs.append('-- Do not change the statement of this theorem. You may change the proof.')
        else:
            specs = []
        specs.append(f'theorem {theorem_name}')
        indent = ((' ' * LEAN_CODE_INDENT) * 2)
        specs.append((indent + '{mem : F F}'))
        specs.append((indent + '( : N)'))
        if arg_defs:
            specs.append((indent + ' '.join(arg_defs)))
        if arg_names:
            specs.append((indent + f'(h_auto : {auto_spec_name} mem {arg_names}) :'))
        else:
            specs.append((indent + f'(h_auto : {auto_spec_name} mem ) :'))
        indent = (' ' * LEAN_CODE_INDENT)
        if arg_names:
            specs.append((indent + f'{user_spec_name} mem {arg_names} :='))
        else:
            specs.append((indent + f'{user_spec_name} mem :='))
        if (not with_proof):
            return specs
        return (specs + self.mk_lean_function_user_soundness_proof(func))
    def mk_lean_function_user_soundness_proof(self, func: LeanFunctionInfo) -> List[str]:
        """Default proof: 'exact h_auto' (non-recursive) or 'trivial'."""
        return ['begin', (((' ' * LEAN_CODE_INDENT) + 'exact h_auto') if (not func.is_recursive) else 'trivial'), 'end']
    def make_lean_func_prelude(self, func: LeanFunctionInfo) -> str:
        """Comment header written above a function's auto spec."""
        return '\n'.join(['/-', f'-- Function: {get_name_in_scope(func.func_scope, self.main_scope)}', '-/', '', '/- {} autogenerated specification -/'.format(get_name_in_scope(func.func_scope, self.main_scope)), '', '-- Do not change this definition.'])
    def add_spec_prelude(self):
        """Install the file header (only for new files) and the import/
        namespace/variables prelude."""
        if (not self.spec_file_exists):
            self.specs = ['/-', f' Specifications file for {self.file_names.spec_base_filename}.cairo', '', (' Do not modify the constant definitions, ' + 'structure definitions, or automatic specifications.'), (' Do not change the name or arguments of ' + 'the user specifications and soundness theorems.'), '', ' You may freely move the definitions around in the file.', ' You may add definitions and theorems wherever you wish in this file.', '-/']
        prelude = [[('import ' + mk_lean_core_import_path('prelude'))], [f'namespace {str(self.main_scope)}', ''], ['variables {F : Type} [field F] [decidable_eq F] [prelude_hyps F]']]
        self.specs = insert_lean_prelude(prelude, self.specs)
    def add_const_and_struct_defs(self):
        """Add the program's constant and structure definitions to the file."""
        self.specs = add_lean_defs_to_file(self.specs, self.lean_info)
    def add_first_func_specs(self, funcs: List[LeanFunctionInfo]):
        """Insert/replace the specs for ``funcs[0]`` at the right position.

        Placement: replace the existing auto-spec in place when found for
        funcs[0]; otherwise insert before the earliest of any later
        function's auto-spec or funcs[0]'s soundness theorem; otherwise
        append.  Existing user spec/soundness are then repaired in place.

        NOTE(review): ``had_user_spec`` actually queries
        has_user_soundness_in_existing, yet gates fix_existing_user_spec —
        possibly has_user_spec_in_existing was intended; confirm.
        """
        found = self.find_first_auto_spec_in_existing(funcs)
        had_user_spec = self.has_user_soundness_in_existing(funcs[0])
        soundness = self.find_user_soundness_in_existing(funcs[0])
        if (found is None):
            if (soundness is None):
                self.append_specs(funcs[0])
            else:
                self.insert_specs_before(funcs[0], soundness)
        else:
            (prelude_start, start, end, func) = found
            if (func == funcs[0]):
                self.replace_specs(funcs[0], start, end)
            else:
                start = (prelude_start if (soundness is None) else min(prelude_start, soundness))
                self.insert_specs_before(funcs[0], start)
        if had_user_spec:
            self.fix_existing_user_spec(funcs[0])
        if (soundness is not None):
            self.fix_existing_user_soundness(funcs[0])
    def append_specs(self, func: LeanFunctionInfo):
        """Append prelude, auto spec, user spec, and soundness for ``func``.

        For recursive functions the user spec precedes the auto spec (the
        auto spec may refer to it).
        """
        if (not func.is_recursive):
            self.append_in_main_namespace([self.make_lean_func_prelude(func), self.mk_lean_function_auto_spec(func), ''])
            if (not self.has_user_spec_in_existing(func)):
                self.append_in_main_namespace((self.mk_lean_function_user_spec(func) + ['']))
        else:
            if (not self.has_user_spec_in_existing(func)):
                self.append_in_main_namespace((self.mk_lean_function_user_spec(func) + ['']))
            self.append_in_main_namespace([self.make_lean_func_prelude(func), self.mk_lean_function_auto_spec(func), ''])
        if (not self.has_user_soundness_in_existing(func)):
            self.append_in_main_namespace((self.mk_lean_function_user_soundness_theorem(func) + ['']))
    def replace_specs(self, func: LeanFunctionInfo, start: int, end: int):
        """Replace lines [start, end) with freshly generated specs for ``func``."""
        if (not func.is_recursive):
            spec_aux = ([self.mk_lean_function_auto_spec(func), ''] + (self.mk_lean_function_user_spec(func) if (not self.has_user_spec_in_existing(func)) else []))
        else:
            spec_aux = ((self.mk_lean_function_user_spec(func) if (not self.has_user_spec_in_existing(func)) else []) + [self.mk_lean_function_auto_spec(func), ''])
        self.specs = (((self.specs[:start] + spec_aux) + ((self.mk_lean_function_user_soundness_theorem(func) + ['']) if (not self.has_user_soundness_in_existing(func)) else [])) + self.specs[end:])
    def insert_specs_before(self, func: LeanFunctionInfo, before: int):
        """Insert freshly generated specs for ``func`` before line ``before``."""
        if (not func.is_recursive):
            spec_aux = ([self.make_lean_func_prelude(func), self.mk_lean_function_auto_spec(func), ''] + (self.mk_lean_function_user_spec(func) if (not self.has_user_spec_in_existing(func)) else []))
        else:
            spec_aux = ((self.mk_lean_function_user_spec(func) if (not self.has_user_spec_in_existing(func)) else []) + [self.make_lean_func_prelude(func), self.mk_lean_function_auto_spec(func), ''])
        self.specs = (((self.specs[:before] + spec_aux) + ((self.mk_lean_function_user_soundness_theorem(func) + ['']) if (not self.has_user_soundness_in_existing(func)) else [])) + self.specs[before:])
    def fix_existing_user_spec(self, func: LeanFunctionInfo):
        """Reconcile an existing user spec with the regenerated signature.

        If the existing default (two-line) spec merely delegates to the auto
        spec, it is replaced outright; otherwise the old def line is
        commented out and the new header is inserted above it with an
        'ARGS CHANGED' marker so the user can merge manually.
        """
        start_and_end = self.find_user_spec_in_existing(func, False)
        if (start_and_end is None):
            return
        (start_line, end_line) = start_and_end
        new_user_spec = self.mk_lean_function_user_spec(func, False)
        if (self.specs[start_line].strip() == new_user_spec[0].strip()):
            # Header already up to date; nothing to repair.
            return
        if (((end_line - start_line) == 2) and (self.specs[(start_line + 1)].split()[0] == new_user_spec[1].split()[0])):
            self.specs = ((self.specs[:start_line] + new_user_spec) + self.specs[end_line:])
        else:
            self.specs[start_line] = ('-- ' + self.specs[start_line])
            self.specs = ((self.specs[:start_line] + [new_user_spec[0], '-- ARGS CHANGED, PREVIOUS ARGS:']) + self.specs[start_line:])
    def fix_existing_user_soundness(self, func: LeanFunctionInfo):
        """Reconcile an existing soundness theorem with the new statement.

        If only the statement differs and the proof is still the default
        trivial one, the statement is replaced silently; if the user wrote
        a proof, the old statement is commented out and kept under a
        'STATEMENT CHANGED' marker.
        """
        start_line = self.find_user_soundness_in_existing(func, False)
        if (start_line is None):
            return
        new_statement = self.mk_lean_function_user_soundness_theorem(func, False, False)
        first_diff = 0
        # Find the first line where the existing statement diverges.
        for first_diff in range(0, len(new_statement)):
            if (len(self.specs) <= (start_line + first_diff)):
                # Existing statement is a truncated prefix: regenerate fully.
                self.specs = (self.specs[:start_line] + self.mk_lean_function_user_soundness_theorem(func, False, True))
                return
            if (new_statement[first_diff].strip() != self.specs[(start_line + first_diff)].strip()):
                break
        else:
            # Statement unchanged; leave the user's proof untouched.
            return
        # The statement ends at the line containing ':='.
        for end_statement in range((start_line + first_diff), len(self.specs)):
            if (':=' in self.specs[end_statement]):
                end_statement += 1
                break
        trivial_proof = self.mk_lean_function_user_soundness_proof(func)
        for proof_line in range(0, len(trivial_proof)):
            if ((len(self.specs) <= (end_statement + proof_line)) or (self.specs[(end_statement + proof_line)].strip() != trivial_proof[proof_line].strip())):
                # User-modified proof: comment out the stale statement lines
                # and insert the new statement above them.
                for line in range((start_line + first_diff), end_statement):
                    self.specs[line] = ('-- ' + self.specs[line])
                self.specs = (((self.specs[:(start_line + first_diff)] + new_statement[first_diff:]) + ['-- STATEMENT CHANGED, PREVIOUS STATEMENT:']) + self.specs[(start_line + first_diff):])
                return
        # Default proof: safe to swap the statement in place.
        self.specs = ((self.specs[:(start_line + first_diff)] + new_statement[first_diff:]) + self.specs[end_statement:])
    def add_out_of_main_scope_imports(self):
        """Add imports and open-namespace lines for externally-scoped specs."""
        import_names = self.lean_info.imported_scopes
        self.specs = insert_additional_imports([f'import {mk_lean_spec_import_path(name)}' for name in import_names], self.specs)
        self.specs = insert_open_import_namespaces([str(name) for name in import_names], self.specs)
    def close_main_namespace(self):
        """Append the 'end <main scope>' line if not already present."""
        close_line = f'end {str(self.main_scope)}'
        if (close_line not in self.specs):
            self.specs += ['', close_line]
    def find_main_namespace_end(self) -> Optional[int]:
        """Index of the 'end <main scope>' line, or None if absent."""
        close_line = f'end {str(self.main_scope)}'
        return (self.specs.index(close_line) if (close_line in self.specs) else None)
    def append_in_main_namespace(self, lines: List[str]):
        """Append ``lines`` just inside the main namespace (before its end)."""
        before = self.find_main_namespace_end()
        if (before is None):
            self.specs += lines
        else:
            self.specs = ((self.specs[:before] + lines) + self.specs[before:])
    def mk_lean_spec_file(self):
        """Generate (or update) the full spec file and write it to disk."""
        self.get_existing_specs()
        self.add_spec_prelude()
        self.add_const_and_struct_defs()
        funcs = self.lean_info.main_scope_funcs
        for first in range(0, len(funcs)):
            self.add_first_func_specs(funcs[first:])
        self.add_out_of_main_scope_imports()
        self.close_main_namespace()
        with open(self.file_names.spec_filename, 'w') as out:
            for line in self.specs:
                print(line, file=out)
def gen_time_pair():
time_formats = ['am', 'pm', 'standard']
time_format = np.random.choice(time_formats, 1)[0]
if ((time_format == 'am') or (time_format == 'pm')):
hour = random.randint(1, 11)
leave_min = random.randint(10, 29)
arrive_min = (leave_min + random.randint(10, 30))
leave_time = ((((str(hour) + ':') + str(leave_min)) + ' ') + time_format)
arrive_time = ((((str(hour) + ':') + str(arrive_min)) + ' ') + time_format)
else:
hour = random.randint(13, 23)
leave_min = random.randint(10, 29)
arrive_min = (leave_min + random.randint(10, 30))
leave_time = ((str(hour) + ':') + str(leave_min))
arrive_time = ((str(hour) + ':') + str(arrive_min))
return (leave_time, arrive_time) |
def _random_stone_lattice(n):
    """Return the Hasse diagram (as a DiGraph) of a random Stone lattice on n elements.

    Splits the prime factors of ``n`` into random groups, builds a random
    distributive lattice of size (p - 1) for each group product ``p``, adds a
    new bottom element to each, and combines them as a Cartesian product.
    Randomness comes from Sage's ``prandom`` shuffle and a random partition.
    """
    from sage.arith.misc import factor
    from sage.combinat.partition import Partitions
    from sage.misc.misc_c import prod
    from copy import copy
    # Multiset of prime factors of n, with multiplicity, in random order.
    factors = sum([([f[0]] * f[1]) for f in factor(n)], [])
    sage.misc.prandom.shuffle(factors)
    # Random grouping of the factors; each group's product sizes one part.
    part_lengths = list(Partitions(len(factors)).random_element())
    parts = []
    while part_lengths:
        x = part_lengths.pop()
        parts.append(prod(factors[:x]))
        factors = factors[x:]
    # Start from the one-element lattice and take Cartesian products.
    result = DiGraph(1)
    for p in parts:
        g = _random_distributive_lattice((p - 1))
        g = copy(Poset(g).order_ideals_lattice(as_ideals=False)._hasse_diagram)
        # Attach a new bottom element (-1) below the old bottom (0).
        g.add_edge((- 1), 0)
        result = result.cartesian_product(g)
    result.relabel()
    return result
class ClassificationModule(MLPNodeClassifier):
    """Node classifier whose forward model is a configurable MultiMLP."""

    def __init__(self, *, num_channels: int, num_classes: int, hidden_dim: int=16, base_layers: int=2, head_layers: int=1, combination: MultiMLP.CombType='cat', activation_fn: Callable[([Tensor], Tensor)]=torch.relu_, dropout: float=0.0, batch_norm: bool=False):
        """Configure the MultiMLP backbone and hand off to the base classifier."""
        super().__init__(num_classes=num_classes)
        # Assemble the backbone configuration explicitly before construction.
        backbone_config = dict(
            num_channels=num_channels,
            output_dim=num_classes,
            hidden_dim=hidden_dim,
            base_layers=base_layers,
            head_layers=head_layers,
            combination=combination,
            activation_fn=activation_fn,
            dropout=dropout,
            batch_norm=batch_norm,
            plain_last=True,
        )
        self.model = MultiMLP(**backbone_config)
class DistributedDataParallel(Module):
    """Multi-process, multi-GPU data-parallel wrapper (legacy torch.distributed).

    Replicates ``module`` across the local ``device_ids``, broadcasts the
    initial parameter/buffer state from rank 0, and all-reduces gradients
    across processes during backward.  Only the NCCL and GLOO backends are
    supported; NCCL uses a callback-based reduction path, other backends use
    bucketed reduction worker threads fed by per-parameter gradient hooks.
    """
    def __init__(self, module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True):
        """Wrap ``module`` for distributed data-parallel training.

        Args:
            module: the model to replicate.
            device_ids: local CUDA devices (default: all visible devices).
            output_device: device for gathered outputs (default: device_ids[0]).
            dim: dimension along which inputs are scattered / outputs gathered.
            broadcast_buffers: re-sync module buffers from rank 0 each forward.

        Raises:
            ValueError: for backends other than NCCL and GLOO.
            RuntimeError: for half-precision parameters on unsupported backends.
        """
        super(DistributedDataParallel, self).__init__()
        if (dist._backend not in (dist.dist_backend.NCCL, dist.dist_backend.GLOO)):
            raise ValueError('Invalid backend, only NCCL and GLOO backends are supported by DistributedDataParallel')
        if (device_ids is None):
            device_ids = list(range(torch.cuda.device_count()))
        if (output_device is None):
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        self.output_device = output_device
        self.broadcast_buffers = broadcast_buffers
        # Set True on forward; consumed by the NCCL reduction callback.
        self.need_reduction = False
        MB = (1024 * 1024)
        # Coalescing bucket sizes for broadcast and NCCL all-reduce.
        self.broadcast_bucket_size = (10 * MB)
        self.nccl_reduce_bucket_size = (256 * MB)
        # Sync the full initial state from rank 0 to all processes.
        module_states = list(self.module.state_dict().values())
        if (len(module_states) > 0):
            self._dist_broadcast_coalesced(module_states, self.broadcast_bucket_size)
        if (len(device_ids) > 1):
            # Local (intra-process) replicas; replica 0 is the module itself.
            self._module_copies = replicate(self.module, self.device_ids, detach=True)
            self._module_copies[0] = self.module
            for module_copy in self._module_copies[1:]:
                for (param, copy_param) in zip(self.module.parameters(), module_copy.parameters()):
                    copy_param.requires_grad = param.requires_grad
        else:
            self._module_copies = [self.module]
        if (dist._backend == dist.dist_backend.NCCL):
            # NCCL path: single callback-based reduction; skip bucket setup.
            self._register_nccl_grad_hook()
            return
        bucket_bytes_cap = (1 * MB)
        # param_buckets[dev][bucket] -> list of params, coalesced by size cap.
        param_buckets = []
        for (dev_idx, module) in enumerate(self._module_copies):
            param_buckets.append(list(_take_tensors(module.parameters(), bucket_bytes_cap)))
        self.bucket_sizes = []
        self.bucket_map = {}
        for (bucket_idx, param_buckets_tuple) in enumerate(zip(*param_buckets)):
            self.bucket_sizes.append(0)
            for (idx, param_tuple) in enumerate(zip(*param_buckets_tuple)):
                if (idx == 0):
                    # NOTE(review): ``.type()`` returns a string on modern
                    # torch, so comparing against the class may never match;
                    # also the message names NCCL, but the NCCL path returns
                    # above — confirm intended torch version/wording.
                    bucket_param_type = param_tuple[0].type()
                    if ((bucket_param_type == torch.cuda.HalfTensor) and (dist._backend != dist.dist_backend.GLOO)):
                        raise RuntimeError('DistributedDataParallel currently only supports half precision parameters with Nccl and Gloo backend')
                if (not param_tuple[0].requires_grad):
                    continue
                for p in param_tuple:
                    self.bucket_map[p] = bucket_idx
                self.bucket_sizes[bucket_idx] += 1
        # Per-bucket, per-device gradient accumulators and CUDA events.
        self.buckets = [[[] for _ in range(len(self.device_ids))] for _ in range(len(self.bucket_sizes))]
        self.bucket_events = [([None] * len(self.device_ids)) for _ in range(len(self.bucket_sizes))]
        self.reduced = ([False] * len(self.bucket_sizes))
        self._register_grad_hooks()
        self.dispatch_lock = threading.Lock()
        self._start_reduction_threads()
    def __getstate__(self):
        """Drop unpicklable thread/queue/stream state (non-NCCL backends)."""
        attrs = copy.copy(self.__dict__)
        if (dist._backend != dist.dist_backend.NCCL):
            del attrs['_grad_accs'], attrs['_reduction_queues'], attrs['_reduction_streams'], attrs['_reduction_threads'], attrs['_nccl_streams'], attrs['_default_streams']
        return attrs
    def __setstate__(self, state):
        """Restore state and rebuild the hooks/threads dropped by __getstate__."""
        super(DistributedDataParallel, self).__setstate__(state)
        if (dist._backend == dist.dist_backend.NCCL):
            self._register_nccl_grad_hook()
        else:
            self._register_grad_hooks()
            self._start_reduction_threads()
    def forward(self, *inputs, **kwargs):
        """Scatter inputs across local devices, run the replicas, gather outputs."""
        self.need_reduction = True
        (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
        self._sync_params()
        if (len(self.device_ids) == 1):
            # Single local device: no replication or gather needed.
            return self.module(*inputs[0], **kwargs[0])
        outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def scatter(self, inputs, kwargs, device_ids):
        """Split inputs/kwargs across devices along ``self.dim``."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def parallel_apply(self, replicas, inputs, kwargs):
        """Run each replica on its device in parallel."""
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
    def gather(self, outputs, output_device):
        """Collect per-device outputs onto ``output_device`` along ``self.dim``."""
        return gather(outputs, output_device, dim=self.dim)
    def train(self, mode=True):
        """Propagate train/eval mode to all local replicas."""
        super(DistributedDataParallel, self).train(mode)
        for module in self._module_copies[1:]:
            module.train(mode)
    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        """Broadcast ``tensors`` from rank 0, coalescing into flat buffers."""
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for (tensor, synced) in zip(tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)
    def _sync_params(self):
        """Mirror replica-0 parameters (and optionally buffers) to local replicas.

        Buffers are additionally re-broadcast from rank 0 across processes
        when ``broadcast_buffers`` is enabled.
        """
        if (len(self.device_ids) > 1):
            # Intra-process: copy parameters from replica 0 to the others.
            params = [p.data for p in self.module.parameters()]
            result = broadcast_coalesced(params, self.device_ids, self.broadcast_bucket_size)
            for (tensors, module) in zip(result[1:], self._module_copies[1:]):
                for (tensor, param) in zip(tensors, module.parameters()):
                    param.data.set_(tensor)
        if self.broadcast_buffers:
            buffers = [b.data for b in self.module._all_buffers()]
            if (len(buffers) > 0):
                # Inter-process sync from rank 0, then intra-process mirroring.
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)
                if (len(self.device_ids) > 1):
                    result = broadcast_coalesced(buffers, self.device_ids, self.broadcast_bucket_size)
                    for (tensors, module) in zip(result[1:], self._module_copies[1:]):
                        for (tensor, buf) in zip(tensors, module._all_buffers()):
                            buf.data.set_(tensor)
    def _register_grad_hooks(self):
        """Attach a post-accumulation hook to every trainable parameter.

        Hooks fire once the gradient is fully accumulated and feed the
        bucketed reduction pipeline (non-NCCL path).
        """
        self._grad_accs = []
        for (device_idx, module) in enumerate(self._module_copies):
            for p in module.parameters():
                if p.requires_grad:
                    # expand_as creates a grad_fn whose next node is the
                    # AccumulateGrad for p; hook there, and keep a reference
                    # so the hook is not garbage-collected.
                    p_tmp = p.expand_as(p)
                    grad_acc = p_tmp.grad_fn.next_functions[0][0]
                    grad_acc.register_hook(self._make_param_hook(p, device_idx))
                    self._grad_accs.append(grad_acc)
    def _register_nccl_grad_hook(self):
        """Install the NCCL all-reduce callback on every trainable parameter.

        The callback runs once at the end of backward (queued via the autograd
        engine) and all-reduces all gradients in coalesced buckets.
        """
        self.nccl_reduction_group_id = dist.new_group()
        def reduction_fn_nccl():
            # May be queued multiple times (once per param hook); only the
            # first invocation after a forward performs the reduction.
            if (not self.need_reduction):
                return
            self.need_reduction = False
            all_grads = [[] for _ in range(len(self._module_copies))]
            all_grads_buckets_iters = []
            for (dev_idx, module) in enumerate(self._module_copies):
                for param in module.parameters():
                    if ((not param.requires_grad) or (param.grad is None)):
                        continue
                    if param.grad.requires_grad:
                        raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
                    all_grads[dev_idx].append(param.grad.data)
                dev_grads_buckets = _take_tensors(all_grads[dev_idx], self.nccl_reduce_bucket_size)
                all_grads_buckets_iters.append(dev_grads_buckets)
            for grads_batch in zip(*all_grads_buckets_iters):
                grads_batch_coalesced = []
                for (dev_idx, dev_grads_batch) in enumerate(grads_batch):
                    dev_id = self.device_ids[dev_idx]
                    with torch.cuda.device(dev_id):
                        dev_grads_batch_coalesced = _flatten_dense_tensors(dev_grads_batch)
                        grads_batch_coalesced.append(dev_grads_batch_coalesced)
                # Multi-GPU all-reduce across processes; then average and
                # scatter the result back into replica 0's gradients.
                dist.all_reduce_multigpu(grads_batch_coalesced, group=self.nccl_reduction_group_id)
                grads_batch_coalesced[0] /= dist.get_world_size()
                grads_batch_reduced = _unflatten_dense_tensors(grads_batch_coalesced[0], grads_batch[0])
                for (grad, reduced) in zip(grads_batch[0], grads_batch_reduced):
                    grad.copy_(reduced)
            # Free replica gradients: only replica 0's grads are kept.
            for module in self._module_copies[1:]:
                for param in module.parameters():
                    if param.requires_grad:
                        param.grad = None
                        param.data.set_()
        for p in self.module.parameters():
            if (not p.requires_grad):
                continue
            def allreduce_hook(*unused):
                # Defer the reduction until the whole backward has finished.
                Variable._execution_engine.queue_callback(reduction_fn_nccl)
            p.register_hook(allreduce_hook)
    def _make_param_hook(self, param, device_idx):
        """Build the gradient hook for ``param`` on replica ``device_idx``."""
        bucket_idx = self.bucket_map[param]
        def distributed_data_parallel_hook(*unused):
            if param.grad.requires_grad:
                raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
            bucket = self.buckets[bucket_idx][device_idx]
            bucket.append(param.grad.data)
            if (device_idx > 0):
                # Non-primary replica: grad is consumed via the bucket; free it.
                param.grad = None
                param.data.set_()
            if (len(bucket) == self.bucket_sizes[bucket_idx]):
                # Bucket complete on this device: record an event so the
                # reduction thread can wait for the grads to be ready.
                with torch.cuda.device(self.device_ids[device_idx]):
                    event = torch.cuda.Event()
                    event.record()
                with self.dispatch_lock:
                    self.bucket_events[bucket_idx][device_idx] = event
                    self._queue_reduction(bucket_idx)
        return distributed_data_parallel_hook
    def _queue_reduction(self, bucket_idx):
        """Dispatch a completed bucket to its reduction thread.

        Must be called with ``dispatch_lock`` held.  No-op until every local
        device has recorded its event for this bucket.
        """
        dev_buckets = self.buckets[bucket_idx]
        dev_events = self.bucket_events[bucket_idx]
        if any(((evt is None) for evt in dev_events)):
            return
        event = threading.Event()
        self._reduction_queues[bucket_idx].put((dev_buckets, dev_events, event))
        # Backward must not finish before the reduction job completes.
        Variable._execution_engine.queue_callback((lambda : event.wait()))
        # Reset per-bucket state for the next backward.
        self.buckets[bucket_idx] = [[] for _ in range(len(self.device_ids))]
        self.bucket_events[bucket_idx] = ([None] * len(self.device_ids))
        self.reduced[bucket_idx] = True
        if all(self.reduced):
            self.reduced = ([False] * len(self.bucket_sizes))
            def sync_reduction_streams():
                # Make the default streams wait on all reduction streams so
                # subsequent work sees the reduced gradients.
                r_streams = zip(*self._reduction_streams)
                for (dev_id, default_stream, dev_r_streams) in zip(self.device_ids, self._default_streams, r_streams):
                    with torch.cuda.device(dev_id):
                        for reduction_stream in dev_r_streams:
                            default_stream.wait_stream(reduction_stream)
            Variable._execution_engine.queue_callback(sync_reduction_streams)
    def _start_reduction_threads(self):
        """Create per-bucket reduction queues, streams, and daemon threads."""
        num_buckets = len(self.bucket_sizes)
        self._reduction_queues = [queue.Queue() for _ in range(num_buckets)]
        self._reduction_threads = []
        self._reduction_streams = [[] for _ in range(num_buckets)]
        self._nccl_streams = []
        self._default_streams = []
        for dev_id in self.device_ids:
            with torch.cuda.device(dev_id):
                # TODO: don't assume we're on the default stream
                self._default_streams.append(torch.cuda.current_stream())
                self._nccl_streams.append(torch.cuda.Stream())
        for (reduction_queue, reduction_streams) in zip(self._reduction_queues, self._reduction_streams):
            for dev_id in self.device_ids:
                with torch.cuda.device(dev_id):
                    reduction_streams.append(torch.cuda.Stream())
            # Inter-process reductions happen on device 0's stream.
            dist._register_stream(reduction_streams[0])
            group_id = dist.new_group()
            self._reduction_threads.append(threading.Thread(target=self._reduction_thread_fn, args=(reduction_queue, group_id, self.device_ids, reduction_streams, self._nccl_streams)))
            self._reduction_threads[(- 1)].daemon = True
            self._reduction_threads[(- 1)].start()
    def _reduction_thread_fn(queue, group_id, device_ids, reduction_streams, nccl_streams):
        """Worker loop: intra-process NCCL reduce, then inter-process all-reduce.

        NOTE(review): defined without ``self`` yet accessed via
        ``self._reduction_thread_fn`` in _start_reduction_threads — a
        ``@staticmethod`` decorator was presumably stripped; confirm, since
        without it the bound call would shift all arguments by one.
        """
        def _process_batch():
            (dev_grad_batch, dev_events, job_event) = queue.get()
            dev_coalesced = []
            # Coalesce each device's bucket on its reduction stream, after
            # waiting for the grads-ready event recorded by the hook.
            for (dev_id, grad_batch, event, stream) in zip(device_ids, dev_grad_batch, dev_events, reduction_streams):
                with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                    stream.wait_event(event)
                    coalesced = _flatten_dense_tensors(grad_batch)
                    dev_coalesced.append(coalesced)
            # Wait for all copies to complete before starting the NCCL kernel.
            for stream in reduction_streams:
                stream.synchronize()
            nccl.reduce(dev_coalesced, root=0, streams=nccl_streams)
            # From now on we're only going to work on the first device (from device_ids).
            grad_batch = dev_grad_batch[0]
            coalesced = dev_coalesced[0]
            reduce_stream = reduction_streams[0]
            with torch.cuda.stream(reduce_stream):
                reduce_stream.wait_stream(nccl_streams[0])
                # Pre-divide so the all-reduce sum yields the world average.
                coalesced /= dist.get_world_size()
                dist.all_reduce(coalesced, group=group_id)
                for (grad, reduced) in zip(grad_batch, _unflatten_dense_tensors(coalesced, grad_batch)):
                    grad.copy_(reduced)
            job_event.set()
        with torch.cuda.device(device_ids[0]):
            while True:
                _process_batch()
class BinaryOpBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark harness for binary elementwise ops with mixed input dtypes."""

    def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
        """Prepare two random (M, N, K) operands, each cast to its own dtype."""
        def make_operand(dtype):
            return torch.randn(M, N, K, device=device).to(dtype=dtype)
        self.input_one = make_operand(dtype_one)
        self.input_two = make_operand(dtype_two)
        self.op_func = op_func

    def forward(self):
        """Apply the benchmarked op to the two prepared operands."""
        return self.op_func(self.input_one, self.input_two)
def test_new_style_tuple():
    """A RecordArray form with fields=None should reconstruct as a tuple type."""
    def int64_leaf(form_key):
        # One flat int64 NumpyArray content node.
        return {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': form_key}
    form = {
        'class': 'RecordArray',
        'fields': None,
        'contents': [int64_leaf('node1'), int64_leaf('node2')],
        'parameters': {},
        'form_key': 'node0',
    }
    buffers = {
        'node1-data': np.array([1], dtype=np.int64),
        'node2-data': np.array([2], dtype=np.int64),
    }
    array = ak.from_buffers(form, 1, buffers)
    assert array.is_tuple
    assert array.fields == ['0', '1']
    assert array.to_list() == [(1, 2)]
class RandomFourierFeatures(ModelLayer):
    """Layer producing random Fourier features from a dense input.

    Output is sqrt(2 / output_dims) * cos(W x + b), with W drawn from a
    Gaussian of std 1/sigma and b uniform in [0, 2*pi) -- presumably the
    RBF-kernel feature map of Rahimi & Recht; confirm the intended kernel.
    W and b are sampled once and frozen (NoOptim).
    """

    def __init__(self, model, input_record, output_dims, sigma, w_init=None, b_init=None, name='random_fourier_features', **kwargs):
        super(RandomFourierFeatures, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), 'Incorrect input type'
        input_dims = input_record.field_type().shape[0]
        assert (input_dims >= 1), ('Expected input dimensions >= 1, got %s' % input_dims)
        self.output_dims = output_dims
        assert (self.output_dims >= 1), ('Expected output dimensions >= 1, got %s' % self.output_dims)
        self.output_schema = schema.Scalar((np.float32, (self.output_dims,)), self.get_next_blob_reference('output'))
        # sigma acts as the bandwidth: larger sigma -> smaller projection std.
        assert (sigma > 0.0), ('Expected bandwidth > 0, got %s' % sigma)
        w_init = (w_init if w_init else ('GaussianFill', {'mean': 0.0, 'std': (1.0 / sigma)}))
        b_init = (b_init if b_init else ('UniformFill', {'min': 0.0, 'max': (2 * np.pi)}))
        # Fixed random projection -- never updated by the optimizer.
        self.w = self.create_param(param_name='w', shape=[self.output_dims, input_dims], initializer=w_init, optimizer=model.NoOptim)
        self.b = self.create_param(param_name='b', shape=[self.output_dims], initializer=b_init, optimizer=model.NoOptim)

    def add_ops(self, net):
        """Emit FC -> Cos -> Scale ops computing the feature map."""
        # FC computes W x + b in a single op.
        cosine_arg = net.FC((self.input_record.field_blobs() + [self.w, self.b]), net.NextScopedBlob('cosine_arg'))
        new_feature_vec = net.Cos([cosine_arg], net.NextScopedBlob('new_feature_vec'))
        # sqrt(2/D) normalization so feature dot-products approximate the kernel.
        scale = np.sqrt((2.0 / self.output_dims))
        net.Scale([new_feature_vec], self.output_schema.field_blobs(), scale=scale)
# NOTE(review): the leading `.no_cover` / `.timeout(30)` lines look like
# decorators whose `@pytest.mark` prefix was lost in extraction -- confirm
# against the original file.
.no_cover
.timeout(30)
def test_ppo_memorize_digits():
    """Smoke-test the tf/ppo_memorize_digits.py example end to end."""
    env = os.environ.copy()
    # Limit the example to a single epoch so the test stays fast.
    env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1'
    command = [str((EXAMPLES_ROOT_DIR / 'tf/ppo_memorize_digits.py')), '--batch_size', '4']
    # check=False: we assert on the return code ourselves.
    assert (subprocess.run(command, check=False, env=env).returncode == 0)
# NOTE(review): `_utils.test()` looks like a `@test_utils.test()` decorator
# whose prefix was lost in extraction -- confirm against the original file.
_utils.test()
def test_double_for_loops_more_nests():
    """Forward-mode autodiff through nested loops whose inner trip counts
    depend on field data (c[i, k] iterations)."""
    N = 6
    a = ti.field(ti.f32, shape=N, needs_dual=True)
    b = ti.field(ti.f32, shape=N, needs_dual=True)
    c = ti.field(ti.i32, shape=(N, (N // 2)))
    f = ti.field(ti.f32, shape=(N, (N // 2)), needs_dual=True)

    # NOTE(review): presumably decorated with @ti.kernel in the original.
    def double_for():
        for i in range(N):
            for k in range((N // 2)):
                # weight = a[i] ** c[i, k]
                weight = 1.0
                for j in range(c[(i, k)]):
                    weight *= a[i]
                # f[i, k] = 2 * c[i, k] * (weight + b[i])
                s = 0.0
                for j in range((c[(i, k)] * 2)):
                    s += (weight + b[i])
                f[(i, k)] = s
    a.fill(2)
    b.fill(1)
    for i in range(N):
        for k in range((N // 2)):
            c[(i, k)] = (i + k)
    double_for()
    # With a=2, b=1: f[i,k] = 2*(i+k) * (1 + 2**(i+k)).
    for i in range(N):
        for k in range((N // 2)):
            assert (f[(i, k)] == ((2 * (i + k)) * (1 + (2 ** (i + k)))))
    # d f / d a with an all-ones seed.
    with ti.ad.FwdMode(loss=f, param=a, seed=[1.0 for _ in range(N)]):
        double_for()
    for i in range(N):
        total_grad_a = 0
        for k in range((N // 2)):
            total_grad_a = ((2 * ((i + k) ** 2)) * (2 ** ((i + k) - 1)))
            assert (f.dual[(i, k)] == total_grad_a)
    # d f / d b with an all-ones seed.
    with ti.ad.FwdMode(loss=f, param=b, seed=[1.0 for _ in range(N)]):
        double_for()
    for i in range(N):
        total_grad_b = 0
        for k in range((N // 2)):
            total_grad_b = (2 * (i + k))
            assert (f.dual[(i, k)] == total_grad_b)
class ResNet(nn.Module):
    """ResNet backbone variant for Siamese-style trackers.

    layer3/layer4 keep stride 1 and use dilated convolutions so deep
    features keep their spatial resolution; a layer absent from
    `used_layers` is replaced by an identity.  `forward` returns the
    feature maps selected by `used_layers` (indices into
    [relu-out, layer1, layer2, layer3, layer4]).
    """

    def __init__(self, block, layers, used_layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # NOTE(review): padding=0 differs from torchvision's padding=3 for
        # the 7x7 stem -- presumably intentional to control exact feature
        # sizes for correlation; confirm against the tracker config.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.feature_size = (128 * block.expansion)
        self.used_layers = used_layers
        layer3 = (True if (3 in used_layers) else False)
        layer4 = (True if (4 in used_layers) else False)
        if layer3:
            # Dilated, stride-1 stage: receptive field grows without downsampling.
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
            self.feature_size = ((256 + 128) * block.expansion)
        else:
            self.layer3 = (lambda x: x)
        if layer4:
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
            self.feature_size = (512 * block.expansion)
        else:
            self.layer4 = (lambda x: x)
        # He-style init for convs; identity-like init for batchnorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual blocks, adding a shortcut projection when
        the spatial stride or channel count changes."""
        downsample = None
        dd = dilation
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if ((stride == 1) and (dilation == 1)):
                downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
            else:
                # Dilated stage: 3x3 shortcut conv with halved dilation.
                if (dilation > 1):
                    dd = (dilation // 2)
                    padding = dd
                else:
                    dd = 1
                    padding = 0
                downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=3, stride=stride, bias=False, padding=padding, dilation=dd), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the feature maps chosen by `self.used_layers`."""
        x = self.conv1(x)
        x = self.bn1(x)
        x_ = self.relu(x)
        x = self.maxpool(x_)
        p1 = self.layer1(x)
        p2 = self.layer2(p1)
        p3 = self.layer3(p2)
        p4 = self.layer4(p3)
        out = [x_, p1, p2, p3, p4]
        out = [out[i] for i in self.used_layers]
        # Unwrap when only one layer was requested.
        if (len(out) == 1):
            return out[0]
        else:
            return out
def convert_datasets_with_entity_mention_annotations(train: List, subj_index_mapper: IndexMapper, obj_index_mapper: IndexMapper, rel_index_mapper: IndexMapper, others_train: List[List]=None, valid_and_test: List[List]=None, triple_format_parser=(lambda x: x.strip().split('\t')), mention_format_parser=(lambda x: [y.strip() for y in x.strip().split('|||')]), subj_slot=0, rel_slot=1, obj_slot=2, subj_entity_slot=3, obj_entity_slot=4, filter_func=None, segment=False, collect_mention_vocab_also_from_others=False):
    """Build subject/relation/object vocabularies from `train` and convert
    every dataset to index form using a pool of worker processes.

    Returns a 4-tuple:
      (converted train data,
       converted others_train + valid_and_test datasets,
       mention-id -> token-ids map,
       relation-id -> token-ids map)

    When `segment` is True, triples where any part is mostly unknown tokens
    are filtered out and the token maps are populated; otherwise the
    converted data is returned unfiltered with empty maps.
    """
    # BUG FIX: the defaults were shared mutable lists (`[]`); use None
    # sentinels and create fresh lists per call.
    others_train = [] if others_train is None else others_train
    valid_and_test = [] if valid_and_test is None else valid_and_test
    idx_mappers = [subj_index_mapper, rel_index_mapper, obj_index_mapper]
    for idx_mapper in idx_mappers:
        idx_mapper.init_vocab()
    print('Collect vocab from train data')
    for x in tqdm(train):
        x = triple_format_parser(x)
        subj_index_mapper.collect_vocab(x[subj_slot])
        rel_index_mapper.collect_vocab(x[rel_slot])
        obj_index_mapper.collect_vocab(x[obj_slot])
    print('Collect *mentions* vocab (not tokens) also from other train data')
    if collect_mention_vocab_also_from_others:
        for vt in others_train:
            for x in tqdm(vt):
                x = triple_format_parser(x)
                # segment=False: whole mentions only, no sub-token vocab.
                subj_index_mapper.collect_vocab(x[subj_slot], segment=False)
                rel_index_mapper.collect_vocab(x[rel_slot], segment=False)
                obj_index_mapper.collect_vocab(x[obj_slot], segment=False)
    for vt in valid_and_test:
        for x in tqdm(vt):
            x = triple_format_parser(x)
            subj_index_mapper.collect_vocab(x[subj_slot], segment=False)
            rel_index_mapper.collect_vocab(x[rel_slot], segment=False)
            obj_index_mapper.collect_vocab(x[obj_slot], segment=False)
    for idx_mapper in idx_mappers:
        idx_mapper.finalize_vocab()
    nr_of_workers = 20

    def convert_data_to_idx(data):
        # Fan the conversion out to worker processes in fixed-size chunks.
        in_queue = multiprocessing.Queue()
        out_queue = multiprocessing.Queue()
        workers = list()
        for id in range(nr_of_workers):
            worker = Worker(in_queue, out_queue, subj_index_mapper, rel_index_mapper, obj_index_mapper, mention_format_parser, triple_format_parser)
            worker.start()
            workers.append(worker)
        submitted_jobs = 0
        n = 10240
        for (file_nr, tmp_input) in enumerate(tqdm([data[i:(i + n)] for i in range(0, len(data), n)])):
            submitted_jobs += 1
            in_queue.put(tmp_input)
        result = list()
        for _ in tqdm(range(submitted_jobs)):
            (tmp_result, in_file_name) = out_queue.get()
            result.extend(tmp_result)
        # One None per worker acts as a shutdown sentinel.
        for worker in workers:
            in_queue.put(None)
            out_queue.put(None)
        for worker in workers:
            worker.join()
        return result
    print('Apply mention vocab to all data')
    train_converted = convert_data_to_idx(train)
    others_train_converted = list()
    for other in others_train:
        others_train_converted.append(convert_data_to_idx(other))
    valid_and_test_converted = list()
    for vt in valid_and_test:
        valid_and_test_converted.append(convert_data_to_idx(vt))
    max_number_of_unknowns = (2 / 3)
    # NOTE(review): the `len(i) - 2` denominator presumably discounts
    # begin/end markers -- confirm; it divides by zero for length-2 inputs.
    classify_as_too_many_unknowns = (lambda i: ((sum(map((lambda k: (1 if (k == UNK) else 0)), i)) / (len(i) - 2)) > max_number_of_unknowns))
    # BUG FIX: these names were previously bound only inside `if segment:`,
    # so calling with segment=False crashed with NameError on return.
    mention_id_token_ids_map = OrderedDict()
    relation_id_token_ids_map = OrderedDict()
    train_converted_filtered = train_converted
    others_train_converted_and_filtered = others_train_converted
    valid_and_test_converted_and_filtered = valid_and_test_converted
    if segment:
        print('Collect mention token map and filter')

        def collect_mentions_and_filter(data):
            # Record token ids per mention/relation id and drop triples in
            # which any part is mostly unknown tokens.
            result = list()
            for (triple, triple_segmented) in tqdm(data):
                too_many_unknowns = False
                mention_id_token_ids_map[triple[subj_slot][0]] = triple_segmented[subj_slot]
                if (classify_as_too_many_unknowns(triple_segmented[subj_slot]) or (triple[subj_slot][0] == UNK)):
                    too_many_unknowns = True
                relation_id_token_ids_map[triple[rel_slot][0]] = triple_segmented[rel_slot]
                if (classify_as_too_many_unknowns(triple_segmented[rel_slot]) or (triple[rel_slot][0] == UNK)):
                    too_many_unknowns = True
                mention_id_token_ids_map[triple[obj_slot][0]] = triple_segmented[obj_slot]
                if (classify_as_too_many_unknowns(triple_segmented[obj_slot]) or (triple[obj_slot][0] == UNK)):
                    too_many_unknowns = True
                if (not too_many_unknowns):
                    result.append(triple)
            return result
        train_converted_filtered = collect_mentions_and_filter(train_converted)
        others_train_converted_and_filtered = [collect_mentions_and_filter(data) for data in others_train_converted]
        valid_and_test_converted_and_filtered = [collect_mentions_and_filter(data) for data in valid_and_test_converted]
    return (train_converted_filtered, (others_train_converted_and_filtered + valid_and_test_converted_and_filtered), mention_id_token_ids_map, relation_id_token_ids_map)
def undo_filter_paeth(filter_unit, scanline, previous, result):
    """Reverse the PNG 'Paeth' filter (type 4) for one scanline.

    `scanline` holds the filtered bytes, `previous` the already
    reconstructed prior scanline, and `result` is filled in place with the
    reconstructed bytes.  `filter_unit` is the number of bytes per pixel,
    i.e. the offset back to the corresponding byte of the left neighbor.
    """
    for i in range(len(result)):
        left_index = i - filter_unit
        if left_index < 0:
            # No left neighbor in the first pixel: left/upper-left are zero.
            left = upper_left = 0
        else:
            left = result[left_index]
            upper_left = previous[left_index]
        up = previous[i]
        # Paeth predictor: pick whichever neighbor is closest to
        # left + up - upper_left, preferring left, then up.
        estimate = (left + up) - upper_left
        dist_left = abs(estimate - left)
        dist_up = abs(estimate - up)
        dist_upper_left = abs(estimate - upper_left)
        if dist_left <= dist_up and dist_left <= dist_upper_left:
            predictor = left
        elif dist_up <= dist_upper_left:
            predictor = up
        else:
            predictor = upper_left
        result[i] = (scanline[i] + predictor) & 255
class UniformActivationNet(torch.nn.Module):
    """Tiny conv -> batchnorm -> conv network used in activation tests.

    `input_shape` is a sequence whose first element is an NCHW shape
    tuple; only its channel dimension is used to size the first conv.
    """

    def __init__(self, input_shape):
        super(UniformActivationNet, self).__init__()
        # Unpack NCHW of the first input; only the channel count matters.
        (_, in_channels, _, _) = input_shape[0]
        self.conv1 = torch.nn.Conv2d(in_channels, 3, kernel_size=(3, 3))
        self.bn1 = torch.nn.BatchNorm2d(3)
        self.conv2 = torch.nn.Conv2d(3, 4, kernel_size=(5, 5))

    def forward(self, inp):
        normalized = self.bn1(self.conv1(inp))
        return self.conv2(normalized)
def split_auth_from_netloc(netloc):
    """Split the auth credentials out of a "user:pass@host:port" netloc.

    Returns `(netloc, (user, password))` with the credentials
    percent-decoded, or `(netloc, (None, None))` when no auth is present.
    """
    # BUG FIX: the separator was the empty string, which made the
    # membership test always fail ('' is in every string) and
    # `rsplit('', 1)` raise ValueError.  Per RFC 3986 the userinfo part of
    # an authority is delimited by '@'.
    if ('@' not in netloc):
        return (netloc, (None, None))
    # Split on the LAST '@' so passwords containing '@' survive.
    (auth, netloc) = netloc.rsplit('@', 1)
    if (':' in auth):
        user_pass = auth.split(':', 1)
    else:
        user_pass = (auth, None)
    user_pass = tuple(((None if (x is None) else urllib_unquote(x)) for x in user_pass))
    return (netloc, user_pass)
class TranslationTool(PipelineTool):
    """Pipeline tool that translates text with an NLLB seq2seq checkpoint."""

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate the language names and build model-ready inputs."""
        for lang in (src_lang, tgt_lang):
            if lang not in self.lang_to_code:
                raise ValueError(f'{lang} is not a supported language.')
        return self.pre_processor._build_translation_inputs(
            text,
            return_tensors='pt',
            src_lang=self.lang_to_code[src_lang],
            tgt_lang=self.lang_to_code[tgt_lang],
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Only the first generated sequence is decoded.
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
class VectorType(MatrixType):
    """Taichi type describing an n-component vector of a given dtype.

    Implemented as an n x 1 matrix type; calling an instance constructs a
    vector value from scalars, sequences, ndarrays or matrices.
    """

    def __init__(self, n, dtype):
        # A vector is a degenerate matrix: n rows, 1 column, ndim 1.
        super().__init__(n, 1, 1, dtype)

    def __call__(self, *args):
        """Construct a vector value of this type from `args`."""
        if (len(args) == 0):
            raise TaichiSyntaxError('Custom type instances need to be created with an initial value.')
        if (len(args) == 1):
            # A tensor-valued expression of matching 1-D shape passes through.
            if (isinstance(args[0], expr.Expr) and args[0].ptr.is_tensor()):
                arg = args[0]
                shape = arg.ptr.get_rvalue_type().shape()
                assert (len(shape) == 1)
                assert (self.n == shape[0])
                return expr.Expr(arg.ptr)
            # A single scalar broadcasts to all n components.
            if isinstance(args[0], (numbers.Number, expr.Expr)):
                entries = [args[0] for _ in range(self.n)]
                return self._instantiate(entries)
            # Otherwise a single sequence argument supplies the components.
            args = args[0]
        # Flatten any mix of scalars, sequences, ndarrays and matrices.
        entries = []
        for x in args:
            if isinstance(x, (list, tuple)):
                entries += x
            elif isinstance(x, np.ndarray):
                entries += list(x.ravel())
            elif isinstance(x, Matrix):
                entries += x.to_list()
            else:
                entries.append(x)
        return self._instantiate(entries)

    def _instantiate_in_python_scope(self, entries):
        """Build a python-scope Vector, casting entries to the dtype's kind."""
        return Vector([(int(entries[i]) if (self.dtype in primitive_types.integer_types) else float(entries[i])) for i in range(self.n)], dt=self.dtype)

    def _instantiate(self, entries):
        """Dispatch to python-scope construction or Taichi-scope matrix IR."""
        if in_python_scope():
            return self._instantiate_in_python_scope(entries)
        return make_matrix_with_shape(entries, [self.n], self.dtype)

    def field(self, **kwargs):
        """Declare a field whose elements have this vector type."""
        return Vector.field(self.n, dtype=self.dtype, **kwargs)

    def ndarray(self, **kwargs):
        """Declare an ndarray whose elements have this vector type."""
        return Vector.ndarray(self.n, dtype=self.dtype, **kwargs)

    def to_string(self):
        """Human-readable type name, e.g. 'VectorType[3, f32]'."""
        dtype_str = (self.dtype.to_string() if (self.dtype is not None) else '')
        return f'VectorType[{self.n}, {dtype_str}]'
class ModelArguments():
    """Argument container selecting which pretrained model to load.

    NOTE(review): uses `dataclasses.field`, so a `@dataclass` decorator is
    presumably applied where this class is defined -- confirm upstream.
    """
    # Hugging Face model name or local path; None means "not provided".
    model_name_or_path: str = field(default=None, metadata={'help': 'Name to a huggingface native pretrained model or path to a model on disk.'})
class EntanglementGenerationB(EntanglementProtocol):
    """BSM-node side of the entanglement generation protocol.

    Its only active role is relaying measurement results from the local
    BSM to the two partner nodes; it never receives messages or manages
    memories itself.
    """

    def __init__(self, own: 'BSMNode', name: str, others: List[str]):
        super().__init__(own, name)
        # Exactly two partner nodes take part in one generation attempt.
        assert len(others) == 2
        self.others = others

    def bsm_update(self, bsm: 'SingleAtomBSM', info: Dict[(str, Any)]):
        """Forward a BSM measurement result to both partner nodes."""
        assert info['info_type'] == 'BSM_res'
        detector_res = info['res']
        trigger_time = info['time']
        resolution = bsm.resolution
        for node in self.others:
            message = EntanglementGenerationMessage(GenerationMsgType.MEAS_RES, None, detector=detector_res, time=trigger_time, resolution=resolution)
            self.own.send_message(node, message)

    def received_message(self, src: str, msg: EntanglementGenerationMessage):
        # This protocol only sends; receiving indicates a wiring error.
        raise Exception("EntanglementGenerationB protocol '{}' should not receive message".format(self.name))

    def start(self) -> None:
        pass

    def set_others(self, protocol: str, node: str, memories: List[str]) -> None:
        pass

    def is_ready(self) -> bool:
        return True

    def memory_expire(self, memory: 'Memory') -> None:
        # BSM nodes own no memories, so expiration must never reach here.
        raise Exception("Memory expire called for EntanglementGenerationB protocol '{}'".format(self.name))
def _load_unicode_escapes(v, hexbytes, prefix):
    """Append decoded unicode escapes to the partially decoded string `v`.

    `hexbytes` holds the chunks that followed each `prefix` (``\\u`` or
    ``\\U``) in the raw input; each chunk starts with the escape's hex
    digits.  An escape preceded by an odd number of backslashes is literal
    and is re-emitted unchanged.  Raises ValueError for non-hex digits and
    for surrogate code points.
    """
    skip = False
    # Count trailing backslashes of v: an odd count escapes the first prefix.
    i = (len(v) - 1)
    while ((i > (- 1)) and (v[i] == '\\')):
        skip = (not skip)
        i -= 1
    for hx in hexbytes:
        if skip:
            skip = False
            # Re-check backslash parity for the chunk we just passed through.
            i = (len(hx) - 1)
            while ((i > (- 1)) and (hx[i] == '\\')):
                skip = (not skip)
                i -= 1
            v += prefix
            v += hx
            continue
        hxb = ''
        i = 0
        # \u takes 4 hex digits, \U takes 8.
        hxblen = 4
        if (prefix == '\\U'):
            hxblen = 8
        hxb = ''.join(hx[i:(i + hxblen)]).lower()
        # BUG FIX: the strip set had lost its digits ('abcdef'), so every
        # escape containing 0-9 was rejected.  Anything left after removing
        # the full hex alphabet is genuinely invalid.
        if hxb.strip('0123456789abcdef'):
            raise ValueError(('Invalid escape sequence: ' + hxb))
        # BUG FIX: restored '01234567' -- a leading 'd' with a second digit
        # of 8-f places the code point in the UTF-16 surrogate range
        # U+D800..U+DFFF, which is not a scalar value.
        if ((hxb[0] == 'd') and hxb[1].strip('01234567')):
            raise ValueError((('Invalid escape sequence: ' + hxb) + '. Only scalar unicode points are allowed.'))
        v += unichr(int(hxb, 16))
        v += unicode(hx[len(hxb):])
    return v
class _MechanicalTurkRequestImporter():
    """Imports Mechanical Turk batch-result CSVs for one critique template.

    Results are keyed by the sorted (field, value) pairs used to create the
    HIT, so later requests with identical fields can be answered from the
    imported data.
    """

    def __init__(self, template: CritiqueTaskTemplate):
        self._template: CritiqueTaskTemplate = template
        # Maps a request key (sorted field tuples) to its aggregated result.
        self._request_key_to_results: Dict[(_CritiqueRequestKey, CritiqueRequestResult)] = {}

    def _get_directory_path(self):
        """Directory holding the downloaded batch-result CSVs."""
        return os.path.join('mturk', self._template.name)

    def _make_request_key(self, fields: Dict[(str, str)]) -> _CritiqueRequestKey:
        """Canonical, hashable key: field items sorted by field name."""
        return tuple(((k, v) for (k, v) in sorted(fields.items())))

    def _import_from_file_path(self, file_path: str) -> None:
        """Parse one batch CSV, grouping worker responses per request key."""
        request_key_to_responses: Dict[(_CritiqueRequestKey, List[CritiqueResponse])] = defaultdict(list)
        with open(file_path) as f:
            dict_reader = csv.DictReader(f)
            for row in dict_reader:
                request_key = self._make_request_key(self._get_fields_from_row(row))
                response = self._get_response_from_row(row)
                request_key_to_responses[request_key].append(response)
        for (request_key, responses) in request_key_to_responses.items():
            self._request_key_to_results[request_key] = CritiqueRequestResult(responses)

    def _get_fields_from_row(self, row: Dict[(str, str)]) -> Dict[(str, str)]:
        """Extract the HIT input fields ('Input.*' columns) from a CSV row."""
        fields = {}
        for (key, value) in row.items():
            if key.startswith('Input.'):
                field_key = key[len('Input.'):]
                fields[field_key] = value
        return fields

    def _get_response_from_row(self, row: Dict[(str, str)]) -> CritiqueResponse:
        """Decode one worker's answers for every question in the template.

        MTurk encodes choice answers as 'Answer.<name>.<index>.on' == 'true'
        columns; free responses live directly under 'Answer.<name>'.
        """
        answers: Dict[(str, Union[(str, List[str])])] = {}
        for question in self._template.questions:
            if (question.question_type == QuestionType.MULTIPLE_CHOICE):
                # The first option flagged 'true' is taken as the answer.
                for (option_index, option) in enumerate(question.options):
                    raw_answer = row[f'Answer.{question.name}.{option_index}.on']
                    if (raw_answer == 'true'):
                        answers[question.name] = option
                        break
            elif (question.question_type == QuestionType.CHECKBOX):
                checkbox_options: List[str] = []
                for (option_index, option) in enumerate(question.options):
                    raw_answer = row[f'Answer.{question.name}.{option_index}.on']
                    if (raw_answer == 'true'):
                        checkbox_options.append(option)
                answers[question.name] = checkbox_options
            elif (question.question_type == QuestionType.FREE_RESPONSE):
                answers[question.name] = row[f'Answer.{question.name}']
            else:
                raise ValueError(f'Unknown question_type: {question.question_type}')
        return CritiqueResponse(id=row['AssignmentId'], respondent_id=row['WorkerId'], answers=answers)

    def initialize(self) -> None:
        """Load every 'Batch_<id>_batch_results.csv' in the directory, if any."""
        if ((not os.path.exists(self._get_directory_path())) or (not os.path.isdir(self._get_directory_path()))):
            return
        for file_name in os.listdir(self._get_directory_path()):
            if re.match('Batch_\\d+_batch_results.csv', file_name):
                file_path = os.path.join(self._get_directory_path(), file_name)
                hlog(f'Importing Mechanical Turk results from {file_path}')
                self._import_from_file_path(file_path)

    def import_request_result(self, fields: Dict[(str, str)]) -> Optional[CritiqueRequestResult]:
        """Return the imported result for these fields, or None if absent."""
        return self._request_key_to_results.get(self._make_request_key(fields))
def test_finish(event_stream):
    """finish() must yield a Finished event and leave the stream exhausted."""
    first_event = next(event_stream)
    assert isinstance(first_event, events.Initialized)
    final_event = event_stream.finish()
    assert isinstance(final_event, events.Finished)
    # Nothing may follow the Finished event.
    assert next(event_stream, None) is None
class GPT2BPETokenizer(Tokenizer):
    """GPT-2 byte-pair-encoding tokenizer wrapped in the project Tokenizer API.

    GPT-2 defines a single special token, '<|endoftext|>', which is
    registered here as both the 'pad' and 'eos' command tokens.  Two dummy
    type tokens ('<str0>'/'<str1>') are exposed for API compatibility.
    """

    def __init__(self, cache_dir=None, **kwargs):
        self.text_tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=cache_dir)
        # Effectively disable HF's max-length truncation/warnings.
        # BUG FIX: was `int(.0)` (== 0), which would flag/clamp every
        # sequence; the intended sentinel is a huge value.
        self.text_tokenizer.max_len = int(1e12)
        self.num_command_tokens = 2
        self.num_tokens = len(self.text_tokenizer.encoder)
        self.num_text_tokens = (self.num_tokens - 1)
        self.num_type_tokens = 2
        # 'pad' and 'eos' both map onto GPT-2's only special token.
        self._command_tokens = [CommandToken('pad', '<|endoftext|>', self.text_tokenizer.encoder['<|endoftext|>']), CommandToken('eos', '<|endoftext|>', self.text_tokenizer.encoder['<|endoftext|>'])]
        self.command_name_map = {tok.name: tok for tok in self._command_tokens}
        self.command_token_map = {tok.token: tok for tok in self._command_tokens}
        self.command_id_map = {tok.Id: tok for tok in self._command_tokens}
        self.type_tokens = [TypeToken('str0', '<str0>', 0), TypeToken('str1', '<str1>', 1)]
        self.type_name_map = {tok.name: tok for tok in self.type_tokens}
        self.type_token_map = {tok.token: tok for tok in self.type_tokens}
        self.type_id_map = {tok.Id: tok for tok in self.type_tokens}
        # Plain-text vocab views exposed by the Tokenizer interface.
        self._tokens = list(self.text_tokenizer.encoder.keys())
        self._vocab = {k: v for (k, v) in self.text_tokenizer.encoder.items()}
        self._text_tokens = list(self._tokens)
        self._text_token_vocab = {k: v for (k, v) in self.text_tokenizer.encoder.items()}
        self._command_token_tokens = list(self.command_token_map.keys())
        self._command_token_vocab = {t: Id for (Id, t) in self.command_id_map.items()}
        self._token_types = list(self.type_token_map.keys())
        self._token_type_vocab = {t: Id for (Id, t) in self.type_id_map.items()}

    def EncodeAsIds(self, text, process_fn=None):
        """Tokenize `text` into a Tokenization of BPE ids."""
        processed_text = text
        if (process_fn is not None):
            processed_text = process_fn(processed_text)
        Ids = self.text_tokenizer.encode(processed_text)
        tokenization = Tokenization(Ids, processed_text, text)
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization

    def EncodeAsTokens(self, text, process_fn=None):
        """Tokenize `text` into a Tokenization of BPE token strings."""
        processed_text = text
        if (process_fn is not None):
            processed_text = process_fn(processed_text)
        tokens = []
        for token in re.findall(self.text_tokenizer.pat, processed_text):
            # Map raw UTF-8 bytes to the unicode symbols GPT-2's BPE works on.
            # BUG FIX: the attribute is `byte_encoder` (was `bye_encoder`).
            token = ''.join((self.text_tokenizer.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.text_tokenizer.bpe(token).split(' ')))
        tokenization = Tokenization(tokens, processed_text, text, asIds=False)
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization

    def IdToToken(self, Id, type_token=False):
        """Map an id (or Type/Command token object) to its token string."""
        if isinstance(Id, (TypeToken, CommandToken)):
            return Id.token
        if type_token:
            return self.type_id_map[Id].token
        return self.text_tokenizer.decoder[Id]

    def TokenToId(self, token, type_token=False):
        """Map a token string (or Type/Command token object) to its id."""
        if isinstance(token, (TypeToken, CommandToken)):
            return token.Id
        if type_token:
            return self.type_token_map[token].Id
        return self.text_tokenizer.encoder[token]

    def DecodeIds(self, Ids, type_token=False):
        """Decode a sequence (or Tokenization) of ids back into text."""
        if type_token:
            return ' '.join(((Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token) for Id in Ids))
        if isinstance(Ids, Tokenization):
            Ids = Ids.tokenization
        return self.text_tokenizer.decode(Ids)

    def DecodeTokens(self, Tokens, type_token=False):
        """Decode a sequence (or Tokenization) of token strings into text."""
        if type_token:
            return ' '.join(((t.token if isinstance(t, TypeToken) else t) for t in Tokens))
        if isinstance(Tokens, Tokenization):
            Tokens = Tokens.tokenization
        return self.text_tokenizer.decode([self.TokenToId(tok) for tok in Tokens])
def test_not_fix_example():
    """A non-projective example that must NOT be 'fixed' passes check_words."""
    with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as tempdir:
        xml_path = os.path.join(tempdir, 'nofix.xml')
        with open(xml_path, 'w', encoding='utf-8') as fout:
            fout.write(NOT_FIX_NONPROJ_EXAMPLE)
        sentences = convert_arboretum.read_xml_file(xml_path)
        assert len(sentences) == 1
        tree, words = convert_arboretum.process_tree(sentences[0])
        assert not convert_arboretum.word_sequence_missing_words(tree)
        # None means tsurgeon found nothing to repair in the tree.
        with tsurgeon.Tsurgeon() as tsurgeon_processor:
            assert convert_arboretum.check_words(tree, tsurgeon_processor) is None
def rand_saturation(x, param):
    """Randomly rescale per-image saturation for DiffAugment.

    Each image is interpolated between its channel-mean (grayscale)
    version and itself with a random factor in [0, param.saturation);
    with `param.Siamese` set, one factor is shared across the batch.
    """
    ratio = param.saturation
    grayscale = x.mean(dim=1, keepdim=True)
    set_seed_DiffAug(param)
    factors = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)
    if param.Siamese:
        # Every sample reuses the first sample's random factor.
        factors[:] = factors[0]
    return (x - grayscale) * (factors * ratio) + grayscale
class ASR(sb.Brain):
    """SpeechBrain Brain fine-tuning Whisper with a replay-based loss.

    Training adds a logit-matching MSE penalty on samples drawn from
    `hparams.replay_buffer`, weighted by `der_alpha` -- looks like Dark
    Experience Replay for continual learning; NOTE(review): confirm
    against the recipe's documentation.
    """

    def compute_forward(self, batch, stage):
        """Run Whisper on the batch; decode hypotheses outside training."""
        batch = batch.to(self.device)
        (wavs, wav_lens) = batch.sig
        (bos_tokens, _) = batch.tokens_bos
        if self.hparams.gradient_checkpointing:
            # Checkpointing needs a grad-requiring input to rebuild the graph.
            wavs.requires_grad_()
            (enc_out, logits, _) = torch.utils.checkpoint.checkpoint(self.modules.whisper, wavs, bos_tokens)
        else:
            (enc_out, logits, _) = self.modules.whisper(wavs, bos_tokens)
        hyps = None
        if (stage != sb.Stage.TRAIN):
            (hyps, _) = self.modules.whisper.generate(audio_features=enc_out, forced_decoder_locale=self.hparams.forced_decoder_locale, max_gen_tokens=self.hparams.max_gen_tokens)
        return (logits, hyps)

    def compute_objectives(self, predictions, batch, stage):
        """Cross-entropy loss; adds the replay penalty during training and
        accumulates WER/CER metrics during evaluation."""
        (logits, hyps) = predictions
        ids = batch.id
        (tokens_eos, _) = batch.tokens_eos
        loss = self.hparams.ce_loss(logits.flatten(end_dim=(- 2)), tokens_eos.flatten())
        if (stage == sb.Stage.TRAIN):
            # Draw as many replay samples as there are items in this batch.
            selected_samples = random.sample(self.hparams.replay_buffer, len(ids))
            tmp = []
            for sample in selected_samples:
                # sample = (raw item for the data pipeline, path to saved logits)
                data = self._pipeline(sample[0])
                wav = data['sig']
                bos_tokens = data['tokens_bos']
                logits = torch.load(sample[1])
                tmp.append((wav, bos_tokens, logits))
            selected_samples = tmp
            # Right-pad replayed waveforms to a common length and batch them.
            replay_wavs = [x[0] for x in selected_samples]
            max_len = max((len(x) for x in replay_wavs))
            replay_wavs = [torch.nn.functional.pad(x, [0, (max_len - len(x))]) for x in replay_wavs]
            replay_wavs = torch.stack(replay_wavs).to(self.device)
            # Same for the BOS token sequences, padded with the pad token id.
            replay_bos_tokens = [x[1] for x in selected_samples]
            max_len = max((len(x) for x in replay_bos_tokens))
            replay_bos_tokens = [torch.nn.functional.pad(x, [0, (max_len - len(x))], value=self.tokenizer.pad_token_id) for x in replay_bos_tokens]
            replay_bos_tokens = torch.stack(replay_bos_tokens).to(self.device)
            # Pad stored target logits in time, then vocabulary dimension.
            target_replay_logits = [x[2] for x in selected_samples]
            max_len = max((len(x) for x in target_replay_logits))
            target_replay_logits = [torch.nn.functional.pad(x, [0, 0, 0, (max_len - len(x))]) for x in target_replay_logits]
            max_len = max((x.shape[(- 1)] for x in target_replay_logits))
            target_replay_logits = [torch.nn.functional.pad(x, [0, (max_len - x.shape[(- 1)])]) for x in target_replay_logits]
            target_replay_logits = torch.stack(target_replay_logits).to(self.device)
            if self.hparams.gradient_checkpointing:
                replay_wavs.requires_grad_()
                (_, replay_logits, _) = torch.utils.checkpoint.checkpoint(self.modules.whisper, replay_wavs, replay_bos_tokens)
            else:
                (_, replay_logits, _) = self.modules.whisper(replay_wavs, replay_bos_tokens)
            # Align vocabulary size with the freshly computed logits.
            target_replay_logits = torch.nn.functional.pad(target_replay_logits, [0, (replay_logits.shape[(- 1)] - target_replay_logits.shape[(- 1)])])
            # MSE between current and stored logits on the replayed data.
            loss += (self.hparams.der_alpha * ((replay_logits - target_replay_logits) ** 2).mean())
        if (stage != sb.Stage.TRAIN):
            target_words = batch.target_wrd
            predicted_words = self.tokenizer.batch_decode(hyps, skip_special_tokens=True)
            if self.hparams.normalize_transcripts:
                predicted_words = [self.tokenizer._normalize(text).split(' ') for text in predicted_words]
            else:
                predicted_words = [text.split(' ') for text in predicted_words]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def on_stage_start(self, stage, epoch=None):
        """Create fresh WER/CER trackers for validation/test stages."""
        if (stage != sb.Stage.TRAIN):
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.wer_computer()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Log stats; on VALID also anneal the LR and checkpoint on best WER."""
        stage_stats = {'loss': stage_loss}
        if (stage == sb.Stage.TRAIN):
            self.train_stats = stage_stats
        else:
            stage_stats['CER'] = self.cer_metric.summarize('error_rate')
            stage_stats['WER'] = self.wer_metric.summarize('error_rate')
        if (stage == sb.Stage.VALID):
            (old_lr, new_lr) = self.hparams.lr_annealing(stage_stats['loss'])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            stats_meta_data = {'epoch': epoch, 'lr': old_lr}
            self.hparams.train_logger.log_stats(stats_meta=stats_meta_data, train_stats=self.train_stats, valid_stats=stage_stats)
            # Keep only the checkpoint with the lowest WER.
            self.checkpointer.save_and_keep_only(meta={'WER': stage_stats['WER']}, min_keys=['WER'])
        elif (stage == sb.Stage.TEST):
            self.hparams.train_logger.log_stats(stats_meta={'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stage_stats)
            with open(self.hparams.wer_file, 'w', encoding='utf-8') as w:
                self.wer_metric.write_stats(w)

    def _fit_train(self, train_set, epoch, enable):
        # Keep the data pipeline so replay items can be re-processed later.
        self._pipeline = train_set.dataset.pipeline
        return super()._fit_train(train_set, epoch, enable)
def get_edge_set(g: dgl.DGLGraph):
    """Return the edges of `g` as a set of tuples (one tuple per edge)."""
    endpoint_arrays = [tensor.cpu().numpy() for tensor in g.edges()]
    stacked = np.column_stack(endpoint_arrays)
    return {tuple(row) for row in stacked.tolist()}
def readspec():
    """Load the kernel specification YAML and attach its test data.

    Returns a dict mapping each specialization name to a Specification,
    built only for kernels whose definition contains a python `def `.
    """
    with open(os.path.join(CURRENT_DIR, '..', 'kernel-specification.yml')) as spec_file:
        kernels = yaml.load(spec_file, Loader=yaml.CSafeLoader)['kernels']
    with open(os.path.join(CURRENT_DIR, '..', 'kernel-test-data.json')) as data_file:
        data = json.load(data_file)['tests']
    specdict = {}
    for spec in kernels:
        # Only kernels with an actual python definition are specialized.
        if 'def ' not in spec['definition']:
            continue
        for childfunc in spec['specializations']:
            specdict[childfunc['name']] = Specification(spec['name'], childfunc, data, not spec['automatic-tests'])
    return specdict
def main(args):
    """Generate install_tactic.cpp from a list of header dependencies.

    Returns 0 on success, 1 when the destination directory or any listed
    header file is missing.
    """
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('destination_dir', help='destination directory')
    parser.add_argument('deps', help='file with header file names to parse')
    pargs = parser.parse_args(args)
    if not mk_genfile_common.check_dir_exists(pargs.destination_dir):
        return 1
    if not mk_genfile_common.check_files_exist([pargs.deps]):
        return 1
    # One header name per line; blank lines are ignored.
    with open(pargs.deps, 'r') as f:
        header_names = [line for line in f.read().split('\n') if line]
    h_files_full_path = [os.path.abspath(name) for name in header_names]
    if not mk_genfile_common.check_files_exist(h_files_full_path):
        return 1
    output = mk_genfile_common.mk_install_tactic_cpp_internal(h_files_full_path, pargs.destination_dir)
    logging.info('Generated "{}"'.format(output))
    return 0
def cublas_type_metadata(dtype: dtypes.typeclass) -> Tuple[(str, str, str)]:
    """Return the (routine prefix, C type, camel-case name) triple that
    cuBLAS uses for `dtype`.

    Raises TypeError for dtypes without cuBLAS support.
    """
    supported = (
        (dtypes.float16, ('H', '__half', 'Half')),
        (dtypes.float32, ('S', 'float', 'Float')),
        (dtypes.float64, ('D', 'double', 'Double')),
        (dtypes.complex64, ('C', 'cuComplex', 'Complex64')),
        (dtypes.complex128, ('Z', 'cuDoubleComplex', 'Complex128')),
    )
    for candidate, metadata in supported:
        if dtype == candidate:
            return metadata
    raise TypeError('Type %s not supported in BLAS operations' % str(dtype))
def get_net(data_loader, name):
    """Build a DAG net that dequeues the loader's output blobs on each GPU."""
    logger = logging.getLogger(__name__)
    blob_names = data_loader.get_output_names()
    net = core.Net(name)
    net.type = 'dag'
    for gpu_id in range(cfg.NUM_GPUS):
        name_scope = core.NameScope('gpu_{}'.format(gpu_id))
        device_scope = core.DeviceScope(muji.OnGPU(gpu_id))
        with name_scope, device_scope:
            # Pre-create the blobs so DequeueBlobs has destinations to fill.
            for blob_name in blob_names:
                workspace.CreateBlob(core.ScopedName(blob_name))
            net.DequeueBlobs(data_loader._blobs_queue_name, blob_names)
    logger.info('Protobuf:\n' + str(net.Proto()))
    return net
class AggPredictor():
    """Extracts the aggregator label for a SQL clause from parsed SQL.

    `sql` looks like a 3-slot structure: [select_agg, [orderBy_agg, ...],
    [_, [having_agg, ...], ...]] -- NOTE(review): inferred from the
    indexing below; confirm against the dataset format.
    """

    def __init__(self, question, sql, history, kw=None):
        self.sql = sql
        self.question = question
        self.history = history
        self.kw = kw

    def generate_output(self):
        """Return (history, label); label is -1 for unrecognized clauses."""
        # Explicit keyword wins; otherwise infer the clause from history.
        clause = self.kw if self.kw else self.history[-2]
        if clause == 'select':
            return (self.history, self.sql[0])
        if clause == 'orderBy':
            return (self.history, self.sql[1][0])
        if clause == 'having':
            return (self.history, self.sql[2][1][0])
        return (self.history, -1)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=(- 1)):
    """Constant LR schedule preceded by a linear warmup.

    The LR multiplier ramps linearly from 0 to 1 over `num_warmup_steps`
    optimizer steps, then stays at 1.0.
    """
    def lr_lambda(current_step: int):
        # max() guards against division by zero when num_warmup_steps == 0.
        warmup_fraction = float(current_step) / float(max(1.0, num_warmup_steps))
        return warmup_fraction if current_step < num_warmup_steps else 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
class LogFriendlyProgressBar():
    """tqdm-like progress reporter that prints plain lines to stderr.

    A "progress: N%" line is emitted whenever the integer percentage
    advances past the last reported value AND is a multiple of `step`
    -- suitable for log files where carriage-return bars are unreadable.
    """

    def __init__(self, iterable, desc, total, step: int = 1):
        self._desc = desc
        self._i = 0
        self._N = total
        self.step = step
        self._progress = 0
        self._iterable = iterable
        self._iterator = None

    def __iter__(self):
        self._iterator = iter(self._iterable)
        return self

    def __next__(self):
        value = next(self._iterator)
        self._i += 1
        percent = math.floor((self._i * 100) / self._N)
        should_report = percent > self._progress and percent % self.step == 0
        if should_report:
            if self._desc:
                print(f'{self._desc} - progress: {percent}%', file=sys.stderr)
            else:
                print(f'Progress: {percent}%', file=sys.stderr)
            sys.stderr.flush()
            self._progress = percent
        return value

    def close(self):
        # Present for API compatibility with tqdm; nothing to clean up.
        return
# NOTE(review): `.parametrize(...)` looks like a decorator whose
# `@pytest.mark` prefix was lost in extraction -- confirm against the
# original file.
.parametrize('value, expected', (({'key': '1'}, True), ({'key': 1}, True), ({'key': '\udcff'}, False), ({'key': ['1', 'abc', '\udcff']}, False)))
def test_is_valid_query(value, expected):
    # Values containing lone surrogates (e.g. '\udcff') are invalid, even
    # when nested inside lists.
    assert (is_valid_query(value) == expected)
class Tool(BaseTool):
    """LangChain tool wrapping a single OpenAPI operation.

    The constructor builds a `func` closure that parses the model's JSON
    action input, calls the API (retrying server errors up to 3 times),
    and truncates -- or offloads to a temp file -- over-long responses.
    """
    description: str = ''
    # Synchronous callable invoked with the raw 'Action Input' string.
    func: Callable[([str], str)]
    coroutine: Optional[Callable[([str], Awaitable[str])]] = None
    # Responses longer than this are truncated, or written to a file when
    # retrieval is available.
    max_output_len = 3000

    def _run(self, tool_input: str) -> str:
        return self.func(tool_input)

    async def _arun(self, tool_input: str) -> str:
        if self.coroutine:
            return (await self.coroutine(tool_input))
        raise NotImplementedError('Tool does not support async')

    def __init__(self, base_url, func_name, openapi_spec, path, method, description, retrieval_available=False, **kwargs):
        def func(params):
            # Validate and normalize the model-provided JSON input.
            try:
                params = parse_json_string(params)
                if isinstance(params, list):
                    return "'Action Input' cannot be a list. Only call one function per action."
            # NOTE(review): bare except deliberately turns ANY parse error
            # into a model-readable message -- consider narrowing to Exception.
            except:
                return "Invalid JSON format. Please ensure 'Action Input' is a valid JSON object."
            # Retry only server-side (5xx) failures, at most 3 times.
            retry_times = 0
            while (retry_times < 3):
                try:
                    response = call_api_function(input_params=params, openapi_spec=openapi_spec, path=path, method=method, base_url=base_url)
                except ValueError as e:
                    return str(e)
                message = f'Status Code: {response.status_code}. Response: {response.text}'
                if (response.status_code >= 500):
                    retry_times += 1
                    continue
                break
            if (not (200 <= response.status_code < 300)):
                message += ". You should choose one of: (1) change the input and retry; (2) return the 'Final Answer' and explain what happened; (You must choose this one when the error occurs more than 3 times.) (3) call another function."
            if (len(message) > self.max_output_len):
                if retrieval_available:
                    # Offload the long output so the model can fetch it lazily.
                    file_name = f'./tmp/retrieval_{int(time.time())}.txt'
                    with open(file_name, 'w', encoding='utf-8') as f:
                        f.write(message)
                    return ("The output is too long. You need to use the 'retrievalDataFromFile' function to retrieve the output from the file: " + file_name)
                else:
                    message = message[:self.max_output_len]
            return message
        tool_name = func_name
        super(Tool, self).__init__(name=tool_name, func=func, description=description, **kwargs)
def schema(open_api_3_schema_with_recoverable_errors):
    """Build a schemathesis schema from the recoverable-errors OpenAPI dict."""
    raw_spec = open_api_3_schema_with_recoverable_errors
    return schemathesis.from_dict(raw_spec)
def node_to_internal_type(node: bblfsh.Node):
    """Return the internal type of a babelfish node.

    Accepts either an already-resolved type name (str), returned
    unchanged, or a `bblfsh.Node`, whose `internal_type` attribute is read.
    """
    # isinstance instead of `type(node) == str`: idiomatic, and it also
    # accepts str subclasses.
    if isinstance(node, str):
        return node
    return node.internal_type
_utils.test()
def test_write_after_break():
    """Regression test: inside a Taichi kernel, a write placed after a
    taken `break` must not execute.

    NOTE(review): the stray `_utils.test()` above and the kernel nature of
    `foo` suggest stripped decorators (test marker / @ti.kernel) — confirm.
    """
    a = ti.field(ti.i32, shape=5)
    # Sentinel fill: any slot the kernel does not write keeps -1.
    a.fill((- 1))
    def foo():
        # Serialize the outer loop so iterations execute in order.
        ti.loop_config(serialize=True)
        for i in range(5):
            while True:
                if (i > 3):
                    break
                # Reached only for i <= 3: for i == 4 the break above fires
                # before this write.
                a[i] = i
                break
    foo()
    # a[4] must keep its fill value — the write after the break never ran.
    assert (a[4] == (- 1))
class BasicModel(torch.nn.Module):
    """Small conv -> batchnorm -> relu model used as a test fixture.

    ``forward`` also returns the input's shape so callers can compute
    output sizes from it.
    """
    def __init__(self):
        super(BasicModel, self).__init__()
        self.conv1 = Conv2d(8, 8, 3)
        self.bn = BatchNorm2d(8)
        self.relu = ReLU()
    def forward(self, inp):
        input_shape = inp.shape
        activated = self.relu(self.bn(self.conv1(inp)))
        return (activated, input_shape)
    def parameters_sum(self):
        """Number of scalar weights in the first conv's kernel tensor."""
        kernel = getattr(self.conv1, KERNEL)
        return kernel.detach().numpy().flatten().shape[0]
    def max_tensor(self):
        """Run one random sample through the model and size its input shape."""
        sample = torch.from_numpy(next(small_random_datagen())[0]).float()
        (_, input_shape) = self(sample)
        return compute_output_size(input_shape)
def ftimer_handle_frame(exp_meta, exp_meta_lock, frame):
    """Track LoRaWAN data frames belonging to the device under test.

    Parses ``frame['payload']`` as a LoRaWAN message; when it is a data
    message for DUT_DEV_ADDR, records it into ``exp_meta`` (under
    ``exp_meta_lock``) and prints a tracking line noting whether a GPS
    timestamp was available.  Unparsable frames are logged and ignored.
    """
    # Anything shorter than 12 bytes cannot be a data frame we care about.
    if (len(frame['payload']) < 12):
        return
    try:
        msg = dissect.base.LoRaWANMessage(frame['payload'])
        if (msg.mhdr.data_msg and seq_eq(msg.payload.fhdr.devAddr, DUT_DEV_ADDR)):
            f_cnt = msg.payload.fhdr.fCnt
            port = msg.payload.port
            d = ('up' if msg.mhdr.data_up else 'down')
            with exp_meta_lock:
                log_frame(exp_meta, d, 'gps', f_cnt, port, frame, multi=True)
            if ('gps_time' in frame):
                print(('GPS: Tracked %slink frame %d' % (d, f_cnt)))
            else:
                print(('GPS: Tracked %slink frame %d (localtime only!)' % (d, f_cnt)))
        elif msg.mhdr.data_msg:
            print(('GPS: Got frame for different devAddr: %02x %02x %02x %02x' % msg.payload.fhdr.devAddr))
    except Exception:
        # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # still propagate; parse failures remain best-effort.
        print('GPS: Could not parse frame')
        traceback.print_exc()
def test_all_checks():
    """Run each deliberately-broken sampler through its estimator check."""
    cases = [
        (BaseBadSampler, check_target_type),
        (SamplerSingleClass, check_samplers_one_label),
        (NotFittedSampler, check_samplers_fit),
        (NoAcceptingSparseSampler, check_samplers_sparse),
        (NotPreservingDtypeSampler, check_samplers_preserve_dtype),
    ]
    for sampler_cls, check in cases:
        _test_single_check(sampler_cls, check)
_python_op()
class ResourceTest(Kernel):
    """Kernel test double that counts fetch_resources calls in a file."""
    def __init__(self, config, path):
        # `config` is accepted for interface compatibility but unused here.
        self.path = path
    def fetch_resources(self):
        """Increment the integer counter stored in the backing file."""
        with open(self.path, 'r') as f:
            count = int(f.read())
        with open(self.path, 'w') as f:
            f.write(str(count + 1))
    def setup_with_resources(self):
        """Assert the counter was bumped exactly once before setup runs."""
        with open(self.path, 'r') as f:
            assert (int(f.read()) == 1)
    def execute(self, frame: FrameType) -> Any:
        # This kernel does no per-frame work.
        return None
def create_model(metric: str = 'cosine', scale_cls: float = 10.0, learn_scale: bool = True, normalize: bool = True):
    """Factory for a Prototypical-Network classification head.

    Args:
        metric: Distance metric used by the head (default 'cosine').
        scale_cls: Initial logit scaling factor.  (Annotation fixed from
            ``int`` to ``float`` to match the 10.0 default.)
        learn_scale: Whether the scale is a learnable parameter.
        normalize: Whether to L2-normalize features.

    Returns:
        A ``PN_head`` instance configured with the given options.
    """
    return PN_head(metric, scale_cls, learn_scale, normalize)
def bin_to_ascii(B):
    """Convert a binary string to its ASCII text.

    Args:
        B: Non-empty string of '0'/'1' characters whose length is a
           multiple of 8 (one byte per output character).

    Returns:
        The decoded string, one character per 8-bit group.

    Raises:
        ValueError: If ``B`` is empty or its length is not a multiple of 8.
    """
    n = len(B)
    if (n == 0):
        raise ValueError('B must be a non-empty binary string.')
    # Built-in % replaces the external mod() helper; behavior is identical
    # for the non-negative int length.
    if (n % 8 != 0):
        raise ValueError('The number of bits in B must be a multiple of 8.')
    # int(x) suffices; the original's int(str(x)) and list(B) were redundant.
    bits = [int(x) for x in B]
    # Decode each 8-bit group via ascii_integer and join the characters.
    return ''.join(chr(ascii_integer(bits[(8 * i):(8 * (i + 1))])) for i in range(n // 8))
def fine_type(mention):
    """Extract the fine-grained type feature for a mention.

    NOM mentions use their 'fine_type' attribute, PRO mentions their
    'citation_form', anything else falls back to the coarse 'type';
    empty/missing values become 'NONE'.
    """
    attrs = mention.attributes
    coarse = attrs['type']
    if coarse == 'NOM':
        value = attrs['fine_type']
    elif coarse == 'PRO':
        value = attrs['citation_form']
    else:
        value = coarse
    return ('fine_type', value or 'NONE')
def _kind_name(dtype):
    """Map ``dtype.kind`` to its stem name via the module lookup table.

    Raises RuntimeError for a kind missing from ``_kind_to_stem`` — that
    indicates an internal inconsistency, not a user error.
    """
    try:
        return _kind_to_stem[dtype.kind]
    except KeyError:
        message = 'internal dtype error, unknown kind {!r}'.format(dtype.kind)
        raise RuntimeError(message)
class Optimizer():
    """Drives mini-batch training over a data loader, maintaining a running
    average loss and logging progress every ``ob_batch_num`` batches."""
    def __init__(self, model, sess, ob_batch_num=100):
        self.model = model                # model exposing train_batch(sess, data)
        self.sess = sess                  # session handed to the model
        self.ob_batch_num = ob_batch_num  # log once per this many batches
        self.scan_data = 0                # datapoints consumed this epoch
        self.scan_batch = 0               # batches consumed this epoch
        self.ret_loss = 0                 # running average loss
        self.tb_point = 0                 # global step counter (never reset)
    def _reset_optm_info(self):
        """Clear per-epoch counters; the global tb_point is intentionally kept."""
        self.scan_data = self.scan_batch = 0
        self.ret_loss = 0.0
    def _optimize(self, optm_data_loader, batch_idx):
        """Train on one batch and fold its loss into the running average."""
        batch_data, batch_size = optm_data_loader.get_batch(batch_idx=batch_idx)
        _, batch_loss, summary = self.model.train_batch(self.sess, batch_data)
        batch_loss = float(batch_loss)
        # Size-weighted running mean over all datapoints seen so far.
        seen = self.scan_data
        self.ret_loss = (1.0 * ((self.ret_loss * seen) + (batch_loss * batch_size))) / (seen + batch_size)
        self.scan_data += batch_size
        self.scan_batch += 1
        self.tb_point += 1
        if (self.scan_batch % self.ob_batch_num) == 0:
            LogInfo.logs('[optm-%s-%d/%d] cur_batch_loss = %.6f, avg_loss = %.6f, scanned = %d/%d', optm_data_loader.mode, self.scan_batch, optm_data_loader.n_batch, batch_loss, self.ret_loss, self.scan_data, len(optm_data_loader))
    def optimize_all(self, optm_data_loader):
        """Reset epoch counters, then train over every batch in the loader."""
        self._reset_optm_info()
        for idx in range(optm_data_loader.n_batch):
            self._optimize(optm_data_loader=optm_data_loader, batch_idx=idx)
_method
class UnknownClass(UniqueRepresentation):
    """Three-valued-logic 'Unknown' value.

    Combines with genuine booleans under & and |, but refuses plain
    boolean coercion (``bool(Unknown)`` raises), forcing callers to handle
    the unknown case explicitly.
    """
    def __repr__(self):
        return 'Unknown'
    def __bool__(self):
        raise UnknownError('Unknown does not evaluate in boolean context')
    def __and__(self, other):
        # False dominates; True/Unknown leave the result unknown.
        if other is False:
            return False
        if other is True or other is Unknown:
            return self
        return NotImplemented
    def __or__(self, other):
        # True dominates; False/Unknown leave the result unknown.
        if other is True:
            return True
        if other is False or other is Unknown:
            return self
        return NotImplemented
    def __richcmp__(self, other, op):
        # Unknown equals itself and sorts below any genuine bool.
        if other is self:
            return rich_to_bool(op, 0)
        if not isinstance(other, bool):
            return NotImplemented
        return rich_to_bool(op, (-1) if other else 1)
_dispatch
def rfft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None):
    """2-D FFT of a real array — dispatch stub.

    NOTE(review): this looks like a uarray multimethod (the stray
    ``_dispatch`` above is presumably its stripped decorator).  The body
    computes nothing: returning ``(Dispatchable(x, np.ndarray),)`` tells
    the dispatch machinery which argument drives backend selection; the
    actual transform is supplied by the registered backend.
    """
    return (Dispatchable(x, np.ndarray),)
def test_tocuda_unimplementedkernels7():
    """A RecordArray with mixed field contents must survive a
    CPU -> CUDA -> CPU round trip unchanged."""
    leaf = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1]))
    offs = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
    jagged = ak.contents.ListOffsetArray(offs, leaf)
    ints = ak.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
    floats = ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    records = ak.contents.RecordArray([ints, jagged, floats, ints], fields=['one', 'two', '2', 'wonky'])
    on_gpu = ak.to_backend(records, 'cuda')
    back_on_cpu = ak.to_backend(on_gpu, 'cpu')
    # Both the device copy and the round-tripped copy must equal the
    # original when materialized as plain Python lists.
    assert (ak.to_list(on_gpu) == ak.to_list(records))
    assert (ak.to_list(back_on_cpu) == ak.to_list(records))
class ControlFlowToyModel(nn.Module):
    """Toy model whose forward pass branches on the input's value: the
    second linear layer is applied only when the input equals a (20, 10)
    all-ones tensor.  Used to exercise data-dependent control flow."""
    def __init__(self):
        super(ControlFlowToyModel, self).__init__()
        self.lin1 = nn.Linear(10, 10, bias=False)
        self.lin2 = nn.Linear(10, 10, bias=False)
    def forward(self, x):
        hidden = F.relu(self.lin1(x))
        if torch.equal(x, torch.ones(20, 10, device=x.device)):
            return self.lin2(hidden)
        return hidden
class _decomposition4d_args():
    """Hyper-parameter constants for a multi-level 4-D hash-grid decomposition.

    NOTE(review): the bare annotated defaults suggest this is a @dataclass
    whose decorator is outside this view — confirm.
    """
    log2_hashmap_size: int = 19      # log2 of the hash-map size (name suggests 2**19 entries)
    n_features_per_level: int = 2    # feature channels stored per level
    n_levels: int = 16               # number of resolution levels
    coarsest_resolution: int = 32    # grid resolution at the coarsest level
    finest_resolution: int = 2048    # grid resolution at the finest level
def get_dataset(args, models, shuffle=False):
    """Build the webdataset-backed feature-extraction dataset for this rank.

    Returns ``(data, num_workers)``: a ``wds.ResizedDataset`` over the
    preprocessed shards (with extraction caches attached) and the chosen
    DataLoader worker count.
    """
    (shards_path, rest) = get_shards_path(args, f=get_shards_size)
    # Effective world size may be capped by an explicit num_gpus setting.
    if isinstance(args.computation.num_gpus, int):
        world_size = min(du.get_world_size(), args.computation.num_gpus)
    else:
        world_size = du.get_world_size()
    # Per-process batch size.
    batch_size = int((args.data.batch_size / world_size))
    (num_workers, effective_num_workers) = get_num_workers(args.computation.num_workers, len(shards_path))
    out_str = '#Workers of Feature Extraction Dataset'
    out_str += f' (node={du.get_rank()})'
    out_str += f': {num_workers}'
    print(out_str)
    shards_size_dt = rest['shards_size_dt']
    shard_names = [Path(p).stem for p in shards_path]
    if args.acav.force_cache_restart:
        # Ignore previous extraction caches: process every datapoint.
        skip_lists = {}
        caches = None
        shards_size = [shards_size_dt[key] for key in shard_names]
    else:
        # Resume: subtract already-processed datapoints per shard.
        (caches, skip_lists) = load_shard_caches(args, shards_path)
        shards_size = [(shards_size_dt[key] - len(skip_lists[key])) for key in shard_names]
    data = MetaWebDataset(shards_path, handler=wds.warn_and_continue, skip_lists=skip_lists)
    # Sample ids are longer when replicate clips are enabled.
    id_len = (25 if args.acav.use_replicates else 12)
    get_name = partial(_get_name, id_len=id_len)
    load_video = partial(load_video_webdata, num_frames=args.data.media.num_frames, duration=args.acav.duration, skip_shorter_seconds=(args.acav.duration * args.acav.skip_shorter_ratio))
    add_meta = partial(_add_meta, shards_size_dt=rest['shards_size_dt'])
    preprocess = Preprocessors(args, models)
    # Pipeline: extract name -> load video -> drop failed loads ->
    # preprocess -> drop failed preprocessing -> attach shard metadata.
    data = data.map(get_name, handler=wds.warn_and_continue).map_tuple(load_video, identity, handler=wds.warn_and_continue).pipe(drop_none).map_tuple(preprocess, identity, handler=wds.warn_and_continue).map(check_data_none, handler=wds.warn_and_continue).map(add_meta, handler=wds.warn_and_continue)
    if shuffle:
        data = data.shuffle(args.computation.shuffle_bufsize)
    '\n    if the actual number of datapoints is smaller than length,\n    the ResizedDataset will fill the difference with duplicate datapoints\n    '
    print('rank {} dataset_size: {}'.format(du.get_rank(), shards_size))
    length = du.get_length(shards_size, batch_size, args.computation.num_workers, world_size)
    # Nominal size accounts for per-worker duplication.
    nominal = (length * effective_num_workers)
    data = wds.ResizedDataset(data, length, nominal)
    data.caches = caches
    return (data, num_workers)
def model_fields(model, only=None, exclude=None, field_args=None, converter=None):
    """Generate form fields for a model's properties.

    Properties are visited in creation order; ``only`` keeps just the named
    fields (in the given order), otherwise ``exclude`` removes the named
    ones.  ``field_args`` supplies per-field keyword arguments for the
    converter; properties the converter maps to None are dropped.
    """
    converter = converter or ModelConverter()
    field_args = field_args or {}
    props = model.properties()
    ordered = sorted(iteritems(props), key=(lambda item: item[1].creation_counter))
    names = [prop_name for (prop_name, _) in ordered]
    if only:
        names = [f for f in only if f in names]
    elif exclude:
        names = [f for f in names if f not in exclude]
    fields = {}
    for prop_name in names:
        converted = converter.convert(model, props[prop_name], field_args.get(prop_name))
        if converted is not None:
            fields[prop_name] = converted
    return fields
(frozen=True)
class FunctionSchema():
    """Parsed model of a native-function schema string such as
    ``add(Tensor self, Tensor other) -> Tensor``.

    NOTE(review): the stray ``(frozen=True)`` above suggests this was
    ``@dataclass(frozen=True)``; the decorator is lost in this view.
    """
    # Operator name (base name plus optional overload).
    name: 'OperatorName'
    # Positional arguments.
    arguments: Sequence['Argument']
    # Keyword-only arguments (declared after the '*' marker).
    kwarg_only_arguments: Sequence['Argument']
    # out= arguments; for out variants these mirror the returns.
    out_arguments: Sequence['Argument']
    # Declared return values.
    returns: Sequence['Return']
    def schema_order_arguments(self) -> Iterator['Argument']:
        """All arguments in schema order: positional, kw-only, then out."""
        return itertools.chain(self.arguments, self.kwarg_only_arguments, self.out_arguments)
    def parse(func: str) -> 'FunctionSchema':
        """Parse a schema string into a FunctionSchema.

        Asserts the result round-trips: ``str(parsed) == func``.
        NOTE(review): no ``self``/``cls`` parameter — presumably a
        @staticmethod whose decorator is outside this view.
        """
        assert (' -> ' in func), 'function schema missing return type (spaces are mandatory)'
        (func_decl, return_decl) = [x.strip() for x in func.split(' -> ')]
        (ops, args) = func_decl.split('(', 1)
        assert (args[(- 1)] == ')'), 'Expecting closing )'
        args = args[:(- 1)]
        name = OperatorName.parse(ops)
        (arguments, kwarg_only_arguments, out_arguments) = parse_arguments(args)
        returns = parse_returns(return_decl)
        r = FunctionSchema(name=name, arguments=arguments, kwarg_only_arguments=kwarg_only_arguments, out_arguments=out_arguments, returns=returns)
        assert (str(r) == func), f'{str(r)} != {func}'
        return r
    def __post_init__(self) -> None:
        """Invariant checks run after construction."""
        # Each out argument's alias annotation must match the corresponding
        # return's annotation, positionally.
        for (arg, ret) in zip(self.out_arguments, self.returns):
            assert (arg.annotation == ret.annotation), 'Out arguments must have matching return Tensor; furthermore, the ith-argument needs to correspond to the ith return'
        if self.out_arguments:
            assert (len(self.out_arguments) == len(self.returns)), 'Must return as many arguments as there are out arguments'
        if self.name.name.inplace:
            # In-place ops must return exactly one value, except a known
            # allow-list of _foreach/amp ops that return nothing.
            if (str(self.name) not in ['_amp_non_finite_check_and_unscale_', '_foreach_add_scalar_list_', '_foreach_sub_scalar_list_', '_foreach_mul_scalar_list_', '_foreach_div_scalar_list_', '_foreach_add_.Scalar', '_foreach_sub_.Scalar', '_foreach_mul_.Scalar', '_foreach_div_.Scalar', '_foreach_add_.List', '_foreach_sub_.List', '_foreach_mul_.List', '_foreach_div_.List', '_foreach_exp_', '_foreach_sqrt_', '_foreach_addcmul_', '_foreach_addcdiv_']):
                assert (len(self.returns) == 1)
    def is_out_fn(self) -> bool:
        """True when this schema has any out= arguments."""
        return bool(self.out_arguments)
    def __str__(self) -> str:
        """Reconstruct the canonical schema string (inverse of parse)."""
        all_arguments: List[str] = []
        all_arguments.extend(map(str, self.arguments))
        # '*' separates positional from kw-only/out arguments.
        if (self.kwarg_only_arguments or self.out_arguments):
            all_arguments.append('*')
        all_arguments.extend(map(str, self.kwarg_only_arguments))
        all_arguments.extend(map(str, self.out_arguments))
        all_arguments_str = ', '.join(all_arguments)
        # Single returns are bare; multiple returns are parenthesized.
        if (len(self.returns) == 1):
            returns = str(self.returns[0])
        else:
            returns = (('(' + ', '.join(map(str, self.returns))) + ')')
        return f'{self.name}({all_arguments_str}) -> {returns}'
def deprocess_image(img):
    """Normalize an arbitrary float image into displayable uint8 pixels.

    Standardizes (zero mean, ~0.1 std), recenters to 0.5, clips to [0, 1],
    and scales to 0..255.  The caller's array is not modified.
    """
    centered = img - np.mean(img)
    # 1e-05 guards against division by zero for constant images.
    normalized = centered / (np.std(centered) + 1e-05)
    shifted = normalized * 0.1 + 0.5
    return np.uint8(np.clip(shifted, 0, 1) * 255)
class CodeData(torch.utils.data.Dataset):
    """Dataset of tokenized CAD "neural code" sequences.

    Joins pickled per-level codebook indices (solid / profile / loop) into
    one flat, sentinel-separated code sequence per CAD sample, pads each to
    MAX_CODE, and keeps only the unique sequences.
    """
    def __init__(self, cad_path, solid_path, profile_path, loop_path):
        # Load raw CAD samples plus the three code lookups keyed by uid.
        with open(cad_path, 'rb') as f:
            cad_data = pickle.load(f)
        with open(solid_path, 'rb') as f:
            solid_data = pickle.load(f)
        self.solid_code = solid_data['content']
        with open(profile_path, 'rb') as f:
            profile_data = pickle.load(f)
        self.profile_code = profile_data['content']
        with open(loop_path, 'rb') as f:
            loop_data = pickle.load(f)
        self.loop_code = loop_data['content']
        # Codebook sizes; used to shift each level's ids into disjoint
        # ranges (loop ids first, then profile ids, then solid ids).
        self.solid_unique_num = solid_data['unique_num']
        self.profile_unique_num = profile_data['unique_num']
        self.loop_unique_num = loop_data['unique_num']
        self.data = []
        print('Loading data...')
        for cad in tqdm(cad_data):
            # uid convention: solid uid is the basename of cad['name'];
            # profile uid is '<solid>_<se idx>'; loop uid is
            # '<profile>_<loop idx>'.  Samples missing any code are skipped.
            solid_uid = cad['name'].split('/')[(- 1)]
            if (solid_uid not in self.solid_code):
                continue
            solid_code = ((self.solid_code[solid_uid] + self.loop_unique_num) + self.profile_unique_num)
            num_se = len(cad['cad_ext'])
            sketchProfileCode = []
            sketchLoopCode = []
            valid = True
            for idx_se in range(num_se):
                profile_uid = ((solid_uid + '_') + str(idx_se))
                if (profile_uid not in self.profile_code):
                    valid = False
                    continue
                profile_code = (self.profile_code[profile_uid] + self.loop_unique_num)
                sketchProfileCode.append(profile_code)
                loop_codes = []
                # Command id 3 marks a loop start (see param2pix), so its
                # count gives the number of loops in this sketch-extrude.
                num_loop = len(np.where((cad['cad_cmd'][idx_se] == 3))[0])
                for idx_loop in range(num_loop):
                    loop_uid = ((profile_uid + '_') + str(idx_loop))
                    if (loop_uid not in self.loop_code):
                        valid = False
                        continue
                    loop_code = self.loop_code[loop_uid]
                    loop_codes.append(loop_code)
                sketchLoopCode.append(loop_codes)
            if (not valid):
                continue
            (pixel_full, _, _) = self.param2pix(cad)
            # Flatten into one sequence: per profile -> [-1, profile, -2,
            # loops..., -3]; then [solid, -4].  CODE_PAD shifts every token
            # (including the negative sentinels) into the non-negative range.
            total_code = []
            for (bbox_code, loops) in zip(sketchProfileCode, sketchLoopCode):
                total_code += [(- 1)]
                total_code += [bbox_code]
                total_code += [(- 2)]
                total_code += loops
                total_code += [(- 3)]
            total_code += [solid_code]
            total_code += [(- 4)]
            total_code = (np.array(total_code) + CODE_PAD)
            # Skip samples whose code or pixel sequence exceeds the limits.
            if ((len(total_code) > MAX_CODE) or (len(pixel_full) > MAX_CAD)):
                continue
            total_code = self.pad_code(total_code)
            self.data.append(total_code)
        # Deduplicate whole sequences; __len__/__getitem__ index unq_code.
        self.unq_code = np.unique(np.vstack(self.data), return_counts=False, axis=0)
        return
    def param2pix(self, cad):
        """Convert a CAD sample's sketch parameters into token sequences.

        Returns ``(pixel_full, coord_full, ext_full)``: rasterized pixel
        indices, raw 2-D coordinates, and extrusion parameters, each with
        negative sentinels separating primitives/sketches and shifted by
        the corresponding *_PAD offset.
        """
        pixel_full = []
        coord_full = []
        ext_full = []
        for (cmd, param, ext) in zip(cad['cad_cmd'], cad['cad_param'], cad['cad_ext']):
            ext_full.append(ext)
            ext_full.append(np.array([(- 1)]))
            coords = []
            pixels = []
            for (cc, pp) in zip(cmd, param):
                # Command ids consume a fixed number of 2-D points:
                # 6 -> 4 points, 5 -> 2 points, 4 -> 1 point, each followed
                # by a (-1,-1) separator; 3/2/1 are structural markers
                # emitted as (-2,-2)/(-3,-3)/(-4,-4) sentinels.
                # NOTE(review): the primitive each id denotes (curve/arc/
                # line/loop...) is not visible here — confirm in the encoder.
                if (cc == 6):
                    coords.append(pp[0:2])
                    coords.append(pp[2:4])
                    coords.append(pp[4:6])
                    coords.append(pp[6:8])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 5):
                    coords.append(pp[0:2])
                    coords.append(pp[2:4])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 4):
                    coords.append(pp[0:2])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 3):
                    coords.append(np.array([(- 2), (- 2)]))
                elif (cc == 2):
                    coords.append(np.array([(- 3), (- 3)]))
                elif (cc == 1):
                    coords.append(np.array([(- 4), (- 4)]))
            for xy in coords:
                # Sentinels (negative x) pass through unchanged; real points
                # are rasterized to the single index y * 2**CAD_BIT + x.
                if (xy[0] < 0):
                    pixels.append(xy[0])
                else:
                    pixels.append(((xy[1] * (2 ** CAD_BIT)) + xy[0]))
            pixel_full.append(pixels)
            coord_full.append(coords)
        # Trailing end-of-sample sentinels.
        ext_full.append(np.array([(- 2)]))
        coord_full.append(np.array([(- 5), (- 5)]))
        pixel_full += [(- 5)]
        ext_full = (np.hstack(ext_full) + EXT_PAD)
        coord_full = (np.vstack(coord_full) + SKETCH_PAD)
        pixel_full = (np.hstack(pixel_full) + SKETCH_PAD)
        return (pixel_full, coord_full, ext_full)
    def pad_code(self, total_code):
        """Right-pad a code sequence with zeros up to MAX_CODE."""
        padding = np.zeros((MAX_CODE - len(total_code))).astype(int)
        total_code = np.concatenate([total_code, padding], axis=0)
        return total_code
    def __len__(self):
        # Length counts unique sequences, not raw samples.
        return len(self.unq_code)
    def __getitem__(self, index):
        """Return ``(code, code_mask)`` for the index-th unique sequence.

        The mask is False over the real tokens plus the first pad token,
        True over the remaining padding.
        NOTE(review): assumes every stored code contains at least one 0 pad
        token; a full-length sequence would make np.where(...)[0][0] raise
        IndexError — confirm MAX_CODE is never exactly reached.
        """
        code = self.unq_code[index]
        code_mask = (np.zeros(MAX_CODE) == 0)
        code_mask[:(np.where((code == 0))[0][0] + 1)] = False
        return (code, code_mask)
.parametrize('implementation', ['pure', 'im2col'])
.parametrize('num_in_channels, kernel_size, num_filters, bias', [(1, (3, 3), 8, True), (8, (3, 3), 3, False), (8, (5, 5), 3, True), (8, (4, 4), 3, False)])
.pure
def test_conv_simple(num_in_channels, kernel_size, num_filters, bias, implementation):
    """Check the DaCe ONNX Conv expansion against torch.nn.functional.conv2d.

    NOTE(review): the stray ``.parametrize`` / ``.pure`` lines above are
    presumably pytest/daceml decorators lost in extraction, and the nested
    ``conv`` functions presumably carry a stripped @dace.program decorator
    (they are lowered via ``to_sdfg``) — confirm.
    """
    if (implementation == 'im2col'):
        pytest.skip('pure im2col is currently broken')
    # Swap in the requested implementation; restored at the end.
    old_implementation = donnx.ONNXConv.default_implementation
    donnx.ONNXConv.default_implementation = implementation
    batch_size = 8
    X = np.random.rand(batch_size, num_in_channels, 32, 32).astype(np.float32)
    W = np.random.rand(num_filters, num_in_channels, *kernel_size).astype(np.float32)
    # Reference result from PyTorch, with or without a bias input.
    if bias:
        B = np.random.rand(num_filters).astype(np.float32)
        torch_Z = F.conv2d(torch.from_numpy(X), torch.from_numpy(W), bias=torch.from_numpy(B)).numpy()
    else:
        B = None
        torch_Z = F.conv2d(torch.from_numpy(X), torch.from_numpy(W)).numpy()
    dace_Z = np.zeros_like(torch_Z)
    # The dace program signature differs depending on whether a bias input
    # exists; both forward to the ONNXConv library node.
    if bias:
        def conv(X_: dace.float32[tuple(X.shape)], W_: dace.float32[tuple(W.shape)], B_: dace.float32[tuple(B.shape)], Z_: dace.float32[tuple(torch_Z.shape)]):
            donnx.ONNXConv(X=X_, W=W_, B=B_, Y=Z_)
    else:
        def conv(X_: dace.float32[tuple(X.shape)], W_: dace.float32[tuple(W.shape)], Z_: dace.float32[tuple(torch_Z.shape)]):
            donnx.ONNXConv(X=X_, W=W_, Y=Z_)
    sdfg = conv.to_sdfg()
    sdfg.expand_library_nodes()
    if bias:
        sdfg(X_=X, W_=W, Z_=dace_Z, B_=B)
    else:
        sdfg(X_=X, W_=W, Z_=dace_Z)
    print((torch_Z - dace_Z))
    assert np.allclose(torch_Z, dace_Z)
    # Restore the implementation for subsequent tests.
    donnx.ONNXConv.default_implementation = old_implementation
def matrix_interaction_plot(interaction_matrix, tokens, axis=None, cbar_kw=None, cbarlabel='Interaction Value', zero_diagonals=True, **kwargs):
    """Render a token-token interaction matrix as an annotated heatmap.

    Args:
        interaction_matrix: 2-D array of interaction values (token x token).
        tokens: Tick labels for both axes; also used in cell annotations.
        axis: Matplotlib Axes to draw on; defaults to the current axes.
        cbar_kw: Extra kwargs for the colorbar.
        cbarlabel: Colorbar label text.
        zero_diagonals: When True, plot a copy with the diagonal zeroed.
        **kwargs: Forwarded to ``axis.imshow``.

    Returns:
        ``(image, cbar)``: the AxesImage and its Colorbar.
    """
    if (cbar_kw is None):
        cbar_kw = {}
    if zero_diagonals:
        # Work on a copy so the caller's matrix is not mutated.
        interaction_matrix = interaction_matrix.copy()
        np.fill_diagonal(interaction_matrix, 0.0)
    if (not axis):
        axis = plt.gca()
    # Symmetric color scale centered at zero.
    bounds = np.max(np.abs(interaction_matrix))
    if ('cmap' not in kwargs):
        kwargs['cmap'] = colors.maroon_white_aqua()
    image = axis.imshow(interaction_matrix, vmin=(- bounds), vmax=bounds, **kwargs)
    cbar = axis.figure.colorbar(image, ax=axis, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=(- 90), va='bottom', fontsize=14)
    cbar.ax.tick_params(length=6, labelsize=12)
    cbar.outline.set_visible(False)
    # One major tick per token on each axis, labelled with the token text.
    axis.set_xticks(np.arange(interaction_matrix.shape[1]))
    axis.set_yticks(np.arange(interaction_matrix.shape[0]))
    axis.set_xticklabels(tokens)
    axis.set_yticklabels(tokens)
    axis.tick_params(length=6, labelsize=14)
    plt.setp(axis.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
    for (_, spine) in axis.spines.items():
        spine.set_visible(False)
    # Minor ticks at cell borders give a subtle grid without spines.
    axis.set_xticks((np.arange((interaction_matrix.shape[1] + 1)) - 0.5), minor=True)
    axis.set_yticks((np.arange((interaction_matrix.shape[0] + 1)) - 0.5), minor=True)
    axis.tick_params(which='minor', bottom=False, left=False)
    # Annotate each off-diagonal cell with its token pair; annotation color
    # switches to white on cells below the 25th-percentile value.
    color_threshold = np.quantile(interaction_matrix, 0.25)
    for i in range(interaction_matrix.shape[0]):
        for j in range(interaction_matrix.shape[1]):
            if (i == j):
                continue
            if (interaction_matrix[(i, j)] > color_threshold):
                color = 'black'
            else:
                color = 'white'
            # Order the pair consistently on both sides of the diagonal.
            if (i > j):
                text = '{},\n{}'.format(tokens[j], tokens[i])
            else:
                text = '{},\n{}'.format(tokens[i], tokens[j])
            text = axis.text(j, i, text, ha='center', va='center', color=color, fontsize=12)
    return (image, cbar)
def generate_contradictory_answer_from_context(document: str, synth_question: str):
    """Ask the OpenAI chat API for an answer that contradicts *document*.

    Retries up to 5 times on API errors, logging the failure each time.
    Returns the generated answer string, or None when every attempt failed
    (matching the original's implicit-None behavior, now made explicit).
    """
    time.sleep(1)  # crude rate limiting before hitting the API
    system_prompt = 'Create an answer for the given question that contradicts the provided document. You should create false information that disagrees with what exists within the content of the document.'
    for _ in range(5):
        try:
            user_prompt = f'''Question: {synth_question}
Document:{document}'''
            user_prompt = ((system_prompt + '\n\n') + user_prompt)
            messages = [{'role': 'user', 'content': user_prompt}]
            response = openai.ChatCompletion.create(model='gpt-3.5-turbo-16k', messages=messages)
            final_response = response['choices'][0]['message']['content']
            return final_response
        except Exception as exc:
            # Was a bare `except:` with no detail — now narrowed and the
            # failure cause is surfaced to aid debugging.
            print(f'Error querying OpenAI! Attempting again... ({exc})')
            time.sleep(1)  # brief pause before the next attempt
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.