code stringlengths 101 5.91M |
|---|
class TestTwowaySplit(TestCase):
    """Unit tests for ``twoway_split``: partitioning ``n`` indices into a
    train part and a ``k_test``-sized test part."""

    def setUp(self):
        self.n = 40
        self.k_test = 10
        (self.a, self.b) = twoway_split(self.n, self.k_test)

    def test_sizes(self):
        # Derive the expected sizes from the parameters instead of the
        # hard-coded 40/10 literals.
        self.assertEqual([len(self.a), len(self.b)],
                         [self.n - self.k_test, self.k_test])

    def test_union_is_all(self):
        # Bug fix: the original compared `union.all()` with
        # `arange(n).all()` -- two booleans (both False, since 0 is in the
        # range), so the assertion passed for ANY split.  Compare the
        # arrays element-wise instead.
        union = np.union1d(self.a, self.b)
        self.assertTrue(np.array_equal(union, np.arange(self.n)))
.parametrize('task_name', [tn for tn in (all_tasks - julia_tasks)])
def test_describe_theta(task_name):
    """Check that a task's parameter labels form a list whose length matches
    the last dimension of the task's true parameter vector.

    NOTE(review): the ``.parametrize`` decorator line above this function is
    truncated in this chunk (missing its ``@pytest.mark`` prefix).
    """
    task = get_task(task_name)
    labels = task.get_labels_parameters()
    assert isinstance(labels, list)
    # One label per parameter dimension; num_observation=1 just picks an
    # arbitrary observation to read the parameter shape from.
    assert (len(labels) == task.get_true_parameters(num_observation=1).shape[(- 1)])
def get_morgan_fp_smi(smi: str, nbits: int=2048, radius=3) -> np.ndarray:
    """Compute a Morgan fingerprint directly from a SMILES string.

    Parses the string into an RDKit molecule, then delegates to
    ``get_morgan_fp`` with the same ``nbits``/``radius`` settings.
    """
    mol = Chem.MolFromSmiles(smi)
    return get_morgan_fp(mol, nbits=nbits, radius=radius)
def LF_donor(span):
    # Labeling function: label OTHER when the word "donor" appears in the
    # span's sentence, else ABSTAIN.
    rgx = '\\b(donor)\\b'
    # NOTE(review): `text` (the 6-token window left of the span) is computed
    # but never used -- the regex below searches the whole sentence text
    # instead.  Possibly the search was meant to run on `text`; confirm.
    text = get_left_span(span, span.sentence, window=6).text
    # Case-insensitive whole-word search over the stripped sentence.
    return (OTHER if re.search(rgx, span.sentence.text.strip(), re.I) else ABSTAIN)
def local_path_from_s3_or_local_path(filename):
    """Resolve `filename` to a local path.

    Preference order: the path exactly as given, then the path relative to
    ``LOCAL_LOG_DIR``; if neither exists locally, sync the file down from
    remote storage.
    """
    candidate_in_log_dir = os.path.join(LOCAL_LOG_DIR, filename)
    if os.path.isfile(filename):
        return filename
    if os.path.isfile(candidate_in_log_dir):
        return candidate_in_log_dir
    return sync_down(filename)
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
if ((algorithm is 'sha256') or ((algorithm is 'auto') and (len(hash) is 64))):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter((lambda : fpath_file.read(chunk_size)), b''):
hasher.update(chunk)
return hasher.hexdigest() |
class Jasper(nn.Module):
    """Jasper end-to-end speech-recognition model: convolutional encoder plus
    CTC-style decoder.  Two published layouts are supported: '10x5'
    (10 blocks x 5 sub-blocks) and '5x3'.
    """
    def __init__(self, num_classes: int, version: str='10x5', device: torch.device='cuda') -> None:
        super(Jasper, self).__init__()
        # Map version string -> (encoder config, decoder config).
        supported_versions = {'10x5': {'encoder_config': Jasper10x5EncoderConfig(num_blocks=10, num_sub_blocks=5), 'decoder_config': JasperDecoderConfig(num_classes)}, '5x3': {'encoder_config': Jasper5x3EncoderConfig(num_blocks=5, num_sub_blocks=3), 'decoder_config': JasperDecoderConfig(num_classes)}}
        assert (version.lower() in supported_versions.keys()), 'Unsupported Version: {}'.format(version)
        self.encoder = JasperEncoder(config=supported_versions[version]['encoder_config'], device=device)
        self.decoder = JasperDecoder(config=supported_versions[version]['decoder_config'], device=device)
        self.device = device
    def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Encode the (time-transposed) inputs and decode; returns
        (output, output_lengths)."""
        (encoder_outputs, output_lengths) = self.encoder(inputs.transpose(1, 2), input_lengths)
        (output, output_lengths) = self.decoder(encoder_outputs, output_lengths)
        return (output, output_lengths)
    def greedy_search(self, inputs: Tensor, input_lengths: Tensor, device: str):
        """Greedy (argmax over the class dimension) decoding.

        NOTE(review): inputs are transposed here *and* again inside
        forward(), which cancels out -- confirm which layout callers pass.
        The `device` argument is unused.
        """
        with torch.no_grad():
            (output, output_lengths) = self.forward(inputs.transpose(1, 2), input_lengths)
            return output.max((- 1))[1]
class LevitPreTrainedModel(metaclass=DummyObject):
    # Auto-generated placeholder used when torch is not installed: any
    # attempt to instantiate it raises an informative ImportError via
    # `requires_backends`.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
    """Convert applicable model parameters to a lower-precision dtype in place.

    Mirrors CLIP's ``convert_weights``: casts conv/linear weights and biases,
    attention projection tensors, and projection parameters to ``dtype``.
    """

    def _convert_weights(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.to(dtype)
            if (l.bias is not None):
                l.bias.data = l.bias.data.to(dtype)
        if isinstance(l, (nn.MultiheadAttention, Attention)):
            # Cast every projection weight/bias tensor the attention owns.
            for attr in [*[f'{s}_proj_weight' for s in ['in', 'q', 'k', 'v']], 'in_proj_bias', 'bias_k', 'bias_v']:
                tensor = getattr(l, attr, None)
                if (tensor is not None):
                    tensor.data = tensor.data.to(dtype)
        if isinstance(l, nn.Parameter):
            # Kept for parity with the original; nn.Module.apply only visits
            # modules, so this branch is reached only for Parameter-derived
            # submodules, if any.
            l.data = l.data.to(dtype)
        for name in ['text_projection', 'proj']:
            # Bug fix: the original also required `isinstance(l, nn.Parameter)`
            # here, which is never true for a visited module, so projection
            # parameters were silently left in full precision.  Inspect the
            # fetched attribute instead.
            if hasattr(l, name):
                attr = getattr(l, name, None)
                if (attr is not None) and isinstance(attr, torch.Tensor):
                    attr.data = attr.data.to(dtype)

    model.apply(_convert_weights)
def my_kde_bandwidth(obj, fac=(1.0 / 5)):
    """Scott/Silverman-style KDE bandwidth: ``n ** (-1 / (d + 4))`` scaled
    by `fac`, where `obj` carries the sample count ``n`` and dimension ``d``."""
    exponent = (-1.0) / (obj.d + 4)
    return np.power(obj.n, exponent) * fac
class TestSummarizationDistillerMultiGPU(TestCasePlus):
    """End-to-end multi-GPU test for the seq2seq distillation CLI: builds a
    command line from CHEAP_ARGS, runs distillation.py in a subprocess, and
    checks the produced checkpoints/metrics.

    NOTE(review): `setUpClass` is missing its `@classmethod` decorator, and
    the bare `_torch_multi_gpu` line below looks like a truncated
    `@require_torch_multi_gpu` decorator in this chunk.
    """
    def setUpClass(cls):
        return cls
    _torch_multi_gpu
    def test_multi_gpu(self):
        # Distill without a teacher on 2 GPUs; file contents are not checked.
        updates = dict(no_teacher=True, freeze_encoder=True, gpus=2, overwrite_output_dir=True, sortish_sampler=True)
        self._test_distiller_cli_fork(updates, check_contents=False)
    def _test_distiller_cli_fork(self, updates, check_contents=True):
        # Cheap defaults for a fast smoke run; `updates` overrides them.
        default_updates = dict(label_smoothing=0.0, early_stopping_patience=(- 1), train_batch_size=1, eval_batch_size=2, max_epochs=2, alpha_mlm=0.2, alpha_ce=0.8, do_predict=True, model_name_or_path='sshleifer/tinier_bart', teacher=CHEAP_ARGS['model_name_or_path'], val_check_interval=0.5)
        default_updates.update(updates)
        args_d: dict = CHEAP_ARGS.copy()
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)
        def convert(k, v):
            # Turn an (arg-name, value) pair into a CLI token; '' drops it.
            if (k in ['tgt_suffix', 'server_ip', 'server_port', 'out', 'n_tpu_cores']):
                return ''
            if ((v is False) or (v is None)):
                return ''
            if (v is True):
                return f'--{k}'
            return f'--{k}={v}'
        cli_args = [x for x in (convert(k, v) for (k, v) in args_d.items()) if len(x)]
        cmd = ([sys.executable, f'{self.test_file_dir}/distillation.py'] + cli_args)
        # Run the distillation script in a separate process.
        execute_subprocess_async(cmd, env=self.get_env())
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        ckpt_files = [p for p in contents if p.endswith('ckpt')]
        assert (len(ckpt_files) > 0)
        self.assertIn('test_generations.txt', contents)
        self.assertIn('test_results.txt', contents)
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        val_metric = 'rouge2'
        metrics = load_json(metrics_save_path)
        print(metrics)
        last_step_stats = metrics['val'][(- 1)]
        self.assertGreaterEqual(last_step_stats['val_avg_gen_time'], 0.01)
        self.assertIsInstance(last_step_stats[f'val_avg_{val_metric}'], float)
        self.assertEqual(len(metrics['test']), 1)
        # Expected eval count: epochs * (1/val_check_interval) halved, plus
        # the final eval.  NOTE(review): the /2 presumably accounts for the
        # multi-GPU split -- confirm.
        desired_n_evals = int((((args_d['max_epochs'] * (1 / args_d['val_check_interval'])) / 2) + 1))
        self.assertEqual(len(metrics['val']), desired_n_evals)
.parametrize('dt', [ti.i16, ti.u16, ti.u8, ti.i8])
_utils.test(arch=ti.vulkan)
def test_arg_short(dt):
    """Round-trip a short (sub-32-bit) scalar through a compiled Taichi graph.

    NOTE(review): the two decorator lines above this function appear
    truncated in this chunk (missing their '@' prefixes).
    """
    def foo(a: dt, b: ti.types.ndarray(dtype=dt, ndim=1)):
        b[0] = a
    k = ti.ndarray(dt, shape=(1,))
    # Graph arguments: a scalar named 'mat' and a 1-D ndarray named 'b'.
    sym_A = ti.graph.Arg(ti.graph.ArgKind.SCALAR, 'mat', dt)
    sym_B = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'b', dt, ndim=1)
    builder = ti.graph.GraphBuilder()
    builder.dispatch(foo, sym_A, sym_B)
    graph = builder.compile()
    graph.run({'mat': 123, 'b': k})
    # The kernel must have stored the scalar into the ndarray.
    assert (k.to_numpy()[0] == 123)
def main():
    """CLI entry point: synthesize speech with a Matcha-TTS ONNX model.

    Reads text from --text or --file, runs the acoustic model (and a
    vocoder, embedded or external), and writes wavs/mels to --output-dir.

    Bug fixes: the --gpu help text incorrectly said "Use CPU for
    inference", and the GPU provider was named 'GPUExecutionProvider',
    which ONNX Runtime does not recognise -- the CUDA provider is
    'CUDAExecutionProvider'.
    """
    parser = argparse.ArgumentParser(description=' Matcha-TTS: A fast TTS architecture with conditional flow matching')
    parser.add_argument('model', type=str, help='ONNX model to use')
    parser.add_argument('--vocoder', type=str, default=None, help='Vocoder to use (defaults to None)')
    parser.add_argument('--text', type=str, default=None, help='Text to synthesize')
    parser.add_argument('--file', type=str, default=None, help='Text file to synthesize')
    parser.add_argument('--spk', type=int, default=None, help='Speaker ID')
    parser.add_argument('--temperature', type=float, default=0.667, help='Variance of the x0 noise (default: 0.667)')
    parser.add_argument('--speaking-rate', type=float, default=1.0, help='change the speaking rate, a higher value means slower speaking rate (default: 1.0)')
    parser.add_argument('--gpu', action='store_true', help='Use GPU for inference (default: CPU)')
    parser.add_argument('--output-dir', type=str, default=os.getcwd(), help='Output folder to save results (default: current dir)')
    args = parser.parse_args()
    args = validate_args(args)
    if args.gpu:
        # CUDA first, with a CPU fallback if the CUDA provider is missing.
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    else:
        providers = ['CPUExecutionProvider']
    model = ort.InferenceSession(args.model, providers=providers)
    model_inputs = model.get_inputs()
    model_outputs = list(model.get_outputs())
    # Gather utterances, one per line.
    if args.text:
        text_lines = args.text.splitlines()
    else:
        with open(args.file, encoding='utf-8') as file:
            text_lines = file.read().splitlines()
    processed_lines = [process_text(0, line, 'cpu') for line in text_lines]
    x = [line['x'].squeeze() for line in processed_lines]
    x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True)
    x = x.detach().cpu().numpy()
    x_lengths = np.array([line['x_lengths'].item() for line in processed_lines], dtype=np.int64)
    inputs = {'x': x, 'x_lengths': x_lengths, 'scales': np.array([args.temperature, args.speaking_rate], dtype=np.float32)}
    # A fourth graph input marks a multi-speaker model.
    is_multi_speaker = (len(model_inputs) == 4)
    if is_multi_speaker:
        if (args.spk is None):
            args.spk = 0
            warn = '[!] Speaker ID not provided! Using speaker ID 0'
            warnings.warn(warn, UserWarning)
        inputs['spks'] = np.repeat(args.spk, x.shape[0]).astype(np.int64)
    # If the first output is named 'wav', a vocoder is embedded in the graph.
    has_vocoder_embedded = (model_outputs[0].name == 'wav')
    if has_vocoder_embedded:
        write_wavs(model, inputs, args.output_dir)
    elif args.vocoder:
        external_vocoder = ort.InferenceSession(args.vocoder, providers=providers)
        write_wavs(model, inputs, args.output_dir, external_vocoder=external_vocoder)
    else:
        warn = '[!] A vocoder is not embedded in the graph nor an external vocoder is provided. The mel output will be written as numpy arrays to `*.npy` files in the output directory'
        warnings.warn(warn, UserWarning)
        write_mels(model, inputs, args.output_dir)
def reset_data() -> None:
    """Re-initialize the module-level `data` dict with empty result columns."""
    global data
    columns = ['Num. Workers', 'FPS', 'Env', 'System', 'Method']
    data = {column: [] for column in columns}
class LR(torch.nn.Module):
    """Plain logistic-regression CTR model: a single linear layer over the
    sparse feature fields.  The sparsity/threshold/embedding accessors are
    stubs so LR is interchangeable with pruned-embedding models."""

    def __init__(self, opt):
        super(LR, self).__init__()
        self.use_cuda = opt.get('use_cuda')
        self.field_dims = opt['field_dims']
        self.linear = FeaturesLinear(self.field_dims)

    def forward(self, x):
        # One score per example; drop the singleton output dimension.
        logits = self.linear.forward(x)
        return logits.squeeze(1)

    def l2_penalty(self, x, lamb):
        # No embedding table, so there is nothing to regularize.
        return 0

    def calc_sparsity(self):
        # (sparsity, retained-parameter count) placeholders.
        return (0, 0)

    def get_threshold(self):
        return 0

    def get_embedding(self):
        return np.zeros(1)
def validate_fi_associationid(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Finnish association IDs element-wise.

    A (dask) Series yields a per-element boolean Series; a (dask) DataFrame
    validates `column` when given, otherwise every cell; any other value is
    validated as a single scalar.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(associationid.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(associationid.is_valid)
        return df.applymap(associationid.is_valid)
    return associationid.is_valid(df)
def get_error_type(result, binary=False):
    """Map a checker result onto a small integer error code.

    Binary mode: 1 for a True-like result, 0 otherwise.  Otherwise the codes
    are -2 -> 0, -1 -> 1, False -> 2, True -> 3; anything else is rejected.
    Equality (not identity) comparison is deliberate, so the integers 0/1
    behave like False/True.
    """
    if binary:
        return 1 if result == True else 0
    # Check sentinels in the same order as the original if/elif chain.
    for sentinel, code in ((-2, 0), (-1, 1), (False, 2), (True, 3)):
        if result == sentinel:
            return code
    raise NotImplementedError()
def get_xp3_document_iterator(file_path: str) -> Iterator[str]:
    """Stream an xP3 JSONL file, yielding each record's inputs then targets."""
    with open(file_path, 'r') as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            yield record['inputs']
            yield record['targets']
def check_layers(layers):
    """Validate a Prior -> Channel... -> Channel/Likelihood layer layout.

    Raises ValueError when the first layer is not a Prior, when any
    intermediate layer is not a Channel, or when the final layer is neither
    a single-output Channel nor a Likelihood.
    """
    if not isinstance(layers[0], Prior):
        raise ValueError('first layer must be a Prior')
    # NOTE: `i` indexes the intermediate slice, not the full `layers` list.
    for (i, layer) in enumerate(layers[1:(- 1)]):
        if not isinstance(layer, Channel):
            raise ValueError(f'intermediate layer i={i} must be a Channel')
    last = layers[(- 1)]
    if isinstance(last, Channel):
        if last.n_next != 1:
            raise ValueError('last layer must be a Channel with one output')
    elif not isinstance(last, Likelihood):
        raise ValueError('last layer must be a Channel or a Likelihood')
def Upsample(tensor, size):
    """Bilinearly upsample `tensor` to `size` through a named Lambda layer."""
    # Derive the layer name from the input tensor's top-level op name.
    name = (tensor.name.split('/')[0] + '_upsample')

    def bilinear_upsample(x, size):
        # tf.image.resize defaults to bilinear interpolation.
        return tf.image.resize(images=x, size=size)

    return Lambda((lambda x: bilinear_upsample(x, size)), output_shape=size, name=name)(tensor)
class ProblemSet(IterableDataset):
    """Infinite iterable dataset of generated problems.

    Each worker iterates via the nested Iterator, which refills a large
    'ammo' pool of (problem-class, args) pairs, shuffles it, and serves
    fixed-size 'magazine' batches from it.
    """
    class Iterator():
        def __init__(self, problem_set):
            self.problem_set = problem_set
            self.paradigm = problem_set.paradigm
            self.vocab = problem_set.vocab
            # ammo: the shuffled reservoir; magazine: the slice being served.
            self.ammo = []
            self.magazine = []
            self.magazine_size = 1000
        def __next__(self):
            # Refill when the current magazine is exhausted.
            if (len(self.magazine) == 0):
                while (len(self.ammo) < 10000):
                    # Sample a problem type and generate fresh arguments.
                    problem = random.choices(self.problem_set.problems)[0]
                    args = problem.generate()
                    if (self.paradigm == 'rot'):
                        # 'rot' paradigm expands a problem into graph keys.
                        graph = ProbGraph()
                        graph.extend([(problem.__class__, args)])
                        self.ammo.extend(graph.keys())
                    else:
                        self.ammo.append((problem.__class__, args))
                random.shuffle(self.ammo)
                self.magazine = self.ammo[:self.magazine_size]
                self.ammo = self.ammo[self.magazine_size:]
            (prob_cls, args) = self.magazine.pop()
            # Solve lazily at iteration time; return vocab-encoded x/y.
            (x, y, label) = prob_cls.solve(args, paradigm=self.paradigm)
            return (self.vocab(x), self.vocab(y), label)
    def __init__(self, problems: list[Problem], paradigm, vocab):
        super().__init__()
        self.problems = problems
        self.paradigm = paradigm
        self.vocab = vocab
    def __iter__(self):
        # Fresh iterator (and reservoir) per epoch/worker.
        return self.Iterator(self)
    def get_data_loader(self, batch_size, num_workers=1, collate_fn=collate_by_len):
        return DataLoader(self, batch_size, collate_fn=collate_fn, pin_memory=True, num_workers=num_workers)
_function_dispatch(_require_fields_dispatcher)
def require_fields(array, required_dtype):
    """Return a copy of `array` cast to the structured `required_dtype`,
    matching fields by name (destination fields with no source counterpart
    are left uninitialized)."""
    casted = np.empty(array.shape, dtype=required_dtype)
    assign_fields_by_name(casted, array)
    return casted
_binary
def atomic_max(x, y):
    # Taichi frontend op: build an atomic-max expression node from the two
    # operands' expression pointers, tagging it with the current Python
    # stack for error reporting.  NOTE(review): the `_binary` line above
    # looks like a truncated decorator in this chunk.
    return impl.expr_init(expr.Expr(_ti_core.expr_atomic_max(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info())))
def normalize_question(question: str) -> str:
    """Strip a single trailing question mark, if present.

    Bug fix: the original indexed ``question[-1]`` unconditionally, raising
    IndexError on an empty string; ``endswith`` handles that safely.
    """
    if question.endswith('?'):
        question = question[:(- 1)]
    return question
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, **kwargs):
    # Factory for DenseNet variants.  NOTE(review): `arch`, `pretrained`
    # and `progress` are accepted but ignored here -- the usual
    # pretrained-weight loading step appears to have been removed; confirm
    # this is intentional.
    return DenseNet(growth_rate, block_config, num_init_features, **kwargs)
def instantiate_from_config(config):
    """Build the object described by `config`: import the dotted path in
    ``config['target']`` and call it with ``config['params']`` (if any) as
    keyword arguments."""
    if 'target' not in config:
        raise KeyError('Expected key `target` to instantiate.')
    target_cls = get_obj_from_str(config['target'])
    return target_cls(**config.get('params', dict()))
def fixmatch_augment_pool():
    """Return the FixMatch RandAugment pool as (op, max_magnitude, bias)
    triples; ``None`` magnitudes mark ops that take no magnitude argument."""
    return [
        (AutoContrast, None, None),
        (Brightness, 0.9, 0.05),
        (Color, 0.9, 0.05),
        (Contrast, 0.9, 0.05),
        (Equalize, None, None),
        (Identity, None, None),
        (Posterize, 4, 4),
        (Sharpness, 0.9, 0.05),
        (Solarize, 256, 0),
    ]
def main(argv=None):
    # Entry point: load the training split named by FLAGS.train_prefix and
    # hand it to train().  `argv` is accepted for app-runner compatibility
    # but unused.
    print('Loading training data..')
    train_data = load_data(FLAGS.train_prefix)
    print('Done loading training data..')
    train(train_data)
def main():
    """Train MetaPath2Vec embeddings on OGBN-MAG.

    Bug fix: the parsed command-line arguments were previously ignored --
    the model, loader and optimizer were built from hard-coded constants
    (128, 64, 7, 5, 5, 128, 0.01).  They now honour ``args``; the defaults
    are unchanged, so behaviour is identical when no flags are passed.
    """
    parser = argparse.ArgumentParser(description='OGBN-MAG (MetaPath2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=64)
    parser.add_argument('--context_size', type=int, default=7)
    parser.add_argument('--walks_per_node', type=int, default=5)
    parser.add_argument('--num_negative_samples', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--log_steps', type=int, default=100)
    args = parser.parse_args()
    device = (f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')
    device = torch.device(device)
    dataset = PygNodePropPredDataset('ogbn-mag')
    data = dataset[0]
    # Add the reverse relations so the metapath below can walk both ways.
    data.edge_index_dict[('institution', 'employs', 'author')] = transpose(data.edge_index_dict[('author', 'affiliated_with', 'institution')], None, m=data.num_nodes_dict['author'], n=data.num_nodes_dict['institution'])[0]
    data.edge_index_dict[('paper', 'written_by', 'author')] = transpose(data.edge_index_dict[('author', 'writes', 'paper')], None, m=data.num_nodes_dict['author'], n=data.num_nodes_dict['paper'])[0]
    data.edge_index_dict[('field_of_study', 'contains', 'paper')] = transpose(data.edge_index_dict[('paper', 'has_topic', 'field_of_study')], None, m=data.num_nodes_dict['paper'], n=data.num_nodes_dict['field_of_study'])[0]
    print(data)
    metapath = [('author', 'writes', 'paper'), ('paper', 'has_topic', 'field_of_study'), ('field_of_study', 'contains', 'paper'), ('paper', 'written_by', 'author'), ('author', 'affiliated_with', 'institution'), ('institution', 'employs', 'author'), ('author', 'writes', 'paper'), ('paper', 'cites', 'paper'), ('paper', 'written_by', 'author')]
    model = MetaPath2Vec(data.edge_index_dict, embedding_dim=args.embedding_dim, metapath=metapath, walk_length=args.walk_length, context_size=args.context_size, walks_per_node=args.walks_per_node, num_negative_samples=args.num_negative_samples, sparse=True).to(device)
    loader = model.loader(batch_size=args.batch_size, shuffle=True, num_workers=4)
    optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=args.lr)
    model.train()
    for epoch in range(1, (args.epochs + 1)):
        for (i, (pos_rw, neg_rw)) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()
            if (((i + 1) % args.log_steps) == 0):
                print(f'Epoch: {epoch:02d}, Step: {(i + 1):03d}/{len(loader)}, Loss: {loss:.4f}')
            if (((i + 1) % 1000) == 0):
                # Periodic checkpoint of the embedding table.
                save_embedding(model)
        save_embedding(model)
class DynamicBatchSampler():
    """Yield collated batches whose padded token count stays under a budget.

    Documents are sorted longest-first so each batch's cost is governed by
    its first (longest) document; the per-example cost is the document
    length rounded up to a multiple of the segment length.
    """
    def __init__(self, dataset, collator, max_tokens, max_segment_len, max_doc_len=None):
        self.max_tokens = max_tokens
        # Longest-first so per_example_batch_len (set from the first doc in
        # a batch) upper-bounds every later doc in that batch.
        self.dataset = dataset.sort('length', reverse=True)
        self.collator = collator
        self.max_segment_len = max_segment_len
        self.max_doc_len = max_doc_len
    def __iter__(self):
        batch = []
        per_example_batch_len = 0
        for example in self.dataset:
            # Drop documents that exceed the hard length cap, if any.
            if ((self.max_doc_len is not None) and (example['length'] > self.max_doc_len)):
                logger.info(f"Skipping doc with len {example['length']}. max_doc_len is {self.max_doc_len}")
                continue
            if (not batch):
                per_example_batch_len = self.calc_effective_per_example_batch_len(example['length'])
            elif (((len(batch) + 1) * per_example_batch_len) > self.max_tokens):
                # Adding this example would blow the token budget: flush the
                # current batch and start a new one with this example's cost.
                (yield self.collator(batch))
                batch = []
                per_example_batch_len = self.calc_effective_per_example_batch_len(example['length'])
            batch.append(example)
        # Flush the final partial batch.
        if (len(batch) > 0):
            (yield self.collator(batch))
    def calc_effective_per_example_batch_len(self, example_len):
        # Round up to a whole number of segments.
        return (math.ceil((example_len / self.max_segment_len)) * self.max_segment_len)
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BERT tokenizer backed by the Rust `tokenizers` library.

    NOTE(review): this uses the pre-1.0 `tokenizers` API
    (`WordPiece.from_files`, `with_pre_tokenizer`, `.new(...)` constructors),
    which no longer exists in current releases.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, max_length=None, pad_to_max_length=False, stride=0, truncation_strategy='longest_first', add_special_tokens=True, **kwargs):
        super(BertTokenizerFast, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        # Core WordPiece model built from the vocabulary file.
        self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
        self._update_special_tokens()
        # BERT-style pre-tokenization (basic tokenization, lower-casing,
        # CJK handling, protected tokens).
        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.BertPreTokenizer.new(do_basic_tokenize=do_basic_tokenize, do_lower_case=do_lower_case, tokenize_chinese_chars=tokenize_chinese_chars, never_split=(never_split if (never_split is not None) else [])))
        self._tokenizer.with_decoder(tk.decoders.WordPiece.new())
        if add_special_tokens:
            # Adds [CLS] ... [SEP] (and pair handling) as a post-processor.
            self._tokenizer.with_post_processor(tk.processors.BertProcessing.new((sep_token, self._tokenizer.token_to_id(sep_token)), (cls_token, self._tokenizer.token_to_id(cls_token))))
        if (max_length is not None):
            self._tokenizer.with_truncation(max_length, stride=stride, strategy=truncation_strategy)
        # Padding is enabled only up to max_length when requested.
        self._tokenizer.with_padding(max_length=(max_length if pad_to_max_length else None), direction=self.padding_side, pad_id=self.pad_token_id, pad_type_id=self.pad_token_type_id, pad_token=self.pad_token)
        self._decoder = tk.decoders.WordPiece.new()
class PixelShufflePack(nn.Module):
    """Sub-pixel upsampling: a conv expands channels by scale_factor**2,
    then pixel-shuffle folds that factor into spatial resolution."""

    def __init__(self, in_channels, out_channels, scale_factor, upsample_kernel):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # 'same'-style padding for odd kernel sizes.
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            ((self.out_channels * scale_factor) * scale_factor),
            self.upsample_kernel,
            padding=((self.upsample_kernel - 1) // 2))
        self.init_weights()

    def init_weights(self):
        default_init_weights(self, 1)

    def forward(self, x):
        expanded = self.upsample_conv(x)
        return F.pixel_shuffle(expanded, self.scale_factor)
def FareyMap(p):
    """Build the simplicial complex of the Farey map mod a prime ``p``.

    Points are normalized representatives of nonzero pairs over GF(p); the
    generators S (rotation) and T (shear) of SL(2, Z) act on them as
    permutations, and the triangle {(1,0), (0,1), (1,1)} is orbited under
    the generated permutation group to enumerate all triangles.
    """
    from sage.combinat.permutation import Permutation
    from sage.groups.perm_gps.permgroup import PermutationGroup
    from sage.matrix.constructor import matrix
    from sage.modules.free_module_element import vector
    from sage.rings.finite_rings.finite_field_constructor import GF
    from sage.libs.gap.libgap import libgap
    def normalise(pair):
        # Canonical representative of {v, -v}: prefer the half with the
        # smaller first (or, if zero, second) coordinate.
        (x, y) = pair
        if ((x != 0) and ((p - x) < x)):
            return (((- x) % p), ((- y) % p))
        elif ((x == 0) and ((p - y) < y)):
            return (0, ((- y) % p))
        return (x, y)
    # All normalized nonzero pairs, and a 1-based index for each (GAP
    # permutations are 1-based).
    points = [(x, y) for x in range(p) for y in range(p) if (((x, y) != (0, 0)) and (((x != 0) and ((p - x) >= x)) or ((x == 0) and ((p - y) >= y))))]
    convert = {pt: (i + 1) for (i, pt) in enumerate(points)}
    F = GF(p)
    S = matrix(F, 2, 2, [0, (- 1), 1, 0])
    T = matrix(F, 2, 2, [1, 1, 0, 1])
    # The matrices act on points; record the induced permutations.
    perm_S = Permutation([convert[normalise((S * vector(pt)))] for pt in points])
    perm_T = Permutation([convert[normalise((T * vector(pt)))] for pt in points])
    group = PermutationGroup([perm_S, perm_T])
    # Orbit of the base triangle under the group gives all triangles.
    triangle = [convert[normalise(pt)] for pt in [(1, 0), (0, 1), (1, 1)]]
    triangle = libgap.Set(triangle)
    triangles = libgap.Orbit(group, triangle, libgap.OnSets).sage()
    return SimplicialComplex(triangles)
def load_url_dist(url, model_dir=None):
    """Download a checkpoint from `url` in a distributed-safe way: only
    rank 0 downloads; other ranks wait on a barrier and then load from the
    (now warm) cache.

    NOTE(review): if rank > 0 while world_size == 1, `checkpoint` would be
    unbound at the return -- callers presumably never hit that combination.
    """
    (rank, world_size) = get_dist_info()
    # A LOCAL_RANK env var (torchrun/launch) overrides the group rank.
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if (rank == 0):
        checkpoint = model_zoo.load_url(url, model_dir=model_dir)
    if (world_size > 1):
        torch.distributed.barrier()
        if (rank > 0):
            # Cache hit: rank 0 already downloaded the file.
            checkpoint = model_zoo.load_url(url, model_dir=model_dir)
    return checkpoint
def get_compile_args(compiler):
    """Return C++ compile flags for building the extension.

    The `compiler` argument is currently unused but kept for interface
    compatibility.
    """
    flags = ['-std=c++11', '-O2', '-DNDEBUG']
    if sys.platform == 'darwin':
        # macOS: force libc++ and set a minimum deployment target.
        flags.extend(['-stdlib=libc++', '-mmacosx-version-min=10.7'])
    return flags
def prevent_unsatisfiable_schema(schema: Schema, new_type: str) -> None:
    """Drop keywords that don't apply to `new_type` from the schema and its
    `not` sub-schema, removing the `not` clause entirely once it is empty
    (an empty `not` would reject every instance)."""
    drop_not_type_specific_keywords(schema, new_type)
    if 'not' in schema:
        negated = schema['not']
        drop_not_type_specific_keywords(negated, new_type)
        if not negated:
            del schema['not']
def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):
    """Recreate each source path as a symlink of the same basename inside
    `target_dir`.  Source symlinks are re-pointed at their current target
    (no link chaining); other paths get a direct symlink."""
    for src_path in symlinks_to_create:
        trg_path = os.path.join(target_dir, os.path.basename(src_path))
        if not os.path.islink(src_path):
            print(f'Creating a symlink to {src_path}, so try not to delete it occasionally!')
            os.symlink(src_path, trg_path)
        else:
            # Copy the link itself: point at whatever it points at.
            os.symlink(os.readlink(src_path), trg_path)
class TestHessianUpdateStrategy(TestCase):
    """Tests for the quasi-Newton Hessian update strategies (BFGS, SR1).

    The long x_list fixtures are pre-recorded optimizer iterates on a 5-D
    Rosenbrock problem; gradients are recomputed from them here.
    """
    def test_hessian_initialization(self):
        # Both strategies must start from the identity after initialize().
        quasi_newton = (BFGS(), SR1())
        for qn in quasi_newton:
            qn.initialize(5, 'hess')
            B = qn.get_matrix()
            assert_array_equal(B, np.eye(5))
    def test_rosenbrock_with_no_exception(self):
        # Feed recorded curvature pairs; the Hessian and inverse-Hessian
        # approximations must stay mutual inverses and approach the true
        # Hessian at the final iterate.
        prob = Rosenbrock(n=5)
        x_list = [[0.097627, 0.4303787, 0.2055267, 0.0897663, (- 0.1526904)], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.], [0.2142498, (- 0.018848), 0.0503822, 0.0347033, 0.], [0.207168, (- 0.0185071), 0.0341337, (- 0.0139298), 0.0288175], [0.1533055, (- 0.0322935), 0.0280418, (- 0.0083592), 0.], [0.1382378, (- 0.0276671), 0.0266161, (- 0.007406), 0.0280161], [0.1651957, (- 0.0049124), 0.0269665, (- 0.0040025), 0.], [0.235493, 0.0443711, 0.0173959, 0.0041872, 0.], [0.4168118, 0.1433867, 0.0111714, 0.0126265, (- 0.)], [0.4681972, 0.2153273, 0.0225249, 0.0152704, (- 0.)], [0.6023068, 0.3346815, 0.0731108, 0.0186618, (- 0.)], [0.6415743, 0.3985468, 0.1324422, 0.021416, (- 0.)], [0.750369, 0.5447616, 0.2804541, 0.0539851, 0.0024223], [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.0045496], [0.8059782, 0.6586838, 0.4229577, 0.145299, 0.], [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.], [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.], [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.], [0.9071558, 0.8299587, 0.67714, 0.4402896, 0.], [0.9190793, 0.848648, 0.7163332, 0.508378, 0.], [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.], [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.], [0.9545744, 0.9099264, 0.8270244, 0.682222, 0.], [0.9688112, 0.935171, 0.8730961, 0.7546601, 0.], [0.9743227, 0.9491953, 0.900515, 0.8086497, 0.], [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.], [0.9886746, 0.977776, 0.955895, 0.9123417, 0.], [0.9899096, 0.9803828, 0.9615592, 0.92556, 0.], [0.996951, 0.9935441, 0.9864657, 0.9726775, 0.], [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.], [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.], [1.000264, 1.0005088, 1.0010594, 1.0021161, 1.], [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.], [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.], [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.], [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.], [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.], [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.]]
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [(np.array(x_list[(i + 1)]) - np.array(x_list[i])) for i in range((len(x_list) - 1))]
        delta_grad = [(grad_list[(i + 1)] - grad_list[i]) for i in range((len(grad_list) - 1))]
        # Sanity: every recorded pair must have positive curvature s.y > 0.
        for (s, y) in zip(delta_x, delta_grad):
            if (np.dot(s, y) <= 0):
                raise ArithmeticError()
        for quasi_newton in (BFGS(init_scale=1, min_curvature=0.0001), SR1(init_scale=1)):
            hess = deepcopy(quasi_newton)
            inv_hess = deepcopy(quasi_newton)
            hess.initialize(len(x_list[0]), 'hess')
            inv_hess.initialize(len(x_list[0]), 'inv_hess')
            for (s, y) in zip(delta_x, delta_grad):
                hess.update(s, y)
                inv_hess.update(s, y)
            B = hess.get_matrix()
            H = inv_hess.get_matrix()
            # 'hess' and 'inv_hess' modes must track exact inverses.
            assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
            B_true = prob.hess(x_list[len(delta_x)])
            # Relative error against the analytic Hessian stays under 10%.
            assert_array_less((norm((B - B_true)) / norm(B_true)), 0.1)
    def test_SR1_skip_update(self):
        # With a large min_denominator, the 18th pair must be skipped,
        # leaving the matrix unchanged.
        prob = Rosenbrock(n=5)
        x_list = [[0.097627, 0.4303787, 0.2055267, 0.0897663, (- 0.1526904)], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.], [0.2142498, (- 0.018848), 0.0503822, 0.0347033, 0.], [0.207168, (- 0.0185071), 0.0341337, (- 0.0139298), 0.0288175], [0.1533055, (- 0.0322935), 0.0280418, (- 0.0083592), 0.], [0.1382378, (- 0.0276671), 0.0266161, (- 0.007406), 0.0280161], [0.1651957, (- 0.0049124), 0.0269665, (- 0.0040025), 0.], [0.235493, 0.0443711, 0.0173959, 0.0041872, 0.], [0.4168118, 0.1433867, 0.0111714, 0.0126265, (- 0.)], [0.4681972, 0.2153273, 0.0225249, 0.0152704, (- 0.)], [0.6023068, 0.3346815, 0.0731108, 0.0186618, (- 0.)], [0.6415743, 0.3985468, 0.1324422, 0.021416, (- 0.)], [0.750369, 0.5447616, 0.2804541, 0.0539851, 0.0024223], [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.0045496], [0.8059782, 0.6586838, 0.4229577, 0.145299, 0.], [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.], [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.], [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.], [0.9071558, 0.8299587, 0.67714, 0.4402896, 0.]]
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [(np.array(x_list[(i + 1)]) - np.array(x_list[i])) for i in range((len(x_list) - 1))]
        delta_grad = [(grad_list[(i + 1)] - grad_list[i]) for i in range((len(grad_list) - 1))]
        hess = SR1(init_scale=1, min_denominator=0.01)
        hess.initialize(len(x_list[0]), 'hess')
        for i in range((len(delta_x) - 1)):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        B = np.copy(hess.get_matrix())
        s = delta_x[17]
        y = delta_grad[17]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        assert_array_equal(B, B_updated)
    def test_BFGS_skip_update(self):
        # With a huge min_curvature, the final pair must be skipped,
        # leaving the matrix unchanged.
        prob = Rosenbrock(n=5)
        x_list = [[0.097627, 0.4303787, 0.2055267, 0.0897663, (- 0.1526904)], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.], [0.2142498, (- 0.018848), 0.0503822, 0.0347033, 0.], [0.207168, (- 0.0185071), 0.0341337, (- 0.0139298), 0.0288175], [0.1533055, (- 0.0322935), 0.0280418, (- 0.0083592), 0.], [0.1382378, (- 0.0276671), 0.0266161, (- 0.007406), 0.0280161], [0.1651957, (- 0.0049124), 0.0269665, (- 0.0040025), 0.]]
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [(np.array(x_list[(i + 1)]) - np.array(x_list[i])) for i in range((len(x_list) - 1))]
        delta_grad = [(grad_list[(i + 1)] - grad_list[i]) for i in range((len(grad_list) - 1))]
        hess = BFGS(init_scale=1, min_curvature=10)
        hess.initialize(len(x_list[0]), 'hess')
        for i in range((len(delta_x) - 1)):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        B = np.copy(hess.get_matrix())
        s = delta_x[5]
        y = delta_grad[5]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        assert_array_equal(B, B_updated)
class ConvBlock(nn.Module):
    """Conv2d (stride 1) -> BatchNorm2d -> ReLU building block."""

    def __init__(self, inplanes, planes, kernel_size, padding=0, dilation=1, bias=False):
        super().__init__()
        self.conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
                              stride=1, padding=padding, dilation=dilation,
                              bias=bias)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class SparseFeat(namedtuple('SparseFeat', ['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'vocabulary_path', 'dtype', 'embeddings_initializer', 'embedding_name', 'group_name', 'trainable'])):
    """Immutable description of a sparse (categorical) feature column."""
    __slots__ = ()

    def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, vocabulary_path=None, dtype='int32', embeddings_initializer=None, embedding_name=None, group_name=DEFAULT_GROUP_NAME, trainable=True):
        # 'auto' sizes the embedding as 6 * vocab_size ** 0.25, rounded down.
        if embedding_dim == 'auto':
            embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
        if embeddings_initializer is None:
            embeddings_initializer = RandomNormal(mean=0.0, stddev=0.0001, seed=2020)
        if embedding_name is None:
            # Default the embedding table name to the feature name.
            embedding_name = name
        return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, vocabulary_path, dtype, embeddings_initializer, embedding_name, group_name, trainable)

    def __hash__(self):
        # Features are identified by name alone.
        return self.name.__hash__()
class ProteinResNetLayerNorm(nn.Module):
    """LayerNorm over the channel dimension of (batch, channels, length)
    activations, implemented by transposing to channels-last and back."""

    def __init__(self, config):
        super().__init__()
        self.norm = LayerNorm(config.hidden_size)

    def forward(self, x):
        channels_last = x.transpose(1, 2)
        normed = self.norm(channels_last)
        return normed.transpose(1, 2)
def select_crossover(toolbox, ga_params):
    """Register the crossover operator chosen by ga_params['mate_scheme']
    ('cluster' or 'dv') on the DEAP toolbox and return the toolbox."""
    scheme = ga_params['mate_scheme']
    if scheme == 'cluster':
        mate_func = Genotype.xover_cluster
    elif scheme == 'dv':
        mate_func = Genotype.xover_genes
    else:
        raise ValueError(f"{ga_params['mate_scheme']} is not a valid mutation scheme")
    toolbox.register('mate', mate_func)
    return toolbox
class ProphetNetForCausalLM(metaclass=DummyObject):
    # Auto-generated placeholder used when torch is not installed: any
    # attempt to instantiate it raises an informative ImportError via
    # `requires_backends`.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class NllbMoeConfig(PretrainedConfig):
    """Configuration for the NLLB-MoE encoder-decoder model: standard
    transformer hyper-parameters plus mixture-of-experts routing options."""
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    # Canonical HF attribute names mapped onto this config's field names.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype='float32', router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy='all', normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        # Router computation dtype must be a recognised float variant.
        if (router_dtype not in ['float32', 'float16', 'bfloat16']):
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
class RelationType():
    """Immutable descriptor of a relation type.

    Identity, equality and hashing are all based on ``identifier`` alone;
    ``index`` is the type's numeric position (also its ``int()`` value), and
    ``symmetric`` flags relations where (a, b) implies (b, a).
    """

    def __init__(self, identifier, index, short_name, verbose_name, symmetric=False):
        self._identifier = identifier
        self._index = index
        self._short_name = short_name
        self._verbose_name = verbose_name
        self._symmetric = symmetric

    def identifier(self):
        """Return the unique identifier (the equality/hash key)."""
        return self._identifier

    def index(self):
        """Return the numeric index of this relation type."""
        return self._index

    def short_name(self):
        """Return the short display name."""
        return self._short_name

    def verbose_name(self):
        """Return the long display name."""
        return self._verbose_name

    def symmetric(self):
        """Return True when the relation is symmetric."""
        return self._symmetric

    def __int__(self):
        return self._index

    def __eq__(self, other):
        if isinstance(other, RelationType):
            return (self._identifier == other._identifier)
        # BUG FIX: return NotImplemented (not False) so Python can try the
        # other operand's __eq__; the observable fallback result is unchanged.
        return NotImplemented

    def __hash__(self):
        # Must agree with __eq__: equal identifiers -> equal hashes.
        return hash(self._identifier)
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1, expansion 1)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First 3x3 conv may change channels and downsample via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second 3x3 conv keeps channels and spatial size.
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the identity path when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (possibly projected) shortcut.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += identity
        return self.relu(y)
def _remove_existing(trg, is_dir):
if os.path.exists(trg):
if is_dir:
shutil.rmtree(trg)
else:
os.remove(trg) |
def __handle_stream(db, stream, day):
    """Fold one measured stream record into the per-day statistics in *db*.

    Updates ``db['daily_counts'][day]`` (requests / timeouts / failures) and,
    for successful streams, appends round-trip-time and goodput samples.
    *stream* is a parsed measurement record (tgen/OnionPerf-style JSON --
    NOTE(review): schema inferred from the keys used here; confirm upstream).
    """
    # Skip streams that went over an onion-service transport; only direct
    # streams are counted.
    if ('transport_info' in stream):
        if ('remote' in stream['transport_info']):
            if ('onion' in stream['transport_info']['remote']):
                return
    # Actual vs. requested download size, in bytes.
    transfer_size_actual = int(stream['byte_info']['payload-bytes-recv'])
    transfer_size_target = int(stream['stream_info']['recvsize'])
    timeout_limit = __get_timeout_limit(transfer_size_target)
    # Timestamps (microseconds since stream start): command sent, first
    # response, last payload byte received.
    cmd = int(stream['time_info']['usecs-to-command'])
    rsp = int(stream['time_info']['usecs-to-response'])
    lb = int(stream['time_info']['usecs-to-last-byte-recv'])
    # Time-to-last-byte in seconds, measured from the command.
    ttlb = ((lb - cmd) / 1000000.0)
    db['daily_counts'].setdefault(day, {'requests': 0, 'timeouts': 0, 'failures': 0})
    db['daily_counts'][day]['requests'] += 1
    if stream['is_error']:
        # Explicit error: classify as timeout/stallout vs. any other failure.
        se = stream['stream_info']['error']
        if ((se.upper() == 'TIMEOUT') or (se.upper() == 'STALLOUT')):
            db['daily_counts'][day]['timeouts'] += 1
        else:
            db['daily_counts'][day]['failures'] += 1
        return
    # Completed but slower than the size-dependent limit: count as timeout.
    if ((lb > 0) and (cmd > 0) and (ttlb > timeout_limit)):
        db['daily_counts'][day]['timeouts'] += 1
        return
    # Short read: count as failure.
    if (transfer_size_actual < transfer_size_target):
        db['daily_counts'][day]['failures'] += 1
        return
    assert stream['is_success']
    # Circuit round-trip time: command -> first response.
    if ((rsp > 0) and (cmd > 0)):
        rtt = ((rsp - cmd) / 1000000.0)
        db['circuit_rtt'].append(rtt)
    if (('elapsed_seconds' in stream) and ('payload_bytes_recv' in stream['elapsed_seconds'])):
        # Preferred path: per-size elapsed timestamps are available.
        for (transfer_size, time_to_size) in stream['elapsed_seconds']['payload_bytes_recv'].items():
            if ((time_to_size > 0) and (cmd > 0)):
                transfer_time_secs = (time_to_size - (cmd / 1000000.0))
                __store_transfer_time(db, transfer_size, transfer_time_secs)
        # Goodput over the 500KiB-1MiB and 4MiB-5MiB windows (aka_int pairs a
        # literal with its spelled-out expression for readability).
        goodput = __goodput_bps(stream, aka_int(512000, (500 * (2 ** 10))), aka_int(1048576, (2 ** 20)))
        if (goodput is not None):
            db['client_goodput'].append(goodput)
        goodput = __goodput_bps(stream, aka_int(4194304, (4 * (2 ** 20))), aka_int(5242880, (5 * (2 ** 20))))
        if (goodput is not None):
            db['client_goodput_5MiB'].append(goodput)
    elif ((lb > 0) and (cmd > 0)):
        # Fallback: only the overall time-to-last-byte is known.
        __store_transfer_time(db, transfer_size_target, ttlb)
class EntityNode(ASTNode):
    """AST node for a named entity; its textual form is simply its value."""

    def __init__(self, val, data_type, fields):
        # The node kind is fixed to 'ENTITY'; everything else passes through.
        super().__init__('ENTITY', val, data_type, fields)

    def textual_form_core(self):
        """Render the entity as its own value."""
        return self.val
@register_model
def tf_mixnet_m(pretrained=False, **kwargs):
    """MixNet-M built with TensorFlow-compatible settings.

    Forces the TF BatchNorm epsilon and 'same' padding so weights ported from
    the TF reference implementation line up.

    NOTE(review): the original source had a dangling `_model` line here --
    almost certainly a truncated `@register_model` decorator (the standard
    timm registration idiom); restored accordingly.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT  # match TF BatchNorm epsilon
    kwargs['pad_type'] = 'same'           # TF-style 'SAME' padding
    model = _gen_mixnet_m('tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model
def test_convert_to_numpy_dataframe_and_series():
    """convert_to_numpy maps a DataFrame/Series pair to equal ndarrays."""
    frame = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
    target = pd.Series([7, 8, 9])
    (X_array, y_array) = convert_to_numpy(frame, target)
    # Both outputs must be ndarrays holding the original values.
    for converted, source in ((X_array, frame), (y_array, target)):
        assert isinstance(converted, np.ndarray)
        assert np.array_equal(converted, source.values)
def test_resnet31_ocr_backbone():
    """Invalid ctor args must assert; default model maps 32x160 -> (1,512,4,40)."""
    invalid_calls = [((2.5,), {}), ((3,), {'layers': 5}), ((3,), {'channels': 5})]
    for args, kwargs in invalid_calls:
        with pytest.raises(AssertionError):
            ResNet31OCR(*args, **kwargs)
    backbone = ResNet31OCR()
    backbone.init_weights()
    backbone.train()
    batch = torch.randn(1, 3, 32, 160)
    features = backbone(batch)
    assert features.shape == torch.Size([1, 512, 4, 40])
def load_model(helper: PredictHelper, config: PredictionConfig, path_to_model_weights: str) -> Any:
    """Build the constant-velocity-and-heading prediction baseline.

    The baseline has no learned weights, so ``path_to_model_weights`` is
    accepted only for interface compatibility and is ignored.
    """
    horizon_seconds = config.seconds
    return ConstantVelocityHeading(horizon_seconds, helper)
class Partition15(nn.Module):
    """Auto-generated pipeline-parallel partition #15 of a T5ForConditionalGeneration.

    Owns decoder blocks 21-23, the decoder's final layer norm and dropout, and
    the LM head, and finishes with the cross-entropy loss.  Both the layer
    wiring and ``forward`` were emitted by a model-partitioning tool as a
    flattened trace of the computation graph, so do not hand-edit the
    statement order -- temporaries are aggressively reused.
    """

    # Traced-module scopes assigned to this partition, in execution order.
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]',
        'T5ForConditionalGeneration/Linear[lm_head]']
    # No free-floating parameters or buffers were assigned to this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:15'):
        """Bind the pre-built layer/tensor objects for this partition.

        *layers* maps trace scopes to modules; *tensors* maps scopes to
        parameters/buffers.  Modules are registered as ``l_0``..``l_53`` in
        LAYER_SCOPES order and the whole partition is moved to *device*.
        """
        super().__init__()
        # Register each assigned module under a positional name l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register loose tensors as parameters (p_<i>) or buffers (b_<i>).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Flat input args are unflattened into 5 scalars/tensors (see forward).
        self.input_structure = [1, 1, 1, 1, 1]
        # Positional name -> original state-dict key, used by the custom
        # state_dict/load_state_dict helpers below.
        self.lookup = {'l_0': 'decoder.block.21.layer.0.layer_norm', 'l_1': 'decoder.block.21.layer.0.SelfAttention.q', 'l_2': 'decoder.block.21.layer.0.SelfAttention.k', 'l_3': 'decoder.block.21.layer.0.SelfAttention.v', 'l_4': 'decoder.block.21.layer.0.SelfAttention.o', 'l_5': 'decoder.block.21.layer.0.dropout', 'l_6': 'decoder.block.21.layer.1.layer_norm', 'l_7': 'decoder.block.21.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.21.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.21.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.21.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.21.layer.1.dropout', 'l_12': 'decoder.block.21.layer.2.layer_norm', 'l_13': 'decoder.block.21.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.21.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.21.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.21.layer.2.dropout', 'l_17': 'decoder.block.22.layer.0.layer_norm', 'l_18': 'decoder.block.22.layer.0.SelfAttention.q', 'l_19': 'decoder.block.22.layer.0.SelfAttention.k', 'l_20': 'decoder.block.22.layer.0.SelfAttention.v', 'l_21': 'decoder.block.22.layer.0.SelfAttention.o', 'l_22': 'decoder.block.22.layer.0.dropout', 'l_23': 'decoder.block.22.layer.1.layer_norm', 'l_24': 'decoder.block.22.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.22.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.22.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.22.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.22.layer.1.dropout', 'l_29': 'decoder.block.22.layer.2.layer_norm', 'l_30': 'decoder.block.22.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.22.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.22.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.22.layer.2.dropout', 'l_34': 'decoder.block.23.layer.0.layer_norm', 'l_35': 'decoder.block.23.layer.0.SelfAttention.q', 'l_36': 'decoder.block.23.layer.0.SelfAttention.k', 'l_37': 'decoder.block.23.layer.0.SelfAttention.v', 'l_38': 'decoder.block.23.layer.0.SelfAttention.o', 'l_39': 'decoder.block.23.layer.0.dropout', 'l_40': 'decoder.block.23.layer.1.layer_norm', 'l_41': 'decoder.block.23.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.23.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.23.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.23.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.23.layer.1.dropout', 'l_46': 'decoder.block.23.layer.2.layer_norm', 'l_47': 'decoder.block.23.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.23.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.23.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.23.layer.2.dropout', 'l_51': 'decoder.final_layer_norm', 'l_52': 'decoder.dropout', 'l_53': 'lm_head'}
        self.to(self.device)

    def forward(self, *args):
        """Run decoder blocks 21-23 + LM head and return the loss as a 1-tuple.

        Generated trace; temporaries (t_*) are reused across statements.
        NOTE(review): inferred from usage -- x1 is the running decoder hidden
        state, x0 feeds every EncDecAttention k/v projection (encoder output),
        and x2/x3 are additive attention score biases/masks; confirm against
        the partitioning tool's output for the upstream partitions.
        """
        (labels, x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Pre-compute all cross-attention key/value projections from x0
        # for blocks 21 (l_8/l_9), 22 (l_25/l_26) and 23 (l_42/l_43).
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        # --- block 21, layer 0: self-attention (pre-LN, 32 heads x 128 dims) ---
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        # --- block 21, layer 1: cross-attention ---
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        # --- block 21, layer 2: feed-forward (wi -> relu -> dropout -> wo) ---
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        # --- block 22, layer 0: self-attention ---
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        # --- block 22, layer 1: cross-attention ---
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        # --- block 22, layer 2: feed-forward ---
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        # --- block 23, layer 0: self-attention ---
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        # --- block 23, layer 1: cross-attention ---
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        # --- block 23, layer 2: feed-forward ---
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        # --- final layer norm, dropout, output scaling, LM head, loss ---
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_6 = self.l_51(t_6)
        t_12 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_52(t_6)
        # Fixed generated scaling constant (1/32) applied before the LM head.
        t_6 = (t_6 * 0.03125)
        t_6 = self.l_53(t_6)
        t_5 = t_6.size((- 1))
        t_5 = t_6.view((- 1), t_5)
        t_6 = labels.view((- 1))
        t_6 = torch.nn.functional.cross_entropy(t_5, t_6, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean')
        return (t_6,)

    # The following methods delegate to module-level helpers that translate
    # between positional names (l_*/p_*/b_*) and original state-dict keys.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def test_signed_scaling_float32():
    """Full-range int8 input must scale so the float32 maximum is exactly 1."""
    extremes = np.array([(- 128), 127], dtype=np.int8)
    converted = img_as_float32(extremes)
    assert_equal(converted.max(), 1)
class MyTestCase(unittest.TestCase):
    """CPU-vs-GPU consistency and reset-pool tests for the MountainCar env.

    Requires a CUDA device; the numba backend is exercised throughout.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Harness that steps the CPU and CUDA implementations in lockstep.
        self.testing_class = EnvironmentCPUvsGPU(cpu_env_class=ClassicControlMountainCarEnv, cuda_env_class=CUDAClassicControlMountainCarEnv, env_configs=env_configs, gpu_env_backend='numba', num_envs=5, num_episodes=2)

    def test_env_consistency(self):
        # Re-raise the harness's AssertionError as a test failure with context.
        try:
            self.testing_class.test_env_reset_and_step()
        except AssertionError:
            self.fail('ClassicControlMountainCarEnv environment consistency tests failed')

    def test_reset_pool(self):
        """Envs flagged done must resample their state from the reset pool;
        envs not flagged done must keep their initial reset state."""
        env_wrapper = EnvWrapper(env_obj=CUDAClassicControlMountainCarEnv(episode_length=100, reset_pool_size=3), num_envs=3, env_backend='numba')
        env_wrapper.reset_all_envs()
        # Seed a 3-entry pool of candidate reset states on the device.
        env_wrapper.env_resetter.init_reset_pool(env_wrapper.cuda_data_manager, seed=12345)
        self.assertTrue((env_wrapper.cuda_data_manager.reset_target_to_pool['state'] == 'state_reset_pool'))
        state_after_initial_reset = env_wrapper.cuda_data_manager.pull_data_from_device('state').squeeze()
        reset_pool = env_wrapper.cuda_data_manager.pull_data_from_device(env_wrapper.cuda_data_manager.get_reset_pool('state'))
        reset_pool_mean = reset_pool.mean(axis=0).squeeze()
        # The pool must actually vary, otherwise the mean test below is vacuous.
        self.assertTrue((reset_pool.std(axis=0).squeeze()[0] > 0.0001))
        # Mark envs 0 and 1 done; env 2 stays untouched.
        env_wrapper.cuda_data_manager.data_on_device_via_torch('_done_')[:] = torch.from_numpy(np.array([1, 1, 0])).cuda()
        state_values = {0: [], 1: [], 2: []}
        # Repeated resets sample from the pool; averaging over many draws
        # should approach the pool mean for the done envs.
        for _ in range(10000):
            env_wrapper.env_resetter.reset_when_done(env_wrapper.cuda_data_manager, mode='if_done', undo_done_after_reset=False)
            res = env_wrapper.cuda_data_manager.pull_data_from_device('state')
            state_values[0].append(res[0])
            state_values[1].append(res[1])
            state_values[2].append(res[2])
        state_values_env0_mean = np.stack(state_values[0]).mean(axis=0).squeeze()
        state_values_env1_mean = np.stack(state_values[1]).mean(axis=0).squeeze()
        state_values_env2_mean = np.stack(state_values[2]).mean(axis=0).squeeze()
        # Done envs: empirical mean within 10% of the pool mean.
        self.assertTrue((np.absolute((state_values_env0_mean[0] - reset_pool_mean[0])) < (0.1 * abs(reset_pool_mean[0]))))
        self.assertTrue((np.absolute((state_values_env1_mean[0] - reset_pool_mean[0])) < (0.1 * abs(reset_pool_mean[0]))))
        # Not-done env: state unchanged (0.1% tolerance).
        self.assertTrue((np.absolute((state_values_env2_mean[0] - state_after_initial_reset[0][0])) < (0.001 * abs(state_after_initial_reset[0][0]))))
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):
    """Scalar line search satisfying the strong Wolfe conditions via DCSRCH.

    ``phi``/``derphi`` are the objective and its derivative along the search
    direction; ``phi0``/``derphi0`` are their values at 0 (computed if None).
    ``old_phi0`` (the previous step's phi(0)) seeds the initial step guess.
    Returns ``(stp, phi1, phi0)``: the accepted step, phi at that step, and
    phi at 0.
    """
    _check_c1_c2(c1, c2)
    phi0 = phi(0.0) if phi0 is None else phi0
    derphi0 = derphi(0.0) if derphi0 is None else derphi0
    # Initial step from the previous function decrease; fall back to a unit
    # step when unavailable or when the formula gives a negative trial.
    alpha1 = 1.0
    if (old_phi0 is not None) and (derphi0 != 0):
        trial = min(1.0, ((1.01 * 2) * (phi0 - old_phi0)) / derphi0)
        if trial >= 0:
            alpha1 = trial
    maxiter = 100
    searcher = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
    (stp, phi1, phi0, task) = searcher(alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter)
    return (stp, phi1, phi0)
def main(cfg):
    """Train or evaluate the re-identification model depending on cfg.train.

    cfg.train == 1 trains from scratch; otherwise the best checkpoint in
    ``cfg.logs_dir`` is loaded (classifier weights excluded) and evaluated.
    """
    (dataset, train_loader, test_loader, num_query, num_classes) = make_data_loader(cfg)
    model = build_model(num_classes, 'base', pretrain_choice=True)
    # Use DataParallel + GPU when available, plain CPU model otherwise.
    model = (torch.nn.DataParallel(model).cuda() if torch.cuda.is_available() else model)
    loss_func = make_loss()
    optimizer = make_optimizer(cfg, model)
    # Step the LR down 10x at epochs 40 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 80], gamma=0.1)
    if (cfg.train == 1):
        start_epoch = 0
        acc_best = 0.0
        do_train(cfg, model, train_loader, test_loader, optimizer, scheduler, loss_func, num_query, start_epoch, acc_best)
    else:
        # Evaluation: restore best weights, skipping the classifier head so a
        # different number of identities at train time does not break loading.
        last_model_wts = torch.load(os.path.join(cfg.logs_dir, 'checkpoint_best.pth'))
        model_dict = model.state_dict()
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        (mAP, cmc1, cmc5, cmc10, cmc20) = inference(model, test_loader, num_query)
        # Timestamped one-line report of the retrieval metrics.
        start_time = datetime.datetime.now()
        start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
        line = '{} - Test: cmc1: {:.1%}, cmc5: {:.1%}, cmc10: {:.1%}, cmc20: {:.1%}, mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP)
        print(line)
def get_results(filename):
    """Return the top-3 topics of *filename* with their scores and responses.

    Each element is ``{'topic': ..., 'score': ..., 'responses': ...}``.
    """
    return [
        {'topic': topic, 'score': score, 'responses': generate_all_responses(topic)}
        for (topic, score) in get_top3_topics(filename)
    ]
def register_Ns3DeviceNameTag_methods(root_module, cls):
    """Register the ns3::DeviceNameTag bindings on *cls* (pybindgen-generated).

    Declares the copy/default constructors and the Tag interface
    (Serialize/Deserialize/Print/GetTypeId etc.) plus the device-name
    accessors.  Generated code: keep the declaration order as emitted.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::DeviceNameTag const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Tag virtual interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetDeviceName', 'std::string', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # Device-name setter.
    cls.add_method('SetDeviceName', 'void', [param('std::string', 'n')])
    return
def down_resblock(x_init, channels, to_down=True, use_bias=True, sn=False, scope='resblock'):
    """Pre-activation residual block with optional downsampling (TF1).

    Two lrelu+conv stages under 'res1'/'res2'; a 1x1 'shortcut' conv is added
    only when downsampling or when the channel count changes.  Variable-scope
    names and their creation order are preserved for checkpoint compatibility.
    """
    with tf.variable_scope(scope):
        in_channels = x_init.shape.as_list()[(- 1)]
        out = x_init
        # Main path: two pre-activation reflect-padded 3x3 convolutions.
        for sub_scope in ('res1', 'res2'):
            with tf.variable_scope(sub_scope):
                out = lrelu(out, 0.2)
                out = conv(out, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn)
        if to_down:
            out = down_sample(out)
        # Shortcut path: project only when shapes would otherwise mismatch.
        shortcut = x_init
        if to_down or (in_channels != channels):
            with tf.variable_scope('shortcut'):
                shortcut = conv(x_init, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn)
                if to_down:
                    shortcut = down_sample(shortcut)
        return (out + shortcut)
def read_in_run_from_pickle(bm25_file):
    """Load a pickled BM25 run and attach 1-based ranks to each document.

    The pickle must hold ``{query_id: {doc_id: score}}``; the result is
    ``{query_id: {doc_id: [rank, float(score)]}}`` with rank following the
    iteration order of the inner dict (i.e. the order it was pickled in).

    WARNING: ``pickle.load`` executes arbitrary code during unpickling --
    only call this on trusted run files.
    """
    with open(bm25_file, 'rb') as f:
        raw_run = pickle.load(f)
    # enumerate(start=1) replaces the previous manual rank counter, and the
    # nested comprehension replaces mutation via .get(key).update(...).
    return {
        query_id: {
            doc_id: [rank, float(score)]
            for rank, (doc_id, score) in enumerate(doc_scores.items(), start=1)
        }
        for query_id, doc_scores in raw_run.items()
    }
def write_citations(app: Sphinx, citations):
    """Pickle *citations* into the app's citation directory atomically.

    Uses sage's ``atomic_write`` so readers never observe a half-written
    pickle file.
    """
    # Local import: sage is only needed when citations are actually written.
    from sage.misc.temporary_file import atomic_write
    outdir = citation_dir(app)
    with atomic_write((outdir / CITE_FILENAME), binary=True) as f:
        pickle.dump(citations, f)
    logger.info(('Saved pickle file: %s' % CITE_FILENAME))
def seed_everything(seed):
    """Seed every RNG in use (hashing, python, numpy, torch CPU/CUDA).

    Also forces deterministic cuDNN kernels and disables its autotuner,
    trading some speed for run-to-run reproducibility.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # no-op without CUDA
    # Reproducible conv algorithms; benchmark=False stops cuDNN autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def _named_modules_with_dup(model: nn.Module, prefix: str='') -> Iterable[Tuple[(str, nn.Module)]]:
(yield (prefix, model))
for (name, module) in model._modules.items():
if (module is None):
continue
submodule_prefix = ((prefix + ('.' if prefix else '')) + name)
(yield from _named_modules_with_dup(module, submodule_prefix)) |
def modularity(mod_matrix: np.ndarray, communities: list) -> float:
    """Sum the modularity-matrix entries over all intra-community pairs.

    ``communities`` is a list of node-index collections; nodes in the same
    community contribute ``mod_matrix[i, j]`` once per unordered pair.
    """
    same_community = np.zeros_like(mod_matrix)
    for members in communities:
        for (u, v) in combinations(members, 2):
            # Symmetric indicator: 1 where both nodes share a community.
            same_community[u, v] = same_community[v, u] = 1.0
    # Lower triangle only, so each unordered pair is counted exactly once
    # (the diagonal of the mask is zero, so self-pairs never contribute).
    return np.tril(mod_matrix * same_community, 0).sum()
class TestSamplingPolicy(unittest.TestCase):
    """Sanity checks for the sequence-sampling policies."""

    def test_random_policy(self):
        # A uniform policy over 2 actions and length-2 sequences should emit
        # all four possible sequences within 100 draws, and nothing else.
        policy = RandomPolicy(2, sequence_length=2)
        n_samples = 100
        draws = [policy.generate() for _ in range(n_samples)]
        counts = [draws.count([a, b]) for a in (0, 1) for b in (0, 1)]
        for count in counts:
            self.assertGreater(count, 0)
        self.assertEqual(sum(counts), n_samples)

    def test_mean_field_policy(self):
        # With p=[1, 0] the mean-field policy is deterministic: always action 0.
        policy = MeanFieldPolicy(2, sequence_length=2, p=[1, 0])
        n_samples = 100
        draws = [policy.generate() for _ in range(n_samples)]
        self.assertEqual(draws.count([0, 0]), n_samples)
def test_slice_args(cl):
    """Slice a stream into three single-frame ranges, run TestSliceArgs with a
    per-slice argument list, unslice, and materialize the output.

    Loading the output stream is the assertion here: it raises if the run
    produced no (or malformed) rows.
    """
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    slice_frame = cl.streams.Slice(frame, [cl.partitioner.ranges([[0, 1], [1, 2], [2, 3]])])
    test = cl.ops.TestSliceArgs(frame=slice_frame, arg=[SliceList([i for i in range(3)])])
    unsliced_frame = cl.streams.Unslice(test)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    # BUG FIX: removed the dead local `num_rows = 0`, which was assigned but
    # never used; the load below still fully consumes the output stream.
    list(output.load())
def _load_orion(pipeline, hyperparameters=None):
    """Coerce *pipeline* into an Orion instance.

    Accepts None (fresh default Orion), an existing Orion (returned as-is),
    a pipeline spec (built with the given hyperparameters), or a path to a
    pickled Orion (loaded from disk). Raises ValueError otherwise.
    """
    if pipeline is None:
        return Orion()
    if isinstance(pipeline, Orion):
        return pipeline
    hyperparameters = _load_dict(hyperparameters)
    try:
        # First interpretation: a pipeline specification.
        return Orion(pipeline, hyperparameters)
    except ValueError:
        # Second interpretation: a path to a previously saved Orion.
        try:
            return Orion.load(pipeline)
        except (FileNotFoundError, UnpicklingError):
            raise ValueError('Invalid pipeline: {}'.format(pipeline))
class Unit3Dpy(torch.nn.Module):
    """3-D conv unit: Conv3d -> optional BatchNorm3d -> optional ReLU.

    Supports TensorFlow-style 'SAME' padding: the pad amounts are derived from
    kernel and stride (via get_padding_shape); symmetric pads are passed to
    Conv3d directly, asymmetric pads use an explicit ConstantPad3d first.
    """

    def __init__(self, in_channels, out_channels, kernel_size=(1, 1, 1), stride=(1, 1, 1), activation='relu', padding='SAME', use_bias=False, use_bn=True):
        super(Unit3Dpy, self).__init__()
        self.padding = padding
        self.activation = activation
        self.use_bn = use_bn
        if (padding == 'SAME'):
            # TF-style padding may be asymmetric, which Conv3d's symmetric
            # `padding` argument cannot express; simplify_padding reports
            # whether a single symmetric pad size suffices.
            padding_shape = get_padding_shape(kernel_size, stride)
            (simplify_pad, pad_size) = simplify_padding(padding_shape)
            self.simplify_pad = simplify_pad
        elif (padding == 'VALID'):
            padding_shape = 0
        else:
            raise ValueError('padding should be in [VALID|SAME] but got {}'.format(padding))
        if (padding == 'SAME'):
            if (not simplify_pad):
                # Asymmetric case: pad explicitly, then convolve unpadded.
                self.pad = torch.nn.ConstantPad3d(padding_shape, 0)
                self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, bias=use_bias)
            else:
                # Symmetric case: let Conv3d handle the padding.
                self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad_size, bias=use_bias)
        elif (padding == 'VALID'):
            self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, padding=padding_shape, stride=stride, bias=use_bias)
        else:
            # NOTE(review): unreachable — the same validation already raised above.
            raise ValueError('padding should be in [VALID|SAME] but got {}'.format(padding))
        if self.use_bn:
            self.batch3d = torch.nn.BatchNorm3d(out_channels)
        if (activation == 'relu'):
            # Replace the activation *string* with the actual function.
            self.activation = torch.nn.functional.relu

    def forward(self, inp):
        # NOTE(review): ReLU is applied whenever self.activation is not None.
        # For any activation string other than 'relu', the attribute is still
        # the (truthy, non-None) string, so ReLU is applied anyway — confirm
        # this is intended before adding other activations.
        if ((self.padding == 'SAME') and (self.simplify_pad is False)):
            inp = self.pad(inp)
        out = self.conv3d(inp)
        if self.use_bn:
            out = self.batch3d(out)
        if (self.activation is not None):
            out = torch.nn.functional.relu(out)
        return out
def should_be_zero(A):
    """Evaluate an algebraic identity in A and alpha0(A) that is expected to
    vanish; a nonzero result indicates alpha0 is not the intended root."""
    a0 = alpha0(A)
    leading = 4 / (9 * A ** 2)
    bracket = (a0 ** 2 + a0 / (2 * A) + 1 / (16 * A ** 2)) - 1 / (4 * A)
    return leading - 8 * a0 * bracket
class IRGAN(RecMixin, BaseRecommenderModel):
    """IRGAN adversarial recommender: a generator and a discriminator trained
    in a minimax game; either component serves as the final recommender,
    selected via the `predict_model` hyper-parameter.
    """
    # NOTE(review): bare name expression — presumably a decorator (e.g.
    # @_charger on __init__) lost during extraction; confirm upstream.
    _charger
    def __init__(self, data, config, params, *args, **kwargs):
        self._random = np.random
        # (attribute, config name, shortcut, default, reader, writer) tuples
        # consumed by autoset_params() to populate self._* attributes.
        self._params_list = [('_predict_model', 'predict_model', 'predict_model', 'generator', None, None), ('_factors', 'factors', 'factors', 10, None, None), ('_learning_rate', 'lr', 'lr', 0.001, None, None), ('_l_w', 'l_w', 'l_w', 0.1, None, None), ('_l_b', 'l_b', 'l_b', 0.001, None, None), ('_l_gan', 'l_gan', 'l_gan', 0.001, None, None), ('_g_epochs', 'g_epochs', 'g_epochs', 5, None, None), ('_d_epochs', 'd_epochs', 'd_epochs', 1, None, None), ('_g_pretrain_epochs', 'g_pretrain_epochs', 'g_pt_ep', 10, None, None), ('_d_pretrain_epochs', 'd_pretrain_epochs', 'd_pt_ep', 10, None, None), ('_sample_lambda', 'sample_lambda', 'sample_lambda', 0.2, None, None)]
        self.autoset_params()
        # batch_size < 1 means "full batch": use all training transactions.
        if (self._batch_size < 1):
            self._batch_size = self._data.transactions
        if (self._predict_model not in ['generator', 'discriminator']):
            raise Exception(f'It is necessary to specify the model component to use as recommender (generator/discriminator)')
        self._ratings = self._data.train_dict
        self._sampler = pws.Sampler(self._data.i_train_dict)
        self._model = IRGAN_model(self._predict_model, self._data, self._batch_size, self._factors, self._learning_rate, self._l_w, self._l_b, self._l_gan, self._num_users, self._num_items, self._g_pretrain_epochs, self._d_pretrain_epochs, self._g_epochs, self._d_epochs, self._sample_lambda, self._seed)
    def name(self):
        # NOTE(review): sibling recommenders typically expose this as a
        # @property — confirm whether the decorator was dropped here.
        return (('IRGAN' + f'_{self.get_base_params_shortcut()}') + f'_{self.get_params_shortcut()}')
    def train(self):
        """Run adversarial training for the configured number of epochs."""
        if self._restore:
            return self.restore_weights()
        for it in self.iterate(self._epochs):
            (dis_loss, gen_loss) = (0, 0)
            with tqdm(total=1, disable=(not self._verbose)) as t:
                # One adversarial round (discriminator + generator updates).
                (update_dis_loss, update_gen_loss) = self._model.train_step()
                dis_loss += update_dis_loss
                gen_loss += update_gen_loss
                t.set_postfix({'Dis loss': f'{dis_loss.numpy():.5f}', 'Gen loss': f'{gen_loss.numpy():.5f}'})
                t.update()
            # NOTE(review): dis_loss is reset every epoch, yet it is divided by
            # (it + 1) here — looks like a running-average pattern applied to a
            # per-epoch value; kept as-is, confirm intent.
            self.evaluate(it, (dis_loss.numpy() / (it + 1)))
    def get_recommendations(self, k: int=100):
        """Compute top-k recommendations for all users, batched over users.

        Returns a (validation, test) pair of {user: recommendations} dicts.
        """
        predictions_top_k_test = {}
        predictions_top_k_val = {}
        for (index, offset) in enumerate(range(0, self._num_users, self._batch_size)):
            offset_stop = min((offset + self._batch_size), self._num_users)
            predictions = self._model.predict(offset, offset_stop)
            (recs_val, recs_test) = self.process_protocol(k, predictions, offset, offset_stop)
            predictions_top_k_val.update(recs_val)
            predictions_top_k_test.update(recs_test)
        return (predictions_top_k_val, predictions_top_k_test)
def test_paramset_unconstrained():
    """An unconstrained paramset should echo back its init/bounds/fixed
    settings and report itself as not constrained."""
    inits = [0, 1, 2, 3, 4]
    bounds = [((- 1), 1), ((- 2), 2), ((- 3), 3), ((- 4), 4)]
    pset = paramsets.unconstrained(name='foo', is_scalar=False, n_parameters=5, inits=inits, bounds=bounds, fixed=False)
    assert pset.suggested_init == inits
    assert pset.suggested_bounds == bounds
    assert pset.suggested_fixed == [False] * 5
    assert not pset.constrained
def bn_flops_counter_hook(module, input, output):
    """Forward hook: accumulate batch-norm FLOPs onto ``module.__flops__``.

    Counts one op per input element for the normalization; affine layers
    (learned scale + shift) double the count.
    """
    elementwise_ops = np.prod(input[0].shape)
    if module.affine:
        elementwise_ops *= 2
    module.__flops__ += int(elementwise_ops)
class VeRi3kParsingDataset(Dataset):
    """Vehicle-parsing segmentation dataset: RGB image + one-hot part masks.

    Samples are discovered from the mask directory; the last 500 entries are
    held out as the validation split.
    """
    CLASSES = ['background', 'front', 'back', 'roof', 'side']

    def __init__(self, image_path, masks_path, augmentation=None, preprocessing=None, subset='trainval'):
        self.metas = [os.path.splitext(fname)[0] for fname in os.listdir(masks_path)]
        self.masks_path = Path(masks_path)
        self.image_path = Path(image_path)
        # 'trainval' keeps everything; 'train' drops the last 500 samples,
        # any other subset keeps only those last 500.
        if (subset == 'train'):
            self.metas = self.metas[:(- 500)]
        elif (subset != 'trainval'):
            self.metas = self.metas[(- 500):]
        self.class_values = [self.CLASSES.index(cls) for cls in self.CLASSES]
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, item):
        name = self.metas[item]
        image = read_rgb_image(f'{(self.image_path / name)}.jpg', format='ndarray')
        raw_mask = cv2.imread(f'{(self.masks_path / name)}.png', cv2.IMREAD_UNCHANGED)
        # One binary channel per class, stacked along the last axis.
        channels = [(raw_mask == v) for v in self.class_values]
        mask = np.stack(channels, axis=(- 1)).astype('float32')
        if self.augmentation:
            augmented = self.augmentation(image=image, mask=mask)
            image, mask = augmented['image'], augmented['mask']
        if self.preprocessing:
            prepared = self.preprocessing(image=image, mask=mask)
            image, mask = prepared['image'], prepared['mask']
        return (image, mask)

    def __len__(self):
        return len(self.metas)
def test_double_fault_ones_zeros(example_diversity_ones_zeros):
    """All-ones vs all-zeros predictors never err on the same sample, so
    their double-fault diversity must be exactly zero."""
    labels, pred_ones, pred_zeros = example_diversity_ones_zeros
    result = double_fault(labels, pred_ones, pred_zeros)
    assert result == 0.0
def state_dict_from_pretrained(model_name, device=None, dtype=None):
    """Fetch a pretrained checkpoint (cached download) and return its state dict.

    Handles both single-file and sharded checkpoints. When a non-fp32 ``dtype``
    is requested, tensors are first loaded to CPU so the cast does not
    transiently hold fp32 copies on ``device``.

    Parameters
    ----------
    model_name : str
        Hub model identifier passed to ``cached_file``.
    device : optional
        Final device for the returned tensors (None leaves them where loaded).
    dtype : torch.dtype, optional
        Target dtype; tensors are cast after loading.

    Raises
    ------
    EnvironmentError
        If neither a plain nor a sharded checkpoint can be resolved.
    """
    # Load to CPU first whenever a dtype cast will follow.
    mapped_device = ('cpu' if (dtype not in [torch.float32, None]) else device)
    is_sharded = False
    resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False)
    if (resolved_archive_file is None):
        # No single-file weights: fall back to a sharded checkpoint index.
        resolved_archive_file = cached_file(model_name, WEIGHTS_INDEX_NAME, _raise_exceptions_for_missing_entries=False)
        if (resolved_archive_file is not None):
            is_sharded = True
    if (resolved_archive_file is None):
        raise EnvironmentError(f'Model name {model_name} was not found.')
    if is_sharded:
        (resolved_archive_file, sharded_metadata) = get_checkpoint_shard_files(model_name, resolved_archive_file)
        state_dict = {}
        for sharded_file in resolved_archive_file:
            state_dict.update(torch.load(sharded_file, map_location=mapped_device))
    else:
        # BUGFIX: previously loaded with map_location=device, bypassing the
        # CPU-first mapping that the sharded branch applies for dtype casts.
        state_dict = torch.load(cached_file(model_name, WEIGHTS_NAME), map_location=mapped_device)
    if (dtype is not None):
        state_dict = {k: v.to(dtype=dtype) for (k, v) in state_dict.items()}
    state_dict = {k: v.to(device=device) for (k, v) in state_dict.items()}
    return state_dict
def draw_graph(ofolder, idx2lb, g_label, idx, prob):
    """Render the affinity graph stored at ``ofolder/<idx>.npz`` as a PNG.

    Vertices are colored by whether their label matches the label of the
    anchor node ``idx`` (red = same, lightblue = different); edge width
    encodes affinity (1 - distance). The image is written under
    ``graph/<basename(ofolder)>/`` with the graph label, anchor index,
    probability, and edge-weight statistics encoded in the filename.
    """
    fpath = os.path.join(ofolder, '{}.npz'.format(idx))
    ograph_folder = ('graph/' + ofolder.split('/')[(- 1)])
    if (not os.path.exists(ograph_folder)):
        os.makedirs(ograph_folder)
    # True (1) -> same label as anchor -> red; False (0) -> lightblue.
    color_dict = {1: 'red', 0: 'lightblue'}
    (vertices, raw_edges) = load_data(fpath)
    vertices = list(vertices)
    lb = idx2lb[idx]
    # Remap absolute vertex ids to contiguous 0-based positions for Graph().
    abs2rel = {}
    for (i, v) in enumerate(vertices):
        abs2rel[v] = i
    edges = [(abs2rel[p1], abs2rel[p2]) for (p1, p2, _) in raw_edges]
    g = Graph(vertex_attrs={'label': vertices}, edges=edges, directed=False)
    # Edge weight = similarity (1 - distance); raw_edges are (p1, p2, dist).
    edge_weights = [(1 - d) for (_, _, d) in raw_edges]
    if (len(edge_weights) > 0):
        w_mean = (sum(edge_weights) / len(edge_weights))
        w_max = max(edge_weights)
        w_min = min(edge_weights)
    else:
        # Degenerate edgeless graph: use placeholder stats for the filename.
        (w_mean, w_max, w_min) = (1, 1, 1)
    visual_style = {}
    visual_style['vertex_color'] = [color_dict[(lb == idx2lb[v])] for v in vertices]
    visual_style['edge_width'] = [(5 * w) for w in edge_weights]
    plot(g, **visual_style, target='{}/{}_{}_{:.2f}_{:.2f}_{:.2f}_{:.2f}.png'.format(ograph_folder, g_label, idx, prob, w_mean, w_min, w_max))
def gather(x, indices, axis, batch_dims):
    """Batched ``np.take``: gather *indices* along *axis* of *x*, treating the
    leading *batch_dims* axes of both arrays as matching batch dimensions.

    Equivalent to TensorFlow's ``tf.gather(..., batch_dims=...)`` semantics.
    """
    batch_shape = x.shape[:batch_dims]
    n_batches = np.prod(batch_shape).astype(int)
    # Collapse all batch axes into a single leading axis.
    flat_x = x.reshape(((n_batches,) + x.shape[batch_dims:]))
    flat_idx = indices.reshape(((n_batches,) + indices.shape[batch_dims:]))
    gathered = [np.take(flat_x[b], flat_idx[b], axis=(axis - batch_dims)) for b in range(n_batches)]
    if (n_batches == 1):
        return gathered[0]
    stacked = np.stack(gathered)
    # Re-expand the collapsed batch axes.
    return stacked.reshape((batch_shape + stacked.shape[1:]))
def get_type_str(*args) -> str:
    """Return a comma-separated, human-readable type description of *args*.

    Scalars (int/float/bytes/bytearray) use their class name, ndarrays include
    their dtype, and lists render as ``List[...]`` over the (unordered) set of
    element type strings. Anything else falls back to ``str(type(...))``.
    """
    descriptions = []
    for value in args:
        if isinstance(value, (int, float, bytes, bytearray)):
            descriptions.append(str(value.__class__.__name__))
        elif isinstance(value, np.ndarray):
            descriptions.append(f'numpy.ndarray[{value.dtype}]')
        elif isinstance(value, list):
            # De-duplicate element types; order is unspecified (set semantics).
            element_types = ', '.join(set(get_type_str(element) for element in value))
            descriptions.append(f'List[{element_types}]')
        else:
            descriptions.append(str(type(value)))
    return ', '.join(descriptions)
# NOTE(review): bare call — presumably a stripped decorator (e.g.
# `@test_utils.test()` from Taichi's test suite); confirm against upstream.
_utils.test()
def test_struct_for_huge_offsets():
    """A dense 4-D field placed with large per-axis index offsets must be
    addressable in the offset coordinate system for both writes and reads."""
    a = ti.field(dtype=ti.i32)
    offset = (1024, 2048, 2100, 2200)
    ti.root.dense(ti.ijkl, 4).place(a, offset=offset)
    def test():
        # NOTE(review): presumably a @ti.kernel decorator was stripped here too.
        # Fill every cell with a value that encodes its (i, j, k, l) index.
        for (i, j, k, l) in a:
            a[(i, j, k, l)] = (((i + (j * 10)) + (k * 100)) + (l * 1000))
    test()
    # Read back every cell in the offset index ranges and verify the encoding.
    for i in range(offset[0], (offset[0] + 4)):
        for j in range(offset[1], (offset[1] + 4)):
            for k in range(offset[2], (offset[2] + 4)):
                for l in range(offset[3], (offset[3] + 4)):
                    assert (a[(i, j, k, l)] == (((i + (j * 10)) + (k * 100)) + (l * 1000)))
class FDEM_CrossCheck(unittest.TestCase):
    """Cross-check FDEM fields computed with the B- vs H-formulation.

    Each test verifies one field component — current density (j), electric
    field (e), magnetic flux density (b), or magnetic field (h), per spatial
    component (x/y/z) and real/imaginary part — agrees between the two
    formulations within TOLEJHB.

    BUGFIX: the original class contained the entire ``if testBH:`` method
    block twice, verbatim; the second copy merely re-bound identical methods,
    so it has been removed (behavior is unchanged).
    """
    if testBH:
        # --- current density ---
        def test_BH_CrossCheck_jxr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'x', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_jyr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'y', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_jzr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'z', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_jxi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'x', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_jyi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'y', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_jzi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['CurrentDensity', 'z', 'i'], verbose=verbose, TOL=TOLEJHB))
        # --- electric field ---
        def test_BH_CrossCheck_exr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'x', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_eyr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'y', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_ezr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'z', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_exi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'x', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_eyi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'y', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_ezi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['ElectricField', 'z', 'i'], verbose=verbose, TOL=TOLEJHB))
        # --- magnetic flux density ---
        def test_BH_CrossCheck_bxr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'x', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_byr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'y', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_bzr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'z', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_bxi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'x', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_byi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'y', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_bzi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticFluxDensity', 'z', 'i'], verbose=verbose, TOL=TOLEJHB))
        # --- magnetic field ---
        def test_BH_CrossCheck_hxr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'x', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_hyr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'y', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_hzr(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'z', 'r'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_hxi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'x', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_hyi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'y', 'i'], verbose=verbose, TOL=TOLEJHB))
        def test_BH_CrossCheck_hzi(self):
            self.assertTrue(crossCheckTest(SrcList, 'b', 'h', ['MagneticField', 'z', 'i'], verbose=verbose, TOL=TOLEJHB))
class HoeffdingAdaptiveTreeClassifier(HoeffdingTreeClassifier):
    """Hoeffding Adaptive Tree classifier.

    Extends the Hoeffding tree by growing alternate subtrees when concept
    drift is detected and swapping them in when they outperform the
    originals (counters below track grown/pruned/switched alternates).

    BUGFIX: ``max_byte_size`` had no default value in the signature (a syntax
    error); restored 33554432 (32 MB), the default used by the parent
    HoeffdingTreeClassifier in scikit-multiflow.
    """
    # Window width used by the drift-detection error estimators.
    _ERROR_WIDTH_THRESHOLD = 300

    def __init__(self, max_byte_size=33554432, memory_estimate_period=1000000, grace_period=200, split_criterion='info_gain', split_confidence=1e-07, tie_threshold=0.05, binary_split=False, stop_mem_management=False, remove_poor_atts=False, no_preprune=False, leaf_prediction='nba', nb_threshold=0, nominal_attributes=None, bootstrap_sampling=True, random_state=None):
        super().__init__(max_byte_size=max_byte_size, memory_estimate_period=memory_estimate_period, grace_period=grace_period, split_criterion=split_criterion, split_confidence=split_confidence, tie_threshold=tie_threshold, binary_split=binary_split, stop_mem_management=stop_mem_management, remove_poor_atts=remove_poor_atts, no_preprune=no_preprune, leaf_prediction=leaf_prediction, nb_threshold=nb_threshold, nominal_attributes=nominal_attributes)
        # Statistics about alternate subtrees grown/pruned/switched so far.
        self.alternate_trees_cnt = 0
        self.pruned_alternate_trees_cnt = 0
        self.switch_alternate_trees_cnt = 0
        self.bootstrap_sampling = bootstrap_sampling
        self.random_state = random_state
        self._tree_root = None

    def reset(self):
        """Reset the tree and its alternate-subtree counters."""
        self.alternate_trees_cnt = 0
        self.pruned_alternate_trees_cnt = 0
        self.switch_alternate_trees_cnt = 0
        self._tree_root = None

    def _partial_fit(self, X, y, sample_weight):
        """Incrementally train on one sample, creating the root on first use."""
        if (self._tree_root is None):
            self._tree_root = self._new_learning_node()
            self._active_leaf_node_cnt = 1
        if isinstance(self._tree_root, InactiveLeaf):
            self._tree_root.learn_one(X, y, weight=sample_weight, tree=self)
        else:
            # Adaptive nodes additionally take (parent, parent_branch).
            self._tree_root.learn_one(X, y, sample_weight, self, None, (- 1))

    def filter_instance_to_leaves(self, X, y, weight, split_parent, parent_branch, update_splitter_counts):
        """Route an instance to every reachable leaf (main and alternate trees)."""
        nodes = []
        self._tree_root.filter_instance_to_leaves(X, y, weight, split_parent, parent_branch, update_splitter_counts, nodes)
        return nodes

    def _get_votes_for_instance(self, X):
        """Aggregate class-vote distributions from all leaves X reaches."""
        result = {}
        if (self._tree_root is not None):
            if isinstance(self._tree_root, InactiveLeaf):
                found_node = [self._tree_root.filter_instance_to_leaf(X, None, (- 1))]
            else:
                found_node = self.filter_instance_to_leaves(X, (- np.inf), (- np.inf), None, (- 1), False)
            for fn in found_node:
                # parent_branch == -999 marks nodes excluded from voting.
                if (fn.parent_branch != (- 999)):
                    leaf_node = fn.node
                    if (leaf_node is None):
                        leaf_node = fn.parent
                    dist = leaf_node.predict_one(X, tree=self)
                    result = add_dict_values(result, dist, inplace=True)
        return result

    def _new_learning_node(self, initial_class_observations=None, is_active=True):
        """Create a (drift-aware) learning node; inactive nodes fall back to MC."""
        if is_active:
            return AdaLearningNode(initial_class_observations, self.random_state)
        else:
            return InactiveLearningNodeMC(initial_class_observations)

    def _new_split_node(self, split_test, class_observations):
        """Create a drift-aware split node."""
        return AdaSplitNode(split_test, class_observations, self.random_state)
def create_result_count(used_seed: int, dataset: Text, arch_config: Dict[(Text, Any)], results: Dict[(Text, Any)], dataloader_dict: Dict[(Text, Any)]) -> ResultsCount:
    """Package raw training results for one (architecture, seed, dataset) into a ResultsCount.

    New-format results (those carrying 'train_times') already contain full
    train/eval curves and are stored directly. Old-format results are completed
    by rebuilding the network from arch_config and re-evaluating it on the
    appropriate valid/test loaders (requires CUDA).
    """
    xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)
    net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes': arch_config['class_num']}, None)
    if ('train_times' in results):
        # New-format checkpoint: all statistics were recorded during training.
        xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])
        xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])
    else:
        # Old-format checkpoint: re-instantiate the network and evaluate now.
        network = get_cell_based_tiny_net(net_config)
        network.load_state_dict(xresult.get_net_param())
        if (dataset == 'cifar10-valid'):
            xresult.update_OLD_eval('x-valid', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}{:}'.format('cifar10', 'test')], network.cuda())
            # Only the final-epoch metrics are recorded for re-evaluated splits.
            xresult.update_OLD_eval('ori-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            xresult.update_latency(latencies)
        elif (dataset == 'cifar10'):
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}{:}'.format(dataset, 'test')], network.cuda())
            xresult.update_latency(latencies)
        elif ((dataset == 'cifar100') or (dataset == 'ImageNet16-120')):
            # These datasets have separate valid and test splits to evaluate.
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}{:}'.format(dataset, 'valid')], network.cuda())
            xresult.update_OLD_eval('x-valid', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}{:}'.format(dataset, 'test')], network.cuda())
            xresult.update_OLD_eval('x-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            xresult.update_latency(latencies)
        else:
            raise ValueError('invalid dataset name : {:}'.format(dataset))
    return xresult
class LLama2Engine(CausalEngine):
    """Causal-LM engine wrapping the community-mirrored Llama-2 7B chat model."""
    config_name: str = 'llama2_engine'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        """Initialize the engine; *weights_path* optionally overrides the
        default weight location handled by CausalEngine."""
        super().__init__(model_name='daryl149/llama-2-7b-chat-hf', weights_path=weights_path, trust_remote_code=True)
        # Llama-2 tokenizers ship without a pad token; reuse EOS so that
        # batched/padded encoding works.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
def retrieve_all(db_name):
    """Return every row of the ``bots`` table, ordered by creation time.

    Parameters
    ----------
    db_name : str
        Path to the SQLite database file.

    Returns
    -------
    list[list]
        One list of column values per row.
    """
    conn = sqlite3.connect(db_name)
    try:
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        c.execute('SELECT * FROM bots order by created_at')
        # Materialize rows as plain lists so callers are not tied to the
        # sqlite3.Row type after the connection is closed.
        return [list(row) for row in c.fetchall()]
    finally:
        # BUGFIX: the connection was previously never closed (leaked handle).
        conn.close()
# NOTE(review): the `@pytest.mark.` prefixes appear to have been stripped from
# these two decorators during extraction; confirm against the upstream file.
.skipif((not cpp17), reason='ROOT was compiled without C++17 support')
.parametrize('flatlist_as_rvec', [False, True])
def test_ByteMaskedArray_NumpyArray(flatlist_as_rvec):
    """Round-trip a ByteMaskedArray through the cling/C++ generator and verify
    the regenerated layout equals the original."""
    array = ak.contents.ByteMaskedArray(ak.index.Index(np.array([1, 0, 1, 0, 1], np.int8)), ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), valid_when=True)
    layout = array
    generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rvec)
    lookup = ak._lookup.Lookup(layout, generator)
    generator.generate(compiler)
    # Rebuild the layout via the generated C++ accessors and compare contents.
    array_out = generator.tolayout(lookup, 0, ())
    assert (array.to_list() == array_out.to_list())
def test_extract_entities_from_sentence():
    """Entity extraction from a populated sentence matches the expected set,
    and an empty sentence yields an empty list."""
    sentence_root = ET.fromstring(SENTENCE_SAMPLE)
    extracted = extract_entities_from_sentence(sentence_root)
    assert extracted == EXPECTED_ENTITIES['1-p']['1.39-s']
    empty_root = ET.fromstring(EMPTY_SENTENCE)
    assert extract_entities_from_sentence(empty_root) == []
class AbstractDataset(ABC):
def __init__(self, config, subset, num_classes):
self.summaries = []
self.config = config
self.subset = subset
self.n_classes = num_classes
self.use_bbox_guidance = config.bool('use_bbox_guidance', False)
self.use_unsigned_distance_transform_guidance = config.bool('use_unsigned_distance_transform_guidance', False)
self.use_signed_distance_transform_guidance = config.bool('use_signed_distance_transform_guidance', False)
self.use_laser_guidance = config.bool('use_laser_guidance', False)
self.use_clicks_guidance = config.bool('use_clicks_guidance', False)
self.epoch_length_train = config.int('epoch_length_train', (- 1))
self.shuffle_buffer_size = config.int('shuffle_buffer_size', 5000)
self.use_summaries = self.config.bool('use_summaries', False)
def n_examples_per_epoch(self):
if ((self.subset == 'train') and (self.epoch_length_train != (- 1))):
return self.epoch_length_train
else:
return None
def create_input_tensors_dict(self, batch_size):
pass
def num_classes(self):
return self.n_classes
def load_example(self, input_filenames):
raw_example = self.load_raw_example(*input_filenames)
processed = self.process_raw_example(raw_example)
return processed
def process_raw_example(self, example):
example = self.postproc_example_initial(example)
example = self.augment_example_before_resize(example)
example = self.postproc_example_before_resize(example)
example = self.resize_example(example)
example = self.augment_example_after_resize(example)
example = self.postproc_example_before_assembly(example)
example = self.assemble_example(example)
return example
def load_raw_example(self, img_filename, label_filename=None, *args):
img_tensors = self.load_image(img_filename)
if (not isinstance(img_tensors, dict)):
img_tensors = {DataKeys.IMAGES: img_tensors}
label_tensors = self.load_annotation(img_tensors[DataKeys.IMAGES], img_filename, label_filename)
if (not isinstance(label_tensors, dict)):
label_tensors = {DataKeys.SEGMENTATION_LABELS: label_tensors}
for k in img_tensors.keys():
assert (k not in label_tensors.keys())
example = img_tensors
example.update(label_tensors)
return example
def load_image(self, img_filename):
img_data = tf.read_file(img_filename)
img = tf.image.decode_image(img_data, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img.set_shape((None, None, 3))
return img
def load_annotation(self, img, img_filename, annotation_filename):
ann_data = tf.read_file(annotation_filename)
ann = tf.image.decode_image(ann_data, channels=1)
ann.set_shape((img.get_shape().as_list()[:(- 1)] + [1]))
ann = self.postproc_annotation(annotation_filename, ann)
return ann
def postproc_annotation(self, ann_filename, ann):
return ann
def resize_example(self, tensors):
resize_mode_str = self.config.string(('resize_mode_' + self.subset), '')
if (resize_mode_str == ''):
print('Using resize_mode_train for', self.subset, ('since resize_mode_' + self.subset), 'not specified in the config', file=log.v1)
resize_mode_str = self.config.string('resize_mode_train')
size = self.config.int_list(('input_size_' + self.subset), [])
if (len(size) == 0):
size = self.config.int_list('input_size_train', [])
resize_mode = ResizeMode(resize_mode_str)
tensors = resize(tensors, resize_mode, size)
return tensors
def augment_example_before_resize(self, tensors):
augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors = aug.apply_before_resize(tensors)
return tensors
def augment_example_after_resize(self, tensors):
augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors = aug.apply_after_resize(tensors)
return tensors
def jointly_augment_examples_before_resize(self, tensors_batch):
augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors_batch = aug.batch_apply_before_resize(tensors_batch)
return tensors_batch
def jointly_augment_examples_after_resize(self, tensors_batch):
augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors_batch = aug.batch_apply_after_resize(tensors_batch)
return tensors_batch
def postproc_example_initial(self, tensors):
if ((DataKeys.IMAGES in tensors) and (DataKeys.RAW_IMAGES not in tensors)):
tensors[DataKeys.RAW_IMAGES] = tensors[DataKeys.IMAGES]
if ((DataKeys.IMAGES in tensors) and (DataKeys.RAW_IMAGE_SIZES not in tensors)):
tensors[DataKeys.RAW_IMAGE_SIZES] = tf.shape(tensors[DataKeys.IMAGES])[0:2]
if ((DataKeys.SEGMENTATION_LABELS in tensors) and (DataKeys.BBOXES_y0x0y1x1 not in tensors)):
print('deriving bboxes from segmentation masks', file=log.v5)
segmentation_labels = tensors[DataKeys.SEGMENTATION_LABELS]
bbox = get_bbox_from_segmentation_mask(segmentation_labels)
tensors[DataKeys.BBOXES_y0x0y1x1] = bbox
return tensors
def postproc_example_before_assembly(self, tensors):
tensors_postproc = tensors.copy()
tensors_postproc[DataKeys.IMAGES] = normalize(tensors[DataKeys.IMAGES])
if self.use_signed_distance_transform_guidance:
assert (DataKeys.BBOX_GUIDANCE in tensors)
bbox_guidance = tensors[DataKeys.BBOX_GUIDANCE]
sdt = signed_distance_transform(bbox_guidance)
tensors_postproc[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE] = sdt
if self.use_unsigned_distance_transform_guidance:
assert (DataKeys.BBOX_GUIDANCE in tensors)
bbox_guidance = tensors[DataKeys.BBOX_GUIDANCE]
udt = unsigned_distance_transform(bbox_guidance)
tensors_postproc[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE] = udt
return tensors_postproc
def postproc_example_before_resize(self, tensors):
tensors_postproc = tensors.copy()
if ((self.use_bbox_guidance or self.use_signed_distance_transform_guidance or self.use_unsigned_distance_transform_guidance) and (DataKeys.BBOXES_y0x0y1x1 in tensors) and (DataKeys.BBOX_GUIDANCE not in tensors)):
bbox = tensors[DataKeys.BBOXES_y0x0y1x1]
img = tensors[DataKeys.IMAGES]
bbox_guidance = encode_bbox_as_mask(bbox, tf.shape(img))
tensors_postproc[DataKeys.BBOX_GUIDANCE] = bbox_guidance
return tensors_postproc
def assemble_example(self, tensors):
tensors_assembled = tensors.copy()
inputs_to_concat = [tensors[DataKeys.IMAGES]]
if (self.use_bbox_guidance and (DataKeys.BBOX_GUIDANCE in tensors)):
print('using bbox guidance', file=log.v5)
bbox_guidance = tf.cast(tensors[DataKeys.BBOX_GUIDANCE], tf.float32)
inputs_to_concat.append(bbox_guidance)
if (self.use_signed_distance_transform_guidance and (DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE in tensors)):
print('using signed distance transform guidance')
assert (not self.use_bbox_guidance), "we probably don't want to use both bbox and sdt guidance at the same time"
sdt_guidance = tensors[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE]
inputs_to_concat.append(sdt_guidance)
if (self.use_unsigned_distance_transform_guidance and (DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE in tensors)):
print('using unsigned distance transform guidance')
assert (not self.use_bbox_guidance), "we probably don't want to use both bbox and udt guidance at the same time"
udt_guidance = tensors[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE]
inputs_to_concat.append(udt_guidance)
if (self.use_laser_guidance and (DataKeys.LASER_GUIDANCE in tensors)):
print('using laser guidance', file=log.v5)
laser_guidance = tf.cast(tensors[DataKeys.LASER_GUIDANCE], tf.float32)
inputs_to_concat.append(laser_guidance)
if self.use_clicks_guidance:
print('using guidance from clicks')
neg_dist_transform = tensors[DataKeys.NEG_CLICKS]
pos_dist_transform = tensors[DataKeys.POS_CLICKS]
inputs_to_concat.append(neg_dist_transform)
inputs_to_concat.append(pos_dist_transform)
if (len(inputs_to_concat) > 1):
inputs = tf.concat(inputs_to_concat, axis=(- 1))
else:
inputs = inputs_to_concat[0]
tensors_assembled[DataKeys.INPUTS] = inputs
return tensors_assembled
def create_summaries(self, data):
    """Register an image summary for every known data key that is present in *data*."""
    def _add_image(tag, tensor):
        # Every summary tag is namespaced under the current subset prefix.
        self.summaries.append(tf.summary.image(self.subset + tag, tensor))
    if DataKeys.IMAGES in data:
        _add_image('data/images', unnormalize(data[DataKeys.IMAGES]))
    if DataKeys.SEGMENTATION_LABELS in data:
        _add_image('data/ground truth segmentation labels',
                   tf.cast(data[DataKeys.SEGMENTATION_LABELS], tf.float32))
    if DataKeys.BBOX_GUIDANCE in data:
        _add_image('data/bbox guidance',
                   tf.cast(data[DataKeys.BBOX_GUIDANCE], tf.float32))
    if DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE in data:
        _add_image('data/signed_distance_transform_guidance',
                   data[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE])
    if DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE in data:
        _add_image('data/unsigned_distance_transform_guidance',
                   data[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE])
    if DataKeys.LASER_GUIDANCE in data:
        _add_image('data/laser guidance',
                   tf.cast(data[DataKeys.LASER_GUIDANCE], tf.float32))
def main():
    """Command-line entry point for the BLASYS approximate-synthesis flow.

    Parses the CLI arguments, loads the YAML configuration, then either runs
    the partition-based greedy optimization (default) or factorizes the whole
    circuit at once (--no_partition).
    """
    max_cpu = mp.cpu_count()
    app_path = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(description='BLASYS -- Approximate Logic Synthesis Using Boolean Matrix Factorization')
    parser.add_argument('-i', '--input', help='Input verilog file', required=True, dest='input')
    parser.add_argument('-tb', '--testbench', help='Number of test vectors', required=True, dest='testbench')
    parser.add_argument('-n', '--number', help='Number of partitions', default=None, type=int, dest='npart')
    parser.add_argument('-o', '--output', help='Output directory', default=None, dest='output')
    parser.add_argument('-ts', '--threshold', help='Threshold on error', default='None', dest='threshold')
    parser.add_argument('-lib', '--liberty', help='Liberty file name', default=None, dest='liberty')
    parser.add_argument('-ss', '--stepsize', help='Step size of optimization process', default=1, type=int, dest='stepsize')
    parser.add_argument('-m', '--metric', help='Choose error metric', dest='metric', default='HD')
    parser.add_argument('-tr', '--track', help='Number of tracks in greedy search', dest='track', type=int, default=3)
    parser.add_argument('-cpu', '--cpu_count', help='Specify number of CPU in parallel mode', dest='cpu', type=int, default=(- 1))
    parser.add_argument('--parallel', help='Run the flow in parallel mode if specified', dest='parallel', action='store_true')
    parser.add_argument('--no_partition', help='Factorize without partition', dest='single', action='store_true')
    parser.add_argument('--sta', help='Use OpenSTA to estimate power and delay', dest='sta', action='store_true')
    parser.add_argument('--fast_random', help='Accelerate by randomly picking subcircuits to approximate', dest='rand', action='store_true')
    parser.add_argument('--fast_deter', help='Accelerate by picking certain subcircuits to approximate', dest='deter', action='store_true')
    args = parser.parse_args()
    # --parallel with no explicit CPU count means "use every core";
    # an explicit CPU count implies parallel mode.
    # (fix: truthiness checks instead of `== True` comparisons)
    if args.parallel and (args.cpu == (- 1)):
        args.cpu = max_cpu
    if (args.cpu != (- 1)):
        args.parallel = True
    # Acceleration mode: 0 = none, 1 = random subcircuit picking, 2 = deterministic.
    accelerate = 0
    if args.rand:
        accelerate = 1
    elif args.deter:
        accelerate = 2
    print_banner()
    with open(os.path.join(app_path, 'config', 'params.yml'), 'r') as config_file:
        config = yaml.safe_load(config_file)
    config['part_config'] = os.path.join(app_path, 'config', 'test.ini')
    # The argparse default is the *string* 'None', not the None object.
    if (args.threshold == 'None'):
        threshold_list = [np.inf]
    else:
        threshold_list = list(map(float, args.threshold.split(',')))
    worker = GreedyWorker(args.input, args.liberty, config, args.testbench, args.metric, args.sta)
    worker.create_output_dir(args.output)
    worker.evaluate_initial()
    # fix: `not args.single` instead of identity comparison `is not True` on a bool
    if not args.single:
        worker.convert2aig()
        if (args.npart is None):
            worker.recursive_partitioning()
        else:
            worker.recursive_partitioning(args.npart)
        worker.greedy_opt(args.parallel, args.cpu, args.stepsize, threshold_list, track=args.track, accel=accelerate)
    else:
        worker.blasys()
class param():
    """Bundle filesystem paths and dataset-specific parameters derived from *config*."""

    def __init__(self, config):
        self.dataset_dir = get_tc_path() + config['pre_dataset_dir']
        self.parametrized_dir = get_tc_path() + config['pre_output_dir']
        self.output_dir = os.path.join(
            get_tc_path(), config['co_experiment_dir'], config['co_output_dir'])
        dataset_type = config['pre_dataset_param']
        # Dispatch table instead of an if/elif chain; an unknown dataset name
        # leaves self.d_par unset, exactly like the original branching.
        factories = {
            'stanford': stanford_params,
            'scannet': scannet_params,
            'semantic3d': semantic3d_params,
        }
        factory = factories.get(dataset_type)
        if factory is not None:
            self.d_par = factory()
def extract_seconds(input_file, output_file):
    """Write elapsed seconds since training start for every 'Iteration' log line.

    Reads *input_file*, locates the start timestamp, then writes one
    '%f\\n'-formatted elapsed-seconds value per 'Iteration' line to
    *output_file*.

    Raises:
        AssertionError: if no start time can be found in the log.
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()
    log_created_year = get_log_created_year(input_file)
    start_datetime = get_start_time(lines, log_created_year)
    assert start_datetime, 'Start time not found'
    # fix: use a context manager -- the original leaked the output handle
    # if extract_datetime_from_line raised.
    with open(output_file, 'w') as out:
        for line in lines:
            line = line.strip()
            if 'Iteration' in line:
                dt = extract_datetime_from_line(line, log_created_year)
                elapsed_seconds = (dt - start_datetime).total_seconds()
                out.write('%f\n' % elapsed_seconds)
def error(message: str) -> None:
    """Log *message* at error level on MPI rank 0 only, then synchronize all ranks."""
    on_root = fenics.MPI.rank(fenics.MPI.comm_world) == 0
    if on_root:
        _cashocs_logger.error(message)
    # Barrier runs on every rank so no process races ahead of the log call.
    fenics.MPI.barrier(fenics.MPI.comm_world)
from torch.autograd.function import once_differentiable


class _SigmoidFocalLoss(Function):
    """Autograd wrapper around the compiled sigmoid focal-loss kernels in _C.

    Gradients are only produced for the logits; targets, gamma and alpha
    receive None in backward.
    """

    @staticmethod  # fix: torch.autograd.Function forward must be a staticmethod
    def forward(ctx, logits, targets, gamma, alpha):
        ctx.save_for_backward(logits, targets)
        num_classes = logits.shape[1]
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha
        losses = _C.sigmoid_focalloss_forward(logits, targets, num_classes, gamma, alpha)
        return losses

    @staticmethod
    @once_differentiable  # fix: the stray bare '_differentiable' line was a mangled decorator
    def backward(ctx, d_loss):
        (logits, targets) = ctx.saved_tensors
        num_classes = ctx.num_classes
        gamma = ctx.gamma
        alpha = ctx.alpha
        d_loss = d_loss.contiguous()
        d_logits = _C.sigmoid_focalloss_backward(logits, targets, d_loss, num_classes, gamma, alpha)
        return (d_logits, None, None, None, None)
def get_args():
    """Parse command-line arguments for the numpy-to-tfrecords conversion tool.

    Returns:
        argparse.Namespace with input/output paths, optional train/test split
        settings, and the target model architecture.
    """
    parser = argparse.ArgumentParser(description='Convert to tfrecords from numpy - Geofacies', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset_path_input', type=str, required=True, default='', help='dataset path (numpy)')
    # fix: typo in user-facing help text ('tfrecorfs' -> 'tfrecords')
    parser.add_argument('--dataset_path_output', type=str, required=True, default='', help='dataset path (tfrecords)')
    parser.add_argument('--split', action='store_true', help='train-test split')
    parser.add_argument('--test_size', type=float, default=0.3, help='test size of split')
    parser.add_argument('--random_state', type=int, default=0, help='random seed')
    parser.add_argument('--model', type=str, default='cvae', help="model architecture ('cvae','cvae_style','CycleGAN')")
    args = parser.parse_args()
    return args
def optimizer_kwargs(cfg):
    """Build the keyword-argument dict for an optimizer factory from *cfg*.

    Always includes opt/lr/weight_decay/momentum; optional settings are
    copied only when present on *cfg* and not None, and cfg.opt_args (if any)
    is merged in last.
    """
    kwargs = {
        'opt': cfg.opt,
        'lr': cfg.lr,
        'weight_decay': cfg.weight_decay,
        'momentum': cfg.momentum,
    }
    # Optional scalar settings: config attribute name -> optimizer kwarg name.
    for attr, key in (('opt_eps', 'eps'), ('opt_betas', 'betas'), ('layer_decay', 'layer_decay')):
        value = getattr(cfg, attr, None)
        if value is not None:
            kwargs[key] = value
    extra = getattr(cfg, 'opt_args', None)
    if extra is not None:
        kwargs.update(extra)
    return kwargs
def sigmoid_cross_entropy_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward pass of sigmoid cross-entropy.

    The gradient w.r.t. the logits is dy * (sigmoid(logits) - labels); the
    label input receives no gradient (None).
    """
    dy = grad_inputs[0]
    logits = inputs[0]
    labels = inputs[1]
    grad_logits = dy * (F.sigmoid(logits) - labels)
    return (grad_logits, None)
def sum_interaction_coefficients(n):
    """Compute interaction coefficients over subsets of {2, ..., n}.

    For each subset K of {2, ..., n}, accumulates
    sum over non-empty S subset of K of (-1)^(|S|+1) / ((|K|-|S|+1) * (|S|+1)!)
    exactly, as a Fraction.

    Returns:
        dict mapping a tuple key (see NOTE below) to its Fraction coefficient.
    """
    # fix: the `np.math` alias (np.math.pow / np.math.factorial) was removed
    # in modern NumPy; use the stdlib math module instead.
    from math import factorial

    N = (np.arange((n - 1)) + 2)
    coeff_dict = {}
    for K in powerset(N):
        coeff = 0
        for S in powerset(K):
            if (len(S) == 0):
                continue
            sign = (-1) ** (len(S) + 1)
            denom = (((len(K) - len(S)) + 1) * factorial((len(S) + 1)))
            coeff += Fraction(sign, denom)
        # NOTE(review): this intentionally reads S *after* the loop, i.e. the
        # last subset yielded by powerset(K) (typically K itself) -- kept
        # as-is to preserve behavior; confirm the key should not be K + (1,).
        coeff_dict[tuple((S + (1,)))] = coeff
    return coeff_dict
import pytest  # fix: the decorators below were stripped to bare '.parametrize' lines (syntax error)


@pytest.mark.parametrize('max_timestep', [3])
@pytest.mark.parametrize('embed_dim', [256])
@pytest.mark.parametrize('context_size', [10])
@pytest.mark.parametrize('batch_size', [32])
def test_global_position_encoding(max_timestep: int, embed_dim: int, context_size: int, batch_size: int) -> None:
    """GlobalPositionEncoding should map (B, 3*C) timestep ids to (B, 3*C, D) embeddings."""
    model = GlobalPositionEncoding(embed_dim, max_timestep, context_size)
    x = torch.randint(low=0, high=max_timestep, size=(batch_size, (3 * context_size)))
    y = model(x)
    assert (y.shape == (batch_size, (3 * context_size), embed_dim))
def rescale_l8(img: ee.Image) -> ee.Image:
    """Scale a Landsat-8 SR image: optical bands by 1e-4, thermal by 0.1.

    Negative optical values are masked, optical values clamped to [0, 10000]
    before scaling; the pixel_qa band is passed through unchanged and the
    source properties plus system:time_start are copied onto the result.
    """
    optical = img.select(['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2'])
    thermal = img.select('TEMP1')
    qa = img.select('pixel_qa')
    optical = optical.updateMask(optical.gte(0)).clamp(0, 10000).multiply(0.0001)
    thermal = thermal.multiply(0.1)
    out = ee.Image.cat([optical, thermal, qa]).copyProperties(img)
    return out.set('system:time_start', img.get('system:time_start'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.