code stringlengths 101 5.91M |
|---|
def load_satimage():
    """Load the pre-split 'satimage' dataset (libsvm scaled format).

    Reads 'satimage.scale.tr' / 'satimage.scale.t' from the data home
    directory and returns the densified (train, test) structure produced
    by ``_load``.
    """
    home = get_data_home()
    train_path = os.path.join(home, 'satimage.scale.tr')
    test_path = os.path.join(home, 'satimage.scale.t')
    return _todense(_load(train_path, test_path, 'satimage'))
class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
    """Lee-Brickell variant of information-set decoding.

    Repeatedly samples an information set of the code, inverts the
    corresponding generator submatrix, and searches over low-weight error
    patterns of weight up to ``search_size`` on the information set until a
    codeword within the decoding interval is found.
    """

    def __init__(self, code, decoding_interval, search_size=None):
        # search_size (p) bounds how many information-set positions may be in
        # error; None means "choose automatically during calibration".
        if (search_size is not None):
            if ((not isinstance(search_size, (Integer, int))) or (search_size < 0)):
                raise ValueError('The search size parameter has to be a positive integer')
            if (search_size > decoding_interval[1]):
                raise ValueError('The search size parameter has to be at most the maximal number of allowed errors')
            super().__init__(code, decoding_interval, 'Lee-Brickell', parameters={'search_size': search_size})
            self._parameters_specified = True
        else:
            self._parameters_specified = False
            super().__init__(code, decoding_interval, 'Lee-Brickell')

    def decode(self, r):
        """Decode the received word ``r``; loops until a codeword whose
        distance to ``r`` lies in the decoding interval is found.

        NOTE(review): this is a Las Vegas algorithm — it does not terminate
        if no such codeword exists.
        """
        import itertools
        from sage.misc.prandom import sample
        C = self.code()
        (n, k) = (C.length(), C.dimension())
        tau = self.decoding_interval()
        p = self.parameters()['search_size']
        F = C.base_ring()
        G = C.generator_matrix()
        # Nonzero field elements; candidate error values on the info set.
        Fstar = F.list()[1:]
        while True:
            # Sample a candidate information set I and retry unless the
            # corresponding k x k generator submatrix is invertible.
            I = sample(range(n), k)
            Gi = G.matrix_from_columns(I)
            try:
                Gi_inv = Gi.inverse()
            except ZeroDivisionError:
                continue
            # Systematize G on I; y is r minus the codeword agreeing with r on I.
            Gt = (Gi_inv * G)
            y = (r - (vector([r[i] for i in I]) * Gt))
            g = Gt.rows()
            # Search all error patterns of weight pi <= p on the information set.
            for pi in range((p + 1)):
                for A in itertools.combinations(range(k), pi):
                    for m in itertools.product(Fstar, repeat=pi):
                        e = (y - sum(((m[i] * g[A[i]]) for i in range(pi))))
                        errs = e.hamming_weight()
                        if (tau[0] <= errs <= tau[1]):
                            return (r - e)

    def calibrate(self):
        """Time the two phases of the algorithm and either record a time
        estimate for a user-supplied ``search_size`` or select the fastest
        search size automatically."""
        from sage.matrix.special import random_matrix
        from sage.misc.prandom import sample, randint
        from sage.modules.free_module_element import random_vector
        from time import process_time
        C = self.code()
        G = C.generator_matrix()
        (n, k) = (C.length(), C.dimension())
        tau = self.decoding_interval()[1]
        F = C.base_ring()
        q = F.cardinality()
        Fstar = F.list()[1:]

        def time_information_set_steps():
            # Time one successful "sample info set + invert submatrix" step,
            # including any failed (singular) attempts along the way.
            before = process_time()
            while True:
                I = sample(range(n), k)
                Gi = G.matrix_from_columns(I)
                try:
                    Gi_inv = Gi.inverse()
                except ZeroDivisionError:
                    continue
                return (process_time() - before)

        def time_search_loop(p):
            # Average time of one weight-p error-pattern evaluation (over 100).
            y = random_vector(F, n)
            g = random_matrix(F, p, n).rows()
            scalars = [[Fstar[randint(0, (q - 2))] for i in range(p)] for s in range(100)]
            before = process_time()
            for m in scalars:
                e = (y - sum(((m[i] * g[i]) for i in range(p))))
            return ((process_time() - before) / 100.0)

        T = (sum([time_information_set_steps() for s in range(5)]) / 5.0)
        P = [time_search_loop(p) for p in range((tau + 1))]

        def compute_estimate(p):
            # Expected iterations = 1 / Pr[sampled info set has <= p errors],
            # times the cost of one iteration (setup T plus full search of
            # all patterns of weight <= p).
            iters = ((1.0 * binomial(n, k)) / sum(((binomial((n - tau), (k - i)) * binomial(tau, i)) for i in range((p + 1)))))
            estimate = (iters * (T + sum((((P[pi] * ((q - 1) ** pi)) * binomial(k, pi)) for pi in range((p + 1))))))
            return estimate

        if self._parameters_specified:
            self._time_estimate = compute_estimate(self._parameters['search_size'])
        else:
            self._calibrate_select([compute_estimate(p) for p in range((tau + 1))])

    def _calibrate_select(self, estimates):
        """Pick the search size with the smallest time estimate and store it."""
        search_size = 0
        for p in range(1, len(estimates)):
            if (estimates[p] < estimates[search_size]):
                search_size = p
        self._parameters = {'search_size': search_size}
        self._time_estimate = estimates[search_size]
class GAEAEvalTrial(PyTorchTrial):
    """Determined ``PyTorchTrial`` that trains and evaluates a fixed GAEA
    genotype (NAS cell architecture) on one of several image/signal tasks.

    Changes from the original:
    - removed a dead triple-quoted string literal (a commented-out
      ``evaluate_batch`` kept as a no-op expression statement);
    - removed the unused ``current_lr`` local in ``train_batch``.
    """

    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        self.hparams = AttrDict(context.get_hparams())
        self.data_config = context.get_data_config()
        self.criterion = nn.CrossEntropyLoss()
        # Side effect: also populates self.train_data / self.val_data.
        self.download_directory = self.download_data_from_s3()
        self.last_epoch_idx = -1
        self.model = self.context.wrap_model(self.build_model_from_config())
        print('param size = %f MB' % utils.count_parameters_in_MB(self.model))
        self.optimizer = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.parameters(),
                lr=self.context.get_hparam('learning_rate'),
                momentum=self.context.get_hparam('momentum'),
                weight_decay=self.context.get_hparam('weight_decay'),
            )
        )
        # NOTE(review): the 150.0 T_max must stay in sync with the 150-epoch
        # drop-path ramp in train_batch — confirm against the experiment config.
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(self.optimizer, 150.0, 0),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def build_model_from_config(self):
        """Instantiate the ``Network`` for the configured task/genotype."""
        if self.context.get_hparam('permute'):
            genotype = genotypes['cifar100_permuted']
        else:
            genotype = genotypes[self.context.get_hparam('task')]
        print(self.context.get_hparam('task'))
        print(genotype)
        # task -> (number of classes, input channels)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3)}
        n_classes, in_channels = dataset_hypers[self.context.get_hparam('task')]
        model = Network(
            self.context.get_hparam('init_channels'),
            n_classes,
            self.context.get_hparam('layers'),
            genotype,
            in_channels=in_channels,
            drop_path_prob=self.context.get_hparam('drop_path_prob'),
        )
        return model

    def get_genotype_from_hps(self):
        """Reconstruct a ``Genotype`` from per-edge hyperparameters
        (4 nodes x 2 edges per cell type)."""
        cell_config = {'normal': [], 'reduce': []}
        for cell in ['normal', 'reduce']:
            for node in range(4):
                for edge in [1, 2]:
                    edge_ind = self.hparams['{}_node{}_edge{}'.format(cell, node + 1, edge)]
                    edge_op = self.hparams['{}_node{}_edge{}_op'.format(cell, node + 1, edge)]
                    cell_config[cell].append((edge_op, edge_ind))
        print(cell_config)
        return Genotype(normal=cell_config['normal'], normal_concat=range(2, 6), reduce=cell_config['reduce'], reduce_concat=range(2, 6))

    def download_data_from_s3(self):
        """Download the task dataset from S3 into a per-rank directory and
        load the train/val splits as a side effect."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.context.get_hparam('task'), download_directory)
        self.train_data, _, self.val_data = load_data(self.context.get_hparam('task'), download_directory, False, permute=self.context.get_hparam('permute'))
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        """Shuffled training loader at the per-slot batch size."""
        train_queue = DataLoader(
            self.train_data,
            batch_size=self.context.get_per_slot_batch_size(),
            shuffle=True,
            pin_memory=True,
            num_workers=self.data_config['num_workers_train'],
        )
        return train_queue

    def build_validation_data_loader(self) -> DataLoader:
        """Deterministic (unshuffled) validation loader."""
        valid_queue = DataLoader(
            self.val_data,
            batch_size=self.context.get_per_slot_batch_size(),
            shuffle=False,
            pin_memory=True,
            num_workers=self.data_config['num_workers_val'],
        )
        return valid_queue

    def train_batch(self, batch: Any, epoch_idx: int, batch_idx: int) -> Dict[str, torch.Tensor]:
        """One optimization step; returns loss and top-1/top-5 accuracy."""
        if (batch_idx == 0) or (self.last_epoch_idx < epoch_idx):
            # At each epoch boundary, ramp drop-path linearly over 150 epochs.
            self.model.drop_path_prob = self.context.get_hparam('drop_path_prob') * epoch_idx / 150.0
            self.last_epoch_idx = epoch_idx
        input, target = batch
        logits = self.model(input)
        loss = self.criterion(logits, target)
        top1, top5 = accuracy(logits, target, topk=(1, 5))
        self.context.backward(loss)
        self.context.step_optimizer(
            self.optimizer,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(params, self.context.get_hparam('clip_gradients_l2_norm')),
        )
        return {'loss': loss, 'top1_accuracy': top1, 'top5_accuracy': top5}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        """Evaluate over the whole validation set, averaging metrics by
        sample count."""
        acc_top1 = utils.AverageMeter()
        acc_top5 = utils.AverageMeter()
        loss_avg = utils.AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.criterion(logits, target)
                top1, top5 = utils.accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}
        return results
def attach_dependencies(doc, response):
    """Copy dependency edges from a conversion response onto ``doc`` in place.

    For each sentence, validates that the response graph has one node per
    word and one edge per non-root word, assigns each word its head index
    and dependency relation, marks the single unattached node as root, and
    rebuilds the sentence's dependency structure.

    Raises ValueError on any count mismatch or duplicate parent.
    """
    n_sents = len(doc.sentences)
    n_convs = len(response.conversions)
    if n_sents != n_convs:
        raise ValueError('Sent %d sentences but got back %d conversions' % (n_sents, n_convs))
    for sent_idx, (sentence, conversion) in enumerate(zip(doc.sentences, response.conversions)):
        graph = conversion.graph
        n_words = len(sentence.words)
        if n_words != len(graph.node):
            raise ValueError('Sentence %d of the conversion should have %d words but got back %d nodes in the graph' % (sent_idx, n_words, len(graph.node)))
        if n_words != len(graph.edge) + 1:
            raise ValueError('Sentence %d of the conversion should have %d edges (one per word, plus the root) but got back %d edges in the graph' % (sent_idx, n_words - 1, len(graph.edge)))
        seen_targets = set()
        for edge in graph.edge:
            # Each word (1-indexed) may have at most one parent.
            if edge.target in seen_targets:
                raise ValueError('Found two parents of %d in sentence %d' % (edge.target, sent_idx))
            seen_targets.add(edge.target)
            word = sentence.words[edge.target - 1]
            word.head = edge.source
            word.deprel = edge.dep
        # Exactly one node must be left without a parent: the root.
        roots = set(range(1, n_words + 1)) - seen_targets
        assert len(roots) == 1
        for root in roots:
            sentence.words[root - 1].head = 0
            sentence.words[root - 1].deprel = 'root'
        sentence.build_dependencies()
class SpeechCommandsDataset(Dataset):
    """Dataset of speech-command audio files laid out one directory per class.

    BUG FIX: the sample-collection loop iterated the module-level ``CLASSES``
    constant instead of the ``classes`` parameter, so passing a custom class
    list was ignored (and raised KeyError for classes outside the custom
    list). It now honors ``classes``.
    """

    def __init__(self, folder, transform=None, train=True, classes=CLASSES):
        # Candidate class directories; underscore-prefixed dirs (e.g.
        # '_background_noise_') are excluded.
        all_classes = [d for d in os.listdir(folder) if os.path.isdir(os.path.join(folder, d)) and not d.startswith('_')]
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.data = []
        self.targets = []
        # Collect (path, label) pairs for exactly the requested classes.
        for c in classes:
            d = os.path.join(folder, c)
            target = class_to_idx[c]
            # Sort for a deterministic sample order across runs.
            for f in sorted(os.listdir(d)):
                self.data.append(os.path.join(d, f))
                self.targets.append(target)
        self.classes = classes
        self.transform = transform
        # NOTE(review): nclass counts folders on disk, not len(classes) —
        # confirm this asymmetry is intended by callers.
        self.nclass = len(all_classes)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Returns (possibly transformed) sample path/audio and its label.
        data = self.data[index]
        target = self.targets[index]
        if self.transform is not None:
            data = self.transform(data)
        return (data, target)
def load_banana():
    """Load the 'banana' dataset (single file, no held-out test split)."""
    path = os.path.join(get_data_home(), 'banana', 'banana.all.txt')
    return _todense(_load(path, None, 'banana'))
class MemoryChunkArguments(MemoryChunkLonglivedArray):
    """Memory chunk that materializes Python-level arguments into a
    long-lived C array before a generated loop runs."""

    def setup_args(self):
        # Render Cython setup code: declare a typed pointer into self._args
        # and copy each Python argument in via the storage type's
        # assignment template. `je`/`ri` are the project's template helpers;
        # the template string must be kept verbatim.
        return je(ri(0, "\n cdef {{ myself.storage_type.c_ptr_type() }} c_args = self._args\n cdef int i\n for i from 0 <= i < len(args):\n {{ myself.storage_type.assign_c_from_py('self._args[i]', 'args[i]') | i(4) }}\n "), myself=self)

    def pass_argument(self):
        # Name of the C-level variable declared by setup_args().
        return 'c_args'
def _anthropic_create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for Anthropic API calls.

    Retries up to ``llm.max_retries`` times with exponential backoff
    (1s..60s), re-raising the final error, and logs a warning before each
    sleep.

    BUG FIX: the original or-chain listed ``anthropic.APIConnectionError``
    twice; the retryable exceptions are now a single tuple passed to one
    ``retry_if_exception_type`` call.

    NOTE(review): the annotation says ``ChatOpenAI`` but the body is
    Anthropic-specific; only ``llm.max_retries`` is read — confirm the
    intended type with the caller.
    """
    import anthropic

    min_seconds = 1
    max_seconds = 60
    retryable = (
        anthropic.APITimeoutError,
        anthropic.APIError,
        anthropic.APIConnectionError,
        anthropic.RateLimitError,
        anthropic.APIStatusError,
        anthropic.InternalServerError,
    )
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=retry_if_exception_type(retryable),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def mk_lean_auto_spec_name(fn_name: str, namespaces: List[ScopedName]):
    """Return the 'auto_spec_'-prefixed name for ``fn_name`` resolved
    against the currently open scopes."""
    scoped = ScopedName.from_string(fn_name)
    return get_name_in_open_scopes(scoped, namespaces, 'auto_spec_')
def all_gather(data, group=None):
    """Gather an arbitrary picklable object from every process.

    Returns a list with one deserialized object per rank. Falls back to a
    trivial singleton list when the world (or group) size is 1.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    # Serialize locally, then pad all ranks' byte tensors to a common length.
    local = _serialize_to_tensor(data, group)
    size_list, local = _pad_to_largest_tensor(local, group)
    max_size = max(size_list)
    gathered = [
        torch.empty((max_size,), dtype=torch.uint8, device=local.device)
        for _ in size_list
    ]
    dist.all_gather(gathered, local, group=group)
    # Trim each buffer back to its true size before unpickling.
    return [
        pickle.loads(t.cpu().numpy().tobytes()[:sz])
        for sz, t in zip(size_list, gathered)
    ]
def compile(source, options=None, full_module_name=None, **kwds):
    """Compile one or more Cython source files.

    A single source string without timestamp checking goes through the
    single-file path (which honors ``full_module_name``); everything else
    is dispatched to the multi-file path.
    """
    options = CompilationOptions(defaults=options, **kwds)
    is_single = isinstance(source, basestring) and not options.timestamps
    if is_single:
        return compile_single(source, options, full_module_name)
    return compile_multiple(source, options)
@dataclass(frozen=True)
class Reference():
    """An immutable reference answer with its annotation tags.

    BUG FIX: the class lost its ``@dataclass`` decorator (only the stray
    ``(frozen=True)`` argument list survived), so no ``__init__``/``__eq__``
    were generated; restored as ``@dataclass(frozen=True)``.
    """

    # The reference output (its .text is rendered below).
    output: Output
    # Annotation tags; membership of CORRECT_TAG marks a correct reference.
    tags: List[str]

    def is_correct(self) -> bool:
        """True if this reference is tagged as a correct answer."""
        return CORRECT_TAG in self.tags

    def render_lines(self) -> List[str]:
        """Human-readable one-line rendering of the reference."""
        return [f'reference {format_tags(self.tags)}: {format_text(self.output.text)}']
def example():
    """Run a short CausalWorld 'picking' episode, applying a random
    intervention every 100 steps and printing what was intervened on."""
    env = CausalWorld(task=generate_task(task_generator_id='picking'), enable_visualization=True)
    env.reset()
    for _ in range(50):
        random_intervention_dict, success_signal, obs = env.do_single_random_intervention()
        print('The random intervention performed is ', random_intervention_dict)
        # Let the episode run for a while under the new intervention.
        for _step in range(100):
            obs, reward, done, info = env.step(env.action_space.sample())
    env.close()
def get_context():
    """Create a Navier-Stokes context with curl and W_hat fields allocated
    over its function space T."""
    ctx = NS_context()
    ctx.curl = Array(ctx.T)
    ctx.W_hat = Function(ctx.T)
    return ctx
def run_experiment(argv):
    """Entry point for an rllab-style experiment launcher.

    Parses CLI flags, configures logging/snapshot/tensorboard output under
    a (possibly auto-named) log directory, then runs the experiment either
    by resuming a pickled snapshot, by invoking a cloudpickled method call,
    or by unpickling and concretizing stub call data. Restores the logger's
    previous snapshot settings before returning.
    """
    default_log_dir = config.LOG_DIR
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    # Short random suffix keeps auto-generated experiment names unique.
    rand_id = str(uuid.uuid4())[:5]
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
    default_exp_name = ('experiment_%s_%s' % (timestamp, rand_id))
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_parallel', type=int, default=1, help="Number of parallel workers to perform rollouts. 0 => don't start any workers")
    parser.add_argument('--exp_name', type=str, default=default_exp_name, help='Name of the experiment.')
    parser.add_argument('--log_dir', type=str, default=None, help='Path to save the log and iteration snapshot.')
    parser.add_argument('--snapshot_mode', type=str, default='all', help='Mode to save the snapshot. Can be either "all" (all iterations will be saved), "last" (only the last iteration will be saved), "gap" (every`snapshot_gap` iterations are saved), or "none" (do not save snapshots)')
    parser.add_argument('--snapshot_gap', type=int, default=1, help='Gap between snapshot iterations.')
    parser.add_argument('--tabular_log_file', type=str, default='progress.csv', help='Name of the tabular log file (in csv).')
    parser.add_argument('--text_log_file', type=str, default='debug.log', help='Name of the text log file (in pure text).')
    parser.add_argument('--tensorboard_log_dir', type=str, default='tb', help='Name of the folder for tensorboard_summary.')
    parser.add_argument('--tensorboard_step_key', type=str, default=None, help='Name of the step key in log data which shows the step in tensorboard_summary.')
    parser.add_argument('--params_log_file', type=str, default='params.json', help='Name of the parameter log file (in json).')
    parser.add_argument('--variant_log_file', type=str, default='variant.json', help='Name of the variant log file (in json).')
    parser.add_argument('--resume_from', type=str, default=None, help='Name of the pickle file to resume experiment from.')
    parser.add_argument('--plot', type=ast.literal_eval, default=False, help='Whether to plot the iteration results')
    parser.add_argument('--log_tabular_only', type=ast.literal_eval, default=False, help='Whether to only print the tabular log information (in a horizontal format)')
    parser.add_argument('--seed', type=int, help='Random seed for numpy')
    parser.add_argument('--args_data', type=str, help='Pickled data for stub objects')
    parser.add_argument('--variant_data', type=str, help='Pickled data for variant configuration')
    parser.add_argument('--use_cloudpickle', type=ast.literal_eval, default=False)
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint', help='Name of the folder for checkpoints.')
    parser.add_argument('--obs_dir', type=str, default='obs', help='Name of the folder for original observations.')
    args = parser.parse_args(argv[1:])
    # Seed both the local process and (below) any parallel workers.
    if (args.seed is not None):
        set_seed(args.seed)
    if (args.n_parallel > 0):
        from rllab.sampler import parallel_sampler
        parallel_sampler.initialize(n_parallel=args.n_parallel)
        if (args.seed is not None):
            parallel_sampler.set_seed(args.seed)
    if args.plot:
        from rllab.plotter import plotter
        plotter.init_worker()
    if (args.log_dir is None):
        log_dir = osp.join(default_log_dir, args.exp_name)
    else:
        log_dir = args.log_dir
    # All output files live under log_dir.
    tabular_log_file = osp.join(log_dir, args.tabular_log_file)
    text_log_file = osp.join(log_dir, args.text_log_file)
    params_log_file = osp.join(log_dir, args.params_log_file)
    tensorboard_log_dir = osp.join(log_dir, args.tensorboard_log_dir)
    checkpoint_dir = osp.join(log_dir, args.checkpoint_dir)
    obs_dir = osp.join(log_dir, args.obs_dir)
    # Variant data arrives base64-encoded and pickled on the command line.
    if (args.variant_data is not None):
        variant_data = pickle.loads(base64.b64decode(args.variant_data))
        variant_log_file = osp.join(log_dir, args.variant_log_file)
        logger.log_variant(variant_log_file, variant_data)
    else:
        variant_data = None
    if (not args.use_cloudpickle):
        logger.log_parameters_lite(params_log_file, args)
    # Configure the global logger; previous snapshot settings are saved so
    # they can be restored at the end of the run.
    logger.add_text_output(text_log_file)
    logger.add_tabular_output(tabular_log_file)
    logger.set_tensorboard_dir(tensorboard_log_dir)
    logger.set_checkpoint_dir(checkpoint_dir)
    logger.set_obs_dir(obs_dir)
    prev_snapshot_dir = logger.get_snapshot_dir()
    prev_mode = logger.get_snapshot_mode()
    logger.set_snapshot_dir(log_dir)
    logger.set_snapshot_mode(args.snapshot_mode)
    logger.set_snapshot_gap(args.snapshot_gap)
    logger.set_log_tabular_only(args.log_tabular_only)
    logger.set_tensorboard_step_key(args.tensorboard_step_key)
    logger.push_prefix(('[%s] ' % args.exp_name))
    # Record provenance: git state and host/session info.
    git_commit = get_git_commit_hash()
    logger.log('Git commit: {}'.format(git_commit))
    git_diff_file_path = osp.join(log_dir, 'git_diff_{}.patch'.format(git_commit))
    save_git_diff_to_file(git_diff_file_path)
    logger.log('hostname: {}, pid: {}, tmux session: {}'.format(socket.gethostname(), os.getpid(), get_tmux_session_name()))
    # Three execution modes: resume from snapshot, cloudpickled callable,
    # or legacy pickled stub-call data.
    if (args.resume_from is not None):
        data = joblib.load(args.resume_from)
        assert ('algo' in data)
        algo = data['algo']
        algo.train()
    elif args.use_cloudpickle:
        import cloudpickle
        method_call = cloudpickle.loads(base64.b64decode(args.args_data))
        method_call(variant_data)
    else:
        data = pickle.loads(base64.b64decode(args.args_data))
        maybe_iter = concretize(data)
        if is_iterable(maybe_iter):
            for _ in maybe_iter:
                pass
    # Restore the logger to its pre-run state.
    logger.set_snapshot_mode(prev_mode)
    logger.set_snapshot_dir(prev_snapshot_dir)
    logger.remove_tabular_output(tabular_log_file)
    logger.remove_text_output(text_log_file)
    logger.pop_prefix()
class ShiftCipher(SymmetricKeyCipher):
    """Shift (Caesar) cipher over a string monoid: each symbol's alphabet
    index is shifted by the key, modulo the alphabet size."""

    def __init__(self, parent, key):
        SymmetricKeyCipher.__init__(self, parent, key)

    def __eq__(self, other):
        return ((type(self) is type(other)) and (self.parent() == other.parent()) and (self.key() == other.key()))

    def __call__(self, M):
        """Apply the shift to ``M`` and return the resulting string element.

        Raises TypeError if ``M`` is not an element of this cipher's
        plaintext/ciphertext space.
        """
        dom = self.domain()
        # BUG FIX: the original guard used `and M.parent() == dom`, which
        # could never fire for valid input and let invalid input through.
        # Reject anything that is not a StringMonoidElement of `dom`.
        if (not isinstance(M, StringMonoidElement)) or (M.parent() != dom):
            raise TypeError('Argument M (= %s) must be a string in the plaintext/ciphertext space.' % M)
        from sage.rings.finite_rings.integer_mod import Mod
        A = list(dom.alphabet())
        N = self.domain().ngens()
        K = self.key()
        I = [A.index(str(e)) for e in M]
        # Alphabet symbols are distinct, so A.index(A[x]) == x; shift the
        # indices directly instead of the redundant double lookup.
        return dom([Mod(i + K, N).lift() for i in I])

    def _repr_(self):
        return ('Shift cipher on %s' % self.parent().cipher_domain())
class CFGDenoiser(nn.Module):
    """Wraps a denoising model with two-axis classifier-free guidance
    (separate text and image guidance scales), as in InstructPix2Pix."""

    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale):
        # Triple the batch so one model call covers the three conditioning
        # combinations: (text+image), (image only), (unconditional).
        z3 = einops.repeat(z, '1 ... -> n ...', n=3)
        sigma3 = einops.repeat(sigma, '1 ... -> n ...', n=3)
        crossattn = torch.cat([cond['c_crossattn'][0], uncond['c_crossattn'][0], uncond['c_crossattn'][0]])
        concat = torch.cat([cond['c_concat'][0], cond['c_concat'][0], uncond['c_concat'][0]])
        batched_cond = {'c_crossattn': [crossattn], 'c_concat': [concat]}
        out_cond, out_img_cond, out_uncond = self.inner_model(z3, sigma3, cond=batched_cond).chunk(3)
        # Combine: unconditional base + text guidance + image guidance.
        text_term = text_cfg_scale * (out_cond - out_img_cond)
        image_term = image_cfg_scale * (out_img_cond - out_uncond)
        return out_uncond + text_term + image_term
def run_with_reloader(*args, **kwargs):
    """Compatibility shim: forward to the implementation in ``._reloader``."""
    from ._reloader import run_with_reloader as _impl
    return _impl(*args, **kwargs)
def get_args(**kwargs):
    """Build the run configuration: merge overrides into the defaults,
    resolve paths, and pick the compute device / GPU count."""
    args = objectify(process_paths(update_args(defaults, kwargs)))
    # Default to CPU; upgrade only when a GPU is requested AND present.
    args.computation.device = 'cpu'
    if args.computation.use_gpu:
        if torch.cuda.device_count() > 0:
            print('using gpu')
            args.computation.device = 'cuda'
        else:
            print('no gpu available, defaulting to cpu')
    # Clamp num_gpus to what the machine actually has (None means "all").
    if args.computation.num_gpus is None:
        args.computation.num_gpus = sys.maxsize
    args.computation.num_gpus = min(args.computation.num_gpus, torch.cuda.device_count())
    print('args:')
    print(args)
    return args
def convert_kb_vocab(data_dir, cutoff=2):
    """Merge knowledge-base (celebrity) vocabulary counts into the base
    vocabulary and write the combined forward/inverse dictionaries.

    Reads ``celeb_vocab_stats.pkl`` (a counter) and ``vocab.pkl``
    (``[word->id dict, id->word dict]``) from ``data_dir``; any KB word with
    count >= ``cutoff`` that is not already in the vocabulary gets the next
    free id. Writes ``vocab_with_celeb.pkl`` in the same two-dict format.

    BUG FIXES: pickles were opened in text mode ('r'), which fails on
    Python 3 — now 'rb' with context managers so handles are closed — and
    the Python-2-only ``dict.iteritems()`` is replaced with ``items()``.
    """
    kb_vocab_file = os.path.join(data_dir, 'celeb_vocab_stats.pkl')
    original_vocab_file = os.path.join(data_dir, 'vocab.pkl')
    new_vocab_file = os.path.join(data_dir, 'vocab_with_celeb.pkl')
    with open(kb_vocab_file, 'rb') as f:
        word_counter = pkl.load(f)
    with open(original_vocab_file, 'rb') as f:
        original_vocab_dict = pkl.load(f)
    vocab_count = [x for x in word_counter.most_common() if x[1] >= cutoff]
    vocab_dict = original_vocab_dict[0]
    i = len(vocab_dict)
    print('Original vocab dict size {}'.format(i))
    for word, count in vocab_count:
        if word not in vocab_dict:
            vocab_dict[word] = i
            i += 1
    inverted_vocab_dict = {word_id: word for word, word_id in vocab_dict.items()}
    both_dict = [vocab_dict, inverted_vocab_dict]
    with open(new_vocab_file, 'wb') as f:
        pkl.dump(both_dict, f, protocol=pkl.HIGHEST_PROTOCOL)
    print('New vocab dict size {}'.format(len(vocab_dict)))
def rouge_score(gold: str, pred: str, rouge_type: str, scorer: rouge_scorer.RougeScorer) -> float:
    """Return the F-measure of the given ROUGE variant for (gold, pred)."""
    return scorer.score(gold, pred)[rouge_type].fmeasure
class SawyerCoffeePullEnvV2(SawyerXYZEnv):
    """Sawyer task: pull a mug away from the coffee machine to a goal spot.

    BUG FIX: three decorators evidently lost in extraction are restored:
    ``@property`` on ``model_name`` (it is read as an attribute in
    ``__init__``), ``@property`` on ``_target_site_config``, and
    ``@_assert_task_is_set`` on ``step`` (it survived only as a bare
    ``_assert_task_is_set`` expression statement).
    """

    def __init__(self):
        hand_low = (-0.5, 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.05, 0.7, -0.001)
        obj_high = (0.05, 0.75, +0.001)
        goal_low = (-0.1, 0.55, -0.001)
        goal_high = (0.1, 0.65, +0.001)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {
            'obj_init_pos': np.array([0, 0.75, 0.0]),
            'obj_init_angle': 0.3,
            'hand_init_pos': np.array([0.0, 0.4, 0.2]),
        }
        self.goal = np.array([0.0, 0.6, 0])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_coffee.xml')

    @_assert_task_is_set
    def step(self, action):
        ob = super().step(action)
        reward, reachDist, pullDist = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {
            'reachDist': reachDist,
            'goalDist': pullDist,
            'epRew': reward,
            'pickRew': None,
            'success': float(pullDist <= 0.07),
        }
        return (ob, reward, False, info)

    @property
    def _target_site_config(self):
        return [('mug_goal', self._target_pos)]

    def _get_pos_objects(self):
        return self.get_body_com('obj')

    def _set_obj_xyz(self, pos):
        # Write the mug free-joint position directly into the simulator state.
        qpos = self.data.qpos.flatten()
        qvel = self.data.qvel.flatten()
        qpos[0:3] = pos.copy()
        qvel[9:15] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        self._reset_hand()
        pos_mug_init = self.init_config['obj_init_pos']
        pos_mug_goal = self.goal
        if self.random_init:
            pos_mug_init, pos_mug_goal = np.split(self._get_state_rand_vec(), 2)
            # Resample until the mug and goal are at least 15 cm apart.
            while np.linalg.norm(pos_mug_init[:2] - pos_mug_goal[:2]) < 0.15:
                pos_mug_init, pos_mug_goal = np.split(self._get_state_rand_vec(), 2)
        self._set_obj_xyz(pos_mug_init)
        self.obj_init_pos = pos_mug_init
        # The machine sits a fixed offset behind the mug's initial position.
        pos_machine = pos_mug_init + np.array([0.0, 0.22, 0.0])
        self.sim.model.body_pos[self.model.body_name2id('coffee_machine')] = pos_machine
        self._target_pos = pos_mug_goal
        self.maxPullDist = np.linalg.norm(pos_mug_init[:2] - pos_mug_goal[:2])
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand()
        rightFinger, leftFinger = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = (rightFinger + leftFinger) / 2
        self.reachCompleted = False

    def compute_reward(self, actions, obs):
        """Shaped reward = reach term (get gripper to mug) + pull term
        (move mug toward goal, gated on reach success). Returns
        [reward, reachDist, pullDist]."""
        objPos = obs[3:6]
        rightFinger, leftFinger = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = (rightFinger + leftFinger) / 2
        goal = self._target_pos
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        assert np.all(goal == self._get_site_pos('mug_goal'))
        reachDist = np.linalg.norm(fingerCOM - objPos)
        pullDist = np.linalg.norm(objPos[:2] - goal[:2])
        reachRew = -reachDist
        # xy-plane distance at the hand's initial height.
        reachDistxy = np.linalg.norm(np.concatenate((objPos[:-1], [self.init_fingerCOM[-1]])) - fingerCOM)
        if reachDistxy < 0.05:
            reachRew = -reachDist + 0.1
            if reachDist < 0.05:
                # Small bonus for closing the gripper near the mug.
                reachRew += max(actions[-1], 0) / 50
        else:
            reachRew = -reachDistxy
        if reachDist < 0.05:
            pullRew = (1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(-(pullDist ** 2) / c2) + np.exp(-(pullDist ** 2) / c3)))
            pullRew = max(pullRew, 0)
        else:
            pullRew = 0
        reward = reachRew + pullRew
        return [reward, reachDist, pullDist]
class TuneEvaluatorHoldout(Evaluator):
    """Hyper-parameter tuning evaluator that scores a model on a fixed
    train/test holdout split."""

    kind = 'tune_eval_holdout'

    def __init__(self, train, test, target, per=None, lossf='rmse', context=None):
        # BUG FIX: `context={}` was a shared mutable default argument; use a
        # None sentinel and substitute a fresh dict (same behavior for
        # callers that omitted the argument).
        super().__init__(context={} if context is None else context)
        self.train = load_dataset(train)
        self.test = load_dataset(test)
        self.lossf = get_lossf(lossf)
        self.target = target
        self.per = per

    def _get_config(self):
        """Serializable description of this evaluator's configuration."""
        return {'train': self.train.name, 'test': self.test.name, 'lossf': self.lossf.__name__, 'per': self.per, 'target': self.target}

    def evaluate(self, model):
        """Train on the train split, predict on the test split, and return
        the configured loss."""
        model.train(self.train, target=self.target)
        pred = model.predict(self.test, per=self.per)
        return {'loss': self.lossf(self.test.pp(self.target, per=self.per), pred)}
class TestMetrics(object):
    """Unit tests for the metrics support/error helpers.

    BUG FIX: the parametrization lines survived only as bare
    ``.parametrize(...)`` statements (invalid syntax); the stripped
    ``@pytest.mark`` decorator prefix is restored. Requires ``pytest`` to be
    imported at module level.
    """

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), (6, 6, 6)), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), (4, 2, 2)), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]), (4, 2, 2))])
    def test__nonzero_intersection(self, m, m_hat, expected):
        result = metrics._nonzero_intersection(m, m_hat)
        print(result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 0), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 0), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]), 1)])
    def test_support_false_positive_count(self, m, m_hat, expected):
        result = metrics.support_false_positive_count(m, m_hat)
        print(result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 0), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 1), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), 0)])
    def test_support_false_negative_count(self, m, m_hat, expected):
        result = metrics.support_false_negative_count(m, m_hat)
        print(result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 0), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 1), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]), 2)])
    def test_support_difference_count(self, m, m_hat, expected):
        result = metrics.support_difference_count(m, m_hat)
        print(result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 1), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 0), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]), 0)])
    def test_has_exact_support(self, m, m_hat, expected):
        result = metrics.has_exact_support(m, m_hat)
        print(result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 1), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 1), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]), 0)])
    def test_has_approx_support(self, m, m_hat, expected):
        result = metrics.has_approx_support(m, m_hat, 0.5)
        print(m, m_hat, result)
        assert (result == expected)

    @pytest.mark.parametrize('m, m_hat, expected', [(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), 0), (np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]), np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]), 3.0), (np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]), np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]), 3.)])
    def test_error_fro(self, m, m_hat, expected):
        result = metrics.error_fro(m, m_hat)
        print(m, m_hat, result)
        np.testing.assert_array_almost_equal(result, expected)
def wer(reference, hypothesis, ignore_case=False, delimiter=' '):
    """Word error rate: word-level edit distance divided by the reference
    length. Raises ValueError for an empty reference."""
    edit_distance, ref_len = word_errors(reference, hypothesis, ignore_case, delimiter)
    if ref_len == 0:
        raise ValueError("Reference's word number should be greater than 0.")
    return float(edit_distance) / ref_len
def main(args):
    """Fairseq-style training driver: builds the task/model/criterion,
    restores any checkpoint, then trains epoch by epoch until the LR decays
    below the floor or the update budget is exhausted.
    """
    # Keep a dummy-batch size even when max_tokens is defaulted below.
    dummy_batch_size = args.max_tokens
    if (args.max_tokens is None):
        args.max_tokens = 4096
        dummy_batch_size = 1024
    print(args)
    if (not torch.cuda.is_available()):
        raise NotImplementedError('Training on CPU is not supported')
    torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    # Task owns the datasets; load train plus every validation subset.
    task = tasks.setup_task(args)
    load_dataset_splits(task, (['train'] + args.valid_subset.split(',')))
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {}'.format(sum((p.numel() for p in model.parameters()))))
    max_positions = utils.resolve_max_positions(task.max_positions(), model.max_positions())
    # Dummy batch is used to warm up / size-check the trainer.
    dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
    trainer = Trainer(args, task, model, criterion, dummy_batch)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    epoch_itr = task.get_batch_iterator(dataset=task.dataset(args.train_subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ignore_invalid_inputs=True, required_batch_size_multiple=8, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank)
    # If no checkpoint was restored, prime the trainer with a dummy step.
    if (not load_checkpoint(args, trainer, epoch_itr)):
        trainer.dummy_train_step([dummy_batch])
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    # Main loop: one epoch per iteration; validation/saving on their
    # configured intervals; LR schedule driven by the first valid loss.
    while ((lr > args.min_lr) and (trainer.get_num_updates() < max_update)):
        train(args, trainer, task, epoch_itr)
        if ((epoch_itr.epoch % args.validate_interval) == 0):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        if ((epoch_itr.epoch % args.save_interval) == 0):
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def clean_lv_pvn(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean a column of Latvian personal identity numbers (personas kods).

    Adds a ``<column>_clean`` column with each value reformatted per
    ``output_format``; with ``inplace=True`` the original column is replaced.
    ``errors`` is forwarded to the per-value formatter; ``progress`` toggles
    the dask progress bar.
    """
    if output_format not in {'compact', 'standard', 'birthdate'}:
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact", "standard" or "birthdate".')
    df = to_dask(df)
    # One result tuple per row; meta=object since the tuple shape is opaque to dask.
    df['clean_code_tup'] = df[column].map_partitions(
        lambda srs: [_format(x, output_format, errors) for x in srs], meta=object)
    # First tuple element is the cleaned value; expose it as <column>_clean.
    df = (
        df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
          .rename(columns={'_temp_': f'{column}_clean'})
          .drop(columns=['clean_code_tup'])
    )
    if inplace:
        # Overwrite the source column, then relabel it as the cleaned one.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean').rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()
    return df
def load_test_files(root_path, cfg):
    """Load test feature arrays and integer speaker labels.

    Reads each ``.npy`` file listed under ``cfg['test']['wav_files']`` (relative
    to *root_path*), transposes it, and maps each speaker id string in
    ``cfg['test']['spk_ids']`` to a dense integer label in first-seen order.

    Returns a tuple ``(X, Y)`` of transposed arrays and integer labels.
    """
    spk2idx = {}
    X, Y = [], []
    for fname, speaker in zip(cfg['test']['wav_files'], cfg['test']['spk_ids']):
        feats = np.load(os.path.join(root_path, fname))
        # Assign the next free integer the first time a speaker appears.
        if speaker not in spk2idx:
            spk2idx[speaker] = len(spk2idx)
        X.append(feats.T)
        Y.append(spk2idx[speaker])
    return (X, Y)
class Tag(object):
    """A wheel/interpreter tag triple, e.g. ``cp39-cp39-linux_x86_64``.

    Components are normalized to lower case (tags are case-insensitive per
    PEP 425). Instances are hashable and compare equal when all three
    components match.
    """

    __slots__ = ['_interpreter', '_abi', '_platform']

    def __init__(self, interpreter, abi, platform):
        self._interpreter = interpreter.lower()
        self._abi = abi.lower()
        self._platform = platform.lower()

    # BUG FIX: these accessors were plain methods, but __eq__ compared
    # ``self.platform == other.platform`` etc. -- i.e. it compared *bound
    # methods*, which are never equal across distinct instances (and bound-
    # method comparison recurses into __eq__ on the instances). Two equal
    # tags therefore never compared equal. Restoring @property, as in the
    # upstream ``packaging.tags.Tag``, makes the comparison compare strings.
    @property
    def interpreter(self):
        """Lower-cased interpreter component, e.g. ``cp39``."""
        return self._interpreter

    @property
    def abi(self):
        """Lower-cased ABI component."""
        return self._abi

    @property
    def platform(self):
        """Lower-cased platform component."""
        return self._platform

    def __eq__(self, other):
        if (not isinstance(other, Tag)):
            return NotImplemented
        return ((self.platform == other.platform) and (self.abi == other.abi) and (self.interpreter == other.interpreter))

    def __hash__(self):
        # Consistent with __eq__: equal tags hash equally.
        return hash((self._interpreter, self._abi, self._platform))

    def __str__(self):
        return '{}-{}-{}'.format(self._interpreter, self._abi, self._platform)

    def __repr__(self):
        return '<{self} {self_id}>'.format(self=self, self_id=id(self))
# NOTE(review): "_grad()" looks like a mangled decorator -- presumably
# "@torch.no_grad()" in the original source; confirm against upstream CVNet.
_grad()
def test_model(model, data_dir, dataset_list, scale_list, topk_list):
    """Evaluate CVNet on revisited Oxford/Paris benchmarks.

    For each dataset: extract multi-scale global descriptors for queries and
    database images, rank by dot-product similarity, report revisited-protocol
    mAP (Easy/Medium/Hard), then re-rank the per-query top-k candidates with
    the pairwise CVNet-Rerank score head and report the re-ranked mAP.
    """
    # Disable the cudnn autotuner while feeding variable-sized inputs.
    torch.backends.cudnn.benchmark = False
    model.eval()
    for dataset in dataset_list:
        text = '>> {}: Global Retrieval for scale {} with CVNet-Global'.format(dataset, str(scale_list))
        print(text)
        if (dataset == 'roxford5k'):
            gnd_fn = 'gnd_roxford5k.pkl'
        elif (dataset == 'rparis6k'):
            gnd_fn = 'gnd_rparis6k.pkl'
        else:
            # Only the two revisited benchmarks are supported.
            assert dataset
        print('extract query features')
        Q = extract_feature(model, data_dir, dataset, gnd_fn, 'query', scale_list)
        print('extract database features')
        X = extract_feature(model, data_dir, dataset, gnd_fn, 'db', scale_list)
        cfg = config_gnd(dataset, data_dir)
        print('perform global retrieval')
        # sim[d, q] = <db descriptor d, query descriptor q>; sort descending.
        sim = np.dot(X, Q.T)
        ranks = np.argsort((- sim), axis=0)
        gnd = cfg['gnd']
        ks = [1, 5, 10]
        # Same ranking evaluated under the Easy/Medium/Hard protocols.
        ((mapE, apsE, mprE, prsE), (mapM, apsM, mprM, prsM), (mapH, apsH, mprH, prsH)) = test_revisitop(cfg, ks, [ranks, ranks, ranks])
        print('Global retrieval results: mAP E: {}, M: {}, H: {}'.format(np.around((mapE * 100), decimals=2), np.around((mapM * 100), decimals=2), np.around((mapH * 100), decimals=2)))
        print('>> {}: Reranking results with CVNet-Rerank'.format(dataset))
        # Re-ranking uses single-scale (1.0) images.
        query_dataset = DataSet(data_dir, dataset, gnd_fn, 'query', [1.0])
        db_dataset = DataSet(data_dir, dataset, gnd_fn, 'db', [1.0])
        # Pairwise scores cached across top-k settings, keyed (db index, query index).
        sim_corr_dict = {}
        for topk in topk_list:
            print('current top-k value: ', topk)
            for i in tqdm(range(int(cfg['nq']))):
                im_q = query_dataset.__getitem__(i)[0]
                im_q = torch.from_numpy(im_q).cuda().unsqueeze(0)
                feat_q = model.extract_featuremap(im_q)
                # Per-protocol (E/M/H) count of candidates re-ranked so far.
                rerank_count = np.zeros(3, dtype=np.uint16)
                for j in range(int(cfg['n'])):
                    # Stop once every protocol has its top-k candidates.
                    if ((rerank_count >= topk).sum() == 3):
                        break
                    rank_j = ranks[j][i]
                    if (rank_j in gnd[i]['junk']):
                        # Junk images are ignored by all protocols.
                        continue
                    elif (rank_j in gnd[i]['easy']):
                        # Easy positives count for the Easy and Medium protocols.
                        append_j = np.asarray([True, True, False])
                    elif (rank_j in gnd[i]['hard']):
                        # Hard positives count for the Medium and Hard protocols.
                        append_j = np.asarray([False, True, True])
                    else:
                        append_j = np.asarray([True, True, True])
                    # Only count toward protocols that still need candidates.
                    append_j *= (rerank_count < topk)
                    if (append_j.sum() > 0):
                        im_k = db_dataset.__getitem__(rank_j)[0]
                        im_k = torch.from_numpy(im_k).cuda().unsqueeze(0)
                        feat_k = model.extract_featuremap(im_k)
                        score = model.extract_score_with_featuremap(feat_q, feat_k).cpu()
                        sim_corr_dict[(rank_j, i)] = score
                    rerank_count += append_j
            # Blend global similarity and pairwise re-rank scores 50/50.
            mix_ratio = 0.5
            ranks_corr_list = rerank_ranks_revisitop(cfg, topk, ranks, sim, sim_corr_dict, mix_ratio)
            ((mapE_r, apsE_r, mprE_r, prsE_r), (mapM_r, apsM_r, mprM_r, prsM_r), (mapH_r, apsH_r, mprH_r, prsH_r)) = test_revisitop(cfg, ks, ranks_corr_list)
            print('Reranking results: mAP E: {}, M: {}, H: {}'.format(np.around((mapE_r * 100), decimals=2), np.around((mapM_r * 100), decimals=2), np.around((mapH_r * 100), decimals=2)))
    # Restore the autotuner for subsequent training.
    torch.backends.cudnn.benchmark = True
class TestConjugateGradientOptimizer(TfGraphTestCase):
    """Tests for the conjugate-gradient optimizer utilities."""

    def test_cg(self):
        """cg() solves a 5x5 symmetric positive semi-definite system."""
        base = np.linspace((- np.pi), np.pi, 25).reshape((5, 5))
        # base^T base is symmetric PSD by construction.
        system = base.T.dot(base)
        rhs = np.linspace((- np.pi), np.pi, 5)
        solution = cg(system.dot, rhs, cg_iters=5)
        assert np.allclose(system.dot(solution), rhs)

    def test_pickleable(self):
        """Pickling and restoring the optimizer leaves the loss unchanged."""
        policy = HelperPolicy(n_vars=1)
        param = policy.get_params()[0]
        coeff_val = np.array([5.0], dtype=np.float32)
        coeff = tf.constant(coeff_val)
        loss = coeff * param ** 2
        constraint = (loss, 0.0)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        opt = ConjugateGradientOptimizer()
        opt.update_opt(loss, policy, constraint, [coeff])
        opt.optimize([coeff_val])
        loss_before = opt.loss([coeff_val])
        # Round-trip through pickle, then rebuild the graph-dependent state.
        opt = pickle.loads(pickle.dumps(opt))
        opt.update_opt(loss, policy, constraint, [coeff])
        loss_after = opt.loss([coeff_val])
        assert np.equal(loss_before, loss_after)
class Registry():
    """Process-wide name registry for models, schedulers, runners, paths and
    arbitrary state, keyed by string names (dotted paths for ``state``).

    BUG FIX: every method takes ``cls`` as its first parameter and mutates the
    class-level ``mapping``, but none was decorated with ``@classmethod`` --
    so the documented usage ``Registry.register_path(name, path)`` would bind
    *name* to ``cls``. The decorators are restored here; no call-site change
    is needed.
    """

    # Class-level store shared by all uses of the registry.
    mapping = {'builder_name_mapping': {}, 'task_name_mapping': {}, 'processor_name_mapping': {}, 'model_name_mapping': {}, 'lr_scheduler_name_mapping': {}, 'runner_name_mapping': {}, 'state': {}, 'paths': {}}

    @classmethod
    def register_model(cls, name):
        """Decorator: register a BaseModel subclass under *name*."""
        def wrap(model_cls):
            from codetf.models import BaseModel
            assert issubclass(model_cls, BaseModel), 'All models must inherit BaseModel class'
            if (name in cls.mapping['model_name_mapping']):
                raise KeyError("Name '{}' already registered for {}.".format(name, cls.mapping['model_name_mapping'][name]))
            cls.mapping['model_name_mapping'][name] = model_cls
            return model_cls
        return wrap

    @classmethod
    def register_lr_scheduler(cls, name):
        """Decorator: register an LR-scheduler class under *name*."""
        def wrap(lr_sched_cls):
            if (name in cls.mapping['lr_scheduler_name_mapping']):
                raise KeyError("Name '{}' already registered for {}.".format(name, cls.mapping['lr_scheduler_name_mapping'][name]))
            cls.mapping['lr_scheduler_name_mapping'][name] = lr_sched_cls
            return lr_sched_cls
        return wrap

    @classmethod
    def register_runner(cls, name):
        """Decorator: register a runner class under *name*."""
        def wrap(runner_cls):
            if (name in cls.mapping['runner_name_mapping']):
                raise KeyError("Name '{}' already registered for {}.".format(name, cls.mapping['runner_name_mapping'][name]))
            cls.mapping['runner_name_mapping'][name] = runner_cls
            return runner_cls
        return wrap

    @classmethod
    def register_path(cls, name, path):
        """Register a filesystem path string under *name* (no overwrite)."""
        assert isinstance(path, str), 'All path must be str.'
        if (name in cls.mapping['paths']):
            raise KeyError("Name '{}' already registered.".format(name))
        cls.mapping['paths'][name] = path

    @classmethod
    def register(cls, name, obj):
        """Store *obj* in nested state under the dotted path *name*."""
        path = name.split('.')
        current = cls.mapping['state']
        # Create intermediate dicts along the dotted path.
        for part in path[:(- 1)]:
            if (part not in current):
                current[part] = {}
            current = current[part]
        current[path[(- 1)]] = obj

    @classmethod
    def get_builder_class(cls, name):
        return cls.mapping['builder_name_mapping'].get(name, None)

    @classmethod
    def get_model_class(cls, name):
        return cls.mapping['model_name_mapping'].get(name, None)

    @classmethod
    def get_task_class(cls, name):
        return cls.mapping['task_name_mapping'].get(name, None)

    @classmethod
    def get_processor_class(cls, name):
        return cls.mapping['processor_name_mapping'].get(name, None)

    @classmethod
    def get_lr_scheduler_class(cls, name):
        return cls.mapping['lr_scheduler_name_mapping'].get(name, None)

    @classmethod
    def get_runner_class(cls, name):
        return cls.mapping['runner_name_mapping'].get(name, None)

    @classmethod
    def list_runners(cls):
        return sorted(cls.mapping['runner_name_mapping'].keys())

    @classmethod
    def list_models(cls):
        return sorted(cls.mapping['model_name_mapping'].keys())

    @classmethod
    def list_tasks(cls):
        return sorted(cls.mapping['task_name_mapping'].keys())

    @classmethod
    def list_processors(cls):
        return sorted(cls.mapping['processor_name_mapping'].keys())

    @classmethod
    def list_lr_schedulers(cls):
        return sorted(cls.mapping['lr_scheduler_name_mapping'].keys())

    @classmethod
    def list_datasets(cls):
        return sorted(cls.mapping['builder_name_mapping'].keys())

    @classmethod
    def get_path(cls, name):
        return cls.mapping['paths'].get(name, None)

    @classmethod
    def get(cls, name, default=None, no_warning=False):
        """Look up a dotted-path key in state, returning *default* if absent.

        Emits a warning through the registered 'writer' (when one exists)
        unless *no_warning* is set.
        """
        original_name = name
        name = name.split('.')
        value = cls.mapping['state']
        for subname in name:
            value = value.get(subname, default)
            if (value is default):
                break
        if (('writer' in cls.mapping['state']) and (value == default) and (no_warning is False)):
            cls.mapping['state']['writer'].warning('Key {} is not present in registry, returning default value of {}'.format(original_name, default))
        return value

    @classmethod
    def unregister(cls, name):
        """Remove and return a top-level state entry (None if missing)."""
        return cls.mapping['state'].pop(name, None)
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False):
    """Print *model* with per-module parameter and FLOP shares appended.

    Temporarily monkey-patches each submodule's ``extra_repr`` so that the
    standard ``print(model)`` output includes absolute and percentage
    parameter/FLOP costs (read from the ``__params__`` / ``__flops__``
    attributes set by the profiler), then restores the original reprs.

    IDIOM FIX: the accumulator helpers previously shadowed the builtin
    ``sum``; renamed to ``total``.
    """
    def accumulate_params(self):
        # Leaf (profiled) modules carry __params__; containers sum children.
        if is_supported_instance(self):
            return self.__params__
        total = 0
        for m in self.children():
            total += m.accumulate_params()
        return total

    def accumulate_flops(self):
        # Normalize by the batch counter so the figure is per-batch-unit.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        total = 0
        for m in self.children():
            total += m.accumulate_flops()
        return total

    def flops_repr(self):
        # Extra-repr text: "<params>, <param %>, <flops>, <flop %>, <original repr>".
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([params_to_string(accumulated_num_params, units='M', precision=precision), '{:.3%} Params'.format((accumulated_num_params / total_params)), flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} FLOPs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the helpers to the module instance and swap in the rich repr.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        # Undo the patching so the model is left unchanged.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def load_ops(result_dir):
    """Load forward and backward operator results from *result_dir*.

    Returns ``(fwd_ops, bwd_ops)``: two name-keyed dicts of loaded operators.
    Each operator is loaded against the table built so far (``load_operator``
    receives the partially-filled dict), so a comprehension is not used.
    """
    fwd_ops = {}
    for spec in fwd_operators:
        loaded = load_operator(spec, result_dir, fwd_ops)
        fwd_ops[loaded.name] = loaded
    bwd_ops = {}
    for spec in bwd_operators:
        loaded = load_operator(spec, result_dir, bwd_ops)
        bwd_ops[loaded.name] = loaded
    return (fwd_ops, bwd_ops)
def main(args):
    """Evaluate predictions for each requested task and write per-task
    ``<task>.results.json`` files into the evaluation directory."""
    # These overrides are unused by evaluation; null them out so downstream
    # task construction sees consistent defaults.
    for attr in ('override_context', 'override_question', 'almond_has_multiple_programs',
                 'almond_detokenize_sentence', 'do_alignment'):
        setattr(args, attr, None)
    if args.main_metric_only and args.extra_metrics:
        raise ValueError('Please remove --main_metric_only from your arguments so the requested extra metrics can be shown.')
    set_seed(args)
    args.tasks = list(get_tasks(args.task_names, args).values())
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    # Default the output directory to the prediction file's directory.
    eval_dir = args.eval_dir if args.eval_dir else os.path.dirname(args.pred_file)
    os.makedirs(eval_dir, exist_ok=True)
    tgt_lang = args.pred_tgt_languages[0]
    for task in args.tasks:
        results_path = os.path.join(eval_dir, task.name + '.results.json')
        compute_metrics_on_file(args.pred_file, results_path, task, args, tgt_lang)
# NOTE(review): "_module()" looks like a mangled decorator -- in mmdetection
# this class is registered via "@NECKS.register_module()"; confirm upstream.
_module()
class FPN(nn.Module):
    """Feature Pyramid Network neck (Lin et al., "Feature Pyramid Networks
    for Object Detection").

    Builds a 1x1 lateral conv plus a 3x3 output conv per used backbone level,
    merges levels top-down by upsample-and-add, and optionally appends extra
    output levels via stride-2 convs or max pooling to reach ``num_outs``.
    """

    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so later mutation of the caller's dict cannot change behavior.
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            # Use every backbone level from start_level upward; extra levels allowed.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # Explicit end level: outputs must match the used levels exactly.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # Legacy bool form: translate via extra_convs_on_inputs.
            if extra_convs_on_inputs:
                self.add_extra_convs = 'on_input'
            else:
                self.add_extra_convs = 'on_output'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral projection; norm optionally disabled on laterals.
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
            # 3x3 smoothing conv applied after the top-down merge.
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra stride-2 convs (e.g. P6/P7 in RetinaNet-style configs).
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    def init_weights(self):
        """Xavier-initialize every plain Conv2d in the module tree."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        """Fuse backbone features; returns a tuple of ``num_outs`` maps."""
        assert (len(inputs) == len(self.in_channels))
        # 1x1 laterals for the used backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down pathway: upsample the coarser map and add in place.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] += F.interpolate(laterals[i], **self.upsample_cfg)
            else:
                # Fix the target size explicitly to avoid rounding mismatches.
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg)
        # 3x3 smoothing on each merged level.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs configured: downsample the last output by pooling.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Choose the source of the first extra level per configuration.
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                # Remaining extra levels chain off the previous extra output.
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
def evaluate(iteration):
    """Sample a generator grid image for *iteration* and compute FID.

    Saves a normalized sample grid to ``sample/<name>/<iteration>.png`` and
    returns ``{'fid': <score>}`` computed against the cached real activations.
    """
    (gen_i, gen_j) = args.gen_sample.get(args.image_size, (10, 5))
    # Generate gen_i rows of samples from the fixed noise, without gradients.
    with torch.no_grad():
        samples = [
            G_running_target(fixed_noise[idx].cuda(), step=step, alpha=alpha).cpu()
            for idx in range(gen_i)
        ]
    sample_path = f'sample/{args.name}/{str(iteration).zfill(6)}.png'
    utils.save_image(torch.cat(samples, dim=0), sample_path, nrow=gen_i, normalize=True, range=(-1, 1))
    # FID: compare inception activations of fresh fakes against cached reals.
    (fake_images, fake_acts) = get_fake_images_and_acts(inception, G_running_target, code_size, step, alpha, args.sample_num, batch_size)
    return {'fid': compute_fid(real_acts, fake_acts)}
def test_transform_for_loop_multi(simple_module, tracer_mock):
    """Instrumenting a nested loop registers three predicates and records the
    expected sequence of boolean branch executions."""
    instr = BranchCoverageInstrumentation(tracer_mock)
    transformer = InstrumentationTransformer(tracer_mock, [instr])
    simple_module.multi_loop.__code__ = transformer.instrument_module(simple_module.multi_loop.__code__)
    assert simple_module.multi_loop(2) == 4
    assert tracer_mock.register_predicate.call_count == 3
    # The inner pattern fires twice (outer loop of 2), then the exit events.
    expected = ([call(True, 0), call(True, 1), call(True, 1), call(False, 1)] * 2) + [call(False, 0), call(False, 2)]
    assert tracer_mock.executed_bool_predicate.call_count == len(expected)
    tracer_mock.executed_bool_predicate.assert_has_calls(expected)
class VQModel(pl.LightningModule):
    """VQGAN-style vector-quantized autoencoder (Lightning module).

    Pipeline: Encoder -> quant_conv -> (optional) VectorQuantizer ->
    post_quant_conv -> Decoder. Trained adversarially with two optimizers:
    index 0 updates the autoencoder, index 1 the discriminator held by
    ``self.loss``. ``use_quantize``/``freeze_decoder`` allow running without
    quantization or with a frozen decoder side.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, remap=None, sane_index_shape=False, use_quantize=True, freeze_decoder=False, ckpt_quantize=None):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.use_quantize = use_quantize
        self.freeze_decoder = freeze_decoder
        if self.use_quantize:
            self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
            if self.freeze_decoder:
                # With a frozen decoder, load pretrained codebook weights.
                checkpoint_quantize = torch.load(ckpt_quantize)['state_dict']
                load_model(self.quantize, checkpoint_quantize, 'quantize')
        # 1x1 convs mapping encoder output <-> codebook embedding dimension.
        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.image_key = image_key
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            # Fixed random projection used by to_rgb() for segmentation inputs.
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load a (possibly partial) state dict, dropping ignored key prefixes."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        # strict=False tolerates missing/unexpected keys after filtering.
        self.load_state_dict(sd, strict=False)
        print(f'Restored from {path}')

    def encode(self, x):
        """Return (quantized latent, embedding loss, info); with quantization
        disabled, returns the raw projected latent and two Nones."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if self.use_quantize:
            (quant, emb_loss, info) = self.quantize(h)
            return (quant, emb_loss, info)
        else:
            return (h, None, None)

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input):
        (quant, diff, _) = self.encode(input)
        dec = self.decode(quant)
        return (dec, diff)

    def get_input(self, batch, k):
        # Batch images arrive channels-last; convert to (B, C, H, W) float.
        x = batch[k]
        if (len(x.shape) == 3):
            x = x[(..., None)]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()

    def training_step(self, batch, batch_idx, optimizer_idx):
        # NOTE(review): re-enabling the last-layer grad every step presumably
        # counteracts freezing applied elsewhere; confirm intent.
        self.decoder.conv_out.weight.requires_grad = True
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        if (optimizer_idx == 0):
            # Autoencoder (generator) update.
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if (optimizer_idx == 1):
            # Discriminator update.
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        self.decoder.conv_out.weight.requires_grad = True
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        # Evaluate both loss branches (0 = autoencoder, 1 = discriminator).
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
        rec_loss = log_dict_ae['val/rec_loss']
        self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        # Encoder + pre-quant projection always train; decoder/post-quant and
        # the codebook join depending on freeze_decoder / use_quantize.
        parameter_list = (list(self.encoder.parameters()) + list(self.quant_conv.parameters()))
        if (not self.freeze_decoder):
            parameter_list = ((parameter_list + list(self.decoder.parameters())) + list(self.post_quant_conv.parameters()))
        if self.use_quantize:
            parameter_list = (parameter_list + list(self.quantize.parameters()))
        opt_ae = torch.optim.Adam(parameter_list, lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        # Used by the loss to adaptively weight the adversarial term.
        return self.decoder.conv_out.weight

    def log_images(self, batch, **kwargs):
        """Return input/reconstruction pairs for the image logger."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        (xrec, _) = self(x)
        if (x.shape[1] > 3):
            # Segmentation maps: project to RGB for visualization.
            assert (xrec.shape[1] > 3)
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log['inputs'] = x
        log['reconstructions'] = xrec
        return log

    def to_rgb(self, x):
        """Project a one-hot segmentation tensor to a [-1, 1] RGB image."""
        assert (self.image_key == 'segmentation')
        if (not hasattr(self, 'colorize')):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        # Min-max rescale to [-1, 1].
        x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
        return x
class FiniteWords(AbstractLanguage):
    """The language of all finite words over a given alphabet (Sage).

    Acts as a parent/factory: calling ``self(data, ...)`` wraps *data* in the
    most appropriate ``FiniteWord_*`` element class for its datatype, with an
    optimized byte-backed 'char' representation for small integer alphabets.
    """

    def cardinality(self):
        """Number of finite words: 1 for an empty alphabet (only the empty
        word), +Infinity otherwise."""
        if (not self.alphabet()):
            return ZZ.one()
        return Infinity

    def __hash__(self):
        # Mix in a tag so FiniteWords and InfiniteWords over the same
        # alphabet hash differently.
        return (hash(self.alphabet()) ^ hash('finite words'))

    # NOTE(review): "_method" looks like a mangled decorator (presumably
    # "@cached_method" in the original Sage source); confirm upstream.
    _method
    def shift(self):
        """Return the parent of infinite words over the same alphabet."""
        return InfiniteWords(self.alphabet())

    def factors(self):
        """Factors of finite words are finite words: return self."""
        return self

    # NOTE(review): "_attribute" looks like a mangled decorator (presumably
    # "@lazy_attribute"); confirm upstream.
    _attribute
    def _element_classes(self):
        """Map datatype names to concrete element classes.

        Enables the compact 'char' class only when the alphabet has at most
        256 letters, all ints in [0, 256), already sorted consistently.
        """
        import sage.combinat.words.word as word
        classes = {'list': word.FiniteWord_list, 'str': word.FiniteWord_str, 'tuple': word.FiniteWord_tuple, 'callable_with_caching': word.FiniteWord_callable_with_caching, 'callable': word.FiniteWord_callable, 'iter_with_caching': word.FiniteWord_iter_with_caching, 'iter': word.FiniteWord_iter}
        if ((self.alphabet().cardinality() <= 256) and all(((isinstance(i, (int, Integer)) and (0 <= i < 256)) for i in self.alphabet()))):
            L = self.alphabet().list()
            key = self.sortkey_letters
            # Letters must be sorted both naturally and by the sort key.
            if (all(((L[i] < L[(i + 1)]) for i in range((len(L) - 1)))) and all(((key(L[i]) < key(L[(i + 1)])) for i in range((len(L) - 1))))):
                classes['char'] = word.FiniteWord_char
        return classes

    def _word_from_word(self, data):
        """Re-wrap an existing word element into this parent, picking the
        element class matching its underlying datatype."""
        if ((data.parent() is self) or (data.parent() == self)):
            return data
        from sage.combinat.words.word_char import WordDatatype_char
        if isinstance(data, WordDatatype_char):
            data = list(data)
            if ('char' in self._element_classes):
                return self._element_classes['char'](self, data)
            else:
                return self._element_classes['list'](self, data)
        from sage.combinat.words.word_datatypes import WordDatatype_str, WordDatatype_list, WordDatatype_tuple
        if isinstance(data, WordDatatype_str):
            return self._element_classes['str'](self, data._data)
        if isinstance(data, WordDatatype_tuple):
            return self._element_classes['tuple'](self, data._data)
        if isinstance(data, WordDatatype_list):
            return self._element_classes['list'](self, data._data)
        from sage.combinat.words.word_infinite_datatypes import WordDatatype_callable, WordDatatype_iter
        if isinstance(data, WordDatatype_callable):
            length = data.length()
            data = data._func
            # caching=False: the source word already owns any cache.
            return self._word_from_callable(data, length, caching=False)
        if isinstance(data, WordDatatype_iter):
            length = data.length()
            data = iter(data)
            return self._word_from_iter(data, length, caching=False)
        raise TypeError('any instance of Word_class must be an instance of WordDatatype')

    def _word_from_callable(self, data, length, caching=True):
        """Build a word from a callable of known nonnegative length."""
        wc = ('_with_caching' if caching else '')
        if ((length not in ZZ) or (length < 0)):
            raise ValueError(('not a correct value for length (%s)' % length))
        return self._element_classes[('callable' + wc)](self, data, length)

    def _word_from_iter(self, data, length=None, caching=True):
        """Build a word from an iterator; length defaults to 'finite'."""
        wc = ('_with_caching' if caching else '')
        if ((length is None) or (length == 'finite')):
            length = 'finite'
        elif ((length not in ZZ) or (length < 0)):
            raise ValueError(('not a correct value for length (%s)' % length))
        return self._element_classes[('iter' + wc)](self, data, length)

    def __call__(self, data=None, length=None, datatype=None, caching=True, check=True):
        """Construct a word from *data*, guessing the datatype unless given.

        With an explicit *datatype*, dispatch directly; otherwise prefer the
        compact 'char' class when available, then dispatch on the Python type
        of *data*. ``check=True`` validates letters against the alphabet.
        """
        if (datatype is not None):
            if (datatype == 'list'):
                w = self._element_classes['list'](self, data)
            elif (datatype == 'char'):
                w = self._element_classes['char'](self, data)
            elif (datatype == 'tuple'):
                w = self._element_classes['tuple'](self, data)
            elif (datatype == 'str'):
                w = self._element_classes['str'](self, data)
            elif (datatype == 'callable'):
                w = self._word_from_callable(data, length, caching)
            elif (datatype == 'iter'):
                w = self._word_from_iter(data, length, caching)
            elif (datatype == 'pickled_function'):
                from sage.misc.fpickle import unpickle_function
                data = unpickle_function(data)
                w = self._word_from_callable(data, length, caching)
            else:
                raise ValueError('unknown datatype (={})'.format(datatype))
        elif ('char' in self._element_classes):
            # Small integer alphabet: normalize any input to a list of letters.
            if (data is None):
                data = []
            elif callable(data):
                data = [data(i) for i in range(length)]
            elif (not isinstance(data, (tuple, list))):
                data = list(data)
            w = self._element_classes['char'](self, data)
        elif isinstance(data, list):
            w = self._element_classes['list'](self, data)
        elif (data is None):
            w = self._element_classes['list'](self, [])
        elif isinstance(data, str):
            w = self._element_classes['str'](self, data)
        elif isinstance(data, tuple):
            w = self._element_classes['tuple'](self, data)
        elif isinstance(data, (CombinatorialObject, ClonableElement)):
            w = self._element_classes['list'](self, list(data))
        elif callable(data):
            w = self._word_from_callable(data, length, caching)
        elif isinstance(data, Iterable):
            from sage.combinat.words.abstract_word import Word_class
            if isinstance(data, Word_class):
                w = self._word_from_word(data)
            else:
                w = self._word_from_iter(data, length, caching)
        else:
            raise ValueError(('cannot guess a datatype from data (=%s); please specify one' % data))
        if check:
            self._check(w)
        return w

    def _repr_(self):
        return 'Finite words over {!r}'.format(self.alphabet())

    def _an_element_(self):
        """Return a sample word (used by Sage's generic test framework)."""
        try:
            some_letters = list(self.alphabet().some_elements())
        except Exception:
            # Alphabet cannot produce sample letters: fall back to the empty word.
            return self([])
        if (len(some_letters) == 1):
            return self(([some_letters[0]] * 3))
        else:
            (a, b) = some_letters[:2]
            return self([b, a, b])

    def iterate_by_length(self, l=1):
        """Iterate over all words of length *l* in product order."""
        if (not isinstance(l, (int, Integer))):
            raise TypeError(('the parameter l (=%r) must be an integer' % l))
        cls = self._element_classes['tuple']
        for w in itertools.product(self.alphabet(), repeat=l):
            (yield cls(self, w))

    def __iter__(self):
        # Enumerate by increasing length: all length-0 words, then length-1, ...
        for l in itertools.count():
            (yield from self.iterate_by_length(l))

    def __contains__(self, x):
        from sage.combinat.words.finite_word import FiniteWord_class
        return (isinstance(x, FiniteWord_class) and (x.parent().alphabet() == self.alphabet()))

    def random_element(self, length=None, *args, **kwds):
        """Return a random word; length defaults to a random value in [0, 10)."""
        if (length is None):
            length = ZZ.random_element(0, 10)
        return self([self.alphabet().random_element(*args, **kwds) for x in range(length)])

    def iter_morphisms(self, arg=None, codomain=None, min_length=1):
        """Iterate over morphisms from this parent into *codomain*.

        *arg* constrains the image lengths: None for all compositions with
        parts >= min_length, a tuple (a, b) to bound the total length in
        [a, b), or an explicit list of per-letter lengths.
        """
        n = self.alphabet().cardinality()
        if (min_length < 0):
            min_length = 0
        if (arg is None):
            from sage.combinat.integer_lists.nn import IntegerListsNN
            compositions = IntegerListsNN(length=n, min_part=min_length)
        elif isinstance(arg, tuple):
            from sage.combinat.integer_lists import IntegerListsLex
            (a, b) = arg
            compositions = IntegerListsLex(min_sum=a, max_sum=(b - 1), length=n, min_part=min_length)
        else:
            arg = list(arg)
            if ((not (len(arg) == n)) or (not all((isinstance(a, (int, Integer)) for a in arg)))):
                raise TypeError(('arg (=%s) must be an iterable of %s integers' % (arg, n)))
            compositions = [arg]
        if (codomain is None):
            codomain = self
        elif isinstance(codomain, FiniteOrInfiniteWords):
            codomain = codomain.finite_words()
        elif (not isinstance(codomain, FiniteWords)):
            raise TypeError(('codomain (=%s) must be an instance of FiniteWords' % codomain))
        from sage.combinat.words.morphism import WordMorphism
        for composition in compositions:
            # Turn the composition into cumulative cut positions of one big word.
            cuts = ([0] + list(composition))
            for i in range(1, len(cuts)):
                cuts[i] += cuts[(i - 1)]
            s = cuts[(- 1)]
            # Every word of total length s, sliced at the cuts, gives one morphism.
            for big_word in codomain.iterate_by_length(s):
                d = {}
                i = 0
                for a in self.alphabet():
                    d[a] = big_word[cuts[i]:cuts[(i + 1)]]
                    i += 1
                (yield WordMorphism(d, codomain=codomain))
# NOTE(review): "_test()" looks like a mangled decorator -- probably
# "@fpga_test()" from dace.fpga_testing in the original source; confirm.
_test()
def test_constant_type_inference_fpga():
    """FPGA regression test: SDFG constants keep their types through codegen.

    Registers an array and a scalar constant, runs the SDFG, and checks the
    output equals the elementwise sum within a normalized tolerance.
    """
    sdfg = make_sdfg()
    sdfg.add_constant('constant_array', CONSTANT_ARRAY)
    sdfg.add_constant('constant_value', CONSTANT_VALUE)
    out = dace.ndarray([CONSTANT_ARRAY.size], dtype=dace.float32)
    sdfg(N=CONSTANT_ARRAY.size, output=out)
    # Reference result: array plus scalar, elementwise.
    ref = (CONSTANT_ARRAY + CONSTANT_VALUE)
    # Relative error normalized by the number of elements.
    diff = (np.linalg.norm((ref - out)) / CONSTANT_ARRAY.size)
    assert (diff <= 1e-05)
    return sdfg
(unsafe_hash=True)
_properties
class DeadDataflowElimination(ppl.Pass):
CATEGORY: str = 'Simplification'
skip_library_nodes = properties.Property(dtype=bool, default=False, desc='If True, does not remove library nodes if their results are unused. Otherwise removes library nodes without side effects.')
remove_persistent_memory = properties.Property(dtype=bool, default=False, desc='If True, marks code with Persistent allocation lifetime as dead')
def modifies(self) -> ppl.Modifies:
return ((ppl.Modifies.Nodes | ppl.Modifies.Edges) | ppl.Modifies.Descriptors)
def should_reapply(self, modified: ppl.Modifies) -> bool:
return (modified & ((ppl.Modifies.Nodes | ppl.Modifies.Edges) | ppl.Modifies.States))
def depends_on(self) -> Set[Type[ppl.Pass]]:
return {ap.StateReachability, ap.AccessSets}
def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[(str, Any)]) -> Optional[Dict[(SDFGState, Set[str])]]:
reachable: Dict[(SDFGState, Set[SDFGState])] = pipeline_results['StateReachability'][sdfg.sdfg_id]
access_sets: Dict[(SDFGState, Tuple[(Set[str], Set[str])])] = pipeline_results['AccessSets'][sdfg.sdfg_id]
result: Dict[(SDFGState, Set[str])] = defaultdict(set)
try:
state_order = list(cfg.stateorder_topological_sort(sdfg))
except KeyError:
return None
for state in reversed(state_order):
writes = access_sets[state][1]
descendants = reachable[state]
descendant_reads = set().union(*(access_sets[succ][0] for succ in descendants))
no_longer_used: Set[str] = set((data for data in writes if (data not in descendant_reads)))
dead_nodes: List[nodes.Node] = []
for node in sdutil.dfs_topological_sort(state, reverse=True):
if self._is_node_dead(node, sdfg, state, dead_nodes, no_longer_used, access_sets[state]):
dead_nodes.append(node)
live_nodes = set()
for node in dead_nodes:
if (isinstance(node, nodes.ExitNode) and (state.entry_node(node) not in dead_nodes)):
live_nodes.add(node)
dead_nodes = dtypes.deduplicate([n for n in dead_nodes if (n not in live_nodes)])
if (not dead_nodes):
continue
scopes_to_reconnect: Set[nodes.Node] = set()
for node in state.nodes():
if (isinstance(node, nodes.ExitNode) and (node not in dead_nodes)):
if any(((n in dead_nodes) for n in state.predecessors(node))):
scopes_to_reconnect.add(node)
if scopes_to_reconnect:
schildren = state.scope_children()
for exit_node in scopes_to_reconnect:
entry_node = state.entry_node(exit_node)
for node in schildren[entry_node]:
if (node is exit_node):
continue
if isinstance(node, nodes.EntryNode):
node = state.exit_node(node)
if all(((succ in dead_nodes) for succ in state.successors(node))):
state.add_nedge(node, exit_node, Memlet())
predecessor_nsdfgs: Dict[(nodes.NestedSDFG, Set[str])] = defaultdict(set)
for node in dead_nodes:
try:
for e in state.in_edges(node):
mtree = state.memlet_tree(e)
for leaf in mtree.leaves():
if isinstance(leaf.src, nodes.NestedSDFG):
if (not leaf.data.is_empty()):
predecessor_nsdfgs[leaf.src].add(leaf.src_conn)
elif (isinstance(leaf.src, nodes.Tasklet) and (leaf.src.code.language != dtypes.Language.Python)):
if (leaf.src.code.language == dtypes.Language.CPP):
ctype = infer_types.infer_out_connector_type(sdfg, state, leaf.src, leaf.src_conn)
if (ctype is None):
raise NotImplementedError(f'Cannot eliminate dead connector "{leaf.src_conn}" on tasklet due to connector type inference failure.')
leaf.src.code.code = (f'''{ctype.as_arg(leaf.src_conn)};
''' + leaf.src.code.code)
else:
raise NotImplementedError(f'Cannot eliminate dead connector "{leaf.src_conn}" on tasklet due to its code language.')
state.remove_memlet_path(leaf)
state.remove_node(node)
except KeyError:
continue
result[state].update(dead_nodes)
access_nodes = set(state.data_nodes())
for node in access_nodes:
if (state.degree(node) == 0):
state.remove_node(node)
result[state].add(node)
for (node, dead_conns) in predecessor_nsdfgs.items():
for conn in dead_conns:
if (conn not in node.in_connectors):
node.sdfg.arrays[conn].transient = True
remaining_access_nodes = set((n for n in (access_nodes - result[state]) if (state.out_degree(n) > 0)))
removed_data_containers = set((n.data for n in result[state] if (isinstance(n, nodes.AccessNode) and (n not in remaining_access_nodes))))
access_sets[state] = ((access_sets[state][0] - removed_data_containers), access_sets[state][1])
return (result or None)
def report(self, pass_retval: Dict[(SDFGState, Set[str])]) -> str:
    """Summarize the pass result: how many nodes were eliminated in how many states."""
    total_removed = sum(len(removed) for removed in pass_retval.values())
    return f'Eliminated {total_removed} nodes in {len(pass_retval)} states: {pass_retval}'
def _is_node_dead(self, node: nodes.Node, sdfg: SDFG, state: SDFGState, dead_nodes: Set[nodes.Node], no_longer_used: Set[str], access_set: Tuple[(Set[str], Set[str])]) -> bool:
    """Decide whether ``node`` may be removed from ``state``.

    A node is considered dead when all of its successors are already dead
    and removing it cannot change observable behavior (no side effects,
    not protected, data not visible or needed elsewhere).

    :param node: Candidate node.
    :param sdfg: SDFG containing the state.
    :param state: State containing the node.
    :param dead_nodes: Nodes already classified as dead in this state.
    :param no_longer_used: Names of data containers unused from here on.
    :param access_set: (read-set, write-set) of data names for this state.
    :return: True if the node can be safely removed.
    """
    # Any live successor keeps this node alive.
    if any(((succ not in dead_nodes) for succ in state.successors(node))):
        return False
    if isinstance(node, nodes.LibraryNode):
        if self.skip_library_nodes:
            return False
        # Library nodes may carry hidden side effects.
        return (not node.has_side_effects)
    elif isinstance(node, nodes.Tasklet):
        return (not node.has_side_effects(sdfg))
    elif isinstance(node, nodes.AccessNode):
        if (node.data in PROTECTED_NAMES):
            return False
        desc = sdfg.arrays[node.data]
        # Non-transient data is externally visible and must be preserved.
        if (not desc.transient):
            return False
        if (not self.remove_persistent_memory):
            # Persistent/external allocations outlive the SDFG invocation.
            if (desc.lifetime in (dtypes.AllocationLifetime.Persistent, dtypes.AllocationLifetime.External)):
                return False
        if (node.data not in no_longer_used):
            return False
        for e in state.in_edges(node):
            # presumably the 'set' connector marks a reference assignment — keep it
            if (e.dst_conn == 'set'):
                return False
            for l in state.memlet_tree(e).leaves():
                if _has_side_effects(l.src, sdfg):
                    return False
                # A nested SDFG / library node that also *reads* the same data
                # has a read-write dependency on this access node.
                if (isinstance(l.src, (nodes.NestedSDFG, nodes.LibraryNode)) and any(((ie.data.data == node.data) for ie in state.in_edges(l.src)))):
                    return False
        # Streams read in this state (first element of the access set) survive.
        if (isinstance(desc, data.Stream) and (node.data in access_set[0])):
            return False
        if isinstance(desc, data.Reference):
            return False
    return True
def _show(image, title):
    """Display *image* in a new Tk toplevel window, optionally titled."""

    class _Viewer(tkinter.Label):
        # Holds a reference to the Tk image on the widget so it is not
        # garbage-collected while being displayed.
        def __init__(self, master, im):
            if im.mode == '1':
                self.image = BitmapImage(im, foreground='white', master=master)
            else:
                self.image = PhotoImage(im, master=master)
            super().__init__(master, image=self.image, bg='black', bd=0)

    if not tkinter._default_root:
        raise OSError('tkinter not initialized')
    window = tkinter.Toplevel()
    if title:
        window.title(title)
    _Viewer(window, image).pack()
def gen_model_1label():
    """Build a two-parameter (theta_a, theta_b) symbolic model with sympy,
    evaluate it on a grid, and solve d(sum)/d(theta_a) = 0.

    Relies on the project helper ``sympy_utils.polynomial_exp``.
    NOTE(review): several intermediates below (s0, sum1_, s1, sum2_, s2 and
    the ``values`` array) are computed but never used — presumably kept as
    derivation scratch work; confirm before deleting (the helper calls are
    assumed side-effect free).
    """
    # Symbolic sizes and parameters.
    na = sympy.Symbol('na', integer=True, positive=True)
    nb = sympy.Symbol('nb', integer=True, positive=True)
    theta_a = sympy.Symbol('theta_a', real=True, nonnegative=True)
    theta_b = sympy.Symbol('theta_b', real=True, nonnegative=True)
    t = sympy.Symbol('t', integer=True, nonnegative=True)
    # Full sum over t of the four-factor term — this is the live expression.
    p1 = sympy.Pow(theta_a, sympy.Min(t, na))
    p2 = sympy.Pow((1 - theta_a), sympy.Max((t - na), 0))
    p3 = sympy.Pow(theta_b, sympy.Min(((na + nb) - t), nb))
    p4 = sympy.Pow((1 - theta_b), sympy.Max((na - t), 0))
    sum_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, 0, (na + nb)))
    # Boundary term t = 0 (unused).
    p1 = 1
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = sympy_utils.polynomial_exp(1, (- theta_b), na, expand=False)
    s0 = (((p1 * p2) * p3) * p4)
    # Partial sum for t in [0, na] (unused).
    p1 = sympy.Pow(theta_a, t)
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = sympy_utils.polynomial_exp(1, (- theta_b), (na - t), expand=False)
    sum1_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, 0, na))
    # Term at t = na (unused).
    p1 = sympy.Pow(theta_a, na)
    p2 = 1
    p3 = sympy.Pow(theta_b, nb)
    p4 = 1
    s1 = (((p1 * p2) * p3) * p4)
    # Partial sum for t in (na, na + nb] (unused).
    p1 = sympy.Pow(theta_a, na)
    p2 = sympy_utils.polynomial_exp(1, (- theta_a), (t - na), expand=False)
    p3 = sympy.Pow(theta_b, ((na + nb) - t))
    p4 = 1
    sum2_ = sympy.Sum((((p1 * p2) * p3) * p4), (t, (na + 1), (na + nb)))
    # Term at t = na + nb (unused).
    p1 = sympy.Pow(theta_a, na)
    p2 = sympy_utils.polynomial_exp(1, (- theta_a), nb, expand=False)
    p3 = 1
    p4 = 1
    s2 = (((p1 * p2) * p3) * p4)
    # Repeated simplification passes, printing each intermediate form.
    for _ in range(6):
        sum_ = sum_.simplify()
        print(sum_)
    # Numerically evaluate on an 11x11 grid for na = nb = 10.
    sum__ = sum_.subs(na, 10).subs(nb, 10)
    xs = ys = numpy.linspace(0, 1.0, num=11)
    # NOTE(review): `values` is allocated but never written — probably meant
    # to receive `value` inside the loop below; confirm intent.
    values = numpy.zeros((len(xs), len(ys)))
    for (ix, x) in enumerate(xs):
        for (iy, y) in enumerate(ys):
            value = sum__.subs(theta_a, x).subs(theta_b, y).doit()
            print(('theta = (%f, %f) -> sum = %s' % (x, y, value)))
    # NOTE(review): the two-parameter tuple is immediately overwritten; only
    # theta_a is actually differentiated/solved for.
    syms = (theta_a, theta_b)
    syms = (theta_a,)
    sum_diff = sum_.diff(*syms)
    print('diff:', sum_diff)
    for _ in range(5):
        sum_diff = sum_diff.simplify()
        print(sum_diff)
    # Stationary points of the sum with respect to theta_a.
    opts = sympy.solve(sum_diff, *syms)
    print('num opts:', len(opts))
    print('opts:', opts)
class CachedBuiltinMethodCallNode(CallNode):
    """Call of a builtin method routed through the cached unbound-method
    call helper in the generated C code."""

    subexprs = ['obj', 'args']
    is_temp = True

    def __init__(self, call_node, obj, method_name, args):
        super(CachedBuiltinMethodCallNode, self).__init__(
            call_node.pos,
            obj=obj,
            method_name=method_name,
            args=args,
            may_return_none=call_node.may_return_none,
            type=call_node.type,
        )

    def may_be_none(self):
        # Use the explicit flag when known, otherwise the generic answer.
        if self.may_return_none is None:
            return ExprNode.may_be_none(self)
        return self.may_return_none

    def generate_result_code(self, code):
        type_code = self.obj.type.cname
        object_code = self.obj.py_result()
        argument_codes = [arg.py_result() for arg in self.args]
        call_code = code.globalstate.cached_unbound_method_call_code(
            object_code, type_code, self.method_name, argument_codes)
        code.putln('%s = %s; %s' % (
            self.result(), call_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
class EfficientNetImageProcessorTester(unittest.TestCase):
    """Configuration holder for EfficientNet image-processor tests.

    Stores the parameters used to build image-processor fixtures; the
    ``parent`` is the test case that owns this helper.  Deliberately does not
    call ``unittest.TestCase.__init__`` (it is used as a plain helper, not as
    a runnable test).
    """

    def __init__(self, parent, batch_size=13, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=None, image_std=None):
        # Bug fix: image_mean/image_std previously used mutable list defaults,
        # which are shared across all instances; default to None and build a
        # fresh list per instance (same default values as before).
        size = size if (size is not None) else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if (image_mean is not None) else [0.5, 0.5, 0.5]
        self.image_std = image_std if (image_std is not None) else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {'image_mean': self.image_mean, 'image_std': self.image_std, 'do_normalize': self.do_normalize, 'do_resize': self.do_resize, 'size': self.size}
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # SWIG-generated attribute-lookup helper for proxy classes.
    # 'thisown' forwards to the ownership flag of the underlying C object.
    if (name == 'thisown'):
        return self.this.own()
    # Registered property getters for the wrapped class.
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        # NOTE(review): `object` has no `__getattr__`, so this call itself
        # raises AttributeError — a long-standing quirk of SWIG-generated
        # code. The net effect (AttributeError) is still the intended one,
        # only the message is misleading. Left unchanged on purpose.
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def check_samplers_2d_target(name, sampler_orig):
    """Smoke-check that a cloned sampler accepts a column-vector (2-D) target."""
    sampler = clone(sampler_orig)
    X, y = sample_dataset_generator()
    # Reshape the flat target into an (n_samples, 1) column vector.
    y = y.reshape(-1, 1)
    sampler.fit_resample(X, y)
class GradientStats(object):
    """Utilities to record and plot per-layer gradient magnitudes.

    All methods are stateless; they are declared static (the original
    definitions took no ``self`` yet lacked the decorator, so calling them on
    an instance would have failed).
    """

    @staticmethod
    def build_gradient_entry(named_parameters):
        """Collect mean/max absolute gradients per non-bias trainable layer.

        :param named_parameters: iterable of (name, parameter) pairs, e.g.
            ``model.named_parameters()``; parameters must have ``.grad`` set.
        :return: dict with 'ave_grads', 'max_grads' and 'layers' lists.
        """
        ave_grads = []
        max_grads = []
        layers = []
        for (n, p) in named_parameters:
            # Only trainable weight tensors; biases are skipped by name.
            if (p.requires_grad and ('bias' not in n)):
                layers.append(n)
                ave_grads.append(p.grad.abs().mean().detach().cpu().numpy())
                max_grads.append(p.grad.abs().max().detach().cpu().numpy())
        return {'ave_grads': ave_grads, 'max_grads': max_grads, 'layers': layers}

    @staticmethod
    def plot_gradient_flow(named_parameters, ax, set_xticks=False, set_ylabels=False, set_title=False):
        """Draw one gradient-flow bar chart (max + mean per layer) on *ax*.

        :param named_parameters: an entry dict produced by
            :meth:`build_gradient_entry` (despite the parameter name).
        """
        ave_grads = named_parameters['ave_grads']
        max_grads = named_parameters['max_grads']
        layers = named_parameters['layers']
        # Shorten layer names for readable x tick labels.
        layers = [layer.replace('weight', '').replace('layer', '').replace('_', '').replace('.', ' ') for layer in layers]
        ax.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')
        ax.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')
        # Zero baseline to make vanishing gradients visible.
        ax.hlines(0, 0, (len(ave_grads) + 1), lw=1.5, color='k')
        if set_xticks:
            ax.set_xticks(range(0, len(ave_grads), 1))
            ax.set_xticklabels(layers, rotation='vertical', fontsize=7)
            ax.set_xlabel('Layers')
        ax.set_xlim(left=0, right=len(ave_grads))
        ax.set_ylim(bottom=(-0.001), top=0.02)
        if set_ylabels:
            ax.set_ylabel('Average gradient')
        ax.grid(True)

    @staticmethod
    def plot_gradient_flow_over_epochs(gradient_stats_entries, output_file_name):
        """Plot a grid of gradient-flow charts (epochs x iterations) to a file.

        :param gradient_stats_entries: list of (epoch, iteration, entry)
            tuples ordered epoch-major, iteration-minor.
        :param output_file_name: path of the PNG written.
        """
        (epoch_number, iteration_number) = (set(), set())
        for (epoch, iteration, _) in gradient_stats_entries:
            epoch_number.add(epoch)
            iteration_number.add(iteration)
        epoch_number = len(epoch_number)
        iteration_number = len(iteration_number)
        # NOTE(review): figsize is (width, height); scaling width by the row
        # count and height by the column count looks swapped — confirm
        # intent before changing, output is still usable.
        (fig, axs) = plt.subplots(epoch_number, iteration_number, figsize=((epoch_number * 8), (iteration_number * 8)), sharey=True, sharex=True)
        k = 0
        for i in trange(epoch_number):
            for j in trange(iteration_number):
                (_, _, gradient_stats_entry) = gradient_stats_entries[k]
                # X tick labels only on the bottom row, y labels on the left column.
                GradientStats.plot_gradient_flow(gradient_stats_entry['model'], axs[i][j], set_xticks=(True if ((i + 1) == epoch_number) else False), set_ylabels=(True if (j == 0) else False))
                k += 1
        ConsoleLogger.status('Saving gradient flow plot...')
        fig.suptitle('Gradient flow', fontsize='x-large')
        fig.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color='b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'], loc='center right', borderaxespad=0.1)
        fig.savefig(output_file_name, bbox_inches='tight', pad_inches=0, dpi=200)
        plt.close(fig)
class C2f():
    """CSP bottleneck block with two convolutions (TF port; ``forward_split``
    is the torch-tensor variant)."""

    def __init__(self, c1: int, c2: int, n: int=1, shortcut: bool=False, name: str='', g: int=1, e: float=0.5):
        # Hidden channel count derived from the expansion ratio.
        self.c = int(c2 * e)
        self.cv1 = Conv(c1, 2 * self.c, 1, 1, name=f'{name}.cv1')
        self.cv2 = Conv((2 + n) * self.c, c2, 1, name=f'{name}.cv2')
        self.m = [
            Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0, name=f'{name}.m.{i}')
            for i in range(n)
        ]

    def __call__(self, x):
        # Split the first conv's output into two branches, then chain each
        # bottleneck off the latest branch and concatenate everything.
        branches = tf.split(self.cv1(x), 2, -1)
        for bottleneck in self.m:
            branches.append(bottleneck(branches[-1]))
        return self.cv2(Concatenate(axis=-1)(branches))

    def forward_split(self, x):
        # Same dataflow as __call__, but using torch split/cat on dim 1.
        parts = list(self.cv1(x).split((self.c, self.c), 1))
        for bottleneck in self.m:
            parts.append(bottleneck(parts[-1]))
        return self.cv2(torch.cat(parts, 1))
def log_details(args):
    """Log every attribute of *args* (e.g. an argparse Namespace), sorted by name."""
    logging.info('')
    logging.info('Arguments received: ')
    logging.info('')
    for key, value in sorted(vars(args).items()):
        logging.info(f'{key:25}: {value}')
    logging.info('\n')
def visualize_strings(texts, language_code, select=None, colors=None):
    """Run a stanza tokenize+NER pipeline for *language_code* and visualize
    the named entities of each text in *texts*."""
    pipeline = stanza.Pipeline(language_code, processors='tokenize,ner')
    for document_text in texts:
        visualize_ner_str(document_text, pipeline, select=select, colors=colors)
class TestCopyRowsToTensor(hu.HypothesisTestCase):
    """Hypothesis tests for the CopyRowsToTensor operator.

    NOTE(review): the decorator lines in the original had lost their
    decorator names (bare ``(input_tensor=..., **hu.gcs_cpu_only)`` and
    ``(deadline=10000)`` lines, which are syntax errors). Restored as
    hypothesis ``@given`` / ``@settings`` following the caffe2
    hypothesis_test_util convention — confirm against upstream.
    """

    @given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
    def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
        # Exercise the op against a pure-NumPy reference implementation.
        dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
        input_tensor = np.array(input_tensor).astype(dtype)
        height = np.shape(input_tensor)[0]
        width = np.shape(input_tensor)[1]
        row = np.random.rand(width).astype(dtype)
        # Random subset of distinct row indices to overwrite.
        indices_lengths = np.random.randint(height)
        all_indices = np.arange(height)
        np.random.shuffle(all_indices)
        indices = all_indices[:indices_lengths]

        def ref(input_tensor, indices, row):
            # Reference: overwrite each selected row in place.
            for idx in indices:
                input_tensor[idx] = row
            return [input_tensor]

        op = core.CreateOperator('CopyRowsToTensor', ['input_tensor', 'indices', 'row'], ['input_tensor'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[input_tensor, indices, row], reference=ref)

    @given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_copy_rows_to_tensor_invalid_input(self, input_tensor, gc, dc):
        # A row one element wider than the tensor must be rejected.
        input_tensor = np.array(input_tensor).astype(np.float32)
        height = np.shape(input_tensor)[0]
        width = np.shape(input_tensor)[1]
        row = np.random.rand((width + 1)).astype(np.float32)
        indices_lengths = np.random.randint(height)
        all_indices = np.arange(height)
        np.random.shuffle(all_indices)
        indices = all_indices[:indices_lengths]
        self.assertRunOpRaises(device_option=gc, op=core.CreateOperator('CopyRowsToTensor', ['input_tensor', 'indices', 'row'], ['input_tensor']), inputs=[input_tensor, indices, row], exception=RuntimeError, regexp='width of input tensor should match lengths of row')
def calX_term(a, b, c, d):
    """Return sum_{n=0}^{d} binom(-1/2, n) * (-1)^n * HansenCoefficient_term(a, b, c, d - n).

    Relies on module-level ``binom`` and ``HansenCoefficient_term``.
    """
    total = 0
    # Python 3 fix: ``xrange`` no longer exists; ``range`` is equivalent here.
    for n in range(d + 1):
        total += (binom(-0.5, n) * ((-1) ** n)) * HansenCoefficient_term(a, b, c, d - n)
    return total
def inspect_format_method(callable):
    """If *callable* is the bound ``format``/``format_map`` method of a
    string, return that string; otherwise return None."""
    is_bound_method = isinstance(callable, (types.MethodType, types.BuiltinMethodType))
    if not is_bound_method or callable.__name__ not in ('format', 'format_map'):
        return None
    obj = callable.__self__
    if isinstance(obj, string_types):
        return obj
def setup_fieldsplit_preconditioner(fun: Optional[fenics.Function], ksp: PETSc.KSP, options: _typing.KspOption) -> None:
    """Configure a PETSc fieldsplit preconditioner for a mixed function space.

    When *options* request ``pc_type == 'fieldsplit'``, register one index
    set per sub-space of *fun*'s function space with the KSP's
    preconditioner. No-op when *fun* is None or no fieldsplit is requested.

    :param fun: function whose (mixed) function space defines the splits.
    :param ksp: PETSc Krylov solver whose PC is configured.
    :param options: KSP option dictionary.
    :raises InputError: if fieldsplit is requested but the space is not mixed.
    """
    if (fun is not None):
        if (('pc_type' in options.keys()) and (options['pc_type'] == 'fieldsplit')):
            function_space = fun.function_space()
            # Fieldsplit needs more than one sub-space to split on.
            if (not (function_space.num_sub_spaces() > 1)):
                raise _exceptions.InputError('cashocs._utils.solve_linear_problem', 'ksp_options', 'You have specified a fieldsplit preconditioner, but the problem to be solved is not a mixed one.')
            pc = ksp.getPC()
            pc.setType('fieldsplit')
            idx = []
            name = []
            for i in range(function_space.num_sub_spaces()):
                # One PETSc index set per sub-space, built from its dof list.
                idx_i = PETSc.IS().createGeneral(function_space.sub(i).dofmap().dofs())
                idx.append(idx_i)
                name.append(f'{i:d}')
            # setFieldSplitIS takes (name, IS) pairs as varargs.
            idx_tuples = zip(name, idx)
            pc.setFieldSplitIS(*idx_tuples)
def focal_loss(y_true, y_pred):
    # Focal loss on predicted probabilities: recover logits, then delegate to
    # focal_loss_with_logits and average over all elements.
    # Relies on `alpha`, `gamma` and `focal_loss_with_logits` from the
    # enclosing/module scope — TODO confirm they are defined where this is used.
    # Clamp probabilities away from 0/1 to keep the logit finite.
    y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon()))
    # Inverse sigmoid. NOTE(review): `tf.log` is the TF1.x name (TF2: tf.math.log).
    logits = tf.log((y_pred / (1 - y_pred)))
    loss = focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)
    return tf.reduce_mean(loss)
def train_loop():
    """Benchmark forward/backward passes of the global `model` with synthetic
    data, printing average millisecond timings.

    Uses module-level globals: ``args`` (batchsize, arch), ``model``, ``xp``
    (numpy or cupy) and ``Timer``. The first ``n_dry`` iterations warm up
    caches/JIT and are excluded from the averages.
    """
    # Synthetic input batch filled with a constant value.
    data = np.ndarray((args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
    data.fill(33333)
    total_forward = 0
    total_backward = 0
    niter = 13
    n_dry = 3  # warm-up iterations excluded from timing
    label = np.ndarray(args.batchsize, dtype=np.int32)
    label.fill(1)
    count = 0
    timer = Timer()
    for i in range(niter):
        x = xp.asarray(data)
        y = xp.asarray(label)
        if (args.arch == 'googlenet'):
            # GoogLeNet returns three (auxiliary) outputs.
            timer.preprocess()
            (out1, out2, out3) = model.forward(x)
            timer.postprocess()
            time_ = timer.getElapseTime()
            if (i > (n_dry - 1)):
                count += 1
                total_forward += time_
            out = ((out1 + out2) + out3)
        else:
            timer.preprocess()
            out = model.forward(x)
            timer.postprocess()
            # Bug fix: original read "time_ = time_ = timer.getElapseTime()"
            # (doubled assignment left over from editing).
            time_ = timer.getElapseTime()
            if (i > (n_dry - 1)):
                count += 1
                total_forward += time_
        # Seed the output gradient with a constant before backprop.
        out.zerograd()
        out.grad.fill(3)
        model.cleargrads()
        if (xp != np):
            xp.cuda.Stream(null=True)
        timer.preprocess()
        out.backward()
        timer.postprocess()
        time_ = timer.getElapseTime()
        if (i > (n_dry - 1)):
            total_backward += time_
        model.cleargrads()
        del out, x, y
        if (args.arch == 'googlenet'):
            del out1, out2, out3
    print('Average Forward: ', (total_forward / count), ' ms')
    print('Average Backward: ', (total_backward / count), ' ms')
    print('Average Total: ', ((total_forward + total_backward) / count), ' ms')
    print('')
class Experiments(object):
    """A collection of Experiment objects trained/evaluated sequentially with
    per-experiment deterministic seeding."""

    def __init__(self, experiments):
        self._experiments = experiments

    def experiments(self):
        """Return the wrapped list of experiments."""
        return self._experiments

    def train(self):
        """Train each experiment under its own deterministic seed."""
        for experiment in self._experiments:
            Experiments.set_deterministic_on(experiment.seed)
            experiment.train()
            # Free GPU memory between experiments.
            torch.cuda.empty_cache()

    def evaluate(self, evaluation_options):
        """Evaluate each experiment, then run the requested cross-experiment analyses."""
        for experiment in self._experiments:
            Experiments.set_deterministic_on(experiment.seed)
            experiment.evaluate(evaluation_options)
            torch.cuda.empty_cache()
        # Re-seed once more (first seed) so cross-experiment analyses are reproducible.
        if (type(self._experiments[0].seed) == list):
            Experiments.set_deterministic_on(self._experiments[0].seed[0])
        else:
            Experiments.set_deterministic_on(self._experiments[0].seed)
        if evaluation_options['compute_quantized_embedding_spaces_animation']:
            EmbeddingSpaceStats.compute_quantized_embedding_spaces_animation(all_experiments_paths=[experiment.experiment_path for experiment in self._experiments], all_experiments_names=[experiment.name for experiment in self._experiments], all_results_paths=[experiment.results_path for experiment in self._experiments])
        if evaluation_options['plot_clustering_metrics_evolution']:
            AlignmentStats.compute_clustering_metrics_evolution(all_experiments_names=[experiment.name for experiment in self._experiments], result_path=self._experiments[0].results_path)
        if evaluation_options['check_clustering_metrics_stability_over_seeds']:
            AlignmentStats.check_clustering_metrics_stability_over_seeds(all_experiments_names=[experiment.name for experiment in self._experiments], result_path=self._experiments[0].results_path)
        if evaluation_options['plot_gradient_stats']:
            all_experiments_paths = [experiment.experiment_path for experiment in self._experiments]
            all_experiments_names = [experiment.name for experiment in self._experiments]
            all_results_paths = [experiment.results_path for experiment in self._experiments]
            gradient_stats_entries = list()
            for i in range(len(all_experiments_paths)):
                experiment_path = all_experiments_paths[i]
                experiment_name = all_experiments_names[i]
                experiment_results_path = all_results_paths[i]
                # Pickled entries are named "<name>_<epoch>_<iteration>_gradient-stats.pickle".
                file_names = [file_name for file_name in os.listdir(experiment_path) if (('gradient-stats' in file_name) and (experiment_name in file_name))]
                # Order by (epoch, iteration) parsed from the file name.
                file_names = sorted(file_names, key=(lambda x: (int(x.replace((experiment_name + '_'), '').replace('_gradient-stats.pickle', '').split('_')[0]), int(x.replace((experiment_name + '_'), '').replace('_gradient-stats.pickle', '').split('_')[1]))))
                with tqdm(file_names) as bar:
                    bar.set_description('Processing')
                    for file_name in bar:
                        with open(((experiment_path + os.sep) + file_name), 'rb') as file:
                            # Bug fix: originally replaced '_gradients-stats.pickle'
                            # (extra 's'), which never matched the actual suffix;
                            # indices 0/1 happened to parse anyway, but the suffix
                            # was silently left in the split result.
                            split_file_name = file_name.replace((experiment_name + '_'), '').replace('_gradient-stats.pickle', '').split('_')
                            gradient_stats_entries.append((int(split_file_name[0]), int(split_file_name[1]), pickle.load(file)))
            GradientStats.plot_gradient_flow_over_epochs(gradient_stats_entries, output_file_name=(((experiment_results_path + os.sep) + experiment_name) + '_gradient_flow.png'))

    @staticmethod
    def set_deterministic_on(seed):
        """Seed torch (CPU+CUDA), numpy and random, and force deterministic cuDNN."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True

    @staticmethod
    def load(experiments_path):
        """Build an Experiments collection from a JSON experiments file.

        Expands a list-valued 'seed' into one experiment per (config, seed) pair.
        """
        experiments = list()
        with open(experiments_path, 'r') as experiments_file:
            experiment_configurations = json.load(experiments_file)
            configuration = None
            with open(experiment_configurations['configuration_path'], 'r') as configuration_file:
                configuration = yaml.load(configuration_file, Loader=yaml.FullLoader)
            if (type(experiment_configurations['seed']) == list):
                for seed in experiment_configurations['seed']:
                    for experiment_configuration_key in experiment_configurations['experiments'].keys():
                        experiment = Experiment(name=((experiment_configuration_key + '-seed') + str(seed)), experiments_path=experiment_configurations['experiments_path'], results_path=experiment_configurations['results_path'], global_configuration=configuration, experiment_configuration=experiment_configurations['experiments'][experiment_configuration_key], seed=seed)
                        experiments.append(experiment)
            else:
                for experiment_configuration_key in experiment_configurations['experiments'].keys():
                    experiment = Experiment(name=experiment_configuration_key, experiments_path=experiment_configurations['experiments_path'], results_path=experiment_configurations['results_path'], global_configuration=configuration, experiment_configuration=experiment_configurations['experiments'][experiment_configuration_key], seed=experiment_configurations['seed'])
                    experiments.append(experiment)
        return Experiments(experiments)
def cln_word(word):
    """Split an apostrophe contraction off *word*.

    Returns [stem, expansion] for a recognized clitic suffix, otherwise
    [word] unchanged.
    """
    if (word[(- 3):] == "'ve"):
        # NOTE(review): unlike the other expansions, 'have' carries no
        # leading space in the original data; preserved as-is — confirm
        # downstream handling before normalizing.
        return [word[:(- 3)], 'have']
    elif (word[(- 2):] == "'d"):
        return [word[:(- 2)], ' had']
    elif (word[(- 3):] == "'ll"):
        # Bug fix: "'ll" is three characters, so the original comparison
        # word[-2:] == "'ll" could never be true and "'ll" was never expanded.
        return [word[:(- 3)], ' will']
    elif (word[(- 2):] == "'m"):
        # Bug fix: "I'm" contracts "I am", not "I is".
        return [word[:(- 2)], ' am']
    elif (word[(- 3):] == "'re"):
        return [word[:(- 3)], ' are']
    else:
        return [word]
class OPTDecoderNF(modeling_opt.OPTDecoder):
    """OPT decoder whose stock layers are swapped for OPTDecoderLayerNF."""

    def __init__(self, config: modeling_opt.OPTConfig):
        super().__init__(config)
        # Replace every decoder layer with the NF variant, then re-run the
        # HF weight-init/post-processing hook.
        self.layers = nn.ModuleList([OPTDecoderLayerNF(config) for _ in range(config.num_hidden_layers)])
        self.post_init()

    def forward(self, *args, **kwargs):
        # Pure delegation to the parent implementation.
        return super().forward(*args, **kwargs)
def info(msg: str) -> None:
    """Print a bold-blue ':: INFO' line to stderr (flushed immediately)."""
    bold = escape_codes['bold_blue']
    reset = escape_codes['reset']
    line = f'{bold}:: INFO {msg}{reset}'
    print(line, file=sys.stderr, flush=True)
def test_show_versions(capsys):
    """show_versions() should print python/numpy info and, when available,
    the threadpoolctl section."""
    with ignore_warnings():
        show_versions()
    captured_out = capsys.readouterr()[0]
    assert 'python' in captured_out
    assert 'numpy' in captured_out
    # The threadpoolctl section only appears when thread pools are detected.
    if threadpool_info():
        assert 'threadpoolctl info:' in captured_out
class RSHash(BaseModel):
    """Streaming RS-Hash style anomaly scorer.

    Maintains ``m`` randomized subspace grids whose cell weights live in
    count-min-style sketch dictionaries with exponential time decay.
    """

    def __init__(self, feature_mins, feature_maxes, sampling_points=1000, decay=0.015, num_components=100, num_hash_fns=1):
        # Per-feature value range; constant features are excluded from sampling.
        self.minimum = feature_mins
        self.maximum = feature_maxes
        self.m = num_components
        self.w = num_hash_fns
        self.s = sampling_points
        self.dim = len(self.minimum)
        self.decay = decay
        self.scores = []  # NOTE(review): never updated anywhere in this class.
        self.num_hash = num_hash_fns
        self.cmsketches = []
        # Effective sample size implied by the decay factor (floored at 1000).
        self.effS = max(1000, (1.0 / (1 - np.power(2, (- self.decay)))))
        # Per-component grid cell width f[r], drawn uniformly from the range
        # derived from effS.
        self.f = np.random.uniform(low=(1.0 / np.sqrt(self.effS)), high=(1 - (1.0 / np.sqrt(self.effS))), size=self.m)
        # One sketch dictionary per hash function.
        for i in range(self.num_hash):
            self.cmsketches.append({})
        self._sample_dims()
        self.alpha = self._sample_shifts()
        # Stream position counter; starts at 1 - s so the first `s` samples
        # act as a warm-up window.
        self.index = ((0 + 1) - self.s)
        self.last_score = None

    def fit_partial(self, X, y=None):
        """Fold one sample *X* into the sketches and cache its score."""
        score_instance = 0
        for r in range(self.m):
            # Discretize the selected subspace V[r] of X into shifted grid cells.
            Y = ((- 1) * np.ones(len(self.V[r])))
            Y[range(len(self.V[r]))] = np.floor(((X[np.array(self.V[r])] + np.array(self.alpha[r])) / float(self.f[r])))
            # Prefix the component id so cells of different components never collide.
            mod_entry = np.insert(Y, 0, r)
            mod_entry = tuple(mod_entry.astype(np.int32))
            c = []
            for w in range(len(self.cmsketches)):
                try:
                    value = self.cmsketches[w][mod_entry]
                except KeyError:
                    # Unseen cell: pretend it was created now with weight 0.
                    value = (self.index, 0)
                tstamp = value[0]
                wt = value[1]
                # Exponentially decay the stored weight by the elapsed stream time.
                new_wt = (wt * np.power(2, ((- self.decay) * (self.index - tstamp))))
                c.append(new_wt)
                new_tstamp = self.index
                self.cmsketches[w][mod_entry] = (new_tstamp, (new_wt + 1))
            # Count-min estimate (min over hash functions), log-dampened.
            min_c = min(c)
            c = np.log((1 + min_c))
            score_instance = (score_instance + c)
        # Average log-density over the components — presumably higher means
        # more "normal"; confirm score orientation with callers.
        self.last_score = (score_instance / self.m)
        self.index += 1
        return self

    def score_partial(self, X):
        """Return the score cached by the most recent fit_partial call."""
        return self.last_score

    def _sample_shifts(self):
        """Draw one random grid shift per component and selected dimension."""
        alpha = []
        for r in range(self.m):
            alpha.append(np.random.uniform(low=0, high=self.f[r], size=len(self.V[r])))
        return alpha

    def _sample_dims(self):
        """Choose, per component, how many and which feature dims to grid."""
        # Bounds on the subspace size derived from effS and the cell width f.
        max_term = np.max(((2 * np.ones(self.f.size)), list((1.0 / self.f))), axis=0)
        common_term = (np.log(self.effS) / np.log(max_term))
        low_value = (1 + (0.5 * common_term))
        high_value = common_term
        self.r = np.empty([self.m], dtype=int)
        self.V = []
        for i in range(self.m):
            if (np.floor(low_value[i]) == np.floor(high_value[i])):
                # Degenerate bound range: fall back to a single dimension.
                self.r[i] = 1
            else:
                self.r[i] = min(np.random.randint(low=low_value[i], high=high_value[i]), self.dim)
            # Only features with a non-constant observed range are eligible.
            all_feats = np.array(list(range(self.dim)), dtype=np.int32)
            choice_feats = all_feats[np.where((self.minimum != self.maximum))]
            sel_V = np.random.choice(choice_feats, size=self.r[i], replace=False)
            self.V.append(sel_V)
class _PGNMF(NMF):
    """Projected-gradient NMF kept for benchmarking.

    Wraps scikit-learn's NMF interface around the projected-gradient solver
    helpers ``_fit_projected_gradient`` / ``_nls_subproblem``.
    """

    def __init__(self, n_components=None, solver='pg', init=None, tol=0.0001, max_iter=200, random_state=None, alpha=0.0, l1_ratio=0.0, nls_max_iter=10):
        # Map the single `alpha` onto both alpha_W and alpha_H of the parent.
        super().__init__(n_components=n_components, init=init, solver=solver, tol=tol, max_iter=max_iter, random_state=random_state, alpha_W=alpha, alpha_H=alpha, l1_ratio=l1_ratio)
        self.nls_max_iter = nls_max_iter

    def fit(self, X, y=None, **params):
        """Fit the model to X (y is ignored; scikit-learn convention)."""
        self.fit_transform(X, **params)
        return self

    def transform(self, X):
        """Project X onto the learned components (solve for W with H fixed)."""
        check_is_fitted(self)
        H = self.components_
        (W, _, self.n_iter_) = self._fit_transform(X, H=H, update_H=False)
        return W

    def inverse_transform(self, W):
        """Reconstruct the data as W @ components_."""
        check_is_fitted(self)
        return np.dot(W, self.components_)

    def fit_transform(self, X, y=None, W=None, H=None):
        """Fit the model and return the transformed data W."""
        # NOTE(review): stores `self.n_iter` (no trailing underscore) while
        # transform() stores `self.n_iter_` — looks inconsistent; confirm.
        (W, H, self.n_iter) = self._fit_transform(X, W=W, H=H, update_H=True)
        self.components_ = H
        return W

    def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
        """Validate inputs, initialize factors, and run the PG solver.

        Returns the tuple (W, H, n_iter).
        """
        X = check_array(X, accept_sparse=('csr', 'csc'))
        check_non_negative(X, 'NMF (input X)')
        (n_samples, n_features) = X.shape
        n_components = self.n_components
        if (n_components is None):
            n_components = n_features
        if ((not isinstance(n_components, numbers.Integral)) or (n_components <= 0)):
            raise ValueError(('Number of components must be a positive integer; got (n_components=%r)' % n_components))
        if ((not isinstance(self.max_iter, numbers.Integral)) or (self.max_iter < 0)):
            raise ValueError(('Maximum number of iterations must be a positive integer; got (max_iter=%r)' % self.max_iter))
        if ((not isinstance(self.tol, numbers.Number)) or (self.tol < 0)):
            raise ValueError(('Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol))
        # Initialization: custom W and H, fixed H only, or standard NMF init.
        if ((self.init == 'custom') and update_H):
            _check_init(H, (n_components, n_features), 'NMF (input H)')
            _check_init(W, (n_samples, n_components), 'NMF (input W)')
        elif (not update_H):
            _check_init(H, (n_components, n_features), 'NMF (input H)')
            W = np.zeros((n_samples, n_components))
        else:
            (W, H) = _initialize_nmf(X, n_components, init=self.init, random_state=self.random_state)
        if update_H:
            (W, H, n_iter) = _fit_projected_gradient(X, W, H, self.tol, self.max_iter, self.nls_max_iter, self.alpha, self.l1_ratio)
        else:
            # H fixed: solve the nonnegative least-squares subproblem for W only.
            (Wt, _, n_iter) = _nls_subproblem(X.T, H.T, W.T, self.tol, self.nls_max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio)
            W = Wt.T
        if ((n_iter == self.max_iter) and (self.tol > 0)):
            warnings.warn(('Maximum number of iteration %d reached. Increase it to improve convergence.' % self.max_iter), ConvergenceWarning)
        return (W, H, n_iter)
def set_location_header(request):
    # View that returns a 302 whose Location header is taken from the "next"
    # query parameter (default "/").
    # NOTE(review): "next" comes straight from the query string; if this view
    # is ever exposed outside a test fixture it is an open-redirect vector —
    # validate the target host before trusting it.
    url = request.GET.get('next', '/')
    response = HttpResponse(status=302)
    response['Location'] = url
    return response
def get_key(value, dic, add_1=False, pad=0):
    """Reverse-lookup the first key in *dic* whose value equals *value*.

    :param value: value to search for (only mutated locally).
    :param dic: mapping to search (insertion order decides ties).
    :param add_1: when True and ``value != pad``, search for ``value + 1``.
    :param pad: sentinel value that maps to the literal key 'pad'.
    :return: the matching key, or 'pad' for the sentinel.
    :raises ValueError: when no key maps to the (possibly shifted) value
        (same exception type as the original ``list.index`` lookup).
    """
    if (add_1 and (value != pad)):
        value += 1
    if (value == pad):
        return 'pad'
    # Single O(n) scan instead of materializing both keys() and values() lists.
    for key, mapped in dic.items():
        if mapped == value:
            return key
    raise ValueError(f'{value!r} is not a value of the mapping')
class CFuncDeclaratorNode(CDeclaratorNode):
    """Declarator node for a C function signature in Cython.

    Analyses the argument list, exception clause and calling convention into
    a ``PyrexTypes.CFuncType``, and handles template placeholders declared
    via an array-style suffix on the base declarator.
    """

    child_attrs = ['base', 'args', 'exception_value']
    overridable = 0          # whether the function is declared cpdef-overridable
    optional_arg_count = 0   # number of trailing arguments with defaults
    is_const_method = 0
    templates = None

    def analyse_templates(self):
        """Extract C++ template placeholders from an array-suffix declarator.

        Returns the list of ``TemplatePlaceholderType`` or None when the base
        is not an array declarator (i.e. no template clause present).
        """
        if isinstance(self.base, CArrayDeclaratorNode):
            from .ExprNodes import TupleNode, NameNode
            template_node = self.base.dimension
            # The "dimension" expression holds the template parameter names.
            if isinstance(template_node, TupleNode):
                template_nodes = template_node.args
            elif isinstance(template_node, NameNode):
                template_nodes = [template_node]
            else:
                error(template_node.pos, 'Template arguments must be a list of names')
                return None
            self.templates = []
            for template in template_nodes:
                if isinstance(template, NameNode):
                    self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
                else:
                    error(template.pos, 'Template arguments must be a list of names')
            # Strip the array suffix now that templates are extracted.
            self.base = self.base.base
            return self.templates
        else:
            return None

    def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
        """Build the CFuncType for this declarator and recurse into the base.

        :param return_type: already-analysed return type.
        :param env: declaring scope.
        :param nonempty: nonzero when unnamed arguments should get dummy names.
        :param directive_locals: types forced via the `locals` directive.
        :param visibility: 'extern' / 'public' / ... or None.
        :param in_pxd: whether declared in a .pxd file.
        """
        if (directive_locals is None):
            directive_locals = {}
        if nonempty:
            nonempty -= 1
        func_type_args = []
        for (i, arg_node) in enumerate(self.args):
            # First argument of a cdef class method is `self` unless staticmethod.
            (name_declarator, type) = arg_node.analyse(env, nonempty=nonempty, is_self_arg=((i == 0) and env.is_c_class_scope and ('staticmethod' not in env.directives)))
            name = name_declarator.name
            if (name in directive_locals):
                # A `locals` directive may override the declared argument type.
                type_node = directive_locals[name]
                other_type = type_node.analyse_as_type(env)
                if (other_type is None):
                    error(type_node.pos, 'Not a type')
                elif ((type is not PyrexTypes.py_object_type) and (not type.same_as(other_type))):
                    error(self.base.pos, 'Signature does not agree with previous declaration')
                    error(type_node.pos, 'Previous declaration here')
                else:
                    type = other_type
            if name_declarator.cname:
                error(self.pos, 'Function argument cannot have C name specification')
            if ((i == 0) and env.is_c_class_scope and type.is_unspecified):
                # Untyped `self` defaults to the enclosing extension type.
                type = env.parent_type
            # C array arguments decay to pointers.
            if type.is_array:
                type = PyrexTypes.c_ptr_type(type.base_type)
            if type.is_void:
                error(arg_node.pos, 'Use spam() rather than spam(void) to declare a function with no arguments.')
            func_type_args.append(PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
            if arg_node.default:
                self.optional_arg_count += 1
            elif self.optional_arg_count:
                error(self.pos, 'Non-default argument follows default argument')
        exc_val = None
        exc_check = 0
        if (self.exception_check == '+'):
            # C++ exception propagation needs these headers in generated code.
            env.add_include_file('ios')
            env.add_include_file('new')
            env.add_include_file('stdexcept')
            env.add_include_file('typeinfo')
        if (return_type.is_pyobject and (self.exception_value or self.exception_check) and (self.exception_check != '+')):
            # Python-object returns already signal errors via NULL.
            error(self.pos, 'Exception clause not allowed for function returning Python object')
        else:
            if ((self.exception_value is None) and self.exception_check and (self.exception_check != '+')):
                # Default the exception sentinel from the return type when
                # declared in implementation code (not extern / pxd).
                if ((return_type.exception_value is not None) and ((visibility != 'extern') and (not in_pxd))):
                    if (not env.is_c_class_scope):
                        from .ExprNodes import ConstNode
                        self.exception_value = ConstNode(self.pos, value=return_type.exception_value, type=return_type)
            if self.exception_value:
                self.exception_value = self.exception_value.analyse_const_expression(env)
                if (self.exception_check == '+'):
                    # `except +something`: must be a Python exception, a
                    # no-arg non-Python cdef handler, or the special `*`.
                    exc_val_type = self.exception_value.type
                    if ((not exc_val_type.is_error) and (not exc_val_type.is_pyobject) and (not (exc_val_type.is_cfunction and (not exc_val_type.return_type.is_pyobject) and (not exc_val_type.args))) and (not ((exc_val_type == PyrexTypes.c_char_type) and (self.exception_value.value == '*')))):
                        error(self.exception_value.pos, 'Exception value must be a Python exception or cdef function with no arguments or *.')
                    exc_val = self.exception_value
                else:
                    # Plain sentinel value: must be a compile-time constant
                    # assignable to the return type.
                    self.exception_value = self.exception_value.coerce_to(return_type, env).analyse_const_expression(env)
                    exc_val = self.exception_value.get_constant_c_result_code()
                    if (exc_val is None):
                        raise InternalError(('get_constant_c_result_code not implemented for %s' % self.exception_value.__class__.__name__))
                    if (not return_type.assignable_from(self.exception_value.type)):
                        error(self.exception_value.pos, 'Exception value incompatible with function return type')
            exc_check = self.exception_check
        if return_type.is_cfunction:
            error(self.pos, 'Function cannot return a function')
        func_type = PyrexTypes.CFuncType(return_type, func_type_args, self.has_varargs, optional_arg_count=self.optional_arg_count, exception_value=exc_val, exception_check=exc_check, calling_convention=self.base.calling_convention, nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable, is_const_method=self.is_const_method, templates=self.templates)
        if self.optional_arg_count:
            if func_type.is_fused:
                # Fused types get one optional-args struct per specialization,
                # created lazily via this callback.
                def declare_opt_arg_struct(func_type, fused_cname):
                    self.declare_optional_arg_struct(func_type, env, fused_cname)
                func_type.declare_opt_arg_struct = declare_opt_arg_struct
            else:
                self.declare_optional_arg_struct(func_type, env)
        callspec = env.directives['callspec']
        if callspec:
            current = func_type.calling_convention
            if (current and (current != callspec)):
                error(self.pos, ("cannot have both '%s' and '%s' calling conventions" % (current, callspec)))
            func_type.calling_convention = callspec
        return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)

    def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
        """Declare the C struct that carries this function's optional arguments.

        The struct holds a count member plus one field per defaulted argument;
        for fused functions *fused_cname* disambiguates each specialization.
        """
        scope = StructOrUnionScope()
        arg_count_member = ('%sn' % Naming.pyrex_prefix)
        scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
        for arg in func_type.args[(len(func_type.args) - self.optional_arg_count):]:
            scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
        struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
        if (fused_cname is not None):
            struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)
        op_args_struct = env.global_scope().declare_struct_or_union(name=struct_cname, kind='struct', scope=scope, typedef_flag=0, pos=self.pos, cname=struct_cname)
        op_args_struct.defined_in_pxd = 1
        op_args_struct.used = 1
        # The function receives a pointer to this struct as its last argument.
        func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
@_fl_task(model='model', data_loader='val_loader', device='device')
def validate(model, val_loader, device):
    """Federated-learning task: run one validation pass of *model* over
    *val_loader* on *device*.

    NOTE(review): the original first line was a bare call expression
    (``_fl_task(...)`` without the ``@``), which executes the factory and
    discards its result without wrapping the function; restored as a
    decorator — confirm against the task-runner registration API.

    :return: dict exposing the logged 'Discriminator val loss' as 'val_loss'.
    """
    print(f'''
TASK VALIDATE GOT DEVICE {device}
''')
    model.eval()
    model.to(device)
    # Use one GPU only when the requested device string names a CUDA device.
    AVAIL_GPUS = (1 if ('cuda' in device) else 0)
    trainer = Trainer(gpus=AVAIL_GPUS, max_epochs=1, callbacks=[MetricsCallback()])
    trainer.validate(model=model, dataloaders=val_loader)
    print('validation logged metrics', trainer.logged_metrics)
    val_loss = trainer.logged_metrics['Discriminator val loss']
    return {'val_loss': val_loss}
def score(system_conllu_file, gold_conllu_file):
    """Return (precision, recall, f1) of the 'Words' metric from the CoNLL-U
    evaluation of the system output against the gold file."""
    evaluation = ud_scores(gold_conllu_file, system_conllu_file)
    words_metric = evaluation['Words']
    return (words_metric.precision, words_metric.recall, words_metric.f1)
def Distinct(*args):
    """Create a Z3 expression asserting that all arguments are pairwise distinct.

    At least one argument must already be a Z3 expression so a context can be
    inferred; the remaining Python values are coerced into that context.
    """
    args = _get_args(args)
    # Infer the Z3 context from the argument list.
    ctx = _ctx_from_ast_arg_list(args)
    if z3_debug():
        _z3_assert((ctx is not None), 'At least one of the arguments must be a Z3 expression')
    # Coerce plain Python values to Z3 expressions in the same context.
    args = _coerce_expr_list(args, ctx)
    (_args, sz) = _to_ast_array(args)
    return BoolRef(Z3_mk_distinct(ctx.ref(), sz, _args), ctx)
class BatchPolopt2(RLAlgorithm, abc.ABC):
    """Base class for batch on-policy policy-optimization algorithms.

    Gathers whole trajectories each epoch, computes GAE(lambda) advantages
    and discounted returns in ``process_samples``, and leaves the actual
    update to the subclass hooks ``init_opt`` / ``optimize_policy``.
    """

    def __init__(self, env_spec, policy, baseline, scope=None, max_path_length=500, discount=0.99, gae_lambda=1, center_adv=True, positive_adv=False, fixed_horizon=False, flatten_input=True):
        """Store hyper-parameters and pick a sampler matching the policy.

        ``gae_lambda`` is the GAE trace-decay parameter; ``flatten_input``
        controls whether observations are flattened via the env spec.
        """
        self._env_spec = env_spec
        self._policy = policy
        self._baseline = baseline
        self._scope = scope
        self._max_path_length = max_path_length
        self._discount = discount
        self._gae_lambda = gae_lambda
        self._center_adv = center_adv
        self._positive_adv = positive_adv
        self._fixed_horizon = fixed_horizon
        self._flatten_input = flatten_input
        # Rolling window of the last 100 undiscounted episode returns (logging).
        self._episode_reward_mean = collections.deque(maxlen=100)
        # Vectorized policies can sample many environments in parallel.
        if policy.vectorized:
            self._sampler_cls = OnPolicyVectorizedSampler
        else:
            self._sampler_cls = BatchSampler
        self.init_opt()

    # NOTE(review): the next three accessors look like stripped `@property`
    # definitions — confirm against the original file.
    def max_path_length(self):
        """Maximum trajectory length."""
        return self._max_path_length

    def policy(self):
        """The policy being optimized."""
        return self._policy

    def sampler_cls(self):
        """Sampler class chosen in ``__init__``."""
        return self._sampler_cls

    def train(self, runner):
        """Run the sample/optimize loop via *runner*.

        Returns:
            The average return of the final epoch (None if no epochs ran).
        """
        last_return = None
        for _ in runner.step_epochs():
            runner.step_path = runner.obtain_samples(runner.step_itr)
            last_return = self.train_once(runner.step_itr, runner.step_path)
            runner.step_itr += 1
        return last_return

    def train_once(self, itr, paths):
        """Process one batch of *paths* and perform a single policy update."""
        paths = self.process_samples(itr, paths)
        self.log_diagnostics(paths)
        logger.log('Optimizing policy...')
        self.optimize_policy(itr, paths)
        return paths['average_return']

    def log_diagnostics(self, paths):
        """Delegate diagnostic logging to the policy and the baseline."""
        logger.log('Logging diagnostics...')
        self._policy.log_diagnostics(paths)
        self._baseline.log_diagnostics(paths)

    def process_samples(self, itr, paths):
        """Turn raw trajectories into flat training arrays.

        Computes per-path baselines, GAE advantages and discounted
        returns, concatenates everything across paths, records tabular
        statistics, and returns the assembled ``samples_data`` dict.
        """
        baselines = []
        returns = []
        if self._flatten_input:
            paths = [dict(observations=self._env_spec.observation_space.flatten_n(path['observations']), actions=self._env_spec.action_space.flatten_n(path['actions']), rewards=path['rewards'], env_infos=path['env_infos'], agent_infos=path['agent_infos']) for path in paths]
        else:
            paths = [dict(observations=path['observations'], actions=self._env_spec.action_space.flatten_n(path['actions']), rewards=path['rewards'], env_infos=path['env_infos'], agent_infos=path['agent_infos']) for path in paths]
        if hasattr(self._baseline, 'predict_n'):
            all_path_baselines = self._baseline.predict_n(paths)
        else:
            all_path_baselines = [self._baseline.predict(path) for path in paths]
        for (idx, path) in enumerate(paths):
            # Append a terminal value of 0 so the TD residuals line up.
            path_baselines = np.append(all_path_baselines[idx], 0)
            # TD residuals: r_t + gamma * V(s_{t+1}) - V(s_t).
            deltas = ((path['rewards'] + (self._discount * path_baselines[1:])) - path_baselines[:(- 1)])
            path['advantages'] = np_tensor_utils.discount_cumsum(deltas, (self._discount * self._gae_lambda))
            path['deltas'] = deltas
            path['baselines'] = all_path_baselines[idx]
            baselines.append(path['baselines'])
            path['returns'] = np_tensor_utils.discount_cumsum(path['rewards'], self._discount)
            returns.append(path['returns'])
        obs = np.concatenate([path['observations'] for path in paths])
        actions = np.concatenate([path['actions'] for path in paths])
        rewards = np.concatenate([path['rewards'] for path in paths])
        returns = np.concatenate(returns)
        baselines = np.concatenate(baselines)
        agent_infos_path = [path['agent_infos'] for path in paths]
        agent_infos = dict()
        for key in self._policy.state_info_keys:
            agent_infos[key] = np.concatenate([infos[key] for infos in agent_infos_path])
        env_infos_path = [path['env_infos'] for path in paths]
        env_infos = dict()
        for key in paths[0]['env_infos'].keys():
            env_infos[key] = np.concatenate([infos[key] for infos in env_infos_path])
        # Every step of every path is marked valid here; valids/lengths
        # exist for API parity with padded samplers.
        valids = np.asarray([np.ones_like(path['returns']) for path in paths])
        lengths = np.asarray([v.sum() for v in valids])
        average_discounted_return = np.mean([path['returns'][0] for path in paths])
        undiscounted_returns = [sum(path['rewards']) for path in paths]
        self._episode_reward_mean.extend(undiscounted_returns)
        samples_data = dict(observations=obs, actions=actions, rewards=rewards, baselines=baselines, returns=returns, lengths=lengths, valids=valids, agent_infos=agent_infos, env_infos=env_infos, paths=paths, average_return=np.mean(undiscounted_returns))
        tabular.record('Iteration', itr)
        tabular.record('AverageDiscountedReturn', average_discounted_return)
        tabular.record('AverageReturn', np.mean(undiscounted_returns))
        tabular.record('Extras/EpisodeRewardMean', np.mean(self._episode_reward_mean))
        tabular.record('NumTrajs', len(paths))
        tabular.record('StdReturn', np.std(undiscounted_returns))
        tabular.record('MaxReturn', np.max(undiscounted_returns))
        tabular.record('MinReturn', np.min(undiscounted_returns))
        return samples_data

    # NOTE(review): the three hooks below arrived with empty bodies (their
    # bodies — likely just docstrings under `@abc.abstractmethod` — were
    # stripped in extraction).  The docstrings added here double as bodies.
    def init_opt(self):
        """Initialize the optimization machinery (subclass hook)."""

    def get_itr_snapshot(self, itr):
        """Return snapshot data for iteration *itr* (subclass hook)."""

    def optimize_policy(self, itr, samples_data):
        """Update the policy from *samples_data* (subclass hook)."""
def test_initialize_local_classifiers_2(digraph_multiple_roots):
    """A None local classifier should default to LogisticRegression."""
    graph = digraph_multiple_roots
    graph.local_classifier = None
    graph._initialize_local_classifiers()
    assert isinstance(graph.local_classifier_, LogisticRegression)
# Restored the `@pytest.mark` prefix: the original line began with a bare
# `.parametrize(...)`, which is a syntax error and clearly an extraction
# artifact of a stripped pytest decorator (pytest is imported at module top).
@pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings)
def test_std_network_output_values(input_dim, output_dim, hidden_sizes):
    """GaussianMLPModule with all-ones weights and no nonlinearity must
    produce the analytically expected mean and a variance of init_std**2."""
    init_std = 2.0
    module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
    dist = module(torch.ones(input_dim))
    # Each ones-initialized linear layer multiplies the running sum by its
    # width, so the mean is input_dim * prod(hidden_sizes) in every output.
    exp_mean = torch.full((output_dim,), (input_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
    exp_variance = (init_std ** 2)
    assert dist.mean.equal(exp_mean)
    assert dist.variance.equal(torch.full((output_dim,), exp_variance, dtype=torch.float))
    assert (dist.rsample().shape == (output_dim,))
class FalconInt8Engine(CausalEngine):
    """Causal-LM engine wrapping Falcon-7B loaded with 8-bit weights."""

    config_name: str = 'falcon_int8_engine'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        """Load tiiuae/falcon-7b in 8-bit mode, optionally from *weights_path*."""
        super().__init__(model_name='tiiuae/falcon-7b', weights_path=weights_path, load_8bit=True, trust_remote_code=True)
        # Falcon's tokenizer ships without a pad token; reuse EOS for padding.
        tok = self.tokenizer
        tok.pad_token = tok.eos_token
        tok.pad_token_id = tok.eos_token_id
class BertGenerationTokenizer(metaclass=DummyObject):
    """Import-time placeholder used when the `sentencepiece` backend is
    missing; instantiation delegates to ``requires_backends``."""

    # Backends this dummy stands in for.
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
def test_statement_replace_3(field_mock, default_test_case):
    """Replacing a variable nested inside a FieldReference must rebind it."""
    type_system = default_test_case.test_cluster.type_system
    int_type = type_system.convert_type_hint(int)
    ref = vr.VariableReference(default_test_case, int_type)
    field = gao.GenericField(type_system.to_type_info(MagicMock), 'foo', int_type)
    ref_2 = vr.FieldReference(ref, field)
    statement = stmt.FieldStatement(default_test_case, field_mock, ref_2)
    new = vr.VariableReference(default_test_case, int_type)
    statement.replace(ref, new)
    assert statement.source.get_variable_reference() == new
class Translator_w_head(nn.Module):
    """Token/dimension translator with residual pre/post blocks and a
    two-stage residual tail."""

    def __init__(self, num_tok, num_tok_out, dim, dim_out, mult=2, depth=5):
        super().__init__()
        # Core translator maps (num_tok, dim) -> (num_tok_out, dim_out).
        self.trans = translator_tok_dim_v1(num_tok, num_tok_out, dim, dim_out, mult=mult, last_ln=True)
        self.tail_1 = translator_tok_dim_v1(num_tok_out, num_tok_out, dim_out, dim_out, mult=mult, last_ln=False)
        self.tail_2 = translator_tok_dim_v1(num_tok_out, num_tok_out, dim_out, dim_out, mult=mult, last_ln=False)
        # Three residual blocks in the input space, depth-3 in the output space.
        self.in_blocks = nn.ModuleList([translator_base_v1(num_tok, dim, dim, mult=mult) for _ in range(3)])
        self.out_blocks = nn.ModuleList([translator_base_v1(num_tok_out, dim_out, dim_out, mult=mult) for _ in range(depth - 3)])
        self.gelu = nn.GELU()

    def forward(self, x):
        for blk in self.in_blocks:
            x = self.gelu(blk(x) + x)
        x = self.trans(x)
        for blk in self.out_blocks:
            x = self.gelu(blk(x) + x)
        # Residual two-stage tail.
        return self.tail_2(self.gelu(self.tail_1(x) + x))
def ansi(s, attr):
    """Wrap *s* in the ANSI escape codes for *attr* when stdout is a
    POSIX terminal; otherwise return the plain string."""
    text = str(s)
    # Windows consoles (os.name == 'nt') and non-tty streams get no color.
    if os.name == 'nt' or not sys.stdout.isatty():
        return text
    return ansi_codes[attr] + text + ansi_codes['reset']
class StoppingCriteria(metaclass=DummyObject):
    """Import-time placeholder used when the `torch` backend is missing;
    instantiation delegates to ``requires_backends``."""

    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def autolevel(image, footprint, out=None, mask=None, shift_x=False, shift_y=False, shift_z=False):
    """Auto-level each pixel within its local *footprint* neighborhood.

    Dispatches to the 2-D or 3-D Cython kernel depending on the rank of
    *image*; any other rank raises ValueError.
    """
    ndim = np.asanyarray(image).ndim
    if ndim == 2:
        return _apply_scalar_per_pixel(generic_cy._autolevel, image, footprint, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
    if ndim == 3:
        return _apply_scalar_per_pixel_3D(generic_cy._autolevel_3D, image, footprint, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, shift_z=shift_z)
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {ndim}.')
def write_json(fn, output):
    """Serialize *output* as sorted, 4-space-indented JSON into file *fn*.

    Raises:
        sb.errors.SmartBugsError: wrapping any serialization or I/O error.
    """
    try:
        serialized = json.dumps(output, sort_keys=True, indent=4)
        with open(fn, 'w', encoding='utf-8') as fh:
            # print(..., file=fh) in the original == write + trailing newline.
            fh.write(serialized + '\n')
    except Exception as e:
        raise sb.errors.SmartBugsError(e)
class POI(POIarray):
    """A parameter of interest pinned to a single scalar value."""

    def __init__(self, parameter, value: (int | float)):
        """Create a POI for *parameter* at scalar *value*.

        Raises:
            TypeError: if *value* is a collection rather than a scalar.
        """
        if isinstance(value, Collection):
            raise TypeError('A single value for the POI is required.')
        super().__init__(parameter=parameter, values=[value])
        self._value = value

    # Restored `@property` (stripped in extraction): __eq__, __hash__ and
    # __repr__ below all read `self.value` as a scalar, which only works
    # when `value` is a property, not a bound method.
    @property
    def value(self):
        """The scalar value of this POI."""
        return self._value

    def __eq__(self, other):
        if not isinstance(other, POI):
            return NotImplemented
        value_equal = (self.value == other.value)
        name_equal = (self.name == other.name)
        return (value_equal and name_equal)

    def __repr__(self):
        return f"POI('{self.name}', value={self.value})"

    def __hash__(self):
        return hash((self.name, self.value))
def p_error(token):
    """PLY error hook: raise LcmParseError at *token* (falsy token == EOF)."""
    if not token:
        raise LcmParseError('Unexpected end of input')
    raise LcmParseError('Unable to parse starting from "{}" on line {}'.format(token.value, token.lineno))
class spatial_attn_layer(nn.Module):
    """Spatial attention: gate the input by a sigmoid map computed from
    its channel-pooled (2-channel) summary."""

    def __init__(self, kernel_size=3):
        super(spatial_attn_layer, self).__init__()
        self.compress = ChannelPool()
        # 'Same' padding for an odd kernel size.
        pad = (kernel_size - 1) // 2
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=pad, relu=False)

    def forward(self, x):
        # pooled summary -> 1-channel logits -> sigmoid gate on the input.
        gate = torch.sigmoid(self.spatial(self.compress(x)))
        return x * gate
def context_decoder_fn_inference(output_fn, encoder_state, embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, num_decoder_symbols, context_vector, dtype=dtypes.int32, name=None, decode_type='greedy'):
    """Build a TF1-style ``decoder_fn`` for inference-time decoding.

    The returned function picks the next token from the cell output —
    greedily, or via noisy argmax for ``decode_type='sample'`` — feeds its
    embedding (optionally concatenated with *context_vector*) back in, and
    keeps the last ``maximum_length + 1`` emitted ids in ``context_state``.
    Decoding finishes at *end_of_sequence_id* or after *maximum_length*
    steps.
    """
    with ops.name_scope(name, 'simple_decoder_fn_inference', [output_fn, encoder_state, embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, num_decoder_symbols, dtype]):
        start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)
        end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)
        # Python-int copy taken before tensor conversion; sizes the id
        # history below ('maxium' typo preserved from the original).
        maxium_length_int = (maximum_length + 1)
        maximum_length = ops.convert_to_tensor(maximum_length, dtype)
        num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)
        encoder_info = nest.flatten(encoder_state)[0]
        batch_size = encoder_info.get_shape()[0].value
        if (output_fn is None):
            output_fn = (lambda x: x)
        if (batch_size is None):
            # Static batch size unknown; fall back to the dynamic shape.
            batch_size = array_ops.shape(encoder_info)[0]

        def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
            """One decoding step; cell_output is None on the first call."""
            with ops.name_scope(name, 'simple_decoder_fn_inference', [time, cell_state, cell_input, cell_output, context_state]):
                if (cell_input is not None):
                    raise ValueError(('Expected cell_input to be None, but saw: %s' % cell_input))
                if (cell_output is None):
                    # First step: emit the start token for every batch row.
                    next_input_id = (array_ops.ones([batch_size], dtype=dtype) * start_of_sequence_id)
                    done = array_ops.zeros([batch_size], dtype=dtypes.bool)
                    cell_state = encoder_state
                    cell_output = array_ops.zeros([num_decoder_symbols], dtype=dtypes.float32)
                    context_state = tf.zeros((batch_size, maxium_length_int), dtype=tf.int32)
                else:
                    cell_output = output_fn(cell_output)
                    if (decode_type == 'sample'):
                        # Gumbel-style noise -log(-log(U)).  NOTE(review):
                        # the standard Gumbel-max trick ADDS the noise to the
                        # logits; here it is subtracted — confirm intent.
                        matrix_U = ((- 1.0) * tf.log(((- 1.0) * tf.log(tf.random_uniform(tf.shape(cell_output), minval=0.0, maxval=1.0)))))
                        next_input_id = math_ops.cast(tf.argmax(tf.subtract(cell_output, matrix_U), dimension=1), dtype=dtype)
                    elif (decode_type == 'greedy'):
                        next_input_id = math_ops.cast(math_ops.argmax(cell_output, 1), dtype=dtype)
                    else:
                        raise ValueError('unknown decode type')
                    done = math_ops.equal(next_input_id, end_of_sequence_id)
                    # Push the new id onto the front of the id history,
                    # dropping the oldest entry.
                    expanded_next_input = tf.expand_dims(next_input_id, axis=1)
                    sliced_context_state = tf.slice(context_state, [0, 0], [(- 1), (maxium_length_int - 1)])
                    context_state = tf.concat([expanded_next_input, sliced_context_state], axis=1)
                    context_state = tf.reshape(context_state, [batch_size, maxium_length_int])
                next_input = array_ops.gather(embeddings, next_input_id)
                if (context_vector is not None):
                    next_input = tf.concat([next_input, context_vector], axis=1)
                # Force 'done' everywhere once past maximum_length.
                done = control_flow_ops.cond(math_ops.greater(time, maximum_length), (lambda : array_ops.ones([batch_size], dtype=dtypes.bool)), (lambda : done))
                return (done, cell_state, next_input, cell_output, context_state)
        return decoder_fn
def threshold_predictions(y, threshold):
    """Binarize predictions: 1 where ``pred > threshold``, else 0.

    Vectorized replacement for the original per-element Python loop; the
    result has the same shape and dtype as ``np.zeros_like(y)`` did.
    """
    y_arr = np.asanyarray(y)
    # Strict '>' matches the original element-wise comparison.
    return (y_arr > threshold).astype(y_arr.dtype)
# NOTE(review): this bare call was presumably a registration decorator
# (e.g. `@register_kl(Beta, Uniform)`) before the `@` was lost — confirm
# against the original file.
_kl(Beta, Uniform)
def _kl_beta_uniform(p, q):
    """KL(Beta *p* || Uniform *q*): -H(p) + log(q.high - q.low)."""
    result = ((- p.entropy()) + (q.high - q.low).log())
    # The KL is infinite wherever the uniform's support fails to cover p's.
    result[((q.low > p.support.lower_bound) | (q.high < p.support.upper_bound))] = inf
    return result
# NOTE(review): likely a Taichi kernel whose `@ti.kernel` decorator was
# stripped in extraction; `grouped`, `img` and `img_c` come from the
# enclosing module scope — confirm.
def img_to_ndarray(arr: ti.types.ndarray()):
    """Copy the module-level image field `img` into ndarray *arr*,
    replicating each pixel value across `img_c` channels."""
    for I in grouped(img):
        for c in range(img_c):
            arr[(I, c)] = img[I]
class NodeDataLoader():
    """Mini-batch loader over the nodes of a single-graph ``Data`` object.

    Depending on construction it yields: the full graph
    (``batch_size='full'``), the full graph with the stage mask restricted
    to each node batch (``hops is None``), or k-hop subgraphs around each
    node batch.  With ``poisson_sampling`` each node is drawn
    independently with probability batch_size/num_nodes instead of
    slicing a (possibly shuffled) permutation.
    """

    def __init__(self, data: Data, stage: Stage, batch_size: Union[(int, Literal['full'])]='full', hops: Optional[int]=None, shuffle: bool=True, drop_last: bool=False, poisson_sampling: bool=False):
        self.data = data
        self.stage = stage
        self.batch_size = batch_size
        self.hops = hops
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.poisson_sampling = poisson_sampling
        self.device = data.x.device
        if (batch_size != 'full'):
            # Only nodes in this stage's mask participate in batching.
            self.node_indices = data[f'{stage}_mask'].nonzero().view((- 1))
            self.num_nodes = self.node_indices.size(0)

    def __iter__(self) -> Iterator[Data]:
        if (self.batch_size == 'full'):
            (yield self.data)
            return
        if (self.shuffle and (not self.poisson_sampling)):
            # Poisson sampling is order-free, so shuffling is skipped there.
            perm = torch.randperm(self.num_nodes, device=self.device)
            self.node_indices = self.node_indices[perm]
        for i in range(0, self.num_nodes, self.batch_size):
            if (self.drop_last and ((i + self.batch_size) > self.num_nodes)):
                break
            if self.poisson_sampling:
                # Independent Bernoulli draw per node; batch size varies.
                sampling_prob = (self.batch_size / self.num_nodes)
                sample_mask = (torch.rand(self.num_nodes, device=self.device) < sampling_prob)
                batch_nodes = self.node_indices[sample_mask]
            else:
                batch_nodes = self.node_indices[i:(i + self.batch_size)]
            if (self.hops is None):
                # Keep the whole graph; restrict the stage mask to the batch.
                batch_mask = torch.zeros(self.data.num_nodes, device=self.device, dtype=torch.bool)
                batch_mask[batch_nodes] = True
                data = Data(**self.data.to_dict())
                data[f'{self.stage}_mask'] = (data[f'{self.stage}_mask'] & batch_mask)
            else:
                if (not hasattr(self, 'edge_index')):
                    # Lazily build a dense edge_index from the sparse adjacency.
                    self.edge_index = torch.stack(self.data.adj_t.t().coo()[:2], dim=0)
                (subset, batch_edge_index, mapping, _) = k_hop_subgraph(node_idx=batch_nodes, num_hops=self.hops, edge_index=self.edge_index, relabel_nodes=True, num_nodes=self.data.num_nodes)
                # Only the seed nodes (not their neighborhoods) are masked in.
                batch_mask = torch.zeros(subset.size(0), device=self.device, dtype=torch.bool)
                batch_mask[mapping] = True
                data = Data(x=self.data.x[subset], y=self.data.y[subset], edge_index=batch_edge_index)
                data[f'{self.stage}_mask'] = batch_mask
                data = ToSparseTensor()(data)
            (yield data)

    def __len__(self) -> int:
        """Batches per epoch (nominal count under Poisson sampling)."""
        if (self.batch_size == 'full'):
            return 1
        elif self.drop_last:
            return (self.num_nodes // self.batch_size)
        else:
            # Ceiling division without math.ceil.
            return (((self.num_nodes + self.batch_size) - 1) // self.batch_size)
class GeneralAddAttConvLayer(MessagePassing):
    """Message-passing conv layer with additive (GAT-style) attention.

    Messages are linearly transformed node features, weighted per head by
    an additive attention coefficient and (optionally) by symmetric GCN
    normalization; aggregation follows ``cfg.gnn.agg``.
    """

    def __init__(self, in_channels, out_channels, improved=False, cached=False, bias=True, **kwargs):
        super(GeneralAddAttConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
        self.heads = cfg.gnn.att_heads
        # Round channel counts down to a multiple of the head count.
        self.in_channels = int(((in_channels // self.heads) * self.heads))
        self.out_channels = int(((out_channels // self.heads) * self.heads))
        self.improved = improved
        self.cached = cached
        self.normalize = cfg.gnn.normalize_adj
        self.negative_slope = 0.2
        self.head_channels = (out_channels // self.heads)
        self.scaling = (self.head_channels ** (- 0.5))
        self.linear_msg = nn.Linear(in_channels, out_channels, bias=False)
        # Attention vector over the concatenated (target, source) features.
        self.att = Parameter(torch.Tensor(1, self.heads, (2 * self.head_channels)))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        glorot(self.att)
        zeros(self.bias)
        # Invalidate any cached normalized adjacency.
        self.cached_result = None
        self.cached_num_edges = None

    # Restored `@staticmethod` (stripped in extraction): the signature has
    # no `self`, and `forward` calls it with exactly these five arguments —
    # as an instance method the call would raise a TypeError.
    @staticmethod
    def norm(edge_index, num_nodes, edge_weight=None, improved=False, dtype=None):
        """Symmetric GCN normalization D^-1/2 (A + c*I) D^-1/2 (c=1 or 2)."""
        if (edge_weight is None):
            edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
        fill_value = (1.0 if (not improved) else 2.0)
        (edge_index, edge_weight) = add_remaining_self_loops(edge_index, edge_weight, fill_value, num_nodes)
        (row, col) = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow((- 0.5))
        # Isolated nodes have zero degree; suppress the resulting inf.
        deg_inv_sqrt[(deg_inv_sqrt == float('inf'))] = 0
        return (edge_index, ((deg_inv_sqrt[row] * edge_weight) * deg_inv_sqrt[col]))

    def forward(self, x, edge_index, edge_weight=None):
        if (self.cached and (self.cached_result is not None)):
            if (edge_index.size(1) != self.cached_num_edges):
                raise RuntimeError('Cached {} number of edges, but found {}. Please disable the caching behavior of this layer by removing the `cached=True` argument in its constructor.'.format(self.cached_num_edges, edge_index.size(1)))
        if ((not self.cached) or (self.cached_result is None)):
            self.cached_num_edges = edge_index.size(1)
            if self.normalize:
                (edge_index, norm) = self.norm(edge_index, x.size(self.node_dim), edge_weight, self.improved, x.dtype)
            else:
                norm = edge_weight
            self.cached_result = (edge_index, norm)
        (edge_index, norm) = self.cached_result
        x = self.linear_msg(x)
        return self.propagate(edge_index, x=x, norm=norm)

    def message(self, edge_index_i, x_i, x_j, norm, size_i):
        """Per-edge message: attention-weighted (and norm-weighted) x_j."""
        x_i = x_i.view((- 1), self.heads, self.head_channels)
        x_j = x_j.view((- 1), self.heads, self.head_channels)
        alpha = (torch.cat([x_i, x_j], dim=(- 1)) * self.att).sum(dim=(- 1))
        alpha = F.leaky_relu(alpha, self.negative_slope)
        # Normalize attention over each target node's incoming edges.
        alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
        alpha = alpha.view((- 1), self.heads, 1)
        # NOTE(review): norm.view(-1, 1) against the 3-D x_j relies on
        # broadcasting that only lines up for head_channels == num_edges
        # patterns — confirm the normalized path is exercised/correct.
        return (((norm.view((- 1), 1) * x_j) * alpha) if (norm is not None) else (x_j * alpha))

    def update(self, aggr_out):
        # Merge heads back into a flat feature dimension and add bias.
        aggr_out = aggr_out.view((- 1), self.out_channels)
        if (self.bias is not None):
            aggr_out = (aggr_out + self.bias)
        return aggr_out

    def __repr__(self):
        return '{}({}, {}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.heads)
def _replace_tone2_style_dict_to_default(string):
    """Convert tone2-style pinyin tokens in *string* back to diacritic
    style via the reverse phonetic-symbol table."""
    # Drop the end-of-string anchor so matches can occur anywhere.
    pattern = re.compile(RE_TONE2.pattern.replace('$', ''))
    table = phonetic_symbol.phonetic_symbol_reverse
    # Unknown (or falsy-mapped) tokens pass through unchanged.
    return pattern.sub(lambda m: table.get(m.group(0)) or m.group(0), string)
def on_mouse_motion(x, y, dx, dy):
    """Map the absolute mouse position to [-1, 1] per axis and store it
    in the shared `action` buffer (screen assumed 1920x1080)."""
    action[0][0] = (((x / 1920) - 0.5) * 2)
    action[0][1] = (((y / 1080) - 0.5) * 2)
class NonNegativeIntegers(UniqueRepresentation, Parent):
    """The (Sage) enumerated set of non-negative integers 0, 1, 2, ...

    Elements are plain ``Integer``s; ``ZZ`` serves as the facade parent.
    """

    def __init__(self, category=None):
        from sage.rings.integer_ring import ZZ
        Parent.__init__(self, facade=ZZ, category=InfiniteEnumeratedSets().or_subcategory(category))

    def _repr_(self):
        return 'Non negative integers'

    def __contains__(self, elt):
        # Membership: *elt* coerces to a non-negative Integer AND the
        # coercion is faithful (i == elt rejects e.g. 1.5).
        try:
            i = Integer(elt)
            return ((i >= Integer(0)) and (i == elt))
        except (TypeError, ValueError):
            return False

    def _element_constructor_(self, i):
        if (i in self):
            return self.from_integer(i)
        # NOTE(review): 'in not in' reads like a typo for 'is not in' in
        # this error message — confirm before changing user-facing text.
        raise ValueError(('Value %s in not in %s.' % (i, self)))

    # Elements are constructed as plain Sage Integers.
    from_integer = Integer
    Element = Integer

    def __iter__(self):
        """Iterate 0, 1, 2, ... without end."""
        i = 0
        while True:
            (yield self.from_integer(i))
            i += 1

    def an_element(self):
        """A representative element (42, by Sage convention)."""
        return self.from_integer(Integer(42))

    def some_elements(self):
        """A small sample of elements for generic testing."""
        return [Integer(0), Integer(1), Integer(3), Integer(42)]

    def next(self, o):
        """Successor of *o* in the enumeration."""
        return self.from_integer((o + 1))

    def unrank(self, rnk):
        """The element of rank *rnk* — i.e. *rnk* itself."""
        return self.from_integer(rnk)

    def _sympy_(self):
        """Return the SymPy counterpart ``Naturals0``."""
        from sympy import Naturals0
        from sage.interfaces.sympy import sympy_init
        sympy_init()
        return Naturals0
# NOTE(review): this bare argument list was presumably
# `@pytest.fixture(scope='module', autouse=True)` before extraction
# stripped the decorator name; as written the line is not valid Python.
(scope='module', autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
    """Write the simple simulation's state into the shared HDF buffer."""
    simulation_verysimple.simulation_state.to_hdf(hdf_file_path, overwrite=True)
# NOTE(review): the bare `.mpi` was presumably `@pytest.mark.mpi` before
# the decorator prefix was lost — confirm.
.mpi
def test_redistribute_matrix_2d_2d_2():
    """Redistribute a matrix from a 2 x (P/2) process grid to P x 1 via DaCe."""
    P = dace.symbol('P', dace.int32)

    # NOTE(review): likely originally decorated with `@dace.program`
    # (to_sdfg() is called on it below) — confirm.
    def matrix_2d_2d_2(A: dace.int32[((4 * P), 16)]):
        a_grid = dace.comm.Cart_create([2, (P // 2)])
        b_grid = dace.comm.Cart_create([P, 1])
        B = np.empty_like(A, shape=(8, (8 * P)))
        a_arr = dace.comm.Subarray(((8 * P), (8 * P)), A, process_grid=a_grid)
        b_arr = dace.comm.Subarray(((8 * P), (8 * P)), B, process_grid=b_grid)
        rdistr = dace.comm.Redistribute(A, a_arr, B, b_arr)
        return B

    from mpi4py import MPI
    commworld = MPI.COMM_WORLD
    rank = commworld.Get_rank()
    size = commworld.Get_size()
    # Use the largest even process count available.
    even_size = ((size // 2) * 2)
    if (size < 2):
        raise ValueError('Please run this test with at least two processes.')
    sdfg = None
    if (rank == 0):
        # Only rank 0 builds the SDFG; distributed_compile shares it.
        sdfg = matrix_2d_2d_2.to_sdfg()
    func = utils.distributed_compile(sdfg, commworld)
    A = np.arange(((64 * even_size) * even_size), dtype=np.int32).reshape((8 * even_size), (8 * even_size))
    # Local views of A under the source (lA) and target (lB) distributions.
    lA = A.reshape(2, (4 * even_size), (even_size // 2), 16).transpose(0, 2, 1, 3)
    lB = A.reshape(even_size, 8, 1, (8 * even_size)).transpose(0, 2, 1, 3)
    if (rank < even_size):
        B = func(A=lA[((rank // (even_size // 2)), (rank % (even_size // 2)))].copy(), P=even_size)
    else:
        # Spare (odd-one-out) ranks participate with a dummy buffer.
        B = func(A=np.zeros((1,), dtype=np.int32), P=even_size)
    if (rank < even_size):
        assert np.array_equal(B, lB[(rank, 0)])
def main(outdir):
    """Generate scipy.special's ufunc wrapper sources into *outdir*.

    Skips regeneration when every destination file is newer than the
    sources (this script, functions.json, _add_newdocs.py).
    """
    pwd = os.path.dirname(__file__)
    src_files = (os.path.abspath(__file__), os.path.abspath(os.path.join(pwd, 'functions.json')), os.path.abspath(os.path.join(pwd, '_add_newdocs.py')))
    dst_files = ('_ufuncs.pyx', '_ufuncs_defs.h', '_ufuncs_cxx.pyx', '_ufuncs_cxx.pxd', '_ufuncs_cxx_defs.h', '_ufuncs.pyi', 'cython_special.pyx', 'cython_special.pxd')
    dst_files = (os.path.join(outdir, f) for f in dst_files)
    # functions.json below is opened relative to BASE_DIR.
    os.chdir(BASE_DIR)
    if all_newer(src_files, dst_files):
        print('scipy/special/_generate_pyx.py: all files up-to-date')
        return
    (ufuncs, fused_funcs) = ([], [])
    with open('functions.json') as data:
        functions = json.load(data)
    # Build both the classic ufunc and the fused-type variants per entry.
    for (f, sig) in functions.items():
        ufuncs.append(Ufunc(f, sig))
        fused_funcs.append(FusedFunc(f, sig))
    generate_ufuncs(os.path.join(outdir, '_ufuncs'), os.path.join(outdir, '_ufuncs_cxx'), ufuncs)
    generate_ufuncs_type_stubs(os.path.join(outdir, '_ufuncs'), ufuncs)
    generate_fused_funcs(os.path.join(outdir, 'cython_special'), os.path.join(outdir, '_ufuncs'), fused_funcs)
def train(hparams, run_opts):
    """Sequentially fine-tune the ASR model on each new locale.

    Optionally maintains MAS importance weights between locales and
    evaluates after every locale on all locales seen so far.
    """
    if (hparams['pretrained_wavlm_path'] is not None):
        hparams['wavlm'].load_state_dict(torch.load(hparams['pretrained_wavlm_path']))
    # Baseline WER on the base locales before any continual training.
    test(hparams, run_opts, hparams['base_locales'], f'wer_test_before.txt')
    for (i, locale) in enumerate(hparams['new_locales']):
        # MAS importances accumulated up to the previous locale (None on i==0).
        old_mas_params = hparams.pop('mas_params', None)
        if (not hparams['skip_mas']):
            if (i == 0):
                mas_params = compute_mas_params(hparams, run_opts, hparams['base_locales'])
            else:
                mas_params = compute_mas_params(hparams, run_opts, [hparams['new_locales'][(i - 1)]])
                # Exponential blend of old and new importances.
                # NOTE(review): indentation was lost in extraction; the loop
                # is placed in the i > 0 branch because `old_mas_params` is
                # None on the first iteration — confirm against the original.
                for name in mas_params[1]:
                    if (name in old_mas_params[1]):
                        old_importance = old_mas_params[1][name]
                        mas_params[1][name] *= (1 - hparams['mas_alpha'])
                        mas_params[1][name] += (hparams['mas_alpha'] * old_importance)
            hparams['mas_params'] = mas_params
        run_on_main(prepare_common_voice, kwargs={'locales': [locale], 'data_folder': hparams['data_folder'], 'max_durations': hparams['max_durations']})
        tokenizer = hparams['wavlm'].tokenizer
        logging.info(f"Total number of tokens: {hparams['wavlm'].model.decoder.out_proj.out_features}")
        (train_data, valid_data, _) = dataio_prepare(hparams, tokenizer)
        # Each locale checkpoints into its own sub-folder.
        checkpoint_folder = os.path.join(hparams['save_folder'], locale)
        os.makedirs(checkpoint_folder, exist_ok=True)
        hparams['checkpointer'].checkpoints_dir = pathlib.Path(checkpoint_folder)
        # Reset LR-annealing state before starting the new locale.
        hparams['lr_annealing'].hyperparam_value = hparams['lr']
        hparams['lr_annealing'].metric_values.clear()
        hparams['lr_annealing'].current_patient = 0
        asr_brain = ASR(modules=hparams['modules'], hparams=hparams, run_opts=run_opts, opt_class=hparams['opt_class'], checkpointer=hparams['checkpointer'])
        asr_brain.tokenizer = tokenizer
        hparams['valid_dataloader_kwargs'].pop('ckpt_prefix', None)
        hparams['epoch_counter'].current = 0
        asr_brain.fit(hparams['epoch_counter'], train_data, valid_data, train_loader_kwargs=hparams['train_dataloader_kwargs'], valid_loader_kwargs=hparams['valid_dataloader_kwargs'])
        # Evaluate on everything seen so far to measure forgetting.
        test(hparams, run_opts, (hparams['base_locales'] + hparams['new_locales'][:(i + 1)]), f'wer_test_after_{locale}.txt')
class TypeTracerArray(NDArrayOperatorsMixin, ArrayLike):
    """Shape/dtype-only stand-in for an ndarray used by Awkward's TypeTracer.

    Carries no data: operations propagate dtype and (possibly unknown)
    shape, and record shape/data "touches" on an optional
    TypeTracerReport.

    NOTE(review): several decorators appear stripped by extraction —
    `_new` reads like a `@classmethod`; the zero-argument accessors
    (`T`, `dtype`, `size`, `shape`, `form_key`, `report`, `nplike`,
    `ndim`, `nbytes`, `ctypes`) like `@property`; the stray `_key.setter`
    line like `@form_key.setter`; and the duplicated `report` definitions
    like a property/setter pair.  Confirm against the original file.
    """
    _dtype: numpy.dtype
    _shape: tuple[(ShapeItem, ...)]

    def __new__(cls, *args, **kwargs):
        # Instances are only ever created through `_new`.
        raise TypeError("internal_error: the `TypeTracer` nplike's `TypeTracerArray` object should never be directly instantiated")

    def __reduce__(self):
        # Pickle via object.__new__ to bypass the guarded __new__ above.
        return (object.__new__, (type(self),), vars(self))

    def _new(cls, dtype: DType, shape: tuple[(ShapeItem, ...)], form_key: (str | None)=None, report: (TypeTracerReport | None)=None):
        """Internal constructor; validates *shape* and *dtype*."""
        self = super().__new__(cls)
        self.form_key = form_key
        self.report = report
        if (not isinstance(shape, tuple)):
            raise TypeError('typetracer shape must be a tuple')
        if (not all(((isinstance(x, int) or (x is unknown_length)) for x in shape))):
            raise TypeError('typetracer shape must be integers or unknown-length')
        if (not isinstance(dtype, np.dtype)):
            raise TypeError('typetracer dtype must be an instance of np.dtype')
        self._shape = shape
        self._dtype = dtype
        return self

    def __repr__(self):
        dtype = repr(self._dtype)
        if (self.shape is None):
            shape = ''
        else:
            shape = (', shape=' + repr(self._shape))
        return f'TypeTracerArray({dtype}{shape})'

    def __str__(self):
        if (self.ndim == 0):
            # Scalar placeholder rendering.
            return '##'
        else:
            return repr(self)

    def T(self) -> Self:
        """Transposed view: same dtype, reversed shape."""
        return TypeTracerArray._new(self.dtype, self._shape[::(- 1)], self.form_key, self.report)

    def dtype(self) -> DType:
        return self._dtype

    def size(self) -> ShapeItem:
        # Product of all axis lengths (may be unknown_length).
        size: ShapeItem = 1
        for item in self._shape:
            size *= item
        return size

    def shape(self) -> tuple[(ShapeItem, ...)]:
        # Reading the shape counts as a shape "touch" for the report.
        self.touch_shape()
        return self._shape

    def form_key(self) -> (str | None):
        return self._form_key

    # NOTE(review): garbled line — presumably `@form_key.setter`.
    _key.setter
    def form_key(self, value: (str | None)):
        if ((value is not None) and (not isinstance(value, str))):
            raise TypeError('form_key must be None or a string')
        self._form_key = value

    def report(self) -> (TypeTracerReport | None):
        return self._report

    # NOTE(review): duplicate name — presumably the `@report.setter` pair.
    def report(self, value: (TypeTracerReport | None)):
        if ((value is not None) and (not isinstance(value, TypeTracerReport))):
            raise TypeError('report must be None or a TypeTracerReport')
        self._report = value

    def touch_shape(self):
        # Record that this array's shape was inspected.
        if (self._report is not None):
            self._report.touch_shape(self._form_key)

    def touch_data(self):
        # Record that this array's data would have been read.
        if (self._report is not None):
            self._report.touch_data(self._form_key)

    def nplike(self) -> TypeTracer:
        return TypeTracer.instance()

    def ndim(self) -> int:
        return len(self._shape)

    def nbytes(self) -> ShapeItem:
        return (self.size * self._dtype.itemsize)

    def view(self, dtype: DTypeLike) -> Self:
        """Reinterpret the buffer as *dtype*, rescaling the last axis."""
        dtype = np.dtype(dtype)
        if (len(self._shape) >= 1):
            (last, remainder) = divmod((self._shape[(- 1)] * self._dtype.itemsize), dtype.itemsize)
            if ((remainder is not unknown_length) and (remainder != 0)):
                raise ValueError('new size of array with larger dtype must be a divisor of the total size in bytes (of the last axis of the array)')
            shape = (self._shape[:(- 1)] + (last,))
        else:
            shape = self._shape
        return self._new(dtype, shape=shape, form_key=self._form_key, report=self._report)

    def forget_length(self) -> Self:
        """Copy with the leading dimension replaced by unknown_length."""
        return self._new(self._dtype, ((unknown_length,) + self._shape[1:]), self._form_key, self._report)

    def __iter__(self):
        raise AssertionError('bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array')

    def __array__(self, dtype=None):
        raise AssertionError('bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array')

    class _CTypes():
        # Minimal stand-in for ndarray.ctypes with a null data pointer.
        data = 0

    def ctypes(self):
        return self._CTypes

    def __len__(self):
        raise AssertionError('bug in Awkward Array: attempt to get length of a TypeTracerArray')

    def __getitem__(self, key: ((((SupportsIndex | slice) | EllipsisType) | tuple[((((SupportsIndex | slice) | EllipsisType) | ArrayLike), ...)]) | ArrayLike)) -> Self:
        """Simulate NumPy basic/advanced indexing on shapes only."""
        if (not isinstance(key, tuple)):
            key = (key,)
        # First pass: classify index items per NumPy's advanced-indexing rules.
        has_seen_ellipsis = 0
        n_basic_non_ellipsis = 0
        n_advanced = 0
        for item in key:
            if (isinstance(item, (slice, int)) or is_unknown_integer(item)):
                n_basic_non_ellipsis += 1
            elif (isinstance(item, TypeTracerArray) and (np.issubdtype(item.dtype, np.integer) or np.issubdtype(item.dtype, np.bool_))):
                n_advanced += 1
            elif (item is Ellipsis):
                if (not has_seen_ellipsis):
                    has_seen_ellipsis = True
                else:
                    raise NotImplementedError('only one ellipsis value permitted for advanced index')
            elif (item is np.newaxis):
                pass
            else:
                raise NotImplementedError('only integer, unknown scalar, slice, ellipsis, or array indices are permitted')
        n_dim_index = (n_basic_non_ellipsis + n_advanced)
        if (n_dim_index > self.ndim):
            raise IndexError(f'too many indices for array: array is {self.ndim}-dimensional, but {n_dim_index} were indexed')
        # Second pass: expand ellipses and convert boolean masks to indices.
        key_parts: list[((SupportsIndex | slice) | ArrayLike)] = []
        for item in key:
            if (item is Ellipsis):
                n_missing_dims = (self.ndim - n_dim_index)
                key_parts.extend(((slice(None),) * n_missing_dims))
            elif (is_unknown_array(item) and np.issubdtype(item, np.bool_)):
                key_parts.append(self.nplike.nonzero(item)[0])
            else:
                key_parts.append(item)
        key = tuple(key_parts)
        # Third pass: build the result shape, tracking where the broadcast
        # advanced-index shape lands (front if advanced indices are split).
        advanced_is_at_front = False
        previous_item_is_basic = True
        advanced_shapes: list[tuple[(ShapeItem, ...)]] = []
        adjacent_advanced_shape: list[ShapeItem] = []
        result_shape_parts: list[Sequence[ShapeItem]] = []
        iter_shape = iter(self.shape)
        for item in key:
            if (item is np.newaxis):
                result_shape_parts.append((1,))
                previous_item_is_basic = True
            else:
                dimension_length = next(iter_shape)
                if (n_advanced and (isinstance(item, int) or is_unknown_integer(item) or is_unknown_array(item))):
                    try_touch_data(item)
                    try_touch_data(self)
                    item = self.nplike.asarray(item)
                    if (not advanced_shapes):
                        # Reserve the slot where adjacent advanced shapes go.
                        result_shape_parts.append(adjacent_advanced_shape)
                    elif previous_item_is_basic:
                        # Advanced indices split by a basic index move to front.
                        advanced_is_at_front = True
                    advanced_shapes.append(item.shape)
                    previous_item_is_basic = False
                elif isinstance(item, slice):
                    (start, stop, step, slice_length) = self.nplike.derive_slice_for_length(item, dimension_length)
                    result_shape_parts.append((slice_length,))
                    previous_item_is_basic = True
                elif (isinstance(item, int) or is_unknown_integer(item)):
                    try_touch_data(item)
                    try_touch_data(self)
                    item = self.nplike.asarray(item)
                    if (is_unknown_length(dimension_length) or is_unknown_integer(item)):
                        continue
                    if (not (0 <= item < dimension_length)):
                        raise NotImplementedError('integer index out of bounds')
        advanced_shape = self.nplike.broadcast_shapes(*advanced_shapes)
        if advanced_is_at_front:
            result_shape_parts.insert(0, advanced_shape)
        else:
            adjacent_advanced_shape[:] = advanced_shape
        broadcast_shape = tuple((i for p in result_shape_parts for i in p))
        # Unconsumed trailing dimensions pass through unchanged.
        result_shape = (broadcast_shape + tuple(iter_shape))
        return self._new(self._dtype, result_shape, self._form_key, self._report)

    def __setitem__(self, key: ((((SupportsIndex | slice) | EllipsisType) | tuple[((((SupportsIndex | slice) | EllipsisType) | ArrayLike), ...)]) | ArrayLike), value: ((((int | float) | bool) | complex) | ArrayLike)):
        # Validate the key/value shapes only; no data is ever stored.
        existing_value = self.__getitem__(key)
        if (isinstance(value, TypeTracerArray) and (value.ndim > existing_value.ndim)):
            raise ValueError('cannot assign shape larger than destination')

    def copy(self):
        # Copying would read the data, so record a data touch.
        self.touch_data()
        return self

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        kwargs.pop('out', None)
        if (len(kwargs) > 0):
            raise ValueError('TypeTracerArray does not support kwargs for ufuncs')
        return self.nplike.apply_ufunc(ufunc, method, inputs, kwargs)

    def __bool__(self) -> bool:
        raise RuntimeError('cannot realise an unknown value')

    def __int__(self) -> int:
        raise RuntimeError('cannot realise an unknown value')

    def __index__(self) -> int:
        raise RuntimeError('cannot realise an unknown value')

    def __dlpack_device__(self) -> tuple[(int, int)]:
        raise RuntimeError('cannot realise an unknown value')

    def __dlpack__(self, stream: Any=None) -> Any:
        raise RuntimeError('cannot realise an unknown value')
class val_Dataset():
    """Validation dataset yielding [case_name, cropped image, mask, positions]."""

    def __init__(self, img_list):
        # Image root directory comes from the module-level `opt` config.
        self.img_path = opt.path_img
        self.img_list = img_list

    def __getitem__(self, idx):
        name = self.img_list[idx]
        crop_img, pos_list, tmp_mask = in_model.get_val_img(self.img_path, name)
        return [name, crop_img, tmp_mask, pos_list]

    def __len__(self):
        return len(self.img_list)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.