code stringlengths 101 5.91M |
|---|
class Conv1_1_Branch(nn.Module):
    """Pointwise (kernel-size-1) Conv1d -> BatchNorm1d -> ReLU6 branch."""

    def __init__(self, in_ch, block_ch):
        super(Conv1_1_Branch, self).__init__()
        # The 1x1 convolution remaps channel count without touching the
        # temporal dimension; BN carries no affine parameters.
        pointwise = nn.Conv1d(in_channels=in_ch, out_channels=block_ch, kernel_size=1, stride=1, padding=0, bias=False)
        norm = nn.BatchNorm1d(block_ch, affine=False, track_running_stats=True)
        activation = nn.ReLU6(inplace=True)
        self.conv1_1 = nn.Sequential(pointwise, norm, activation)

    def forward(self, x):
        # x: (batch, in_ch, length) -> (batch, block_ch, length)
        return self.conv1_1(x)
# NOTE(review): the registration call appears with its '@' stripped in the
# extracted source; restored as a decorator, which is how registries are used.
@_REGISTRY.register()
class DescribableTextures(DatasetBase):
    """Describable Textures Dataset (DTD) with optional few-shot subsampling.

    Loads the saved train/val/test split when present, otherwise creates and
    persists a fresh random split. Few-shot subsets are cached as pickles
    keyed by shot count and seed; classes may additionally be subsampled.
    """
    dataset_dir = 'dtd'

    def __init__(self, cfg):
        root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = os.path.join(root, self.dataset_dir)
        self.image_dir = os.path.join(self.dataset_dir, 'images')
        self.split_path = os.path.join(self.dataset_dir, 'split_zhou_DescribableTextures.json')
        self.split_fewshot_dir = os.path.join(self.dataset_dir, 'split_fewshot')
        mkdir_if_missing(self.split_fewshot_dir)
        if os.path.exists(self.split_path):
            (train, val, test) = OxfordPets.read_split(self.split_path, self.image_dir)
        else:
            (train, val, test) = self.read_and_split_data(self.image_dir)
            OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
        num_shots = cfg.DATASET.NUM_SHOTS
        if num_shots >= 1:
            seed = cfg.SEED
            preprocessed = os.path.join(self.split_fewshot_dir, f'shot_{num_shots}-seed_{seed}.pkl')
            if os.path.exists(preprocessed):
                print(f'Loading preprocessed few-shot data from {preprocessed}')
                with open(preprocessed, 'rb') as file:
                    data = pickle.load(file)
                    (train, val) = (data['train'], data['val'])
            else:
                train = self.generate_fewshot_dataset(train, num_shots=num_shots)
                # Validation stays small: at most 4 shots per class.
                val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
                data = {'train': train, 'val': val}
                print(f'Saving preprocessed few-shot data to {preprocessed}')
                with open(preprocessed, 'wb') as file:
                    pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)
        subsample = cfg.DATASET.SUBSAMPLE_CLASSES
        (train, val, test) = OxfordPets.subsample_classes(train, val, test, subsample=subsample)
        super().__init__(train_x=train, val=val, test=test)

    # BUG FIX: this helper takes `image_dir` as its first parameter and is
    # invoked as `self.read_and_split_data(self.image_dir)` above; without
    # @staticmethod, `self` would be bound to `image_dir`.
    @staticmethod
    def read_and_split_data(image_dir, p_trn=0.5, p_val=0.2, ignored=None, new_cnames=None):
        """Randomly split per-category images into train/val/test lists of Datum.

        Args:
            image_dir: directory containing one sub-directory per class.
            p_trn, p_val: train/val fractions; test receives the remainder.
            ignored: optional collection of category names to skip
                (default changed from a mutable `[]` to None — same behavior).
            new_cnames: optional mapping renaming categories in the output.
        """
        if ignored is None:
            ignored = []
        categories = listdir_nohidden(image_dir)
        categories = [c for c in categories if c not in ignored]
        categories.sort()
        p_tst = (1 - p_trn) - p_val
        print(f'Splitting into {p_trn:.0%} train, {p_val:.0%} val, and {p_tst:.0%} test')

        def _collate(ims, y, c):
            # Wrap each image path in a Datum with its label and class name.
            items = []
            for im in ims:
                item = Datum(impath=im, label=y, classname=c)
                items.append(item)
            return items

        (train, val, test) = ([], [], [])
        for (label, category) in enumerate(categories):
            category_dir = os.path.join(image_dir, category)
            images = listdir_nohidden(category_dir)
            images = [os.path.join(category_dir, im) for im in images]
            random.shuffle(images)
            n_total = len(images)
            n_train = round(n_total * p_trn)
            n_val = round(n_total * p_val)
            n_test = (n_total - n_train) - n_val
            assert (n_train > 0) and (n_val > 0) and (n_test > 0)
            if (new_cnames is not None) and (category in new_cnames):
                category = new_cnames[category]
            train.extend(_collate(images[:n_train], label, category))
            val.extend(_collate(images[n_train:(n_train + n_val)], label, category))
            test.extend(_collate(images[(n_train + n_val):], label, category))
        return (train, val, test)
def safe_log(a: Tensor, *, eps: Optional[float]=None) -> Tensor:
    """Numerically safe log: the backend clips `a` below by `eps` first.

    When `eps` is None it defaults to the smallest positive (subnormal)
    value representable in `a`'s dtype.
    """
    if eps is None:
        # Mapping from dtype name to its tiniest positive subnormal value.
        tiny_for_dtype = {
            'float16': 6e-08,
            'bfloat16': 9.1835e-41,
            'float32': 1.4013e-45,
            'float64': 5e-324,
        }
        eps = tiny_for_dtype[a.dtype]
    return a._raw_backend.safe_log(a, eps=eps)
class Binarizer():
    """Static helpers for binarizing a text corpus with a dictionary.

    Both methods take all state as arguments and are invoked without an
    instance, so they are declared @staticmethod (the decorators appear to
    have been stripped from the extracted source; without them, calling via
    an instance would misbind the first argument).
    """

    @staticmethod
    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=(- 1)):
        """Encode lines of `filename` within byte range [offset, end).

        Each encoded line's id tensor is passed to `consumer`. Returns stats:
        number of sequences, tokens, unknown tokens, and a Counter of words
        replaced by <unk>. NOTE: `dict` shadows the builtin but is kept for
        backward compatibility with keyword callers.
        """
        (nseq, ntok) = (0, 0)
        replaced = Counter()

        def replaced_consumer(word, idx):
            # Track words mapped to the unknown index (excluding the literal unk word).
            if (idx == dict.unk_index) and (word != dict.unk_word):
                replaced.update([word])

        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # Skip to the next full line so concurrent chunks never split a line.
            line = safe_readline(f)
            while line:
                if (end > 0) and (f.tell() > end):
                    break
                ids = dict.encode_line(line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order)
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}

    @staticmethod
    def find_offsets(filename, num_chunks):
        """Return num_chunks+1 byte offsets splitting `filename` on line boundaries.

        offsets[0] is 0; offsets[num_chunks] stays 0 (callers treat it as
        end-of-file via the `end <= 0` convention in `binarize`).
        """
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # Align each offset to the start of the following line.
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
def parse_args():
    """Parse CLI arguments for Sr3D annotation generation and persist them.

    Creates `save_dir` if missing and writes the parsed configuration to
    'sr3d_configs.json.txt' inside it for reproducibility.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser('Generating annotations for spatial 3D reference (Sr3D).')
    # Required I/O locations.
    parser.add_argument('-preprocessed_scannet_file', type=str, help='.pkl (output) of prepare_scannet_data.py', required=True)
    parser.add_argument('-valid_targets_file', type=str, help='.txt file describing (one line per) the target classes for which we will make language.', required=True)
    parser.add_argument('-save_dir', type=str, help='top-dir to save the results.', required=True)
    # Typo fix in help text: 'gerneated' -> 'generated'.
    parser.add_argument('-name', type=str, help='Name of the generated sr3d csv file.', required=True)
    # Generation behaviour knobs.
    parser.add_argument('--verbose', type=str2bool, default=True, help='verbose')
    parser.add_argument('--stimulus_is_too_hard', type=int, default=6, help='ignore cases that include more than `this` contrasting, same-class objects.')
    parser.add_argument('--max_samples_per_context', type=int, default=1, help='max number of utterances to produce per context/reference-type (by simple sampling linguistic variations, see: SYNTHETIC_TO_HUMAN_LANGUAGE_FILE).')
    parser.add_argument('--random_seed', type=int, default=3, help='seed used in sampling template utterances for each reference.')
    parser.add_argument('--targets-must-be-multiple', type=str2bool, default='false')
    parser.add_argument('--anchor_must_be_unique', type=str2bool, default='true')
    args = parser.parse_args()
    args_string = pprint.pformat(vars(args))
    print(args_string)
    if not osp.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    # Save the full configuration next to the generated outputs.
    with open(osp.join(args.save_dir, 'sr3d_configs.json.txt'), 'w') as fout:
        json.dump(vars(args), fout, indent=4, sort_keys=True)
    return args
class DonutSwinModelTester():
    """Test helper that builds small DonutSwin configs and inputs and checks model output shapes."""

    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-05, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        # `parent` is the unittest TestCase used for assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return (config, pixel_values, labels)

    def get_config(self):
        """Build a DonutSwinConfig from this tester's hyperparameters."""
        # NOTE(review): `path_norm=` looks like a typo for `patch_norm=` —
        # confirm against DonutSwinConfig's accepted parameters before changing,
        # since unknown kwargs may be silently absorbed.
        return DonutSwinConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)

    def create_and_check_model(self, config, pixel_values, labels):
        """Run a forward pass and assert the last hidden state has the expected shape."""
        model = DonutSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence length shrinks 4x per merge stage (len(depths)-1 merges);
        # hidden width doubles per merge stage.
        expected_seq_len = (((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)))
        expected_dim = int((config.embed_dim * (2 ** (len(config.depths) - 1))))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape shared model tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return (config, inputs_dict)
def parse_args():
    """Parse command-line options for the particle subsampling script."""
    import argparse

    parser = argparse.ArgumentParser('Script for subsampling particles from a coordinates table')
    # One positional input plus two sampling controls.
    parser.add_argument('file', help='path to particle coordinates file')
    parser.add_argument('-n', '--number', type=int, help='number of particles to sample')
    parser.add_argument('--seed', type=int, default=0, help='random seed for sampling')
    return parser.parse_args()
def compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale, skip_gpipe_seq=False):
    """Report time-to-best-result and epoch speedup for each slow/fast algorithm pairing.

    The two comparisons against the sequential GPipe baseline are skipped when
    `skip_gpipe_seq` is True.
    """
    pairings = []
    if not skip_gpipe_seq:
        pairings.append((seq_gpipe_dict, virtual_stale_dict, seq_gpipe_times, virtual_times_stale, 'gpipe_seq', 'stale_mixed'))
        pairings.append((seq_gpipe_dict, virtual_gpipe_dict, seq_gpipe_times, virtual_times_gpipe, 'gpipe_seq', 'gpipe_mixed'))
    pairings.append((virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale, 'gpipe_mixed', 'stale_mixed'))
    pairings.append((seq_stale_dict, virtual_stale_dict, seq_stale_times, virtual_times_stale, 'stale_seq', 'stale_mixed'))
    for slow_dict, fast_dict, slow_times, fast_times, slow_name, fast_name in pairings:
        time_to_best_result(slow_dict, fast_dict, slow_times, fast_times, slow_alg_name=slow_name, fast_alg_name=fast_name)
        print('epoch_speedup', epoch_speedup_from_cumsum_times(slow_times, fast_times))
class Annotator(Callback):
    """Lightning callback that auto-generates language annotations for play data.

    During training/validation it replays stored episodes in simulation,
    detects which task each sub-sequence solves, samples a natural-language
    instruction for it, and saves annotations plus language-model embeddings.
    """

    def __init__(self, cfg):
        self.envs = None
        self.cfg = cfg
        self.device = None
        self.lang_folder = cfg.lang_folder
        self.tasks = hydra.utils.instantiate(cfg.callbacks.rollout.tasks)
        # Per-split counters of how many annotations each task has so far.
        self.demo_task_counter_train = Counter()
        self.demo_task_counter_val = Counter()
        self.train_dataset = None
        self.val_dataset = None
        self.file_name = 'auto_lang_ann.npy'
        self.train_lang_folder = None
        self.val_lang_folder = None
        # Accumulators: instruction text, task name, embedding, plus the
        # (start, end) dataset index of every annotated sub-sequence.
        self.collected_data_train = {'language': {'ann': [], 'task': [], 'emb': []}, 'info': {'episodes': [], 'indx': []}}
        self.collected_data_val = {'language': {'ann': [], 'task': [], 'emb': []}, 'info': {'episodes': [], 'indx': []}}
        self.lang_model = None
        self.num_samples_train = None
        self.num_samples_val = None
        self.finished_annotation_val = False
        self.scene_idx_info = None

    # NOTE(review): the bare `_zero_only` below looks like the remnant of a
    # stripped decorator (e.g. @rank_zero_only); as written it is evaluated
    # and discarded — confirm against the original source.
    _zero_only

    def create_folders(self):
        """Create the train/val language-annotation output folders."""
        self.train_lang_folder = (self.train_dataset.abs_datasets_dir / self.lang_folder)
        self.train_lang_folder.mkdir(parents=True, exist_ok=True)
        self.val_lang_folder = (self.val_dataset.abs_datasets_dir / self.lang_folder)
        self.val_lang_folder.mkdir(parents=True, exist_ok=True)

    # NOTE(review): same stripped-decorator remnant as above.
    _zero_only

    def compute_val_embeddings(self):
        """Embed the configured validation instructions and save them to disk."""
        val_sent = self.cfg.val_instructions
        embeddings = {}
        for (task, ann) in val_sent.items():
            embeddings[task] = {}
            language_embedding = self.lang_model(list(ann))
            embeddings[task]['emb'] = language_embedding.cpu().numpy()
            embeddings[task]['ann'] = ann
        np.save((self.val_lang_folder / 'embeddings'), embeddings)
        logger.info('Done saving val language embeddings for Rollouts !')

    def init_vars(self, trainer, pl_module):
        """Lazily set up datasets, simulation envs, folders, and the language model."""
        self.device = pl_module.device
        self.val_dataset = trainer.val_dataloaders[0].dataset.datasets['vis']
        self.train_dataset = trainer.train_dataloader.dataset.datasets['vis']
        # scene_info.npy maps each scene to its episode index interval.
        self.scene_idx_info = np.load((self.train_dataset.abs_datasets_dir / 'scene_info.npy'), allow_pickle=True).item()
        self.envs = {scene: hydra.utils.instantiate(self.cfg.callbacks.rollout.env_cfg, self.val_dataset, pl_module.device, scene=scene) for (scene, _) in self.scene_idx_info.items()}
        # Make sure the validation scene has an env even if it holds no train episodes.
        if (self.cfg.validation_scene not in self.envs):
            self.envs[self.cfg.validation_scene] = hydra.utils.instantiate(self.cfg.callbacks.rollout.env_cfg, self.val_dataset, pl_module.device, scene=self.cfg.validation_scene, cameras=())
        self.create_folders()
        self.lang_model = hydra.utils.instantiate(self.cfg.model)
        self.compute_val_embeddings()
        # Target number of annotations per task for each split.
        self.num_samples_train = int(((self.cfg.eps * len(self.train_dataset)) / len(self.cfg.train_instructions.keys())))
        self.num_samples_val = int(((self.cfg.eps * len(self.val_dataset)) / len(self.cfg.train_instructions.keys())))

    def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        if (self.envs is None):
            self.init_vars(trainer, pl_module)

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        if (self.envs is None):
            self.init_vars(trainer, pl_module)

    def on_validation_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        """Annotate a validation batch and merge counters across ranks."""
        batch = (batch['vis'] if isinstance(batch, dict) else batch)
        (self.collected_data_val, self.demo_task_counter_val, current_task_counter) = self.annotate(batch, self.val_dataset, self.collected_data_val, self.demo_task_counter_val, self.num_samples_val)
        if (dist.is_available() and dist.is_initialized()):
            # Sum this batch's per-task counts over all distributed workers.
            global_counters = [None for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather_object(global_counters, current_task_counter)
            current_task_counter = reduce(add, global_counters)
        self.demo_task_counter_val += current_task_counter
        if self.check_done(self.demo_task_counter_val, self.num_samples_val, batch_idx, trainer.num_val_batches[0], 'val'):
            print()
            print()
            print()
            logger.info('Finished annotating val dataset')
            print()
            print()
            print()
            self.finished_annotation_val = True

    def on_train_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        """Annotate a training batch and merge counters across ranks."""
        batch = (batch['vis'] if isinstance(batch, dict) else batch)
        (self.collected_data_train, self.demo_task_counter_train, current_task_counter) = self.annotate(batch, self.train_dataset, self.collected_data_train, self.demo_task_counter_train, self.num_samples_train)
        if (dist.is_available() and dist.is_initialized()):
            global_counters = [None for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather_object(global_counters, current_task_counter)
            current_task_counter = reduce(add, global_counters)
        self.demo_task_counter_train += current_task_counter
        if self.check_done(self.demo_task_counter_train, self.num_samples_train, batch_idx, trainer.num_training_batches, 'train'):
            print()
            print()
            print()
            log_rank_0('Finished annotating train dataset')
            print()
            print()
            print()
            pl_module.finished_annotation_train = True

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, unused: Optional[int]=None) -> None:
        self.save_and_postprocess(self.collected_data_train, self.train_lang_folder, 'train', len(self.train_dataset))

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        self.save_and_postprocess(self.collected_data_val, self.val_lang_folder, 'val', len(self.val_dataset))

    def save_and_postprocess(self, collected_data, lang_folder, mod, length):
        """Gather annotations across ranks, optionally embed them, and save to disk.

        Rank 0 writes the merged data; other ranks return early after the
        all-gather. Also logs the vision/language datapoint ratio.
        """
        if (dist.is_available() and dist.is_initialized()):
            global_collected_data = [None for _ in range(dist.get_world_size())]
            torch.distributed.all_gather_object(global_collected_data, collected_data)
            if (dist.get_rank() == 0):
                global_collected_data = merge_data(global_collected_data)
                np.save('lang_ann', global_collected_data)
        else:
            np.save('lang_ann', collected_data)
        if self.cfg.postprocessing:
            # Embed all collected instructions with the language model.
            language = collected_data['language']['ann']
            language_embedding = self.lang_model(language)
            collected_data['language']['emb'] = language_embedding.cpu().numpy()
            logger.info(f'Done extracting {mod} language embeddings !')
            if (dist.is_available() and dist.is_initialized()):
                global_collected_data = [None for _ in range(dist.get_world_size())]
                torch.distributed.all_gather_object(global_collected_data, collected_data)
                if (dist.get_rank() != 0):
                    return
                collected_data = merge_data(global_collected_data)
            np.save(self.file_name, collected_data)
            np.save((lang_folder / self.file_name), collected_data)
            logger.info(f'Done saving {mod} language annotations !')
        lang_length = float(len(collected_data['language']['ann']))
        logger.info(f'''
Vision Dataset contains {length} datapoints
Language Dataset contains {lang_length} datapoints
VISION --> {((100.0 * length) / (length + lang_length)):.3f} %
LANGUAGE --> {((100.0 * lang_length) / (length + lang_length)):.3f} %''')

    def check_done(self, counter, num_samples, batch_idx, num_batches, mode):
        """Log progress every 10 batches; True when every task reached num_samples."""
        if ((batch_idx % 10) == 0):
            log_rank_0(f'{mode} Tasks Objective: {num_samples}')
            log_rank_0(f'Tasks Lang: {self.cfg.train_instructions.keys()}')
            log_rank_0(f'Tasks Annotations Progress: {counter}')
            # Text progress bar scaled to half width (50 chars).
            log_rank_0((((((('Progress [ ' + ('=' * int((((0.5 * 100) * batch_idx) / num_batches)))) + '>') + ('-' * int((((0.5 * 100) * (num_batches - batch_idx)) / num_batches)))) + str(round(((100 * batch_idx) / num_batches)))) + '%') + ']'))
        return ((len(counter.values()) >= len(self.cfg.train_instructions)) and (min(counter.values()) >= num_samples))

    def select_env(self, dataset, idx):
        """Pick the simulation env whose scene contains episode `idx` of `dataset`."""
        if ('validation' in dataset.abs_datasets_dir.as_posix()):
            return self.envs[self.cfg.validation_scene]
        seq_idx = dataset.episode_lookup[idx]
        for (scene, interval) in self.scene_idx_info.items():
            if (interval[0] <= seq_idx <= interval[1]):
                return self.envs[scene]
        raise ValueError

    def annotate(self, episode, dataset, collected_data, global_task_counter, num_samples):
        """Detect solved tasks in each sequence of the batch and label them.

        Replays each sequence at several time points to decide which single
        task was solved and where the labeled window should start, honoring
        the per-task quota `num_samples`.
        """
        state_obs = episode['robot_obs']
        reset_info = episode['state_info']
        idx = episode['idx']
        (batch_size, seq_length) = (state_obs.shape[0], state_obs.shape[1])
        current_task_counter = Counter()
        for i in range(batch_size):
            env = self.select_env(dataset, idx[i])
            # Compare env states at the end, a random early point, and near the end.
            env.reset(reset_info, i, (- 1))
            goal_info = env.get_info()
            prior_steps = np.random.randint(16, 32)
            env.reset(reset_info, i, prior_steps)
            middle_info = env.get_info()
            env.reset(reset_info, i, (seq_length - 16))
            close_to_end_info = env.get_info()
            task_info = self.tasks.get_task_info(middle_info, goal_info)
            # Keep only sequences solving exactly one known task that is not
            # already completed near the end of the window.
            if ((len(task_info) != 1) or (not (task_info <= self.cfg.train_instructions.keys())) or len(self.tasks.get_task_info_for_set(middle_info, close_to_end_info, task_info))):
                continue
            task = list(task_info)[0]
            # Respect the per-task annotation quota (global + this batch).
            if ((global_task_counter[task] + current_task_counter[task]) >= num_samples):
                continue
            env.reset(reset_info, i, 0)
            start_info = env.get_info()
            env.reset(reset_info, i, 32)
            middle_info2 = env.get_info()
            # Decide whether the task spans the whole window or only its tail.
            if (len(self.tasks.get_task_info_for_set(start_info, goal_info, task_info)) and (not len(self.tasks.get_task_info(start_info, middle_info2)))):
                start_idx = idx[i]
                window_size = seq_length
            else:
                start_idx = (idx[i] + prior_steps)
                window_size = (seq_length - prior_steps)
            current_task_counter += Counter(task_info)
            collected_data = self.label_seq(collected_data, dataset, window_size, start_idx, task)
        return (collected_data, global_task_counter, current_task_counter)

    def label_seq(self, collected_data, dataset, seq_length, idx, task):
        """Append one annotation: sequence index range, sampled instruction, task name."""
        seq_idx = dataset.episode_lookup[idx]
        collected_data['info']['indx'].append((seq_idx, (seq_idx + seq_length)))
        task_lang = self.cfg.train_instructions[task]
        # Sample one of the template instructions for linguistic variety.
        lang_ann = task_lang[np.random.randint(len(task_lang))]
        collected_data['language']['ann'].append(lang_ann)
        collected_data['language']['task'].append(task)
        return collected_data
# NOTE(review): `_on_pypy` looks like the remnant of a stripped decorator
# (e.g. a pytest skip marker for PyPy); as written it is a bare name
# expression evaluated and discarded — confirm against the original source.
_on_pypy
def test_inherited_protocol():
    """A type inheriting the buffer protocol exposes it to memoryview and NumPy."""
    matrix = m.SquareMatrix(5)
    assert (memoryview(matrix).shape == (5, 5))
    assert (np.asarray(matrix).shape == (5, 5))
def get_module(module):
    """Look up a backbone class in `Modules` by its exact member name.

    Raises IndexError when no member of `Modules` matches `module`.
    """
    members = inspect.getmembers(Modules)
    matches = [candidate for member_name, candidate in members if member_name == module]
    return matches[0]
class no_grad(object):
def __init__(self):
self.prev = torch.is_grad_enabled()
def __enter__(self):
torch._C.set_grad_enabled(False)
def __exit__(self, *args):
torch.set_grad_enabled(self.prev)
return False
def __call__(self, func):
(func)
def decorate_no_grad(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorate_no_grad |
def no_default_args_signature(type_system):
    """Build an InferredSignature for ``f(a: float, b: float, *, c: float = 0.42)``."""
    params = [
        inspect.Parameter(name='a', kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=float),
        inspect.Parameter(name='b', kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=float),
        inspect.Parameter(name='c', kind=inspect.Parameter.KEYWORD_ONLY, annotation=float, default=0.42),
    ]
    return InferredSignature(
        signature=inspect.Signature(parameters=params),
        original_return_type=type_system.convert_type_hint(float),
        original_parameters={
            'a': type_system.convert_type_hint(float),
            'b': type_system.convert_type_hint(float),
            'c': type_system.convert_type_hint(float),
        },
        type_system=type_system,
    )
def get_args():
    """Parse evaluation options for the EAD2019 semantic segmentation challenge.

    Returns:
        argparse.Namespace with metric file paths, case type, and output names.
    """
    parser = argparse.ArgumentParser(description='For EAD2019 challenge: semantic segmentation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--generalizationMetric_seg_1', type=str, default='../Result_test/metrics_det_EAD2020.json', help='json file for detection')
    parser.add_argument('--generalizationMetric_seg_2', type=str, default='../Result_test/metric_gen_score.json', help='json file for generalization')
    # Typo fix in help text: 'son file' -> 'json file'.
    parser.add_argument('--semanticMetric', type=str, default='../Result_test/metrics_sem.json', help='json file for segmentation')
    # Typo fix in help text: 'dection' -> 'detection'.
    parser.add_argument('--caseType', type=int, default=1, help='please set 0: only for detection both balanced, 1: only for instance segmentation only, 2: for generalization, 3: for all tasks')
    parser.add_argument('--Result_dir', type=str, default='finalEvaluationScores', help='all evaluation scores used for grading')
    parser.add_argument('--jsonFileName', type=str, default='metrics.json', help='all evaluation scores used for grading')
    args = parser.parse_args()
    return args
def _convert_python_version(value):
if (not value):
return (None, None)
parts = value.split('.')
if (len(parts) > 3):
return ((), 'at most three version parts are allowed')
if (len(parts) == 1):
value = parts[0]
if (len(value) > 1):
parts = [value[0], value[1:]]
try:
version_info = tuple((int(part) for part in parts))
except ValueError:
return ((), 'each version part must be an integer')
return (version_info, None) |
class GluePartitioner(PartitioningTask):
    """Partitioning task wrapper for GLUE sequence classification (BERT/RoBERTa)."""

    def __init__(self, args) -> None:
        super().__init__(args)
        # Tokenizer matching the chosen pretrained checkpoint.
        self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))

    def batch_dim(self) -> int:
        # Inputs are batch-first.
        return 0

    def get_input(self, args, analysis=False):
        """Return a sample batch built with this task's tokenizer."""
        return get_sample(args, self.tokenizer, analysis=analysis)

    def get_model(self, args) -> torch.nn.Module:
        """Instantiate the classification model (in train mode) for partitioning."""
        config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=(args.cache_dir if args.cache_dir else None))
        setattr(config, 'precompute_attention_mask', args.precompute_attention_mask)
        config.num_labels = glue_tasks_num_labels.get(args.task_name)
        # Dispatch on model family; KeyError for unsupported args.model_type.
        model_cls = {'bert': BertForSequenceClassification, 'roberta': RobertaForSequenceClassification}
        model_cls = model_cls[args.model_type]
        model = model_cls.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None)).train()
        return model

    def register_functions(self):
        """Register identity operators as untraced and math.sqrt as traced for the tracer."""
        register_new_explicit_untraced_function(operator.is_, operator)
        register_new_explicit_untraced_function(operator.is_not, operator)
        register_new_traced_function(math.sqrt, math)
def save_wiki_pickle(wiki_map, pathp='./'):
    """Persist wiki_map to '<pathp>wiki_map.pickle'.

    If a previous pickle exists, its entries are merged into wiki_map
    (in place) without overwriting keys already present. Returns wiki_map.
    """
    pickle_file = pathp + 'wiki_map.pickle'
    if path.exists(pickle_file):
        # Keep old entries that the new map does not already contain.
        for key, value in get_wiki_pickle().items():
            wiki_map.setdefault(key, value)
    with open(pickle_file, 'wb') as handle:
        pickle.dump(wiki_map, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return wiki_map
def _load_vocabulary(filename):
    """Read a vocabulary file (one word per line) into an OrderedDict word -> index.

    Raises AssertionError when a word occurs more than once.
    """
    tf.logging.info('Reading vocabulary from %s', filename)
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(filename, mode='r') as f:
        for (i, line) in enumerate(f):
            # NOTE(review): `.decode('utf-8')` implies lines arrive as bytes;
            # with mode='r' on Python 3 GFile yields str, so this looks like
            # Python 2 era TF1 code — confirm the intended runtime.
            word = line.decode('utf-8').strip()
            assert (word not in vocab), ('Attempting to add word twice: %s' % word)
            vocab[word] = i
    tf.logging.info('Read vocabulary of size %d', len(vocab))
    return vocab
class BaseMetric(ABC):
    """Base class for streaming metrics: accumulate via update(), read via get()."""

    def __init__(self):
        # Subclasses populate this as observations arrive; None until computed.
        self.score = None

    def update(self, y_true, y_pred):
        """Incorporate a batch of ground truth / predictions (no-op by default)."""
        pass

    def get(self):
        """Return the current metric value."""
        return self.score
class AwaitIterNextExprNode(AwaitExprNode):
    """Cython expression node for awaiting `__anext__` inside an 'async for' loop.

    Emits C code that turns StopAsyncIteration (and only that) into a loop
    break, mirroring CPython's async-for termination semantics.
    """

    def _generate_break(self, code):
        # Break out of the enclosing loop when the pending C-level exception
        # is StopAsyncIteration (explicitly excluding StopIteration and
        # GeneratorExit before the subclass match).
        code.globalstate.use_utility_code(UtilityCode.load_cached('StopAsyncIteration', 'Coroutine.c'))
        code.putln('PyObject* exc_type = __Pyx_PyErr_Occurred();')
        code.putln('if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ( exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit && __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {')
        code.putln('PyErr_Clear();')
        code.putln('break;')
        code.putln('}')

    def fetch_iteration_result(self, code):
        # Only valid inside an async-for: there must be a loop to break out of.
        assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
        self._generate_break(code)
        super(AwaitIterNextExprNode, self).fetch_iteration_result(code)

    def generate_sent_value_handling_code(self, code, value_cname):
        assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
        # A NULL sent value signals exhaustion or error: try the break path
        # first, otherwise propagate the error.
        code.putln(('if (unlikely(!%s)) {' % value_cname))
        self._generate_break(code)
        code.putln(code.error_goto(self.pos))
        code.putln('}')
def fig_posterior(task_name: str, num_observation: int=1, num_samples: int=1000, prior: bool=False, reference: bool=True, true_parameter: bool=False, samples_path: Optional[str]=None, samples_tensor: Optional[torch.Tensor]=None, samples_name: Optional[str]=None, samples_color: Optional[str]=None, title: Optional[str]=None, title_dx: int=0, legend: bool=True, seed: int=101, config: Optional[str]=None, width: Optional[int]=None, height: Optional[int]=None, default_color: str='#0035FD', colors_dict: Optional[Dict[str, Any]]=None, interactive: bool=False, limits: Optional[Union[List[float], str]]=None, num_bins: int=40, scatter_size: float=1.0, **kwargs: Any):
    """Build an Altair pairplot of posterior samples for a task/observation.

    Collects up to four sample sets (prior, reference posterior, repeated true
    parameter, and algorithm samples from a tensor or CSV), assigns each a
    color, infers axis limits, and renders a pairplot. Returns None when no
    sample set was selected; otherwise the configured Altair chart.
    """
    # Mutable-default fix: colors_dict defaulted to `{}`; use a None sentinel.
    if colors_dict is None:
        colors_dict = {}
    task = sbibm.get_task(task_name)
    samples = []
    labels_samples = []
    colors = {}
    samples_prior = task.get_prior()(num_samples=num_samples)
    if prior:
        sample_name = 'Prior'
        samples.append(samples_prior.numpy())
        labels_samples.append(sample_name)
        colors[sample_name] = colors_dict.get(sample_name, '#646464')
    if reference:
        sample_name = 'Ref. Posterior'
        # Subsample the stored reference posterior down to num_samples.
        samples_reference = sample(task.get_reference_posterior_samples(num_observation=num_observation).numpy(), num_samples, replace=False, seed=seed)
        samples.append(samples_reference)
        labels_samples.append(sample_name)
        colors[sample_name] = colors_dict.get(sample_name, '#0a0a0a')
    if true_parameter:
        sample_name = 'True parameter'
        # Repeat the single true parameter so every set has num_samples rows.
        samples.append(task.get_true_parameters(num_observation=num_observation).repeat(num_samples, 1).numpy())
        labels_samples.append(sample_name)
        colors[sample_name] = colors_dict.get(sample_name, '#f92700')
    if (samples_tensor is not None) or (samples_path is not None):
        if samples_tensor is not None:
            samples_ = samples_tensor.numpy()
        else:
            samples_ = get_ndarray_from_csv(samples_path)
        samples_algorithm = sample(samples_, num_samples, replace=False, seed=seed)
        samples.append(samples_algorithm)
        sample_name = 'Algorithm' if samples_name is None else samples_name
        labels_samples.append(sample_name)
        if samples_color is not None:
            colors[sample_name] = samples_color
        elif sample_name in colors_dict:
            # BUG FIX: previously indexed colors_dict with `samples_name`,
            # raising KeyError when samples_name is None while the default
            # label 'Algorithm' is a key of colors_dict.
            colors[sample_name] = colors_dict[sample_name]
        else:
            colors[sample_name] = default_color
    if len(samples) == 0:
        return None
    for s in samples:
        assert s.shape[0] == num_samples
    numbers_unicode = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
    labels_dim = [f'{numbers_unicode[(i + 1)]}' for i in range(task.dim_parameters)]
    df = den.np2df(samples=[sample for sample in samples], field='sample', labels_samples=labels_samples, labels_dim=labels_dim)
    style = {}
    keywords = {}
    keywords['color'] = den.colorscale(colors, shorthand='sample:N', legend=legend)
    keywords['interactive'] = interactive
    if limits is None:
        if task_name in _LIMITS_:
            limits = _LIMITS_[task_name]
        else:
            # Fall back to the bounding box of the prior samples.
            limits = [list(i) for i in zip(samples_prior.min(dim=0)[0].tolist(), samples_prior.max(dim=0)[0].tolist())]
    elif type(limits) == str:
        # A string selects one of the plotted sample sets to derive limits from.
        assert limits in labels_samples
        samples_limits = torch.from_numpy(samples[labels_samples.index(limits)])
        limits = [list(i) for i in zip(samples_limits.min(dim=0)[0].tolist(), samples_limits.max(dim=0)[0].tolist())]
    keywords['limits'] = limits
    keywords['num_bins'] = num_bins
    if config == 'manuscript':
        style['font_family'] = 'Inter'
        keywords['width'] = 100 if width is None else width
        keywords['height'] = 100 if height is None else height
        style['font_size'] = 12
    if config == 'streamlit':
        size = 500 / task.dim_parameters
        keywords['width'] = size if width is None else width
        keywords['height'] = size if height is None else height
        style['font_size'] = 16
    alt.themes.enable('default')
    den.set_style(extra={'config': {'axisX': {'domain': False, 'domainWidth': 0, 'ticks': False, 'tickWidth': 0, 'grid': False}, 'axisY': {'domain': False, 'domainWidth': 0, 'ticks': False, 'tickWidth': 0, 'grid': False}}}, **style)
    chart = den.pairplot(df, field='sample', scatter_size=scatter_size, bar_opacity=0.4, **keywords)
    if title is not None:
        chart = chart.properties(title={'text': [title]}).configure_title(offset=10, orient='top', anchor='middle', dx=title_dx)
    return chart
class Resnet101Triplet(nn.Module):
    """ResNet-101 backbone with its classifier head swapped for an embedding layer.

    forward() returns L2-normalised embeddings suitable for triplet training.
    """

    def __init__(self, embedding_dimension=512, pretrained=False):
        super(Resnet101Triplet, self).__init__()
        self.model = resnet101(pretrained=pretrained)
        # Replace the ImageNet classification head with a bias-free projection
        # into the embedding space.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Linear(in_features, embedding_dimension, bias=False)

    def forward(self, images):
        # Unit-normalise so distances depend only on direction.
        raw_embedding = self.model(images)
        return F.normalize(raw_embedding, p=2, dim=1)
# NOTE(review): the original shows a truncated decorator ('.script'), which is
# a syntax error as written; restored here as @torch.jit.script — confirm
# against the upstream source.
@torch.jit.script
def inv_apply_mean_var(x, mean, var, eps):
    """Invert mean/variance normalisation: return mean + sqrt(max(var, eps)) * x.

    `eps` floors the variance so the square root stays well-defined; outputs
    are cast to x's dtype.
    """
    stdev = torch.sqrt(torch.max(var, eps))
    return torch.addcmul(mean.to(x.dtype), stdev.to(x.dtype), x, value=1.0)
def fill_standard_subplot(axis, x_vals_unsorted, y_vals_unsorted, label, available_items_scaling, max_depth):
    """Plot a depth/value series on `axis`, sorted by x.

    Optionally overlays sized scatter markers, and extends the curve with
    dashed flat segments back to x=0 and forward to x=max_depth.
    """
    paired = sorted(zip(x_vals_unsorted, y_vals_unsorted), key=lambda pair: pair[0])
    if len(paired) > 0:
        x_vals = [pair[0] for pair in paired]
        y_vals = [pair[1] for pair in paired]
    else:
        x_vals = x_vals_unsorted
        y_vals = y_vals_unsorted
    if len(available_items_scaling) > 0:
        axis.scatter(x_vals, y_vals, s=available_items_scaling, color=csToMplColor(label), marker='o', alpha=1.0)
    axis.plot(x_vals, y_vals, label=label, color=csToMplColor(label))
    if len(x_vals) >= 1:
        # Flat dashed extensions on both sides of the measured range.
        axis.plot([x_vals[-1], max_depth], [y_vals[-1], y_vals[-1]], label=label, color=csToMplColor(label), linestyle='--', alpha=0.6)
        axis.plot([0, x_vals[0]], [y_vals[0], y_vals[0]], label=label, color=csToMplColor(label), linestyle='--', alpha=0.6)
def _to_array_with_correct_type(obj: Any) -> np.ndarray:
    """Coerce *obj* to a numpy array, forcing non-numeric data to object dtype.

    Numeric/bool ndarrays pass through untouched. For object-dtype results:
    a 0-d array is unwrapped to its single item, an array whose elements are
    all ndarrays is returned as-is, and arrays containing torch tensors are
    rejected.
    """
    # Fast path: already a numeric/boolean ndarray.
    if isinstance(obj, np.ndarray) and issubclass(obj.dtype.type, (np.bool_, np.number)):
        return obj
    arr = np.asanyarray(obj)
    if not issubclass(arr.dtype.type, (np.bool_, np.number)):
        arr = arr.astype(object)
    if arr.dtype == object:
        flat = arr.reshape(-1)
        if not arr.shape:
            # 0-d object array: return the wrapped Python object itself.
            arr = arr.item(0)
        elif all(isinstance(element, np.ndarray) for element in flat):
            return arr
        elif any(isinstance(element, torch.Tensor) for element in flat):
            raise ValueError('Numpy arrays of tensors are not supported yet.')
    return arr
def get_top_n(root: Path, n_speakers: int=10, min_n_tokens: int=5) -> pd.DataFrame:
    """Return validated.tsv rows for the *n_speakers* with the most audio.

    Sentences with fewer than *min_n_tokens* whitespace tokens are dropped
    first; speakers are ranked by total frame count of their clips.
    """
    df = load_df_from_tsv(root / 'validated.tsv')
    df['n_tokens'] = [len(sentence.split()) for sentence in df['sentence']]
    df = df[df['n_tokens'] >= min_n_tokens]
    clips_dir = root / 'clips'
    # Probing every clip for its frame count is the slow part, hence tqdm.
    df['n_frames'] = [torchaudio.info((clips_dir / p).as_posix()).num_frames for p in tqdm(df['path'])]
    df['id'] = [Path(p).stem for p in df['path']]
    per_speaker = df.groupby('client_id')['n_frames'].agg(['sum'])
    per_speaker = per_speaker.sort_values('sum', ascending=False)
    top_client_ids = set(per_speaker.head(n_speakers).index.tolist())
    return df[df['client_id'].isin(top_client_ids)]
.skipif((sys.version_info[0] < 3), reason='NumPy exposes slightly different functions on Python 2')
def test_numpy_namespace():
    """Check that every undocumented name in the top-level numpy namespace is
    on the known whitelist (and nothing new has crept in).

    NOTE(review): the bare ".skipif" line above looks like a mangled
    "@pytest.mark.skipif" decorator in this copy — confirm upstream.
    """
    # Names exposed as np.* whose real implementation lives elsewhere.
    undocumented = {'Tester': 'numpy.testing._private.nosetester.NoseTester', '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', 'add_docstring': 'numpy.core._multiarray_umath.add_docstring', 'add_newdoc': 'numpy.core.function_base.add_newdoc', 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', 'byte_bounds': 'numpy.lib.utils.byte_bounds', 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays', 'deprecate': 'numpy.lib.utils.deprecate', 'deprecate_with_doc': 'numpy.lib.utils.<lambda>', 'disp': 'numpy.lib.function_base.disp', 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose', 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap', 'get_include': 'numpy.lib.utils.get_include', 'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer', 'mafromtxt': 'numpy.lib.npyio.mafromtxt', 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt', 'recfromcsv': 'numpy.lib.npyio.recfromcsv', 'recfromtxt': 'numpy.lib.npyio.recfromtxt', 'safe_eval': 'numpy.lib.utils.safe_eval', 'set_string_function': 'numpy.core.arrayprint.set_string_function', 'show_config': 'numpy.__config__.show', 'who': 'numpy.lib.utils.who'}
    # Python builtins re-exported under the numpy namespace.
    builtins = {'bool': 'builtins.bool', 'complex': 'builtins.complex', 'float': 'builtins.float', 'int': 'builtins.int', 'long': 'builtins.int', 'object': 'builtins.object', 'str': 'builtins.str', 'unicode': 'builtins.str'}
    whitelist = dict(undocumented, **builtins)
    bad_results = check_dir(np)
    # The undocumented set must match the whitelist exactly.
    assert (bad_results == whitelist)
def run_program(sdfg):
    """Execute *sdfg* on fixed 16-element float32 inputs and verify outputs.

    Expects out0 == 2 * ((in0 + 1) + (in1 + 1)) and out1 == in2 * in2.
    """
    src0 = np.zeros((16,), np.float32)
    src1 = np.ones((16,), np.float32)
    src2 = np.ones((16,), np.float32)
    dst0 = np.empty((16,), np.float32)
    dst1 = np.empty((16,), np.float32)
    sdfg(in0=src0, in1=src1, in2=src2, out0=dst0, out1=dst1)
    expected0 = 2 * ((src0 + 1) + (src1 + 1))
    assert np.allclose(dst0, expected0)
    assert np.allclose(dst1, src2 * src2)
class ConfigError(InputError):
    """Input error raised when the config file contains a problem."""

    def __init__(self, message='The config file contains an error.'):
        # Prefix every message so config problems stand out in logs.
        prefixed = f'CONFIG ERROR: {message}'
        super().__init__(prefixed)
class Head(nn.Module):
    """Classification head: 1x1 conv -> norm -> activation -> global average
    pool -> dropout -> linear classifier (flax-style module)."""
    num_features: int             # channels produced by the 1x1 'conv_pw' projection
    num_classes: int = 1000       # <= 0 disables the final linear classifier
    global_pool: str = 'avg'      # only 'avg' pooling is implemented below
    drop_rate: float = 0.0        # dropout probability before the classifier
    dtype: Dtype = jnp.float32    # compute dtype restored after pooling
    conv_layer: ModuleDef = conv2d
    norm_layer: ModuleDef = batchnorm2d
    linear_layer: ModuleDef = linear
    act_fn: Callable = nn.relu
    def __call__(self, x, training: bool):
        x = self.conv_layer(self.num_features, 1, name='conv_pw')(x)
        x = self.norm_layer(name='bn')(x, training=training)
        x = self.act_fn(x)
        if (self.global_pool == 'avg'):
            # Pool in float32 for numerical stability, then cast back.
            x = jnp.asarray(x, jnp.float32)
            x = x.mean((1, 2))  # average over axes 1 and 2 (spatial dims, assuming NHWC — confirm)
            x = jnp.asarray(x, self.dtype)
        x = Dropout(rate=self.drop_rate)(x, training=training)
        if (self.num_classes > 0):
            x = self.linear_layer(self.num_classes, bias=True, name='classifier')(x)
        return x
def register_Ns3Packet_methods(root_module, cls):
    """Register Python bindings for ns3::Packet on *cls* (PyBindGen-style
    generated binding table).

    Declares constructors, tag/header/trailer manipulation, fragmentation,
    serialization and printing methods; not meant to be edited by hand.
    """
    cls.add_output_stream_operator()
    # Constructors: default, copy, sized, and raw-buffer (with/without magic).
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    cls.add_constructor([param('uint32_t', 'size')])
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('RemoveAllByteTags', 'void', [])
    cls.add_method('RemoveAllPacketTags', 'void', [])
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return
class SentenceTransformersVectorizer(BaseSentenceVectorizer):
    """Vectorizer backed by a `sentence_transformers` model.

    Encodes the text extracted from input examples into a 2-D numpy array
    (one embedding row per example). When more than one GPU is available the
    multi-process encoding pool is used.
    """

    def __init__(self, model_name_or_path: str='all-MiniLM-L6-v2', vectorize_bs: int=256, max_gpu_devices: int=1, normalize_embeddings: bool=False):
        """Load the model onto GPU (if available) or CPU.

        Args:
            model_name_or_path: HF hub name or local path of the model.
            vectorize_bs: encoding batch size.
            max_gpu_devices: upper bound on GPUs to use.
            normalize_embeddings: L2-normalize each embedding row.

        Raises:
            ImportError: if `sentence_transformers` is not installed.
        """
        try:
            from sentence_transformers import SentenceTransformer
        except ImportError as e:
            # Chain the original import failure; the original message also had
            # an unterminated backtick around the pip command.
            raise ImportError(
                'You need to install sentence_transformers library to use pretrained embedders. '
                'Please check the official doc or simply run `pip install sentence-transformers`'
            ) from e
        from dsp.utils.ann_utils import determine_devices
        (self.num_devices, self.is_gpu) = determine_devices(max_gpu_devices)
        self.proxy_device = ('cuda' if self.is_gpu else 'cpu')
        self.model = SentenceTransformer(model_name_or_path, device=self.proxy_device)
        self.model_name_or_path = model_name_or_path
        self.vectorize_bs = vectorize_bs
        self.normalize_embeddings = normalize_embeddings

    def __call__(self, inp_examples: List) -> np.ndarray:
        """Return an (n_examples, dim) float array of embeddings."""
        text_to_vectorize = self._extract_text_from_examples(inp_examples)
        if self.is_gpu and (self.num_devices > 1):
            target_devices = list(range(self.num_devices))
            pool = self.model.start_multi_process_pool(target_devices=target_devices)
            emb = self.model.encode_multi_process(sentences=text_to_vectorize, pool=pool, batch_size=self.vectorize_bs)
            self.model.stop_multi_process_pool(pool)
            if self.normalize_embeddings:
                # BUG FIX: normalize each row by its own L2 norm, matching
                # encode(normalize_embeddings=True) below; the previous code
                # divided the whole matrix by its global norm.
                emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
            return emb
        # Single-device path: the library handles normalization itself.
        return self.model.encode(sentences=text_to_vectorize, batch_size=self.vectorize_bs, normalize_embeddings=self.normalize_embeddings)
def randomGaussian(image, mean=0.1, sigma=0.35):
    """Add per-pixel Gaussian noise to a 2-D grayscale image.

    Returns a new ``PIL.Image`` built from the noisy uint8 array; the input
    (assumed single-channel, 2-D — confirm at call sites) is not modified.
    """
    source = np.asarray(image)
    rows, cols = source.shape
    # flatten() copies, so the in-place += below never touches the input.
    noisy = source.flatten()
    for index in range(len(noisy)):
        noisy[index] += random.gauss(mean, sigma)
    noisy = noisy.reshape([rows, cols])
    return Image.fromarray(np.uint8(noisy))
class JumanppTokenizer():
    """Morpheme-level tokenizer backed by Juman++ via the ``rhoknp`` package."""

    def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False):
        """Configure tokenization options and start a Jumanpp instance.

        Raises:
            ImportError: if ``rhoknp`` is not installed.
        """
        self.do_lower_case = do_lower_case
        self.never_split = [] if never_split is None else never_split
        self.normalize_text = normalize_text
        self.trim_whitespace = trim_whitespace
        try:
            import rhoknp
        except ImportError:
            raise ImportError('You need to install rhoknp to use JumanppTokenizer. See for installation.')
        self.juman = rhoknp.Jumanpp()

    def tokenize(self, text, never_split=None, **kwargs):
        """Split *text* into morpheme tokens.

        Tokens listed in ``never_split`` (instance-level plus per-call) are
        protected from lowercasing.
        """
        if self.normalize_text:
            text = unicodedata.normalize('NFKC', text)
        text = text.strip()
        extra = never_split if never_split is not None else []
        protected = self.never_split + extra
        tokens = []
        for morpheme in self.juman.apply_to_sentence(text).morphemes:
            piece = morpheme.text
            if self.do_lower_case and piece not in protected:
                piece = piece.lower()
            if self.trim_whitespace:
                piece = piece.strip()
                if not piece:
                    continue  # drop whitespace-only morphemes
            tokens.append(piece)
        return tokens
_dispatch
def fft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Multimethod stub for the 2-D FFT: returns the dispatchable arguments
    (uarray protocol); the actual computation is supplied by a backend.

    NOTE(review): the bare "_dispatch" line above looks like a mangled
    "@_dispatch" decorator in this copy of the file — confirm upstream.
    """
    return (Dispatchable(x, np.ndarray),)
def active_augment_learn(init_flag=None, train_data=None, num_initial=200, active_policy=uncertainty_sampling, augment_method=lf_augment, num_query=5, num_sample=[100, 100, 100, 100, 100], augment_rate=0.2, augment_decay=1, hyper_alpha=8, alpha_decay=1, Epochs=10, score_limit_low=0, score_limit_upper=500, fit_only_new_data=False, mixup_flag=True, single_use=False, prefix='SeqMix'):
    """Active-learning loop with SeqMix-style data augmentation.

    Seeds a model on ``num_initial`` random examples, then for ``num_query``
    rounds queries ``active_policy`` for new samples, augments them with
    ``augment_method``, retrains, evaluates, and logs a results table.

    Interface notes: ``num_sample`` keeps its original mutable-list default
    for backward compatibility (it is only read here); ``fit_only_new_data``
    is currently unused and kept for interface compatibility.

    Returns:
        The trained model.
    """
    func_paras = locals()  # must run first: snapshot of all arguments for save_result
    pool = copy.deepcopy(train_data)
    train_data = copy.deepcopy(train_data)
    original_datasize = len(train_data)
    initial_idx = np.random.choice(range(len(train_data)), size=num_initial, replace=False)
    train_data = np.array(train_data)[initial_idx]
    (init_data_loader, query_idx) = get_tr_set(size=num_initial, train_examples=train_data)
    # Remove the seed examples from the unlabeled pool.
    pool = np.delete(pool, query_idx, axis=0)
    print(np.array(pool).shape)
    if init_flag:
        init_dir = 'init_dir'
        model = Ner.from_pretrained(init_dir)
        print('Initial model loaded from google drive')
    else:
        model = active_train(init_data_loader, None, Epochs)
    report = evaluate('Intialization', model)
    print_table = PrettyTable(['Model', 'Number of Query', 'Data Usage', 'Data Augmented', 'Test_F1'])
    print_table.add_row(['Initial Model', 'Model Initialization', (len(train_data) / original_datasize), 0, report.split()[(- 2)]])
    print(print_table)
    test_f1 = []
    dev_f1 = []
    num_augment = int((num_initial * augment_rate))
    # BUG FIX: the original condition was
    #   (augment_method == slack_augment) or soft_augment
    # which is always true (the function object `soft_augment` is truthy),
    # so the soft-label branch was taken unconditionally.
    if augment_method in (slack_augment, soft_augment):
        (soft_data, soft_labels, new_sample_count) = augment_method(train_data, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
        soft_loader = get_tr_set(train_examples=soft_data, soft_labels=soft_labels)
    else:
        (mix_data, new_sample_count) = augment_method(train_data, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
        soft_loader = None
    aug_data_loader = get_tr_set(train_examples=train_data)
    model = active_train(data_loader=aug_data_loader, model=model, Epochs=Epochs, soft_loader=soft_loader)
    report = evaluate('SeedSetAug', model)
    aug_total_count = new_sample_count
    print_table.add_row(['Augment Model', 'Seed Set Augmented', (len(train_data) / original_datasize), aug_total_count, report.split()[(- 2)]])
    print(print_table)
    save_result(prefix=prefix, func_paras=func_paras, report=report, table=print_table)
    print('Learning loop start')
    for idx in range(num_query):
        # Decay augmentation volume and the mixing coefficient each round.
        num_augment = int(((num_sample[idx] * augment_rate) * (augment_decay ** idx)))
        hyper_alpha = (hyper_alpha * (alpha_decay ** idx))
        print(('Query no. %d' % (idx + 1)))
        (query_idx, query_instance) = active_policy(model, pool, num_sample[idx])
        mixup_candidate = pool[query_idx]
        pool = np.delete(pool, query_idx, axis=0)
        # Same always-true condition fixed here as above.
        if augment_method in (slack_augment, soft_augment):
            (new_soft_data, new_soft_labels, new_sample_count) = augment_method(mixup_candidate, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
            soft_data = np.concatenate((soft_data, new_soft_data))
            soft_labels = np.concatenate((soft_labels, new_soft_labels))
            soft_loader = get_tr_set(train_examples=soft_data, soft_labels=soft_labels)
            mix_data = mixup_candidate
        elif mixup_flag:
            (mix_data, new_sample_count) = augment_method(mixup_candidate, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
            soft_loader = None
        else:
            # NOTE(review): mixup_candidate_X / mixup_candidate_y are not
            # defined anywhere in this function — this branch raises
            # NameError if reached. Restore the intended values upstream.
            (mix_data, new_sample_count) = duplicate_pair_data(mixup_candidate_X, mixup_candidate_y, num_augment)
        train_data = np.concatenate((train_data, mix_data))
        aug_total_count += new_sample_count
        aug_data_loader = get_tr_set(train_examples=train_data)
        model = active_train(data_loader=aug_data_loader, model=model, Epochs=Epochs, soft_loader=soft_loader)
        if single_use:
            # Discard single-use augmented samples after training on them.
            train_data = train_data[:(- new_sample_count)]
            aug_total_count = new_sample_count
        report = evaluate('SeqMixAug', model)
        data_usage = len(train_data)
        if (augment_method == lf_mixup):
            data_usage -= aug_total_count
        print_table.add_row(['Augmented Model', (idx + 1), (data_usage / original_datasize), aug_total_count, report.split()[(- 2)]])
        print(print_table)
        save_result(prefix=prefix, func_paras=func_paras, report=report, table=print_table)
    return model
class GPTJLoraInt8(CausalLoraInt8Model):
    """GPT-J causal LM with LoRA adapters in int8 precision."""
    # Registry key identifying this model configuration.
    config_name: str = 'gptj_lora_int8'
    def __init__(self, weights_path: Optional[str]=None):
        # weights_path presumably overrides the default checkpoint location —
        # confirm against CausalLoraInt8Model.__init__.
        super().__init__(GPTJLoraInt8Engine.config_name, weights_path)
(scope='module')
def source_2bin_2channel():
    """Load the 2-bin / 2-channel example JSON used by validation tests.

    NOTE(review): the bare "(scope='module')" line above looks like a mangled
    "@pytest.fixture(scope='module')" decorator in this copy — confirm.
    """
    with open('validation/data/2bin_2channel_example1.json', encoding='utf-8') as read_json:
        return json.load(read_json)
.parametrize('time_threshold, user_answer, item_answer', [(datetime.strptime('06-01-2020', '%d-%m-%Y'), [[1, 1, 1, 1, 1, 3, 3, 3, 3, 3], [2, 2, 2, 2, 2]], [[1, 2, 3, 4, 5, 1, 5, 3, 1, 2], [1, 2, 3, 9, 10]])])
.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_time_splitter_without_drops(time_threshold, user_answer, item_answer, dataset_type, request):
    """TimeSplitter with drop_cold_users/items disabled must keep every
    user and item in both splits, matching the expected id lists.

    NOTE(review): the leading ".parametrize" lines look like mangled
    "@pytest.mark.parametrize" decorators in this copy — confirm.
    """
    dataframe = request.getfixturevalue(dataset_type)
    filtered_dataframe = TimeSplitter(time_threshold=time_threshold, query_column='user_id', drop_cold_users=False, drop_cold_items=False).split(dataframe)
    # Spark and pandas test fixtures need different column extractors.
    if (dataset_type == 'pandas_dataframe_test'):
        item_ids = _get_column_list_pandas(filtered_dataframe, 'item_id')
        user_ids = _get_column_list_pandas(filtered_dataframe, 'user_id')
    else:
        item_ids = _get_column_list(filtered_dataframe, 'item_id')
        user_ids = _get_column_list(filtered_dataframe, 'user_id')
    _check_assert(user_ids, item_ids, user_answer, item_answer)
class FileSystemLoader(BaseLoader):
    """Template loader that searches a list of directories on disk."""

    def __init__(self, searchpath, encoding='utf-8', followlinks=False):
        # A single path (string or path-like) is wrapped into a list.
        if isinstance(searchpath, string_types) or not isinstance(searchpath, abc.Iterable):
            searchpath = [searchpath]
        self.searchpath = [fspath(entry) for entry in searchpath]
        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(self, environment, template):
        """Return (source, filename, uptodate) for *template*.

        Raises:
            TemplateNotFound: if the template exists in no search path.
        """
        pieces = split_template_path(template)
        for base in self.searchpath:
            filename = path.join(base, *pieces)
            handle = open_if_exists(filename)
            if handle is None:
                continue
            try:
                contents = handle.read().decode(self.encoding)
            finally:
                handle.close()
            mtime = path.getmtime(filename)

            def uptodate():
                # Template is current as long as its mtime is unchanged.
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

            return (contents, filename, uptodate)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Return the sorted, de-duplicated relative names of all templates."""
        found = set()
        for base in self.searchpath:
            for dirpath, _dirnames, filenames in os.walk(base, followlinks=self.followlinks):
                for fname in filenames:
                    relative = os.path.join(dirpath, fname)[len(base):]
                    relative = relative.strip(os.path.sep).replace(os.path.sep, '/')
                    if relative[:2] == './':
                        relative = relative[2:]
                    found.add(relative)
        return sorted(found)
def cat(g, tensor_list, dim, scale=None, zero_point=None):
    """ONNX symbolic for quantized concatenation (Caffe2 Int8Concat).

    Falls back to the opset-9 ``cat`` when the first input is not a
    registered quantized tensor. ``scale`` and ``zero_point`` are accepted
    for signature compatibility but unused here; the output inherits the
    first tensor's Y_scale / Y_zero_point attributes.
    """
    tensors = sym_help._unpack_list(tensor_list)
    input = tensors[0]
    if (input not in sym_help._quantized_ops):
        # Non-quantized path: delegate to the standard opset-9 concat.
        from torch.onnx.symbolic_opset9 import cat
        return cat(g, tensor_list, dim)
    dim = sym_help._parse_arg(dim, 'i')
    # Propagate the first tensor's quantization parameters to the output.
    kwargs = {'Y_scale_f': tensors[0].node()['Y_scale'], 'Y_zero_point_i': tensors[0].node()['Y_zero_point']}
    output = g.op('_caffe2::Int8Concat', *tensors, axis_i=dim, **kwargs)
    sym_help._quantized_ops.add(output)
    return output
def resnet50_inspecs_params_with_broadcast():
    """Build input-spec pairs for broadcast tests over ResNet-50 shapes.

    For each batch-5 activation shape, two pairs are produced (in the same
    order as the original hand-written list): one against a per-channel
    broadcast shape, and one against the batch-1 version of the full shape.
    """
    u = I.UniformInitializer((0.5, 1.0))
    # (full activation shape, per-channel broadcast shape)
    cases = [
        ((5, 1024, 14, 14), (1, 1024, 1, 1)),
        ((5, 112, 112, 64), (1, 1, 1, 64)),
        ((5, 128, 28, 28), (1, 128, 1, 1)),
        ((5, 128, 56, 56), (1, 128, 1, 1)),
        ((5, 14, 14, 1024), (1, 1, 1, 1024)),
        ((5, 14, 14, 256), (1, 1, 1, 256)),
        ((5, 14, 14, 512), (1, 1, 1, 512)),
        ((5, 2048, 7, 7), (1, 2048, 1, 1)),
        ((5, 256, 14, 14), (1, 256, 1, 1)),
        ((5, 256, 28, 28), (1, 256, 1, 1)),
        ((5, 256, 56, 56), (1, 256, 1, 1)),
        ((5, 28, 28, 128), (1, 1, 1, 128)),
        ((5, 28, 28, 256), (1, 1, 1, 256)),
        ((5, 28, 28, 512), (1, 1, 1, 512)),
        ((5, 512, 14, 14), (1, 512, 1, 1)),
        ((5, 512, 28, 28), (1, 512, 1, 1)),
        ((5, 512, 7, 7), (1, 512, 1, 1)),
        ((5, 56, 56, 128), (1, 1, 1, 128)),
        ((5, 56, 56, 256), (1, 1, 1, 256)),
        ((5, 56, 56, 64), (1, 1, 1, 64)),
        ((5, 64, 112, 112), (1, 64, 1, 1)),
        ((5, 64, 56, 56), (1, 64, 1, 1)),
        ((5, 7, 7, 2048), (1, 1, 1, 2048)),
        ((5, 7, 7, 512), (1, 1, 1, 512)),
    ]
    inspecs = []
    for full_shape, channel_shape in cases:
        inspecs.append([Inspec(full_shape, u), Inspec(channel_shape, u)])
        inspecs.append([Inspec(full_shape, u), Inspec((1,) + full_shape[1:], u)])
    return inspecs
_start_docstrings('\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', REGNET_START_DOCSTRING)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """TF RegNet with a linear image-classification head on the pooled output.

    NOTE(review): the bare "_start_docstrings", "_inputs",
    "_start_docstrings_to_model_forward" and "_code_sample_docstrings" lines
    look like mangled "@add_start_docstrings(...)" / "@unpack_inputs"-style
    decorators in this copy of the file — confirm upstream.
    """
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # Flatten + (Dense classifier, or identity when num_labels == 0).
        self.classifier = [tf.keras.layers.Flatten(), (tf.keras.layers.Dense(config.num_labels, name='classifier.1') if (config.num_labels > 0) else tf.identity)]
    _inputs
    _start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    _code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: tf.Tensor=None, labels: tf.Tensor=None, output_hidden_states: bool=None, return_dict: bool=None, training=False) -> Union[(TFSequenceClassifierOutput, Tuple[tf.Tensor])]:
        """Run the backbone, classify the pooled output, and optionally
        compute the classification loss from *labels*."""
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = (outputs.pooler_output if return_dict else outputs[1])
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = (None if (labels is None) else self.hf_compute_loss(labels=labels, logits=logits))
        if (not return_dict):
            # Tuple output: (loss?, logits, hidden_states...).
            output = ((logits,) + outputs[2:])
            return (((loss,) + output) if (loss is not None) else output)
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        # Serving drops the loss; only logits and hidden states are exported.
        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=output.hidden_states)
def _is_within_directory(directory, target):
    # True when *target* resolves to a path inside *directory*.
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory


def _safe_extract(tar, path='.', members=None, *, numeric_owner=False):
    # Guard against path-traversal entries (e.g. "../evil") before extracting.
    for member in tar.getmembers():
        if not _is_within_directory(path, os.path.join(path, member.name)):
            raise Exception('Attempted Path Traversal in Tar File')
    tar.extractall(path, members, numeric_owner=numeric_owner)


def _extract_tarfiles(data_dir):
    """Extract every feature tarball listed in ``tar_files.txt`` under
    *data_dir* and move the extracted feature files into
    ``data_dir/features``."""
    features_dir = os.path.join(data_dir, 'features')
    if not os.path.isdir(features_dir):
        os.mkdir(features_dir)
    with open(os.path.join(data_dir, 'tar_files.txt')) as listing:
        tar_names = listing.read().strip().split('\n')
    for tar_path in (os.path.join(data_dir, name) for name in tar_names):
        with tarfile.open(tar_path, 'r') as archive:
            _safe_extract(archive, data_dir)
        extracted_dir = tar_path[:-7]  # strip the '.tar.gz' suffix
        for feat_name in os.listdir(extracted_dir):
            src = os.path.join(extracted_dir, feat_name)
            dst = os.path.join(features_dir, feat_name)
            shutil.move(src, dst)
def add_noise(word, probability):
    """Corrupt *word* by removing and then inserting letters, each operation
    driven by one third of *probability*."""
    per_op_probability = probability / 3
    word = remove_letters(word, per_op_probability)
    return add_letters(word, per_op_probability)
def produceImgAndLabel(root_path='/home/lmin/data/PennFudanPed/'):
    """Write instance-segmentation list files pairing each image with its mask.

    Appends one "<image> <mask>" line per pair (paths relative to
    *root_path*) to both ``train_ins.txt`` and ``val_ins.txt`` under
    *root_path*, echoing each relative path to stdout.

    Args:
        root_path: dataset root ending with a path separator; parameterized
            (previously hard-coded) — the default preserves old behavior.
    """
    imgpath = sorted(glob(os.path.join(root_path, 'PNGImages/*.png')))
    txtpath = sorted(glob(os.path.join(root_path, 'PedMasks/*.png')))
    # Context managers guarantee both list files are closed even on error.
    with open(root_path + 'train' + '_ins.txt', 'a') as train_seg_txt, \
            open(root_path + 'val' + '_ins.txt', 'a') as val_seg_txt:
        for imgline, txtline in zip(imgpath, txtpath):
            rel_img = imgline.replace(root_path, '')
            rel_mask = txtline.replace(root_path, '')
            print(rel_img)
            print(rel_mask)
            line = rel_img + ' ' + rel_mask + '\n'
            train_seg_txt.write(line)
            val_seg_txt.write(line)
def get_fields(data_type, n_src_features, n_tgt_features):
    """Return dataset fields for the given input modality.

    Args:
        data_type: one of 'text', 'img', or 'audio'.
        n_src_features: number of source-side features.
        n_tgt_features: number of target-side features.

    Raises:
        ValueError: for an unrecognized *data_type* (the previous version
            silently returned None, hiding typos at the call site).
    """
    if data_type == 'text':
        return TextDataset.get_fields(n_src_features, n_tgt_features)
    if data_type == 'img':
        return ImageDataset.get_fields(n_src_features, n_tgt_features)
    if data_type == 'audio':
        return AudioDataset.get_fields(n_src_features, n_tgt_features)
    raise ValueError(f"unsupported data_type: {data_type!r}")
_with_pre_post_option('clean_arguments', pre=clean_polys_pre, default=True)
_with_pre_post_option('easy_linear_polynomials', pre=easy_linear_polynomials_pre, default=True)
_with_pre_post_option('result_to_list', post=result_to_list_post, default=True)
_heuristic(interpolation_gb_heuristic)
_with_pre_post_option('invert', pre=invert_all_pre, post=invert_all_post, default=False)
_with_pre_post_option('gauss_on_linear', pre=gauss_on_linear_pre, default=True)
_with_pre_post_option('ll_constants', pre=ll_constants_pre, post=ll_constants_post, default=True)
_with_pre_post_option('eliminate_identical_variables', pre=eliminate_identical_variables_pre, post=llfirst_post, default=True)
_heuristic(ll_heuristic)
_with_pre_post_option('llfirst', if_not_option=['llfirstonthefly'], pre=llfirst_pre, post=llfirst_post, default=False)
_with_pre_post_option('llfirstonthefly', pre=llfirstonthefly_pre, post=llfirst_post, default=False)
_with_pre_post_option('incremental', pre=incremental_pre)
_heuristic(change_order_heuristic)
_with_pre_post_option('other_ordering_first', if_not_option=['interpolation_gb'], pre=other_ordering_pre, default=False)
_heuristic(linear_algebra_heuristic)
_with_pre_post_option('fix_deg_bound', if_not_option=['interpolation_gb'], post=fix_deg_bound_post, default=True)
_with_pre_post_option('minsb', post=minsb_post, if_not_option=['redsb', 'deg_bound', 'interpolation_gb', 'convert_with_fglm_from_ring'], default=True)
_with_pre_post_option('redsb', post=redsb_post, if_not_option=['deg_bound', 'interpolation_gb', 'convert_with_fglm_from_ring'], default=True)
def groebner_basis(I, heuristic=True, unique_ideal_generator=False, interpolation_gb=False, clean_and_restart_algorithm=False, convert_with_fglm_from_ring=None, convert_with_fglm_to_ring=None, fglm_bound=40000, modified_linear_algebra=True, preprocessor=None, deg_bound=False, implementation='Python', full_prot=False, prot=False, draw_matrices=False, preprocess_only=False, **impl_options):
    """Compute a Groebner basis of the ideal generated by *I*.

    Dispatches between FGLM ordering conversion, interpolation-based lex GB,
    and the symmetric GB algorithms (Python or C implementation), with
    optional restart on generator-limit overflow.

    NOTE(review): the bare "_with_pre_post_option(...)" / "_heuristic(...)"
    lines above look like mangled "@..." decorators in this copy of the
    file — confirm against the upstream source.
    """
    # Empty input: the empty set is its own (trivial) basis.
    if (not I):
        return I
    if full_prot:
        prot = True
    if prot:
        print('number of passed generators:', len(I))
    # FGLM path: convert an existing basis between term orderings.
    if (convert_with_fglm_from_ring is not None):
        from_ring = convert_with_fglm_from_ring
        to_ring = convert_with_fglm_to_ring
        return _fglm(I, from_ring, to_ring)
    # Interpolation path: only valid for a single polynomial in lex order.
    if interpolation_gb:
        first = next(iter(I))
        if ((len(I) != 1) or (first.ring().get_order_code() != OrderCode.lp)):
            raise ValueError
        return lex_groebner_basis_for_polynomial_via_variety(first)
    if (deg_bound is False):
        # NOTE(review): the right-hand side of this assignment is missing in
        # this copy of the file (truncated source) — restore from upstream.
        deg_bound =
    I = [Polynomial(p) for p in I if (not p.is_zero())]
    if (unique_ideal_generator and I):
        # Collapse all generators into a single one: prod(p_i + 1) + 1.
        prod = 1
        for p in I:
            prod = ((p + 1) * prod)
        I = [(prod + 1)]
    if (implementation == 'Python'):
        implementation = symmGB_F2_python
    else:
        implementation = symmGB_F2_C
    if preprocessor:
        I = preprocessor(I)
    if preprocess_only:
        # Dump the preprocessed system and terminate the process.
        for p in I:
            print(p)
        import sys
        sys.exit(0)
    def call_algorithm(I, max_generators=None):
        # Invoke the selected implementation, forwarding only the options it understands.
        return implementation(I, deg_bound=deg_bound, full_prot=full_prot, prot=prot, max_generators=max_generators, draw_matrices=draw_matrices, **filter_newstyle_options(implementation, **impl_options))
    if clean_and_restart_algorithm:
        # Retry with increasing generator limits, restarting from the
        # intermediate generators whenever the limit is exceeded.
        for max_generators in [1000, 10000, 50000, 100000, 200000, 300000, 400000, None]:
            try:
                return call_algorithm(I, max_generators=max_generators)
            except GeneratorLimitExceeded as e:
                I = list(e.strat.all_generators())
                del e.strat
                if prot:
                    print('generator limit exceeded:', max_generators, 'restarting algorithm')
    else:
        return call_algorithm(I)
class XGLMTokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the 'tokenizers' backend is absent."""
    # Backends that must be installed before the real class is usable.
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError naming the missing backends.
        requires_backends(self, ['tokenizers'])
def make_selectors(opt, kb_dict):
    """Build the encoder/decoder selector modules.

    Returns a dict with:
      'enc': bidirectional GRU over rnn_size inputs with kb_embed_size hidden units;
      'dec': 3-layer Tanh MLP mapping 11*rnn_size features to 6*kb_dict.size scores.
    """
    selectors = {}
    # NOTE(review): computed but never used below — the encoder is built on
    # opt.rnn_size directly; confirm whether input_size was intended.
    input_size = opt.rnn_size + 6 * opt.kb_embed_size
    selectors['enc'] = nn.GRU(input_size=opt.rnn_size, hidden_size=opt.kb_embed_size, bias=True, bidirectional=True)
    selectors['dec'] = nn.Sequential(
        nn.Linear(11 * opt.rnn_size, opt.rnn_size),
        nn.Tanh(),
        nn.Linear(opt.rnn_size, opt.sel_hid_size),
        nn.Tanh(),
        nn.Linear(opt.sel_hid_size, 6 * kb_dict.size),
    )
    return selectors
class _MLPVectorProjector(nn.Module):
    """Runs the input through *width* parallel MLPs and concatenates their
    outputs along the second-to-last dimension."""

    def __init__(self, input_hidden_size: int, lm_hidden_size: int, num_layers: int, width: int):
        super(_MLPVectorProjector, self).__init__()
        self.mlps = nn.ModuleList()
        for _ in range(width):
            layers = [nn.Linear(input_hidden_size, lm_hidden_size)]
            # Each additional layer contributes a GELU + hidden->hidden linear.
            for _ in range(num_layers - 1):
                layers.extend([nn.GELU(), nn.Linear(lm_hidden_size, lm_hidden_size)])
            self.mlps.append(nn.Sequential(*layers))

    def forward(self, x):
        projections = [branch(x) for branch in self.mlps]
        return torch.cat(projections, dim=-2)
class RoBERTaConfig(LMConfig):
    """Configuration for RoBERTa-family language models: sets the model name
    and carries per-checkpoint metadata (HF model id, hidden size, batch-size
    tables, and fine-tuning command presets)."""
    def __init__(self, args=None):
        super(RoBERTaConfig, self).__init__(args)
        self.model = 'RoBerta'
        self._post_init(args)
    # Class-level metadata shared by all instances; note these are defined
    # after __init__, which is legal but unusual ordering.
    para_prefix = {**LMConfig.para_prefix}
    args_to_parse = list(para_prefix.keys())
    # Per-model presets: max_bsz maps sequence lengths to batch sizes for
    # train/inference; prt_lm holds dataset-specific fine-tuning commands.
    meta_data = {'RoBerta': SN(hf_model='roberta-base', hidden_dim=768, father_model='RoBerta', max_bsz=SN(train={12: 8, 16: 12, 24: 9, 32: 30, 40: 18, 70: 48}, inf={12: 150, 16: 200, 24: 150, 32: 720, 40: 300, 70: 560}), prt_lm={'arxiv': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=4 --eq_batch_size=36 --eval_patience=50000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=2e-05 --warmup_epochs=0.6', max_n_gpus=4), 'products': SN(model='FtV1', cmd='--lr=2e-05 --eq_batch_size=144 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.3 --cla_dropout=0.2 --cla_bias=T --warmup_epochs=0.2 --eval_patience=65308 --epochs=4 --label_smoothing_factor=0.1 --warmup_epochs=0.6', max_n_gpus=8), 'paper': SN(model='FtV1', cmd='--att_dropout=0.1 --cla_dropout=0.4 --dropout=0.3 --epochs=5 --eq_batch_size=288 --eval_patience=410000 --label_smoothing_factor=0.3 --load_best_model_at_end=T --lr=5e-05 --warmup_epochs=0.6', max_n_gpus=16)}), 'Roberta-large': SN(hf_model='roberta-large', father_model='RoBerta', hidden_dim=1024, max_bsz=SN(train={12: 6, 16: 10, 24: 16, 32: 12}, inf={12: 150, 16: 200, 24: 150, 32: 250}), prt_lm={'arxiv': SN(model='FtV1', cmd='--lr=1e-05 --eq_batch_size=36 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.1 --cla_dropout=0.1 --cla_bias=T --epochs=4 --warmup_epochs=0.2 --eval_patience=50000'), 'products': SN(model='FtV1', cmd='--lr=2e-05 --eq_batch_size=144 --weight_decay=0.01 --dropout=0.1 --att_dropout=0.3 --cla_dropout=0.2 --cla_bias=T --warmup_epochs=0.2 --eval_patience=65308 --epochs=4 --label_smoothing_factor=0.1 --warmup_epochs=0.6', max_n_gpus=8)})}
def add_vignette_node_group() -> bpy.types.NodeGroup:
    """Create and return a compositor node group implementing a vignette.

    The vignette mask is obtained by lens-distorting the input image, taking
    the resulting alpha channel, blurring it heavily, and multiplying it back
    onto the original image.
    """
    tree = bpy.data.node_groups.new(type='CompositorNodeTree', name='Vignette')

    # Group interface: an image plus a 0..1 vignette strength factor.
    group_in = tree.nodes.new('NodeGroupInput')
    tree.inputs.new('NodeSocketColor', 'Image')
    tree.inputs.new('NodeSocketFloat', 'Amount')
    tree.inputs['Amount'].default_value = 0.2
    tree.inputs['Amount'].min_value = 0.0
    tree.inputs['Amount'].max_value = 1.0

    # Processing nodes.
    distort = tree.nodes.new(type='CompositorNodeLensdist')
    distort.inputs['Distort'].default_value = 1.0
    split_rgba = tree.nodes.new(type='CompositorNodeSepRGBA')
    soften = tree.nodes.new(type='CompositorNodeBlur')
    soften.filter_type = 'GAUSS'
    soften.size_x = 300
    soften.size_y = 300
    soften.use_extended_bounds = True
    darken = tree.nodes.new(type='CompositorNodeMixRGB')
    darken.blend_type = 'MULTIPLY'
    group_out = tree.nodes.new('NodeGroupOutput')
    tree.outputs.new('NodeSocketColor', 'Image')

    # Wiring: image * blurred alpha of the distorted image, mixed by Amount.
    tree.links.new(group_in.outputs['Amount'], darken.inputs['Fac'])
    tree.links.new(group_in.outputs['Image'], darken.inputs[1])
    tree.links.new(group_in.outputs['Image'], distort.inputs['Image'])
    tree.links.new(distort.outputs['Image'], split_rgba.inputs['Image'])
    tree.links.new(split_rgba.outputs['A'], soften.inputs['Image'])
    tree.links.new(soften.outputs['Image'], darken.inputs[2])
    tree.links.new(darken.outputs['Image'], group_out.inputs['Image'])
    arrange_nodes(tree)
    return tree
def _make_time_sift_events(prev_time, post_time):
    """Encode the gap between two timestamps as a list of time_shift events.

    The gap is quantized to 1/100-second ticks; each event can encode at most
    RANGE_TIME_SHIFT ticks, so longer gaps emit several saturated events
    followed by one event carrying the remainder.
    """
    remaining = int(round((post_time - prev_time) * 100))
    events = []
    # Emit one saturated event per full RANGE_TIME_SHIFT span.
    while remaining >= RANGE_TIME_SHIFT:
        events.append(Event(event_type='time_shift', value=RANGE_TIME_SHIFT - 1))
        remaining -= RANGE_TIME_SHIFT
    # A zero remainder adds nothing; event values are offset by -1.
    if remaining != 0:
        events.append(Event(event_type='time_shift', value=remaining - 1))
    return events
def _seg_12():
    # Looks like a machine-generated Unicode codepoint segment table of
    # (codepoint, status[, mapping]) tuples ('V'/'X'/'M' statuses) — presumably
    # part of an IDNA-style mapping; do not edit by hand. TODO confirm source.
    return [(3113, 'X'), (3114, 'V'), (3130, 'X'), (3133, 'V'), (3141, 'X'), (3142, 'V'), (3145, 'X'), (3146, 'V'), (3150, 'X'), (3157, 'V'), (3159, 'X'), (3160, 'V'), (3163, 'X'), (3168, 'V'), (3172, 'X'), (3174, 'V'), (3184, 'X'), (3191, 'V'), (3213, 'X'), (3214, 'V'), (3217, 'X'), (3218, 'V'), (3241, 'X'), (3242, 'V'), (3252, 'X'), (3253, 'V'), (3258, 'X'), (3260, 'V'), (3269, 'X'), (3270, 'V'), (3273, 'X'), (3274, 'V'), (3278, 'X'), (3285, 'V'), (3287, 'X'), (3294, 'V'), (3295, 'X'), (3296, 'V'), (3300, 'X'), (3302, 'V'), (3312, 'X'), (3313, 'V'), (3315, 'X'), (3328, 'V'), (3341, 'X'), (3342, 'V'), (3345, 'X'), (3346, 'V'), (3397, 'X'), (3398, 'V'), (3401, 'X'), (3402, 'V'), (3408, 'X'), (3412, 'V'), (3428, 'X'), (3430, 'V'), (3456, 'X'), (3457, 'V'), (3460, 'X'), (3461, 'V'), (3479, 'X'), (3482, 'V'), (3506, 'X'), (3507, 'V'), (3516, 'X'), (3517, 'V'), (3518, 'X'), (3520, 'V'), (3527, 'X'), (3530, 'V'), (3531, 'X'), (3535, 'V'), (3541, 'X'), (3542, 'V'), (3543, 'X'), (3544, 'V'), (3552, 'X'), (3558, 'V'), (3568, 'X'), (3570, 'V'), (3573, 'X'), (3585, 'V'), (3635, 'M', u''), (3636, 'V'), (3643, 'X'), (3647, 'V'), (3676, 'X'), (3713, 'V'), (3715, 'X'), (3716, 'V'), (3717, 'X'), (3718, 'V'), (3723, 'X'), (3724, 'V'), (3748, 'X'), (3749, 'V'), (3750, 'X'), (3751, 'V'), (3763, 'M', u''), (3764, 'V')]
def createDict(word_freqs):
    """Build an ordered word -> id dictionary from a {word: frequency} mapping.

    Special tokens occupy ids 0..len(extra_tokens)-1; real words are ranked by
    descending frequency and assigned the ids after the special tokens.
    Entries whose id is >= DICT_SIZE (a module-level constant) are dropped, so
    the result holds at most DICT_SIZE entries.
    """
    words = [k for k in word_freqs.keys()]
    freqs = [v for v in word_freqs.values()]
    # Rank words by descending frequency.
    sorted_idx = np.argsort(freqs)
    sorted_words = [words[ii] for ii in sorted_idx[::-1]]
    _GO = '_GO'
    EOS = '_EOS'
    UNK = '_UNK'
    PAD = '_PAD'
    SEP0 = '_SEP0'
    SEP1 = '_SEP1'
    SEP2 = '_SEP2'
    SEP3 = '_SEP3'
    SEP4 = '_SEP4'
    SEP5 = '_SEP5'
    SEP6 = '_SEP6'
    SEP7 = '_SEP7'
    extra_tokens = [_GO, EOS, UNK, PAD, SEP0, SEP1, SEP2, SEP3, SEP4, SEP5, SEP6, SEP7]
    worddict = OrderedDict()
    for (ii, ww) in enumerate(extra_tokens):
        worddict[ww] = ii
    # BUG FIX: word ids previously restarted at 0, colliding with the ids of
    # the special tokens above; offset them so every id is unique.
    for (ii, ww) in enumerate(sorted_words):
        worddict[ww] = ii + len(extra_tokens)
    # Drop entries beyond the dictionary budget, preserving insertion order.
    return OrderedDict((key, idx) for (key, idx) in worddict.items() if idx < DICT_SIZE)
class TestOptions(object):
    """Command-line options for the test/inference phase.

    Wraps argparse: ``initialize`` declares the flags, ``parse`` parses them,
    prints/saves the resulting options, and derives output directories.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # Deferred: flags are only registered on the first call to parse().
        self.initialized = False

    def initialize(self):
        """Register all command-line flags on the parser."""
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--K', type=int, dest='K', default=10, help='Number of steps to observe from the past')
        self.parser.add_argument('--T', type=int, dest='T', default=10, help='Number of steps into the middle')
        self.parser.add_argument('--F', type=int, dest='F', default=10, help='Number of steps to observe from the future')
        self.parser.add_argument('--c_dim', type=int, default=3, help='# of input image channels')
        self.parser.add_argument('--result_dir', type=str, default='./results', help='temporary results are saved here')
        self.parser.add_argument('--comb_type', type=str, default='avg', help='type of combination [repeat_P|repeat_F|avg|w_avg]')
        self.parser.add_argument('--image_size', type=int, nargs='+', dest='image_size', default=[128], help='image size h w')
        self.parser.add_argument('--dataroot', required=True, help='path to videos (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument('--textroot', required=True, help='path to trainings (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument('--video_list', type=str, default='test_data_list.txt', help='the name of the videolist file')
        self.parser.add_argument('--data', required=True, type=str, help='name of test dataset [KTH|UCF]')
        self.parser.add_argument('--pick_mode', default='Slide', type=str, help='pick up clip [Random|First|Slide]')
        self.initialized = True

    def parse(self):
        """Parse argv, echo the options, persist them to opt.txt, return opt."""
        if (not self.initialized):
            self.initialize()
        self.opt = self.parser.parse_args()
        # A single --image_size value means a square image: duplicate it.
        if (len(self.opt.image_size) == 1):
            a = self.opt.image_size[0]
            self.opt.image_size.append(a)
        args = vars(self.opt)
        print(' Options ')
        for (k, v) in sorted(args.items()):
            print(('%s: %s' % (str(k), str(v))))
        print(' End ')
        # Test-time overrides: fixed ordering and a fixed video list.
        self.opt.serial_batches = True
        self.opt.video_list = 'test_data_list.txt'
        self.opt.test_name = ((((self.opt.name + '_') + str(self.opt.K)) + '_') + str(self.opt.T))
        # Quantitative results land under results/quantitative/<data>/<name>_<K>_<T>.
        self.opt.quant_dir = os.path.join(self.opt.result_dir, 'quantitative', self.opt.data, ((((self.opt.name + '_') + str(self.opt.K)) + '_') + str(self.opt.T)))
        makedir(self.opt.quant_dir)
        # Persist the options alongside the quantitative results.
        file_name = os.path.join(self.opt.quant_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(' Options \n')
            for (k, v) in sorted(args.items()):
                opt_file.write(('%s: %s\n' % (str(k), str(v))))
            opt_file.write(' End \n')
        return self.opt
# NOTE(review): the line below looks like a stripped decorator — presumably
# ``@hydra.main(config_path='control_pcgrl/configs', config_name='pod')``;
# confirm against the original source before running.
(config_path='control_pcgrl/configs', config_name='pod')
def main(cfg: PoDConfig):
    """Train (or run inference with) an offline-RL repair-path model via ray tune."""
    cfg = validate_config(cfg)
    if (cfg is False):
        print('Invalid config!')
        return
    # Offline trajectories recorded as JSON under the experiment's log dir.
    traj_dir = os.path.join(cfg.log_dir, 'repair-paths')
    register_env('pcgrl', make_env)
    model_cls = CustomFeedForwardModel
    ModelCatalog.register_custom_model('custom_model', model_cls)
    # Select the offline algorithm: behavior cloning or MARWIL.
    if (cfg.offline_algo == 'BC'):
        algo_config = BCConfig()
    elif (cfg.offline_algo == 'MARWIL'):
        algo_config = MARWILConfig()
    else:
        raise ValueError(f'Invalid offline algorithm: {cfg.offline_algo}')
    algo_config.model = {'custom_model': 'custom_model', 'custom_model_config': {}}
    print(algo_config.beta)
    algo_config.training(lr=0.001)
    # Feed every recorded trajectory file as offline input.
    traj_glob = os.path.join(traj_dir, '*.json')
    algo_config.offline_data(input_=traj_glob)
    algo_config.environment(env='pcgrl')
    algo_config.env_config = {**cfg}
    algo_config.framework('torch')
    il_log_dir = 'il_logs'
    exp_name = cfg.offline_algo
    exp_dir = os.path.join(il_log_dir, exp_name)
    # Resume an existing run unless overwriting was requested.
    if ((not cfg.overwrite) and os.path.exists(exp_dir)):
        tuner = tune.Tuner.restore(exp_dir)
    else:
        shutil.rmtree(exp_dir, ignore_errors=True)
        run_config = air.RunConfig(checkpoint_config=air.CheckpointConfig(checkpoint_at_end=True, checkpoint_frequency=10, num_to_keep=2), local_dir=il_log_dir)
        tuner = tune.Tuner(cfg.offline_algo, param_space=algo_config.to_dict(), tune_config=tune.TuneConfig(metric='info/learner/default_policy/learner_stats/policy_loss', mode='min'), run_config=run_config)
    if cfg.infer:
        # Inference: restore the best checkpoint and roll out episodes forever.
        algo_cls = (BC if (cfg.offline_algo == 'BC') else MARWIL)
        best_result = tuner.get_results().get_best_result()
        ckpt = best_result.best_checkpoints[0][0]
        bc_model = algo_cls.from_checkpoint(ckpt)
        print(f'Restored from checkpoint {ckpt}')
        env = make_pod_env(cfg)
        # Intentionally endless evaluation loop (terminate externally).
        while True:
            (obs, info) = env.reset()
            (done, truncated) = (False, False)
            while ((not done) and (not truncated)):
                action = bc_model.compute_single_action(obs, explore=True)
                (obs, reward, done, truncated, info) = env.step(action)
                env.render()
    else:
        result = tuner.fit()
def setup_test_file():
    """Fixture-style generator yielding the same NUL-containing payload as a
    real file handle, a BytesIO, and a cStringIO; the temp file is removed on
    teardown."""
    payload = b'a\x00string'
    handle, path = mkstemp()
    with os.fdopen(handle, 'wb') as sink:
        sink.write(payload)
    with open(path, 'rb') as fs:
        gs = BytesIO(payload)
        cs = cStringIO(payload)
        yield (fs, gs, cs)
    # Teardown: drop the temporary file once the consumer is done.
    os.unlink(path)
def test_download_7z_file(mocker, mock_download_from_remote, mock_un7z):
    """download_7z_file should fetch the archive, then unpack it with un7z."""
    # The remote download reports the local archive path ('foo').
    mock_download_from_remote.return_value = 'foo'
    download_utils.download_7z_file('a', 'b', False, False)
    mock_download_from_remote.assert_called_once_with('a', 'b', False)
    # The unpack step receives the downloaded path and the cleanup flag.
    mock_un7z.assert_called_once_with('foo', cleanup=False)
    _clean('a')
def get_image(image_path, is_grayscale=False):
    """Load the image at *image_path* and apply the module-level transform."""
    return transform(imread(image_path, is_grayscale))
def register_Ns3CallbackImpl__Void_Ns3DataRate_Ns3DataRate_Ns3Mac48Address_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    # Looks like machine-generated pybindgen registration code for an ns-3
    # CallbackImpl specialization — do not edit by hand; regenerate instead.
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same specialization.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::DataRate, ns3::DataRate, ns3::Mac48Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('ns3::DataRate', 'arg0'), param('ns3::DataRate', 'arg1'), param('ns3::Mac48Address', 'arg2')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class Pipeline(object):
    """A composable chain of token-transformation steps.

    Each Pipeline wraps one ``convert_token`` callable; ``add_before`` and
    ``add_after`` splice other pipelines' steps around it, and calling the
    pipeline applies every step in order.
    """

    def __init__(self, convert_token=None):
        """Create a pipeline from a callable (or the identity if None).

        Raises ValueError when *convert_token* is neither None nor callable.
        """
        if (convert_token is None):
            self.convert_token = Pipeline.identity
        elif callable(convert_token):
            self.convert_token = convert_token
        else:
            raise ValueError('Pipeline input convert_token {} is not None or callable'.format(convert_token))
        # The chain of steps; starts with just this pipeline.
        self.pipes = [self]

    def __call__(self, x, *args):
        """Run *x* through every step of the chain, in order."""
        for pipe in self.pipes:
            x = pipe.call(x, *args)
        return x

    def call(self, x, *args):
        """Apply this step's convert_token; lists are mapped element-wise."""
        if isinstance(x, list):
            return [self.convert_token(tok, *args) for tok in x]
        return self.convert_token(x, *args)

    def add_before(self, pipeline):
        """Prepend *pipeline*'s steps to this chain; returns self for chaining."""
        if (not isinstance(pipeline, Pipeline)):
            pipeline = Pipeline(pipeline)
        self.pipes = (pipeline.pipes[:] + self.pipes[:])
        return self

    def add_after(self, pipeline):
        """Append *pipeline*'s steps to this chain; returns self for chaining."""
        if (not isinstance(pipeline, Pipeline)):
            pipeline = Pipeline(pipeline)
        self.pipes = (self.pipes[:] + pipeline.pipes[:])
        return self

    @staticmethod
    def identity(x):
        """Default no-op conversion.

        FIX: restored @staticmethod — without it, accessing ``identity``
        through an *instance* yields a bound method, so ``p.identity(x)``
        raised TypeError (two arguments for a one-parameter function).
        Class-level access (``Pipeline.identity``) behaves as before.
        """
        return x
# Needed by the restored @contextmanager decorator below; harmless if the
# file's top-level imports already provide it.
from contextlib import contextmanager


class MBartTokenizer(XLMRobertaTokenizer):
    """mBART tokenizer: XLM-RoBERTa sentencepiece tokenization plus language
    codes appended as suffix special tokens ([text, eos, lang_code]).

    FIX: restored the decorators this class plainly relied on — the bare
    ``_lang.setter`` line was a stripped ``@src_lang.setter`` (a NameError at
    class-creation time as written), ``vocab_size``/``src_lang`` were
    properties, and ``as_target_tokenizer`` is a context manager (it contains
    a bare ``yield`` and restores source-language tokens on exit).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Special tokens surrounding every encoded sequence.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, *args, tokenizer_file=None, src_lang=None, tgt_lang=None, **kwargs):
        super().__init__(*args, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, **kwargs)
        self.sp_model_size = len(self.sp_model)
        # Language codes get ids after the sentencepiece vocabulary.
        self.lang_code_to_id = {code: ((self.sp_model_size + i) + self.fairseq_offset) for (i, code) in enumerate(FAIRSEQ_LANGUAGE_CODES)}
        self.id_to_lang_code = {v: k for (k, v) in self.lang_code_to_id.items()}
        # <mask> takes the id right after all language codes.
        self.fairseq_tokens_to_ids['<mask>'] = ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        self._src_lang = (src_lang if (src_lang is not None) else 'en_XX')
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self):
        # +1 for the <mask> token appended after the language codes.
        return (((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset) + 1)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Switching the source language also refreshes the suffix tokens.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        prefix_ones = ([1] * len(self.prefix_tokens))
        suffix_ones = ([1] * len(self.suffix_tokens))
        if (token_ids_1 is None):
            return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
        return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap the id sequence(s) with the current prefix/suffix tokens."""
        if (token_ids_1 is None):
            return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
        return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='ro_RO', **kwargs) -> BatchEncoding:
        """Encode a source/target batch after setting both languages."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    @contextmanager
    def as_target_tokenizer(self):
        """Temporarily switch special tokens to the target language."""
        self.set_tgt_lang_special_tokens(self.tgt_lang)
        yield
        self.set_src_lang_special_tokens(self.src_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Use [eos, src_lang_code] as the suffix, no prefix."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Use [eos, tgt_lang_code] as the suffix, no prefix."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
def get_models_status():
    """Return True iff both the 'myner' and 'myintent' docker containers are running."""

    def _container_running(name):
        # `docker inspect -f {{.State.Running}}` prints 'true'/'false' plus a newline.
        probe = subprocess.run(['docker', 'inspect', '-f', '{{.State.Running}}', name], capture_output=True, text=True)
        return probe.stdout.strip('\n') == 'true'

    # Probe both containers (unconditionally, as before), then combine.
    statuses = [_container_running(name) for name in ('myner', 'myintent')]
    return all(statuses)
class ChangeFinalWeightQCAttrTest(BaseKerasFeatureNetworkTest):
    """Checks that editing the final weights quantization config attribute
    (disabling bias correction on Conv2D nodes) leaves a bias-free conv
    bias-free after quantization."""

    def __init__(self, unit_test):
        super().__init__(unit_test, experimental_exporter=True)

    def get_debug_config(self):
        # Apply ChangeFinalWeightsQuantConfigAttr(weights_bias_correction=False)
        # to every Conv2D node via the network editor.
        return DebugConfig(network_editor=[EditRule(filter=NodeTypeFilter(layers.Conv2D), action=ChangeFinalWeightsQuantConfigAttr(weights_bias_correction=False))])

    def create_networks(self):
        # Minimal network: a single bias-free Conv2D.
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = layers.Conv2D(3, 4, use_bias=False)(inputs)
        model = keras.Model(inputs=inputs, outputs=x)
        return model

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # With bias correction disabled, no bias may have been introduced.
        conv_layer = get_layers_from_model_by_type(quantized_model, layers.Conv2D)[0]
        self.unit_test.assertTrue((conv_layer.layer.bias is None))
def Inception(inputs, units=8, strides=1):
    """Inception-style block: parallel 5x5, 3x3 and 1x1 convolutions over the
    same input, concatenated along the channel axis."""
    branches = [Conv2D(units, kernel, padding='same', activation='relu', strides=strides)(inputs) for kernel in (5, 3, 1)]
    return Concatenate()(branches)
def init_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and all CUDA devices).

    cudnn is deliberately left in benchmark (autotuning) mode, trading
    bit-exact reproducibility of convolutions for speed.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Favor throughput over determinism in cudnn kernel selection.
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
class EdgeResidual(BaseModule):
    """Edge Residual block: an expansion conv (kernel_size, stride 1) with
    optional squeeze-excite, followed by a 1x1 projection conv (carrying the
    stride), with an optional identity shortcut and drop-path.

    NOTE(review): the dict-valued defaults (norm_cfg/act_cfg) follow the
    mm-style config convention; they are not mutated here, but confirm
    ConvModule does not mutate them either.
    """

    def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_residual=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), drop_path_rate=0.0, with_cp=False, init_cfg=None, **kwargs):
        super(EdgeResidual, self).__init__(init_cfg=init_cfg)
        assert (stride in [1, 2])
        # with_cp: wrap the forward pass in torch checkpointing to save memory.
        self.with_cp = with_cp
        self.drop_path = (DropPath(drop_path_rate) if (drop_path_rate > 0) else nn.Identity())
        self.with_se = (se_cfg is not None)
        # Shortcut only when shapes match and the caller did not disable it.
        self.with_residual = ((stride == 1) and (in_channels == out_channels) and with_residual)
        if self.with_se:
            assert isinstance(se_cfg, dict)
        # Expansion conv keeps spatial size (stride 1, same padding).
        self.conv1 = ConvModule(in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size // 2), conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        if self.with_se:
            self.se = SELayer(**se_cfg)
        # Projection conv carries the stride; no activation (linear bottleneck).
        self.conv2 = ConvModule(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)

    def forward(self, x):

        def _inner_forward(x):
            out = x
            out = self.conv1(out)
            if self.with_se:
                out = self.se(out)
            out = self.conv2(out)
            if self.with_residual:
                # Drop-path applies only to the residual branch.
                return (x + self.drop_path(out))
            else:
                return out
        # Checkpointing only helps (and only works) when grads are required.
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return out
class TFXLNetModelTest(TFCommonTestCases.TFCommonModelTester):
    """Model tests for the TF XLNet family (base, LM head, sequence
    classification, simple QA)."""

    all_model_classes = ((TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, TFXLNetForQuestionAnsweringSimple) if is_tf_available() else ())
    test_pruning = False

    class TFXLNetModelTester(object):
        """Builds tiny XLNet configs/inputs and runs shape checks per model head."""

        def __init__(self, parent, batch_size=13, seq_length=7, mem_len=10, clamp_len=(- 1), reuse_len=15, is_training=True, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, num_attention_heads=4, d_inner=128, num_hidden_layers=5, max_position_embeddings=10, type_sequence_label_size=2, untie_r=True, bi_data=False, same_length=False, initializer_range=0.05, seed=1, type_vocab_size=2):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.mem_len = mem_len
            self.clamp_len = clamp_len
            self.reuse_len = reuse_len
            self.is_training = is_training
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.cutoffs = cutoffs
            self.hidden_size = hidden_size
            self.num_attention_heads = num_attention_heads
            self.d_inner = d_inner
            self.num_hidden_layers = num_hidden_layers
            self.max_position_embeddings = max_position_embeddings
            self.bi_data = bi_data
            self.untie_r = untie_r
            self.same_length = same_length
            self.initializer_range = initializer_range
            self.seed = seed
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size

        def prepare_config_and_inputs(self):
            """Create a small XLNetConfig plus random ids/masks/labels."""
            input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            input_mask = ids_tensor([self.batch_size, self.seq_length], 2, dtype=tf.float32)
            # Query stream inputs: one extra position masked for prediction.
            input_ids_q = ids_tensor([self.batch_size, (self.seq_length + 1)], self.vocab_size)
            perm_mask = tf.zeros((self.batch_size, (self.seq_length + 1), self.seq_length), dtype=tf.float32)
            perm_mask_last = tf.ones((self.batch_size, (self.seq_length + 1), 1), dtype=tf.float32)
            perm_mask = tf.concat([perm_mask, perm_mask_last], axis=(- 1))
            target_mapping = tf.zeros((self.batch_size, 1, self.seq_length), dtype=tf.float32)
            target_mapping_last = tf.ones((self.batch_size, 1, 1), dtype=tf.float32)
            target_mapping = tf.concat([target_mapping, target_mapping_last], axis=(- 1))
            sequence_labels = None
            lm_labels = None
            is_impossible_labels = None
            if self.use_labels:
                lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            config = XLNetConfig(vocab_size_or_config_json_file=self.vocab_size, d_model=self.hidden_size, n_head=self.num_attention_heads, d_inner=self.d_inner, n_layer=self.num_hidden_layers, untie_r=self.untie_r, max_position_embeddings=self.max_position_embeddings, mem_len=self.mem_len, clamp_len=self.clamp_len, same_length=self.same_length, reuse_len=self.reuse_len, bi_data=self.bi_data, initializer_range=self.initializer_range, num_labels=self.type_sequence_label_size)
            return (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels)

        def set_seed(self):
            random.seed(self.seed)
            tf.random.set_seed(self.seed)

        def create_and_check_xlnet_base_model(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            """Base model: check output and memory shapes for dict and list inputs."""
            model = TFXLNetModel(config)
            inputs = {'input_ids': input_ids_1, 'input_mask': input_mask, 'token_type_ids': segment_ids}
            (_, _) = model(inputs)
            inputs = [input_ids_1, input_mask]
            (outputs, mems_1) = model(inputs)
            result = {'mems_1': [mem.numpy() for mem in mems_1], 'outputs': outputs.numpy()}
            self.parent.assertListEqual(list(result['outputs'].shape), [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))

        def create_and_check_xlnet_lm_head(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            """LM head: two chained passes (mems carried over) plus a query-stream pass."""
            model = TFXLNetLMHeadModel(config)
            inputs_1 = {'input_ids': input_ids_1, 'token_type_ids': segment_ids}
            (all_logits_1, mems_1) = model(inputs_1)
            inputs_2 = {'input_ids': input_ids_2, 'mems': mems_1, 'token_type_ids': segment_ids}
            (all_logits_2, mems_2) = model(inputs_2)
            inputs_3 = {'input_ids': input_ids_q, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
            (logits, _) = model(inputs_3)
            result = {'mems_1': [mem.numpy() for mem in mems_1], 'all_logits_1': all_logits_1.numpy(), 'mems_2': [mem.numpy() for mem in mems_2], 'all_logits_2': all_logits_2.numpy()}
            self.parent.assertListEqual(list(result['all_logits_1'].shape), [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
            self.parent.assertListEqual(list(result['all_logits_2'].shape), [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_2'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))

        def create_and_check_xlnet_qa(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            """QA head: start/end logits per position plus memories."""
            model = TFXLNetForQuestionAnsweringSimple(config)
            inputs = {'input_ids': input_ids_1, 'attention_mask': input_mask, 'token_type_ids': segment_ids}
            (start_logits, end_logits, mems) = model(inputs)
            result = {'start_logits': start_logits.numpy(), 'end_logits': end_logits.numpy(), 'mems': [m.numpy() for m in mems]}
            self.parent.assertListEqual(list(result['start_logits'].shape), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result['end_logits'].shape), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))

        def create_and_check_xlnet_sequence_classif(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            """Sequence classification head: one logit vector per example."""
            model = TFXLNetForSequenceClassification(config)
            (logits, mems_1) = model(input_ids_1)
            result = {'mems_1': [mem.numpy() for mem in mems_1], 'logits': logits.numpy()}
            self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.type_sequence_label_size])
            self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels) = config_and_inputs
            inputs_dict = {'input_ids': input_ids_1}
            return (config, inputs_dict)

    def setUp(self):
        self.model_tester = TFXLNetModelTest.TFXLNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlnet_base_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs)

    def test_xlnet_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs)

    def test_xlnet_sequence_classif(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)

    def test_xlnet_qa(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)

    # NOTE(review): the line below looks like a stripped decorator —
    # presumably ``@pytest.mark.slow`` (or ``@slow``); as written it is a
    # syntax error. Confirm against the original source.
    .slow
    def test_model_from_pretrained(self):
        cache_dir = '/tmp/transformers_test/'
        for model_name in list(TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = TFXLNetModel.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)
class DynamicBaselineConfig(DetectorConfig):
    """Config for a dynamic-baseline detector: a baseline is learned either
    over a fixed (t0, t1) period or over a rolling train window, modeling the
    given periodic trends.

    FIX: restored the @property/@setter decorators this class plainly relied
    on — the bare ``_period.setter`` line was a stripped
    ``@fixed_period.setter`` (a NameError at class-creation time as written),
    and ``__init__``'s ``self.trends = ...`` / ``self.fixed_period = ...``
    assignments only validate through these setters.
    """

    # Trends modeled when the caller does not specify any.
    _default_trends = ['weekly', 'daily']

    def __init__(self, fixed_period: Tuple[(str, str)]=None, train_window: str=None, wind_sz: str='1h', trends: List[str]=None, **kwargs):
        """
        :param fixed_period: (t0, t1) timestamps delimiting a fixed baseline period
        :param train_window: lookback window string (e.g. '4w'); mutually
            exclusive with fixed_period
        :param wind_sz: aggregation window size
        :param trends: trend names; each must be a member of Trend
        """
        super().__init__(**kwargs)
        self.trends = (self._default_trends if (trends is None) else trends)
        self.wind_sz = wind_sz
        # Exactly one of fixed_period / train_window may be given; otherwise
        # fall back to a train window derived from the trends.
        if (not xor((fixed_period is None), (train_window is None))):
            fixed_period = None
            train_window = (train_window if (train_window is not None) else self.determine_train_window())
        self.fixed_period = fixed_period
        self.train_window = train_window

    @property
    def fixed_period(self):
        return self._fixed_period

    @fixed_period.setter
    def fixed_period(self, period: Tuple[(str, str)]):
        # Normalize both endpoints to pandas timestamps.
        if (period is not None):
            assert (len(period) == 2)
            period = tuple((to_pd_datetime(t) for t in period))
        self._fixed_period = period

    @property
    def trends(self):
        return self._trends

    @trends.setter
    def trends(self, trends: List[str]):
        assert all(((t.lower() in Trend.__members__) for t in trends)), f'Encountered a trend that is unsupported. Supported trend types include: {Trend.__members__.keys()}'
        self._trends = [Trend[t.lower()] for t in trends]

    def determine_train_window(self):
        """Pick a train window long enough to cover the slowest trend."""
        assert (self.trends is not None), 'cannot determine `train_window` without trends'
        if (Trend.monthly in self.trends):
            return '14w'
        elif (Trend.weekly in self.trends):
            return '4w'
        return '2w'

    def to_dict(self, _skipped_keys=None):
        """Serialize; trends are emitted by enum name rather than value."""
        _skipped_keys = (_skipped_keys if (_skipped_keys is not None) else set())
        config_dict = super().to_dict(_skipped_keys.union({'trends'}))
        if ('trends' not in _skipped_keys):
            config_dict['trends'] = [t.name for t in self.trends]
        return config_dict
# Needed by the restored @dataclass decorator below; harmless if the file's
# top-level imports already provide it (``field`` is already in use).
from dataclasses import dataclass


@dataclass
class CtcCriterionConfig(FairseqDataclass):
    """Options for the CTC criterion.

    FIX: restored the @dataclass decorator — without it the ``field(...)``
    defaults below are inert class attributes instead of dataclass fields.
    """
    zero_infinity: bool = field(default=False, metadata={'help': 'zero inf loss when source length <= target length'})
    sentence_avg: bool = II('optimization.sentence_avg')
    post_process: str = field(default='letter', metadata={'help': 'how to post process predictions into words. can be letter, wordpiece, BPE symbols, etc. See fairseq.data.data_utils.post_process() for full list of options'})
    # Optional WER computation via an external kenlm model.
    wer_kenlm_model: Optional[str] = field(default=None, metadata={'help': 'if this is provided, use kenlm to compute wer (along with other wer_* args)'})
    wer_lexicon: Optional[str] = field(default=None, metadata={'help': 'lexicon to use with wer_kenlm_model'})
    wer_lm_weight: float = field(default=2.0, metadata={'help': 'lm weight to use with wer_kenlm_model'})
    wer_word_score: float = field(default=(- 1.0), metadata={'help': 'lm word score to use with wer_kenlm_model'})
    wer_args: Optional[str] = field(default=None, metadata={'help': 'DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)'})
_task('translation_multi_simple_epoch')
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
def add_args(parser):
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', action=FileContentsAction)
parser.add_argument('--keep-inference-langtok', action='store_true', help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
    def __init__(self, args, langs, dicts, training):
        """Set up language pairs, sampling and the multilingual data manager.

        :param args: parsed task arguments
        :param langs: list of all languages involved
        :param dicts: mapping language -> dictionary
        :param training: True for training (uses args.lang_pairs); inference
            uses the single source->target pair instead
        """
        super().__init__(args)
        self.langs = langs
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        # Evaluate/model on the same pairs as training.
        self.eval_lang_pairs = self.lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.source_langs = [d.split('-')[0] for d in self.lang_pairs]
        self.target_langs = [d.split('-')[1] for d in self.lang_pairs]
        self.check_dicts(self.dicts, self.source_langs, self.target_langs)
        self.sampling_method = SamplingMethod.build_sampler(args, self)
        self.data_manager = MultilingualDatasetManager.setup_data_manager(args, self.lang_pairs, langs, dicts, self.sampling_method)
def check_dicts(self, dicts, source_langs, target_langs):
if ((self.args.source_dict is not None) or (self.args.target_dict is not None)):
return
src_dict = dicts[source_langs[0]]
tgt_dict = dicts[target_langs[0]]
for src_lang in source_langs:
assert (src_dict == dicts[src_lang]), 'Diffrent dictionary are specified for different source languages; '
for tgt_lang in target_langs:
assert (tgt_dict == dicts[tgt_lang]), 'Diffrent dictionary are specified for different target languages; '
def setup_task(cls, args, **kwargs):
(langs, dicts, training) = MultilingualDatasetManager.prepare(cls.load_dictionary, args, **kwargs)
return cls(args, langs, dicts, training)
    def has_sharded_data(self, split):
        """Return whether *split*'s data is sharded (delegates to the data manager)."""
        return self.data_manager.has_sharded_data(split)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load (or reload the next shard of) the dataset for *split*.

        When the split was already loaded and its data is sharded, only the
        next shard is loaded (or nothing, if no further shard is pending).
        """
        if (split in self.datasets):
            dataset = self.datasets[split]
            if self.has_sharded_data(split):
                if (self.args.virtual_epoch_size is not None):
                    if dataset.load_next_shard:
                        shard_epoch = dataset.shard_epoch
                    else:
                        # No next shard pending — keep the current dataset.
                        return
                else:
                    shard_epoch = epoch
        else:
            # First load: estimate which global pass this epoch corresponds to.
            shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch)
        logger.info(f'loading data for {split} epoch={epoch}/{shard_epoch}')
        logger.info(f'mem usage: {data_utils.get_mem_usage()}')
        # Free the old dataset before loading the replacement shard.
        if (split in self.datasets):
            del self.datasets[split]
            logger.info('old dataset deleted manually')
            logger.info(f'mem usage: {data_utils.get_mem_usage()}')
        self.datasets[split] = self.data_manager.load_dataset(split, self.training, epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs)
    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw source tokens in a LanguagePairDataset with language tokens.

        Raises NotImplementedError if decoding constraints are supplied.
        """
        if (constraints is not None):
            raise NotImplementedError('Constrained decoding with the multilingual_translation task is not supported')
        src_data = ListDataset(src_tokens, src_lengths)
        dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
        (src_langtok_spec, tgt_langtok_spec) = self.args.langtoks['main']
        if self.args.lang_tok_replacing_bos_eos:
            # Replace bos/eos with language tokens on both sides.
            dataset = self.data_manager.alter_dataset_langtok(dataset, src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec)
        else:
            # Otherwise only transform the source side per the langtok spec.
            dataset.src = self.data_manager.src_dataset_tranform_func(self.args.source_lang, self.args.target_lang, dataset=dataset.src, spec=src_langtok_spec)
        return dataset
def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
    """Build a sequence generator, stripping the target language token from output.

    FIX: forward the caller's ``seq_gen_cls`` to the parent implementation —
    previously it was hard-coded to ``None``, silently ignoring the argument.
    """
    if not getattr(args, 'keep_inference_langtok', False):
        _, tgt_langtok_spec = self.args.langtoks['main']
        if tgt_langtok_spec:
            tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
            extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
            # Strip the decoder language token when detokenizing hypotheses.
            extra_gen_cls_kwargs['symbols_to_strip_from_output'] = {tgt_lang_tok}
    return super().build_generator(models, args, seq_gen_cls=seq_gen_cls, extra_gen_cls_kwargs=extra_gen_cls_kwargs)
def build_model(self, args):
    """Delegate model construction to the parent task unchanged."""
    return super().build_model(args)
def valid_step(self, sample, model, criterion):
    """Run one validation step; returns (loss, sample_size, logging_output) from the parent."""
    return super().valid_step(sample, model, criterion)
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
    """Generate translations, injecting the target language token.

    Depending on args.lang_tok_replacing_bos_eos, the language token is
    supplied either as a per-sentence prefix or as the BOS token.
    """
    with torch.no_grad():
        (_, tgt_langtok_spec) = self.args.langtoks['main']
        if (not self.args.lang_tok_replacing_bos_eos):
            if ((prefix_tokens is None) and tgt_langtok_spec):
                # Force-decode the target language token first for every sentence.
                tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
                src_tokens = sample['net_input']['src_tokens']
                bsz = src_tokens.size(0)
                prefix_tokens = torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
            return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
        else:
            # Language token replaces BOS; fall back to EOS when no spec is set.
            return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=(self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec) if tgt_langtok_spec else self.target_dictionary.eos()))
def reduce_metrics(self, logging_outputs, criterion):
    """Aggregate logging outputs across workers via the parent implementation."""
    super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
    """Maximum (source, target) sequence lengths allowed by this task."""
    args = self.args
    return (args.max_source_positions, args.max_target_positions)
@property
def source_dictionary(self):
    """Dictionary of the first source language.

    FIX: restored @property — other methods in this file access
    ``self.source_dictionary`` as an attribute (e.g.
    ``self.source_dictionary.eos()`` in build_dataset_for_inference),
    which only works when this is a property.
    """
    return self.data_manager.get_source_dictionary(self.source_langs[0])
@property
def target_dictionary(self):
    """Dictionary of the first target language.

    FIX: restored @property — this file accesses
    ``self.target_dictionary.eos()`` as an attribute (see
    build_dataset_for_inference and inference_step), which only works
    when this is a property.
    """
    return self.data_manager.get_target_dictionary(self.target_langs[0])
def create_batch_sampler_func(self, max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=1, seed=1):
    """Return a closure that builds a batch sampler for (dataset, epoch).

    The closure orders indices under a fixed numpy seed, filters by size,
    and groups indices into batches; timings and memory usage are logged.
    """
    def construct_batch_sampler(dataset, epoch):
        # Recover the split name for logging (reverse lookup in self.datasets).
        splits = [s for (s, _) in self.datasets.items() if (self.datasets[s] == dataset)]
        split = (splits[0] if (len(splits) > 0) else None)
        if (epoch is not None):
            dataset.set_epoch(epoch)
        start_time = time.time()
        logger.info(f'start batch sampler: mem usage: {data_utils.get_mem_usage()}')
        with data_utils.numpy_seed(seed):
            # Deterministic ordering so every worker sees the same batches.
            indices = dataset.ordered_indices()
        logger.info(f'[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}')
        logger.info(f'mem usage: {data_utils.get_mem_usage()}')
        if (max_positions is not None):
            my_time = time.time()
            # Drop (or raise on) examples longer than max_positions.
            indices = self.filter_indices_by_size(indices, dataset, max_positions, ignore_invalid_inputs)
            logger.info(f'[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}')
            logger.info(f'mem usage: {data_utils.get_mem_usage()}')
        my_time = time.time()
        batch_sampler = dataset.batch_by_size(indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple)
        logger.info(f'[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}')
        logger.info(f'[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}')
        logger.info(f'mem usage: {data_utils.get_mem_usage()}')
        return batch_sampler
    return construct_batch_sampler
def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False):
    """Build an epoch batch iterator over `dataset`.

    RoundRobin sampling reuses the (cached) parent iterator; any other
    sampling method rebuilds the batch sampler every epoch via
    create_batch_sampler_func, so the iterator is NOT cached here.
    """
    assert isinstance(dataset, FairseqDataset)
    if (dataset in self.dataset_to_epoch_iter):
        # Cache hit (only populated on the RoundRobin path below).
        return self.dataset_to_epoch_iter[dataset]
    if (self.args.sampling_method == 'RoundRobin'):
        batch_iter = super().get_batch_iterator(dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions, ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, data_buffer_size=data_buffer_size, disable_iterator_cache=disable_iterator_cache)
        self.dataset_to_epoch_iter[dataset] = batch_iter
        return batch_iter
    construct_batch_sampler = self.create_batch_sampler_func(max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=required_batch_size_multiple, seed=seed)
    epoch_iter = iterators.EpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_sampler=construct_batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch)
    return epoch_iter
class ConvolutionBranch(nn.Module):
    """Channel projection + convolutional spatial gating branch.

    Projects input_size -> linear_units, applies an activation, runs a
    ConvolutionalSpatialGatingUnit (whose output has linear_units // 2
    channels, per the post-projection's input size), and projects back
    to input_size.
    """

    def __init__(self, input_size, linear_units=3072, kernel_size=31, activation=nn.GELU, gate_activation=nn.Identity, dropout=0.0, use_linear_after_conv=False):
        super().__init__()
        self.pre_channel_proj = nn.Linear(input_size, linear_units)
        self.post_channel_proj = nn.Linear(linear_units // 2, input_size)
        self.activation = activation()
        self.csgu = ConvolutionalSpatialGatingUnit(
            input_size=linear_units,
            kernel_size=kernel_size,
            dropout=dropout,
            use_linear_after_conv=use_linear_after_conv,
            activation=gate_activation,
        )

    def forward(self, x):
        """Project up, activate, gate spatially, project back down."""
        projected = self.activation(self.pre_channel_proj(x))
        gated = self.csgu(projected)
        return self.post_channel_proj(gated)
def conditional_bilinear_classifier(inputs1, inputs2, n_classes, probs, keep_prob, add_bias1=True, add_bias2=True):
    """Bilinear classifier conditioned on attachment probabilities (TF1 graph code).

    Scores every (position i, class, position j) triple bilinearly, then
    weights the scores by `probs` (hard one-hot if 2-D, soft otherwise).
    Returns (weighted_bilin, bilin).
    """
    input_shape = tf.shape(inputs1)
    batch_size = input_shape[0]
    bucket_size = input_shape[1]
    input_size = inputs1.get_shape().as_list()[(- 1)]
    # +1 accounts for the bias column concatenated below.
    input_shape_to_set = [tf.Dimension(None), tf.Dimension(None), (input_size + 1)]
    if (len(probs.get_shape().as_list()) == 2):
        # Hard (index) predictions: convert to one-hot over positions.
        probs = tf.to_float(tf.one_hot(tf.to_int64(probs), bucket_size, 1, 0))
    else:
        # Soft probabilities: do not backprop through them.
        probs = tf.stop_gradient(probs)
    if (keep_prob < 1):
        # Shared dropout mask across the bucket dimension.
        noise_shape = tf.stack([batch_size, 1, input_size])
        inputs1 = tf.nn.dropout(inputs1, keep_prob, noise_shape=noise_shape)
        inputs2 = tf.nn.dropout(inputs2, keep_prob, noise_shape=noise_shape)
    # Append a constant-1 bias feature to both inputs.
    inputs1 = tf.concat(axis=2, values=[inputs1, tf.ones(tf.stack([batch_size, bucket_size, 1]))])
    inputs1.set_shape(input_shape_to_set)
    inputs2 = tf.concat(axis=2, values=[inputs2, tf.ones(tf.stack([batch_size, bucket_size, 1]))])
    inputs2.set_shape(input_shape_to_set)
    bilin = bilinear(inputs1, inputs2, n_classes, add_bias1=add_bias1, add_bias2=add_bias2, initializer=tf.zeros_initializer())
    bilin = tf.reshape(bilin, [batch_size, bucket_size, n_classes, bucket_size])
    # Weight class scores by each position's attachment distribution.
    weighted_bilin = tf.squeeze(tf.matmul(bilin, tf.expand_dims(probs, 3)), (- 1))
    return (weighted_bilin, bilin)
class Classifier_Module(nn.Module):
def __init__(self, dilation_series, padding_series, num_classes):
super(Classifier_Module, self).__init__()
self.conv2d_list = nn.ModuleList()
for (dilation, padding) in zip(dilation_series, padding_series):
self.conv2d_list.append(nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
for m in self.conv2d_list:
m.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.conv2d_list[0](x)
for i in range((len(self.conv2d_list) - 1)):
out += self.conv2d_list[(i + 1)](x)
return out |
def run(seed):
    """Train a functional-variational-inference BNN on one UCI split.

    Pretrains a GP prior, then trains the BNN; returns (rmse, lld) at the
    final epoch, rescaled to the original target units. Relies on the
    module-level `args` namespace for hyperparameters.
    """
    tf.reset_default_graph()
    dataset = uci_woval(args.dataset, seed=seed)
    (train_x, test_x, train_y, test_y) = (dataset.x_train, dataset.x_test, dataset.y_train, dataset.y_test)
    std_y_train = dataset.std_y_train[0]
    (N, input_dim) = train_x.shape
    # Bounding box of all inputs, used by the uniform random generator below.
    lower_ap = np.minimum(np.min(train_x), np.min(test_x))
    upper_ap = np.maximum(np.max(train_x), np.max(test_x))
    (mean_x_train, std_x_train) = (np.mean(train_x, 0), np.std(train_x, 0))
    with tf.variable_scope('prior'):
        # Median-heuristic lengthscales; guard against degenerate zeros.
        ls = median_distance_local(train_x).astype('float32')
        ls[(abs(ls) < 1e-06)] = 1.0
        prior_kernel = gfs.kernels.RBF(input_dim=input_dim, name='rbf', lengthscales=ls, ARD=True)
    with tf.variable_scope('likelihood'):
        # Observation noise parameterized through softplus; init gives std 0.5.
        obs_log1p = tf.get_variable('obs_log1p', shape=[], initializer=tf.constant_initializer(np.log((np.exp(0.5) - 1.0))))
        obs_var = (tf.nn.softplus(obs_log1p) ** 2.0)
    def rand_generator(*arg):
        # Measurement points sampled either uniformly over the input box
        # or from a Gaussian matched to the training data moments.
        if (args.rand == 'uniform'):
            return tf.random_uniform(shape=[args.n_rand, input_dim], minval=lower_ap, maxval=upper_ap)
        elif (args.rand == 'normal'):
            return (mean_x_train + (std_x_train * tf.random_normal(shape=[args.n_rand, input_dim])))
        else:
            raise NotImplementedError
    layer_sizes = (([input_dim] + ([args.n_units] * args.n_hidden)) + [1])
    model = EntropyEstimationFVI(prior_kernel, get_posterior('bnn')(layer_sizes, logstd_init=(- 2.0)), rand_generator=rand_generator, obs_var=obs_var, input_dim=input_dim, n_rand=args.n_rand, injected_noise=args.injected_noise)
    model.build_prior_gp(init_var=0.1)
    update_op = tf.group(model.infer_latent, model.infer_likelihood)
    with tf.control_dependencies([update_op]):
        # Clamp the observation noise from below by both the GP variance
        # estimate and a fixed floor (std 0.05), after each update.
        train_op = tf.assign(obs_log1p, tf.maximum(tf.maximum(tf.to_float(tf.log((tf.exp((model.gp_var ** 0.5)) - 1.0))), obs_log1p), tf.log((tf.exp(0.05) - 1.0))))
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # --- Stage 1: pretrain the GP prior hyperparameters. ---
    gp_epochs = 5000
    for epoch in range(gp_epochs):
        feed_dict = {model.x_gp: train_x, model.y_gp: train_y, model.learning_rate_ph: 0.003}
        (_, loss, gp_var) = sess.run([model.infer_gp, model.gp_loss, model.gp_var], feed_dict=feed_dict)
        if ((epoch % args.print_interval) == 0):
            print('>>> Seed {:5d} >>> Pretrain GP Epoch {:5d}/{:5d}: Loss={:.5f} | Var={:.5f}'.format(seed, epoch, gp_epochs, loss, gp_var))
    # --- Stage 2: minibatch training of the BNN posterior. ---
    epoch_iters = max((N // args.batch_size), 1)
    for epoch in range(1, (args.epochs + 1)):
        indices = np.random.permutation(N)
        (train_x, train_y) = (train_x[indices], train_y[indices])
        for iter in range(epoch_iters):
            x_batch = train_x[(iter * args.batch_size):((iter + 1) * args.batch_size)]
            y_batch = train_y[(iter * args.batch_size):((iter + 1) * args.batch_size)]
            feed_dict = {model.x: x_batch, model.y: y_batch, model.learning_rate_ph: args.learning_rate, model.n_particles: args.train_samples}
            feed_dict.update(model.default_feed_dict())
            sess.run(train_op, feed_dict=feed_dict)
        if (((epoch % args.test_interval) == 0) or (epoch == args.epochs)):
            feed_dict = {model.x: test_x, model.y: test_y, model.n_particles: args.test_samples}
            (rmse, lld, ov) = sess.run([model.eval_rmse, model.eval_lld, obs_var], feed_dict=feed_dict)
            # Undo the target standardization for reporting.
            rmse = (rmse * std_y_train)
            lld = (lld - np.log(std_y_train))
            print('>>> Seed {:5d} >>> Epoch {:5d}/{:5d} | rmse={:.5f} | lld={:.5f} | obs_var={:.5f}'.format(seed, epoch, args.epochs, rmse, lld, ov))
            if (epoch == args.epochs):
                return (rmse, lld)
def affiliation_partition(Is=[(1, 1.5), (2, 5), (5, 6), (8, 9)], E_gt=[(1, 2.5), (2.5, 4.5), (4.5, 10)]):
    """Partition predicted intervals `Is` over the ground-truth zones `E_gt`.

    For each ground-truth zone, keeps only the predicted intervals that
    overlap it and clips them to the zone via `interval_intersection`.
    Returns a list (one entry per zone) of clipped intervals.

    FIX: the original computed `kept_index` but never applied it — every
    interval was passed to interval_intersection regardless of overlap.
    NOTE(review): the mutable default arguments look like example values;
    they are never mutated here, so they are kept for compatibility.
    """
    out = [None] * len(E_gt)
    for j in range(len(E_gt)):
        E_gt_j = E_gt[j]
        # An interval is discarded when it ends before the zone starts or
        # starts after the zone ends.
        discarded_idx_before = [(I[1] < E_gt_j[0]) for I in Is]
        discarded_idx_after = [(I[0] > E_gt_j[1]) for I in Is]
        kept_index = [not (a or b) for (a, b) in zip(discarded_idx_before, discarded_idx_after)]
        Is_j = [x for (x, keep) in zip(Is, kept_index) if keep]
        out[j] = [interval_intersection(I, E_gt[j]) for I in Is_j]
    return out
def clip_loss(similarity: tf.Tensor) -> tf.Tensor:
    """Symmetric CLIP loss: average of the contrastive loss computed over the
    similarity matrix and over its transpose."""
    loss_over_captions = contrastive_loss(similarity)
    loss_over_images = contrastive_loss(tf.transpose(similarity))
    return (loss_over_captions + loss_over_images) / 2.0
def get_proposed_method(selector):
    """Select 'causal-da' runs and score them with cross-validated model selection.

    NOTE(review): presumably returns a table keyed by CV-group columns plus
    target/metric/run-id metadata — confirm against VirtualValidation's API.
    """
    _added = _get_proposed(['causal-da'], selector)
    _cv = (cv_group + ['method'])
    # 'min_selector' picks the configuration with the smallest validation value.
    return VirtualValidation(_added).fit(_cv, [('min_selector', {'larger_is_better': False})])[(((_cv + ['target_c']) + ['test_metric']) + ['sacred_run_id'])]
def main():
    """Train the DRSN video-object-segmentation model on YouTube-VOS.

    Sets up logging/TensorBoard, builds the model and dataloader, runs the
    iteration loop with a single LR decay step, and periodically snapshots
    the model state dict.
    """
    args = get_arguments()
    start = time.time()
    logger = setup_logger()
    writer = SummaryWriter(args.model_save_path)
    logger.info(json.dumps(vars(args), indent=1))
    logger.info('Setting model...')
    model = DRSN()
    model.init(args.init_model_path, 'yvos_train')
    model.train()
    model.float()
    model = torch.nn.DataParallel(model.cuda())
    logger.info('Setting criterion...')
    criterion = Criterion2().cuda()
    os.makedirs(args.model_save_path, exist_ok=True)
    trainset = YVOSDataset(args.img_size)
    # Size the dataset so one DataLoader pass yields exactly max_iters batches.
    trainset.data_len = (args.max_iters * args.batch_size)
    trainloader = data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=32, pin_memory=True)
    learning_rate = args.learning_rate[0]
    optimizer = optim.Adam([{'params': filter((lambda p: p.requires_grad), model.parameters()), 'lr': learning_rate}], lr=learning_rate, weight_decay=args.weight_decay)
    for (i_iter, batch) in enumerate(trainloader):
        (ref_images, ref_masks, p_images, p_masks, q_images, q_masks, pre_masks, images, masks) = batch
        (ref_images, p_images, q_images, images) = (ref_images.float().cuda(), p_images.float().cuda(), q_images.float().cuda(), images.float().cuda())
        (ref_masks, p_masks, q_masks, pre_masks, masks) = (ref_masks.float().cuda(), p_masks.float().cuda(), q_masks.float().cuda(), pre_masks.float().cuda(), masks.long().cuda())
        # Concatenate each frame with its mask along the channel dimension.
        ref_imasks = torch.cat([ref_images, ref_masks], 1)
        p_imasks = torch.cat([p_images, p_masks], 1)
        q_imasks = torch.cat([q_images, q_masks], 1)
        n_imasks = torch.cat([images, pre_masks], 1)
        optimizer.zero_grad()
        preds = model(ref_imasks, p_imasks, q_imasks, n_imasks)
        loss = criterion(preds, masks)
        loss.backward()
        optimizer.step()
        loss = loss.data.cpu().numpy()
        # Single step LR decay at the configured iteration.
        if (i_iter == args.decayat):
            optimizer.param_groups[0]['lr'] = (learning_rate * 0.1)
        if ((i_iter % 200) == 0):
            writer.add_scalar('MaskTrack_LearningRate', optimizer.param_groups[0]['lr'], i_iter)
            writer.add_scalar('MaskTrack_Loss/TrainLoss', loss, i_iter)
            logger.info('Train iter {} of {} completed, loss = {}'.format(i_iter, args.max_iters, loss))
        if ((((i_iter + 1) % args.save_iters) == 0) or (i_iter >= (args.max_iters - 1))):
            snapshot_fn = osp.join(args.model_save_path, (('drsn_' + str((i_iter + 1))) + '.pth'))
            logger.info('Snapshot {} dumped...'.format(snapshot_fn))
            torch.save(model.state_dict(), snapshot_fn)
    end = time.time()
    (total_h, total_m) = sec2hm((end - start))
    logger.info('The whole training costs {}h {}m...'.format(total_h, total_m))
class TNEANetMPNodeI(object):
    """SWIG-generated proxy for SNAP's TNEANetMP node iterator.

    All methods delegate to the underlying C++ iterator via the _snap module;
    this wrapper is machine-generated and should not be edited by hand.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _snap.TNEANetMPNodeI_swiginit(self, _snap.new_TNEANetMPNodeI(*args))
    def Next(self):
        """Advance to the next node."""
        return _snap.TNEANetMPNodeI_Next(self)
    def __lt__(self, NodeI):
        return _snap.TNEANetMPNodeI___lt__(self, NodeI)
    def __eq__(self, NodeI):
        return _snap.TNEANetMPNodeI___eq__(self, NodeI)
    def GetId(self):
        """Return the current node's id."""
        return _snap.TNEANetMPNodeI_GetId(self)
    def GetDeg(self):
        """Return the node's total degree."""
        return _snap.TNEANetMPNodeI_GetDeg(self)
    def GetInDeg(self):
        """Return the node's in-degree."""
        return _snap.TNEANetMPNodeI_GetInDeg(self)
    def GetOutDeg(self):
        """Return the node's out-degree."""
        return _snap.TNEANetMPNodeI_GetOutDeg(self)
    def GetInNId(self, NodeN):
        """Return the id of the NodeN-th in-neighbor."""
        return _snap.TNEANetMPNodeI_GetInNId(self, NodeN)
    def GetOutNId(self, NodeN):
        """Return the id of the NodeN-th out-neighbor."""
        return _snap.TNEANetMPNodeI_GetOutNId(self, NodeN)
    def GetNbrNId(self, NodeN):
        """Return the id of the NodeN-th neighbor (in or out)."""
        return _snap.TNEANetMPNodeI_GetNbrNId(self, NodeN)
    def IsInNId(self, NId):
        """True if NId is an in-neighbor."""
        return _snap.TNEANetMPNodeI_IsInNId(self, NId)
    def IsOutNId(self, NId):
        """True if NId is an out-neighbor."""
        return _snap.TNEANetMPNodeI_IsOutNId(self, NId)
    def IsNbrNId(self, NId):
        """True if NId is a neighbor (in or out)."""
        return _snap.TNEANetMPNodeI_IsNbrNId(self, NId)
    __swig_destroy__ = _snap.delete_TNEANetMPNodeI
class MetricGroup():
    """Bundle of metric objects driven together.

    Each metric is instantiated from its kwargs spec via dnnlib's
    call-by-name helper; run/report calls fan out to every member.
    """

    def __init__(self, metric_kwarg_list):
        # Instantiate each metric from a spec dict naming its constructor.
        self.metrics = []
        for metric_kwargs in metric_kwarg_list:
            self.metrics.append(dnnlib.util.call_func_by_name(**metric_kwargs))

    def run(self, *args, **kwargs):
        """Evaluate every metric with the given arguments."""
        for metric in self.metrics:
            metric.run(*args, **kwargs)

    def get_result_str(self):
        """Space-joined result strings of all metrics."""
        parts = [metric.get_result_str() for metric in self.metrics]
        return ' '.join(parts)

    def update_autosummaries(self):
        """Push every metric's latest results into the autosummary logger."""
        for metric in self.metrics:
            metric.update_autosummaries()
def num_cpus_used_by_tokenizer(tokenizer) -> int:
    """Estimate how many CPU cores this tokenizer will occupy.

    Slow (pure-Python) tokenizers and fast tokenizers with parallelism
    disabled via TOKENIZERS_PARALLELISM use a single core; otherwise
    assume all but two cores, capped at 8.
    """
    if not getattr(tokenizer, 'is_fast', False):
        # Non-Rust tokenizers are single threaded.
        return 1
    if os.getenv('TOKENIZERS_PARALLELISM', 'true').lower() in _HF_TOKENIZER_OFF_VALUES:
        # Fast tokenizer, but HF parallelism explicitly switched off.
        return 1
    # Leave two cores for the rest of the process; never report less than 1.
    return min(max(1, logical_cpu_core_count() - 2), 8)
def register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register the bool(Ptr<Socket>, const Address&) CallbackImpl binding (pybindgen-generated)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is exposed to Python as __call__.
    cls.add_method('operator()', 'bool', [param('ns3::Ptr< ns3::Socket >', 'arg0'), param('ns3::Address const &', 'arg1')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class DataProcessor():
    """Loads MultiWOZ-style dialogues and exposes them as classification examples."""

    def __init__(self, path):
        # Map "<dialogue_idx><turn_idx>" -> {'text_a', 'text_b', 'label_list'}.
        self.data = self.load_data(path)

    def load_data(self, path):
        """Parse raw JSON dialogues into per-turn (history, system-response) records."""
        with open(path) as f:
            raw_dialogues = json.load(f)
        records = {}
        for dial in raw_dialogues:
            history = ''
            for turn_idx, turn in enumerate(dial['dialogue']):
                # Read but unused; kept so a missing 'domain' key still raises.
                turn_domain = turn['domain']
                records[dial['dialogue_idx'] + str(turn_idx)] = {
                    'text_a': history,  # dialog history BEFORE this turn
                    'text_b': turn['system_transcript'],
                    'label_list': [],  # left empty here; labels are attached later
                }
                history = (history + ' ' + turn['system_transcript'] + ' ' + turn['transcript']).strip()
        return records

    def get_labels(self):
        """Fixed MultiWOZ slot label inventory (30 domain-slot names)."""
        return ['attraction-area', 'attraction-name', 'attraction-type', 'hotel-area', 'hotel-book day', 'hotel-book people', 'hotel-book stay', 'hotel-internet', 'hotel-name', 'hotel-parking', 'hotel-pricerange', 'hotel-stars', 'hotel-type', 'restaurant-area', 'restaurant-book day', 'restaurant-book people', 'restaurant-book time', 'restaurant-food', 'restaurant-name', 'restaurant-pricerange', 'taxi-arriveby', 'taxi-departure', 'taxi-destination', 'taxi-leaveat', 'train-arriveby', 'train-book people', 'train-day', 'train-departure', 'train-destination', 'train-leaveat']

    def create_examples(self, dialogue_idx, turn_id, user_utters, turn_label):
        """Build one InputExample per candidate user utterance for a turn."""
        meta_info = self.data[dialogue_idx + str(turn_id)]
        examples = []
        for user_utter in user_utters:
            context = meta_info['text_a']
            response = meta_info['text_b'] + ' ' + user_utter
            # First element of each turn_label entry is the slot name.
            slot_labels = [label[0] for label in turn_label]
            examples.append(InputExample(text_a=context.strip(), text_b=response.strip(), label=slot_labels))
        return examples
class DoxyParameterItem(DoxyMember):
    """One documented parameter (name + description) parsed from Doxygen XML."""

    def _parse(self):
        # Idempotent: skip if this item was already parsed.
        if self._parsed:
            return
        super(DoxyParameterItem, self)._parse()
        names = []
        for nl in self._parse_data.parameternamelist:
            for pn in nl.parametername:
                # `description` here resolves to the module-level helper, not
                # the class attribute below — class-body names are not visible
                # inside method scopes.
                names.append(description(pn))
        # NOTE(review): only the first listed name is kept — confirm that
        # multi-name parameters are not expected here.
        self._data['name'] = names[0]
        pd = description(self._parse_data.get_parameterdescription())
        self._data['description'] = pd
    # Convenience read-only accessors over the parsed data dict.
    description = property((lambda self: self.data()['description']))
    name = property((lambda self: self.data()['name']))
def dataio_prep(hparams):
    """Create train/dev/test DynamicItemDatasets and the language label encoder.

    FIX: the two pipeline decorator applications were corrupted to bare
    ``.data_pipeline.takes(...)`` expressions (a syntax error); restored
    the standard SpeechBrain ``@sb.utils.data_pipeline`` decorator form.

    Returns (datasets dict keyed by split, fitted CategoricalEncoder).
    """
    language_encoder = sb.dataio.encoder.CategoricalEncoder()

    @sb.utils.data_pipeline.takes('wav')
    @sb.utils.data_pipeline.provides('sig')
    def audio_pipeline(wav):
        # Load audio and collapse (samples, 1) to a 1-D signal tensor.
        sig, _ = torchaudio.load(wav)
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    @sb.utils.data_pipeline.takes('language')
    @sb.utils.data_pipeline.provides('language', 'language_encoded')
    def label_pipeline(language):
        yield language
        language_encoded = language_encoder.encode_label_torch(language)
        yield language_encoded

    datasets = {}
    for dataset in ['train', 'dev', 'test']:
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=hparams[f'{dataset}_csv'],
            replacements={'data_root': hparams['data_folder']},
            dynamic_items=[audio_pipeline, label_pipeline],
            output_keys=['id', 'sig', 'language_encoded'],
        )
    # Fit the label encoder on the training languages (or reload a saved one).
    language_encoder_file = os.path.join(hparams['save_folder'], 'language_encoder.txt')
    language_encoder.load_or_create(path=language_encoder_file, from_didatasets=[datasets['train']], output_key='language')
    return (datasets, language_encoder)
# NOTE(review): the next line looks like the argument tuple of a stripped
# registration decorator (e.g. @register('parsing', 'ael', AELParams));
# kept as a harmless expression pending confirmation against upstream.
('parsing', 'ael', AELParams)
class AEL(ParsingAlgo):
    """AEL log parser: bucket lines by (token count, placeholder count),
    deduplicate into events per bucket, then merge near-identical events.

    FIX: ``Series.iteritems()`` (removed in pandas 2.0) replaced with
    ``Series.items()`` in tokenize().
    """

    def __init__(self, params: AELParams):
        self.rex = params.rex                      # regexes masked to '<*>' during preprocessing
        self.minEventCount = params.minEventCount  # bucket size threshold that triggers merging
        self.merge_percent = params.merge_percent  # max token-difference ratio for a merge
        self.df_log = None                         # DataFrame built from the input Series
        self.logname = None
        self.merged_events = []                    # final Event objects after reconcile()
        self.bins = defaultdict(dict)              # (token_count, para_count) -> bucket dict
        self.keep_para = params.keep_para

    def fit(self, loglines: pd.DataFrame):
        """AEL is unsupervised; fitting is a no-op."""
        pass

    def parse(self, loglines: pd.Series) -> pd.Series:
        """Parse raw log lines into event template strings, aligned with the input index."""
        self.logname = 'logname'
        self.load_data(loglines)
        self.tokenize()
        self.categorize()
        self.reconcile()
        templateL = [0] * self.df_log.shape[0]
        for event in self.merged_events:
            for logidx in event.logs:
                templateL[logidx] = event.Eventstr
        return pd.Series(templateL, index=loglines.index)

    def tokenize(self):
        """Bucket log lines by (token count, '<*>' placeholder count)."""
        # FIX: iteritems() was removed in pandas 2.0; items() is the drop-in.
        for (idx, log) in self.df_log['Content_'].items():
            para_count = 0
            tokens = log.split()
            for token in tokens:
                if (token == '<*>'):
                    para_count += 1
            if ('Logs' not in self.bins[(len(tokens), para_count)]):
                self.bins[(len(tokens), para_count)]['Logs'] = [idx]
            else:
                self.bins[(len(tokens), para_count)]['Logs'].append(idx)

    def categorize(self):
        """Within each bucket, group identical lines into Event objects."""
        for key in self.bins:
            abin = self.bins[key]
            abin['Events'] = []
            for logidx in abin['Logs']:
                log = self.df_log['Content_'].loc[logidx]
                matched = False
                for event in abin['Events']:
                    if (log == event.Eventstr):
                        matched = True
                        event.logs.append(logidx)
                        break
                if (not matched):
                    abin['Events'].append(Event(logidx, log))

    def reconcile(self):
        """Merge near-identical events in large buckets; keep small buckets as-is."""
        for key in self.bins:
            abin = self.bins[key]
            if (len(abin['Events']) > self.minEventCount):
                tobeMerged = []
                # Greedy grouping: each unmerged event seeds a group and
                # absorbs every later event within merge_percent difference.
                for e1 in abin['Events']:
                    if e1.merged:
                        continue
                    e1.merged = True
                    tobeMerged.append([e1])
                    for e2 in abin['Events']:
                        if e2.merged:
                            continue
                        if self.has_diff(e1.EventToken, e2.EventToken):
                            tobeMerged[-1].append(e2)
                            e2.merged = True
                for Es in tobeMerged:
                    merged_event = reduce(self.merge_event, Es)
                    merged_event.refresh_id()
                    self.merged_events.append(merged_event)
            else:
                for e in abin['Events']:
                    self.merged_events.append(e)

    def merge_event(self, e1, e2):
        """Fold e2 into e1: differing token positions become '<*>' wildcards."""
        for pos in range(len(e1.EventToken)):
            if (e1.EventToken[pos] != e2.EventToken[pos]):
                e1.EventToken[pos] = '<*>'
        e1.logs.extend(e2.logs)
        e1.Eventstr = ' '.join(e1.EventToken)
        return e1

    def has_diff(self, tokens1: list, tokens2: list):
        """True when the token difference ratio is in (0, merge_percent]."""
        diff = 0
        for idx in range(len(tokens1)):
            if (tokens1[idx] != tokens2[idx]):
                diff += 1
        return 0 < (diff * 1.0) / len(tokens1) <= self.merge_percent

    def load_data(self, loglines: pd.Series):
        """Build self.df_log with a preprocessed 'Content_' column."""
        def preprocess(log):
            # Mask configured variable patterns with the '<*>' placeholder.
            if self.rex:
                for currentRex in self.rex:
                    log = re.sub(currentRex, '<*>', log)
            return log
        self.df_log = pd.DataFrame(loglines)
        self.df_log['Content_'] = self.df_log[loglines.name].map(preprocess)
class DynamicalSystem_affine(SchemeMorphism_polynomial_affine_space, DynamicalSystem):
    """A dynamical system defined by polynomials/rational functions on affine space.

    The private classcall dispatches to the field / finite-field subclasses
    depending on the base ring, and normalizes the input (a morphism, a list
    of polynomials, or a single polynomial) into a common parent.
    """

    def __classcall_private__(cls, morphism_or_polys, domain=None):
        """Normalize input and dispatch to the appropriate subclass."""
        if isinstance(morphism_or_polys, SchemeMorphism_polynomial):
            # Case 1: an existing morphism — reuse its polys and domain.
            morphism = morphism_or_polys
            R = morphism.base_ring()
            polys = list(morphism)
            domain = morphism.domain()
            if ((not is_AffineSpace(domain)) and (not isinstance(domain, AlgebraicScheme_subscheme_affine))):
                raise ValueError('"domain" must be an affine scheme')
            if (domain != morphism_or_polys.codomain()):
                raise ValueError('domain and codomain do not agree')
            if (R not in Fields()):
                return typecall(cls, polys, domain)
            if isinstance(R, FiniteField):
                return DynamicalSystem_affine_finite_field(polys, domain)
            return DynamicalSystem_affine_field(polys, domain)
        elif isinstance(morphism_or_polys, (list, tuple)):
            polys = list(morphism_or_polys)
        else:
            polys = [morphism_or_polys]
        # Case 2: raw polynomials — coerce them into one common parent.
        PR = get_coercion_model().common_parent(*polys)
        fraction_field = any((is_FractionField(poly.parent()) for poly in polys))
        if fraction_field:
            K = PR.base_ring().fraction_field()
            PR = PR.ring().change_ring(K).fraction_field()
            polys = [PR(poly) for poly in polys]
        else:
            quotient_ring = any((is_QuotientRing(poly.parent()) for poly in polys))
            if quotient_ring:
                # Work with lifts so the polynomials live in the cover ring.
                polys = [PR(poly).lift() for poly in polys]
            else:
                polys = [PR(poly) for poly in polys]
        if (domain is None):
            if isinstance(PR, sage.rings.abc.SymbolicRing):
                raise TypeError('symbolic ring cannot be the base ring')
            if fraction_field:
                PR = PR.ring()
            domain = AffineSpace(PR)
        else:
            # Re-coerce into the coordinate ring of the supplied domain.
            PR = domain.ambient_space().coordinate_ring()
            try:
                if fraction_field:
                    PR = PR.fraction_field()
                polys = [PR(poly) for poly in polys]
            except TypeError:
                raise TypeError('coefficients of polynomial not in {}'.format(domain.base_ring()))
        if (len(polys) != domain.ambient_space().coordinate_ring().ngens()):
            raise ValueError(f'number of polys does not match dimension of {domain}')
        R = domain.base_ring()
        if isinstance(R, sage.rings.abc.SymbolicRing):
            raise TypeError('symbolic ring cannot be the base ring')
        if ((not is_AffineSpace(domain)) and (not isinstance(domain, AlgebraicScheme_subscheme_affine))):
            raise ValueError('"domain" must be an affine scheme')
        if (R not in Fields()):
            return typecall(cls, polys, domain)
        if isinstance(R, FiniteField):
            return DynamicalSystem_affine_finite_field(polys, domain)
        return DynamicalSystem_affine_field(polys, domain)

    def __init__(self, polys_or_rat_fncts, domain):
        """Store the defining maps; record whether the base is a prime finite field."""
        L = polys_or_rat_fncts
        R = L[0].base_ring()
        self._is_prime_finite_field = (isinstance(R, FiniteField) and R.is_prime_field())
        DynamicalSystem.__init__(self, L, domain)

    def __copy__(self):
        """Return a fresh dynamical system on the same polys and domain."""
        return DynamicalSystem_affine(self._polys, self.domain())

    def homogenize(self, n):
        """Homogenize with respect to coordinate `n`, returning a projective system."""
        F = self.as_scheme_morphism().homogenize(n)
        return F.as_dynamical_system()

    def dynatomic_polynomial(self, period):
        """Return the dynatomic polynomial of the given period (dimension 1 only)."""
        from sage.schemes.affine.affine_space import is_AffineSpace
        if (not is_AffineSpace(self.domain())):
            raise NotImplementedError('not implemented for subschemes')
        if (self.domain().dimension_relative() > 1):
            raise TypeError('does not make sense in dimension >1')
        # Compute projectively, then dehomogenize back to the affine chart.
        G = self.homogenize(1)
        F = G.dynatomic_polynomial(period)
        T = G.domain().coordinate_ring()
        S = self.domain().coordinate_ring()
        if isinstance(F.parent(), sage.rings.abc.SymbolicRing):
            from sage.symbolic.ring import var
            u = var(self.domain().coordinate_ring().variable_name())
            return F.subs({F.variables()[0]: u, F.variables()[1]: 1})
        elif (T(F.denominator()).degree() == 0):
            # Denominator is a constant: result is polynomial.
            R = F.parent()
            phi = R.hom([S.gen(0), 1], S)
            return phi(F)
        else:
            R = F.numerator().parent()
            phi = R.hom([S.gen(0), 1], S)
            return (phi(F.numerator()) / phi(F.denominator()))

    def nth_iterate_map(self, n):
        """Return the map f^n, computed by repeated squaring on binary digits of n."""
        F = list(self._polys)
        R = F[0].parent()
        Coord_ring = self.codomain().ambient_space().coordinate_ring()
        D = Integer(n).digits(2)
        PHI = list(Coord_ring.gens())
        for i in range(len(D)):
            for k in range(D[i]):
                PHI = [poly(F) for poly in PHI]
            if (i != (len(D) - 1)):
                F = [R(poly(F)) for poly in F]
        return DynamicalSystem_affine(PHI, domain=self.domain())

    def nth_iterate(self, P, n):
        """Return f^n(P) by repeated application."""
        n = int(n)
        if (n == 0):
            return P
        Q = P
        for i in range(n):
            Q = self(Q)
        return Q

    def orbit(self, P, n):
        """Return [f^a(P), ..., f^b(P)] where n is b or the pair (a, b)."""
        Q = P
        if isinstance(n, (list, tuple)):
            bounds = list(n)
        else:
            bounds = [0, n]
        # Advance to the start of the requested window.
        for i in range(1, (bounds[0] + 1)):
            Q = self(Q)
        orb = [Q]
        for i in range((bounds[0] + 1), (bounds[1] + 1)):
            Q = self(Q)
            orb.append(Q)
        return orb

    def multiplier(self, P, n, check=True):
        """Return the multiplier matrix of f at a periodic point P of period n.

        The multiplier is the product of Jacobians along the orbit of P.
        """
        if check:
            if (self.nth_iterate(P, n) != P):
                raise ValueError(('%s is not periodic of period %s' % (P, n)))
            if (n < 1):
                raise ValueError('period must be a positive integer')
        N = self.domain().ambient_space().dimension_relative()
        l = identity_matrix(FractionField(self.codomain().base_ring()), N, N)
        Q = P
        J = self.jacobian()
        for i in range(0, n):
            R = self(Q)
            # Chain rule: multiply by the Jacobian evaluated at each orbit point.
            l = (J(tuple(Q)) * l)
            Q = R
        return l

    def conjugate(self, M):
        """Return the conjugate system M^{-1} f M, computed projectively."""
        d = self.codomain().ngens()
        f = self.homogenize(d).conjugate(M)
        return f.dehomogenize(d)

    def degree(self):
        """Return the degree of the defining polynomials."""
        return self.as_scheme_morphism().degree()
def load_table(run, desired_table_name):
    """Fetch the first run file whose name contains `desired_table_name` and parse it as JSON.

    Raises IndexError when no file name matches (as before).

    FIX: the payload was accumulated with quadratic per-line string
    concatenation and the urlopen handle was never closed; read it in one
    shot inside a context manager instead.
    """
    desired_file = [p for p in run.files() if (desired_table_name in p.name)][0]
    with urllib.request.urlopen(desired_file.direct_url) as resp:
        data = resp.read().decode('utf-8')
    return json.loads(data)
class TraditionalLexer(Lexer):
    """Single-state regex lexer compiled from a list of TerminalDefs.

    FIX: restored the @property decorator on ``mres`` — ``match()`` and
    ``next_token()`` iterate ``self.mres`` as an attribute, which only
    works when it is a property (it was apparently stripped in this dump).
    """

    def __init__(self, conf):
        terminals = list(conf.tokens)
        assert all((isinstance(t, TerminalDef) for t in terminals)), terminals
        self.re = conf.re_module
        if (not conf.skip_validation):
            # Sanity-check every terminal: it must compile and be non-zero-width.
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError(('Cannot compile token %s: %s' % (t.name, t.pattern)))
                if (t.pattern.min_width == 0):
                    raise LexError(('Lexer does not allow zero-width terminals. (%s: %s)' % (t.name, t.pattern)))
        assert (set(conf.ignore) <= {t.name for t in terminals})
        # Terminals whose pattern can match a newline (for line counting).
        self.newline_types = frozenset((t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())))
        self.ignore_types = frozenset(conf.ignore)
        # Highest priority first; ties broken by widest pattern, then name.
        terminals.sort(key=(lambda x: ((- x.priority), (- x.pattern.max_width), (- len(x.pattern.value)), x.name)))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes
        self._mres = None  # compiled lazily by the `mres` property

    def _build(self):
        """Compile the terminal regexes and wire up user callbacks."""
        (terminals, self.callback) = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
        assert all(self.callback.values())
        for (type_, f) in self.user_callbacks.items():
            if (type_ in self.callback):
                # Chain the user callback after the internally-created one.
                self.callback[type_] = CallChain(self.callback[type_], f, (lambda t: (t.type == type_)))
            else:
                self.callback[type_] = f
        self._mres = build_mres(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def mres(self):
        """Compiled regex groups, built lazily on first access."""
        if (self._mres is None):
            self._build()
        return self._mres

    def match(self, text, pos):
        """Try each compiled regex at `pos`; return (value, type) or None."""
        for (mre, type_from_index) in self.mres:
            m = mre.match(text, pos)
            if m:
                return (m.group(0), type_from_index[m.lastindex])

    def lex(self, state, _parser_state):
        """Yield tokens until next_token signals end-of-input via EOFError."""
        with suppress(EOFError):
            while True:
                (yield self.next_token(state))

    def next_token(self, lex_state):
        """Scan forward from the current position and return the next non-ignored token."""
        line_ctr = lex_state.line_ctr
        while (line_ctr.char_pos < len(lex_state.text)):
            res = self.match(lex_state.text, line_ctr.char_pos)
            if (not res):
                # No terminal matched: report what WOULD have been allowed.
                allowed = ({v for (m, tfi) in self.mres for v in tfi.values()} - self.ignore_types)
                if (not allowed):
                    allowed = {'<END-OF-FILE>'}
                raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, token_history=(lex_state.last_token and [lex_state.last_token]))
            (value, type_) = res
            if (type_ not in self.ignore_types):
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, (type_ in self.newline_types))
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if (t.type in self.callback):
                    t = self.callback[t.type](t)
                    if (not isinstance(t, Token)):
                        raise ValueError(('Callbacks must return a token (returned %r)' % t))
                lex_state.last_token = t
                return t
            else:
                # Ignored terminal: still run its callback (e.g. comment capture)
                # and advance, but do not emit a token.
                if (type_ in self.callback):
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    self.callback[type_](t2)
                line_ctr.feed(value, (type_ in self.newline_types))
        raise EOFError(self)
class ModelArguments():
    """Arguments controlling which model/config/tokenizer to load for fine-tuning.

    NOTE(review): the fields below use dataclasses.field(...), which only
    takes effect under a @dataclass decorator — the decorator appears to
    have been stripped from this dump (as with other decorators in this
    file); confirm and restore it upstream before use with HfArgumentParser.
    """
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
    resize_position_embeddings: Optional[bool] = field(default=None, metadata={'help': "Whether to automatically resize the position embeddings if `max_source_length` exceeds the model's position embeddings."})
def visualization_experiments(num_experiments: Optional[int]=None) -> None:
    """Run influence-visualization for every HANS heuristic, then one MNLI-2 eval.

    `num_experiments` defaults to the module-level NUM_VISUALIZATION_EXPERIMENTS.
    """
    print('RUNNING `visualization_experiments`')
    if (num_experiments is None):
        num_experiments = NUM_VISUALIZATION_EXPERIMENTS
    # One HANS-on-HANS run per evaluation heuristic.
    for heuristic in hans.DEFAULT_HANS_EVAL_HEURISTICS:
        visualization.main(train_task_name='hans', eval_task_name='hans', num_eval_to_collect=num_experiments, use_parallel=USE_PARALLEL, hans_heuristic=heuristic, trained_on_task_name='hans')
    # Plus a single cross-task evaluation on MNLI-2.
    visualization.main(train_task_name='hans', eval_task_name='mnli-2', num_eval_to_collect=num_experiments, use_parallel=USE_PARALLEL, hans_heuristic=None, trained_on_task_name='hans')
def _try_run(model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False):
batch_size = initial_batch_size
results = dict()
error_str = 'Unknown'
while batch_size:
try:
torch.cuda.empty_cache()
bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
results = bench.run()
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running benchmark.')
if (not check_batch_size_retry(error_str)):
_logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.')
break
if no_batch_size_retry:
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['error'] = error_str
return results |
def test_sdca_hinge_multiclass(train_data):
    """Multiclass SDCA with hinge loss reaches ~0.933 training accuracy."""
    features, labels = train_data
    model = SDCAClassifier(alpha=0.01, max_iter=100, loss='hinge', random_state=0)
    model.fit(features, labels)
    np.testing.assert_almost_equal(model.score(features, labels), 0.933, 3)
class RepPAN(nn.Module):
    """YOLOv6-style Rep-PAN neck.

    Fuses three backbone feature maps top-down (FPN branch: Rep_p4, Rep_p3)
    and then bottom-up (PAN branch: Rep_n3, Rep_n4), returning three pyramid
    outputs. ``depth_mul`` scales the per-stage repeat counts; ``width_mul``
    scales every channel count.
    """

    def __init__(self, subtype='yolov6_s', in_channels=[256, 512, 1024], mid_channels=[128, 128, 256], out_channels=[128, 256, 512], layers=[12, 12, 12, 12], depth_mul=1.0, width_mul=1.0):
        super().__init__()
        self.subtype = subtype
        assert (in_channels is not None)
        assert (layers is not None)
        # Scale repeat counts (keeping at least one block) and channel widths.
        layers = [max(round(n * depth_mul), 1) for n in layers]
        in_channels = [int(c * width_mul) for c in in_channels]
        out_channels = [int(c * width_mul) for c in out_channels]
        mid_channels = [int(c * width_mul) for c in mid_channels]
        # --- Top-down (FPN) path ---
        self.reduce_layer0 = SimConv(in_channels=in_channels[2], out_channels=mid_channels[2], kernel_size=1, stride=1)
        self.upsample0 = Transpose(in_channels=mid_channels[2], out_channels=mid_channels[2])
        self.Rep_p4 = RepBlock(in_channels=(in_channels[1] + mid_channels[2]), out_channels=mid_channels[2], n=layers[0])
        self.reduce_layer1 = SimConv(in_channels=mid_channels[2], out_channels=mid_channels[1], kernel_size=1, stride=1)
        self.upsample1 = Transpose(in_channels=mid_channels[1], out_channels=mid_channels[1])
        self.Rep_p3 = RepBlock(in_channels=(in_channels[0] + mid_channels[1]), out_channels=out_channels[0], n=layers[1])
        # --- Bottom-up (PAN) path ---
        self.downsample2 = SimConv(in_channels=out_channels[0], out_channels=mid_channels[0], kernel_size=3, stride=2)
        self.Rep_n3 = RepBlock(in_channels=(mid_channels[1] + mid_channels[0]), out_channels=out_channels[1], n=layers[2])
        self.downsample1 = SimConv(in_channels=out_channels[1], out_channels=mid_channels[2], kernel_size=3, stride=2)
        self.Rep_n4 = RepBlock(in_channels=(mid_channels[2] + mid_channels[2]), out_channels=out_channels[2], n=layers[3])

    def forward(self, input):
        """``input`` is a (finest, middle, coarsest) triple of feature maps;
        returns ``[pan_out2, pan_out1, pan_out0]`` (finest first)."""
        (fine, middle, coarse) = input
        # Top-down: reduce + upsample the coarsest map, fuse with the middle one.
        lateral_coarse = self.reduce_layer0(coarse)
        fused_mid = self.Rep_p4(torch.cat([self.upsample0(lateral_coarse), middle], 1))
        # Reduce + upsample again, fuse with the finest map.
        lateral_mid = self.reduce_layer1(fused_mid)
        pan_out2 = self.Rep_p3(torch.cat([self.upsample1(lateral_mid), fine], 1))
        # Bottom-up: downsample and fuse with the stored laterals.
        pan_out1 = self.Rep_n3(torch.cat([self.downsample2(pan_out2), lateral_mid], 1))
        pan_out0 = self.Rep_n4(torch.cat([self.downsample1(pan_out1), lateral_coarse], 1))
        return [pan_out2, pan_out1, pan_out0]
def main(args):
    """Download the TensorFlow Inception parameters, copy them into a Chainer
    model, and serialize the result to ``args.outfile`` as HDF5."""
    target_path = args.outfile
    download_tf_params()
    net = Inception()
    set_tf_params(net)
    print('Saving ', target_path, '...')
    serializers.save_hdf5(target_path, net)
class FedAvgM_Selection(PrivilegedAggregationFunction):
    """FedAvg with server-side momentum (FedAvgM).

    Round 0 and non-trainable tensors use a plain collaborator-weighted
    average; thereafter, tensors named ``*weight`` / ``*bias`` are updated
    with a momentum "weight speed" buffer persisted in the tensor DB:
    ``v <- momentum * v + avg_delta``, ``w <- w_prev - lr * v``.
    """

    @staticmethod
    def _weighted_average(local_tensors):
        # Collaborator-weighted mean of the submitted tensors.
        tensor_values = [t.tensor for t in local_tensors]
        weight_values = [t.weight for t in local_tensors]
        return np.average(tensor_values, weights=weight_values, axis=0)

    @staticmethod
    def _ensure_weight_speed(tensor_db, tensor_name, template):
        # Seed the momentum buffer with zeros the first time this tensor
        # is aggregated; later rounds keep the stored value.
        if (tensor_name not in tensor_db.search(tags=('weight_speeds',))['tensor_name']):
            tensor_db.store(tensor_name=tensor_name, tags=('weight_speeds',), nparray=np.zeros_like(template))

    def call(self, local_tensors, tensor_db, tensor_name, fl_round, tags):
        """Aggregate ``local_tensors`` for ``tensor_name`` at round ``fl_round``.

        Returns the new global tensor value as a numpy array. Side effects:
        persists the momentum hyper-parameters and the per-tensor
        ``weight_speeds`` buffer into ``tensor_db``.
        """
        # overwrite=False keeps any previously stored hyper-parameter values.
        tensor_db.store(tensor_name='momentum', nparray=0.9, overwrite=False)
        tensor_db.store(tensor_name='aggregator_lr', nparray=1.0, overwrite=False)
        if (fl_round == 0):
            # First round: no history yet, so just average and seed the buffer.
            new_tensor_weight = self._weighted_average(local_tensors)
            self._ensure_weight_speed(tensor_db, tensor_name, local_tensors[0].tensor)
            return new_tensor_weight
        elif (tensor_name.endswith('weight') or tensor_name.endswith('bias')):
            # Momentum update applies only to trainable parameters; it needs
            # the previous round's aggregated value as the baseline.
            previous_tensor_value = None
            for (_, record) in tensor_db.iterrows():
                if ((record['round'] == fl_round) and (record['tensor_name'] == tensor_name) and (record['tags'] == ('aggregated',))):
                    previous_tensor_value = record['nparray']
                    break
            if (previous_tensor_value is None):
                # Baseline missing — log and fall back to plain averaging.
                logger.warning('Error in fedAvgM: previous_tensor_value is None')
                logger.warning(('Tensor: ' + tensor_name))
                new_tensor_weight = self._weighted_average(local_tensors)
                self._ensure_weight_speed(tensor_db, tensor_name, local_tensors[0].tensor)
                return new_tensor_weight
            else:
                # Per-collaborator deltas from the previous global value.
                deltas = [(previous_tensor_value - t.tensor) for t in local_tensors]
                weight_values = [t.weight for t in local_tensors]
                average_deltas = np.average(deltas, weights=weight_values, axis=0)
                tensor_weight_speed = tensor_db.retrieve(tensor_name=tensor_name, tags=('weight_speeds',))
                momentum = float(tensor_db.retrieve(tensor_name='momentum'))
                aggregator_lr = float(tensor_db.retrieve(tensor_name='aggregator_lr'))
                # v_{t+1} = m * v_t + avg_delta ;  w_{t+1} = w_t - lr * v_{t+1}
                new_tensor_weight_speed = ((momentum * tensor_weight_speed) + average_deltas)
                tensor_db.store(tensor_name=tensor_name, tags=('weight_speeds',), nparray=new_tensor_weight_speed)
                return (previous_tensor_value - (aggregator_lr * new_tensor_weight_speed))
        else:
            # Non-trainable tensors (e.g. metrics, buffers): plain average.
            return self._weighted_average(local_tensors)
def make_nested_sdfg():
    """Build the nested SDFG for the volume-propagation test.

    The state machine reads a loop bound from the 'IN_bound' stream, then
    runs a guarded for-loop that, on each iteration, pushes ``IN_a[0]``
    onto 'OUT_stream'.
    """
    nested = dace.SDFG('vol_propagation_nested')
    state_assign = nested.add_state('assign')
    state_guard = nested.add_state('guard')
    state_body = nested.add_state('for')
    state_end = nested.add_state('endfor')
    # Explicit for-loop skeleton: init -> guard -> {body, end}, body -> guard.
    nested.add_edge(state_assign, state_guard, InterstateEdge(assignments={'i': '0'}))
    nested.add_edge(state_guard, state_body, InterstateEdge(condition=CodeProperty.from_string('i < loop_bound', language=Language.Python)))
    nested.add_edge(state_body, state_guard, InterstateEdge(assignments={'i': 'i+1'}))
    nested.add_edge(state_guard, state_end, InterstateEdge(condition=CodeProperty.from_string('not (i < loop_bound)', language=Language.Python)))
    # 'assign' state: pop the loop bound from the stream into a register scalar.
    stream_bound = state_assign.add_stream('IN_bound', dace.int32, storage=StorageType.FPGA_Local)
    scalar_bound = state_assign.add_scalar('loop_bound', dace.int32, transient=True, storage=StorageType.FPGA_Registers)
    state_assign.add_memlet_path(stream_bound, scalar_bound, memlet=Memlet.simple(scalar_bound, '0'))
    # Loop body: read IN_a and emit its first element to the output stream.
    array_in = state_body.add_array('IN_a', [N], dace.int32, storage=StorageType.FPGA_Global)
    stream_out = state_body.add_stream('OUT_stream', dace.int32, storage=StorageType.FPGA_Local)
    compute = state_body.add_tasklet('compute', {'_IN_a'}, {'_OUT_stream'}, '_OUT_stream = _IN_a[0]')
    state_body.add_memlet_path(array_in, compute, dst_conn='_IN_a', memlet=Memlet.simple(array_in, '0:N'))
    state_body.add_memlet_path(compute, stream_out, src_conn='_OUT_stream', memlet=Memlet.simple(stream_out, '0'))
    return nested
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.