code
stringlengths
101
5.91M
# Export a Hugging Face PyTorch model to ONNX (transformers.onnx export path).
# Requires torch >= 1.8 (dict-input ONNX export support). Applies any
# config-level value overrides, builds dummy inputs from the tokenizer,
# verifies the model's forward signature matches the config, then calls
# torch.onnx.export under torch.no_grad(). The `<= 1.10.99` branch passes the
# legacy use_external_data_format / enable_onnx_checker arguments, which were
# removed from torch.onnx.export in later releases.
# Returns (matched_inputs, onnx_outputs): the input names actually fed to the
# model and the ONNX graph's output names.
def export(tokenizer: PreTrainedTokenizer, model: PreTrainedModel, config: OnnxConfig, opset: int, output: Path) -> Tuple[(List[str], List[str])]: if (not is_torch_available()): raise ImportError('Cannot convert because PyTorch is not installed. Please install torch first.') import torch from torch.onnx import export from ..file_utils import torch_version if (not is_torch_onnx_dict_inputs_support_available()): raise AssertionError(f'Unsupported PyTorch version, minimum required is 1.8.0, got: {torch_version}') logger.info(f'Using framework PyTorch: {torch.__version__}') with torch.no_grad(): model.config.return_dict = True model.eval() if (config.values_override is not None): logger.info(f'Overriding {len(config.values_override)} configuration item(s)') for (override_config_key, override_config_value) in config.values_override.items(): logger.info(f' - {override_config_key} -> {override_config_value}') setattr(model.config, override_config_key, override_config_value) model_inputs = config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH) (inputs_match, matched_inputs) = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) if (not inputs_match): raise ValueError("Model and config inputs doesn't match") config.patch_ops() if (parse(torch.__version__) <= parse('1.10.99')): export(model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes={name: axes for (name, axes) in chain(config.inputs.items(), config.outputs.items())}, do_constant_folding=True, use_external_data_format=config.use_external_data_format(model.num_parameters()), enable_onnx_checker=True, opset_version=opset) else: export(model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes={name: axes for (name, axes) in chain(config.inputs.items(), config.outputs.items())}, do_constant_folding=True, 
opset_version=opset) config.restore_ops() return (matched_inputs, onnx_outputs)
def fix_mismatch_jason(slot, value):
    """Normalize a noisy dialogue-state (slot, value) annotation.

    Maps known-bad or abbreviated values (typos, truncations, category
    confusions) to their canonical forms, and returns the corrected
    (slot, value) pair. Values with no known correction pass through.
    """
    # Values of the 'type' slot that are plain annotation noise.
    garbage_type_values = ('nigh', 'moderate -ly priced', 'bed and breakfast',
                           'centre', 'venetian', 'intern', 'a cheap -er hotel',
                           'gastropub', 'la raza', 'galleria', 'gallery',
                           'science', 'm')
    if ((slot == 'type' and value in garbage_type_values)
            or (slot == 'internet' and value == '4')
            or (slot == 'pricerange' and value == '2')
            or ('area' in slot and value == 'moderate')
            or ('day' in slot and value == 't')):
        value = 'none'
    elif slot == 'type' and value in ('hotel with free parking and free wifi', '4', '3 star hotel'):
        value = 'hotel'
    elif slot == 'star' and value == '3 star hotel':
        value = '3'
    elif 'area' in slot:
        # Expand truncated compass directions.
        value = {'no': 'north', 'we': 'west', 'cent': 'centre'}.get(value, value)
    elif 'day' in slot:
        value = {'we': 'wednesday', 'no': 'none'}.get(value, value)
    elif 'price' in slot and value == 'ch':
        value = 'cheap'
    elif 'internet' in slot and value == 'free':
        value = 'yes'
    # Final sweep: these are not valid areas regardless of earlier fixes.
    if slot == 'area' and value in ('stansted airport', 'cambridge',
                                    'silver street', 'norwich', 'ely',
                                    'museum', 'same area as hotel'):
        value = 'none'
    return (slot, value)
# NOTE(review): the leading '_pydub_effect' looks like a truncated decorator
# (presumably @register_pydub_effect) -- confirm against the original pydub
# effects module.
# First-order IIR high-pass filter over a pydub AudioSegment: per channel,
# applies y[i] = alpha * (y[i-1] + x[i] - x[i-1]) with alpha = RC / (RC + dt),
# RC = 1 / (2*pi*cutoff), clamping each output sample to the range allowed by
# the segment's sample width, and spawns a new segment from the filtered
# samples. Channel samples are interleaved, hence the offset arithmetic.
_pydub_effect def high_pass_filter(seg, cutoff): RC = (1.0 / ((cutoff * 2) * math.pi)) dt = (1.0 / seg.frame_rate) alpha = (RC / (RC + dt)) (minval, maxval) = get_min_max_value((seg.sample_width * 8)) original = seg.get_array_of_samples() filteredArray = array.array(seg.array_type, original) frame_count = int(seg.frame_count()) last_val = ([0] * seg.channels) for i in range(seg.channels): last_val[i] = filteredArray[i] = original[i] for i in range(1, frame_count): for j in range(seg.channels): offset = ((i * seg.channels) + j) offset_minus_1 = (((i - 1) * seg.channels) + j) last_val[j] = (alpha * ((last_val[j] + original[offset]) - original[offset_minus_1])) filteredArray[offset] = int(min(max(last_val[j], minval), maxval)) return seg._spawn(data=filteredArray)
class EvolvableKerasModel(object):
    """Wrap a Keras model so an evolutionary algorithm can treat its weights
    as one flat genome vector.

    Typical usage: reinit() once to cache weight shapes, then repeatedly
    setWeights(flat_vector) / setInputs(...) / step() / outputs().
    """

    def __init__(self, model, nbInputs, nbOutputs):
        self.nbInputs = nbInputs
        self.nbOutputs = nbOutputs
        self.nbWeights = model.count_params()
        self.model = model

    def clone(self):
        # NOTE(review): the clone shares the underlying Keras model object
        # with this instance rather than copying it -- confirm intended.
        return EvolvableKerasModel(self.model, self.nbInputs, self.nbOutputs)

    def reinit(self):
        """Compile the model and cache per-tensor weight shapes/sizes.

        Must be called before setWeights(), which relies on modelShape /
        modelSize.
        """
        self.model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
        current_weights = self.model.get_weights()
        self.modelShape = []
        self.modelSize = []
        for tensor in current_weights:
            self.modelShape.append(tensor.shape)
            self.modelSize.append(np.prod(tensor.shape))

    def clear(self):
        """Zero the cached input/output buffers."""
        self._inputs = np.zeros(self.nbInputs)
        self._outputs = np.zeros(self.nbOutputs)

    def step(self, nbSteps=1):
        """Run the model forward nbSteps times on the current inputs."""
        model_inputs = self._inputs.reshape((1, self.nbInputs))
        for _ in range(nbSteps):
            self._outputs = self.model.predict(model_inputs)[0]

    def getNbWeights(self):
        return self.nbWeights

    def setInputs(self, inputs):
        self._inputs = np.array(inputs)

    def outputs(self):
        return self._outputs

    def setWeights(self, weights):
        """Load a flat weight vector into the model (requires reinit() first)."""
        offset = 0
        flat = np.array(weights)
        model_weights = self.model.get_weights()
        for i in range(len(self.modelSize)):
            size = self.modelSize[i]
            model_weights[i][:] = np.resize(flat[offset:offset + size], self.modelShape[i])
            offset += size
        # Bug fix: the original called `evo.models.set_weights(modelW)`,
        # referencing an undefined name `evo`; push the reshaped weights into
        # this instance's own model instead.
        self.model.set_weights(model_weights)
# dm_env wrapper adapting a batched gym-style environment (stable-baselines
# DummyVecEnv or EnvPool) to the dm_env.Environment TimeStep interface.
# reset() emits StepType.FIRST for every sub-environment; step() auto-resets
# the whole batch when any sub-env reported done on the previous step. The
# module-level is_legacy_gym flag (defined elsewhere) switches between the
# old 4-tuple and the new 5-tuple (obs, reward, terminated, truncated, info)
# gym step API; done = term + trunc in the new API.
# NOTE(review): step_type=(done + 1) maps not-done -> MID(1) and
# done -> LAST(2), and discount=(1 - done) zeroes the discount at episode end.
class BatchEnvWrapper(dm_env.Environment): def __init__(self, environment: Union[(DummyVecEnv, EnvPool)]): self._environment = environment if (not isinstance(environment, DummyVecEnv)): self._num_envs = len(environment.all_env_ids) self._use_env_pool = True else: self._num_envs = environment.num_envs self._use_env_pool = False self._reset_next_step = True def reset(self) -> TimeStep: self._reset_next_step = False if is_legacy_gym: observation = self._environment.reset() else: (observation, _) = self._environment.reset() ts = TimeStep(step_type=np.full(self._num_envs, dm_env.StepType.FIRST, dtype='int32'), reward=np.zeros(self._num_envs, dtype='float32'), discount=np.ones(self._num_envs, dtype='float32'), observation=observation) return ts def step(self, action: types.NestedArray) -> TimeStep: if self._reset_next_step: return self.reset() if self._use_env_pool: if is_legacy_gym: (observation, reward, done, _) = self._environment.step(action) else: (observation, reward, term, trunc, _) = self._environment.step(action) done = (term + trunc) else: self._environment.step_async(action) (observation, reward, done, _) = self._environment.step_wait() self._reset_next_step = any(done) ts = TimeStep(step_type=(done + 1).astype(np.int32), reward=reward, discount=(1 - done).astype(np.float32), observation=observation) return ts def observation_spec(self): space = self._environment.observation_space obs_spec = specs.BoundedArray(shape=space.shape, dtype='float32', minimum=space.low, maximum=space.high, name='observation') return obs_spec def action_spec(self): space = self._environment.action_space if isinstance(space, gym.spaces.Discrete): act_spec = specs.DiscreteArray(num_values=space.n, dtype=space.dtype, name='action') return act_spec return specs.BoundedArray(shape=space.shape, dtype=space.dtype, minimum=space.low, maximum=space.high, name='single_action') def reward_spec(self): return specs.Array(name='reward', shape=[self._num_envs], dtype='float32') def close(self): 
self._environment.close()
class cosqa_search_train(Dataset):
    """CoSQA code-search training dataset.

    Loads examples from '<data_dir>/<lang>/<split>' and yields
    (text, code, idx) triples.
    """

    def __init__(self, data_dir, lang, split='train'):
        path = f'{data_dir}/{lang}/{split}'
        self.examples = read_cosqa_search_examples(path)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        return (example.text, example.code, example.idx)
def setup_optimizer(model, learning_rate: float):
    """Build an AdamW optimizer with BERT-style weight-decay grouping.

    Parameters whose names contain a normalization/bias marker get zero
    weight decay; everything else decays at 0.01.
    """
    no_decay_markers = ('bias', 'gamma', 'beta', 'LayerNorm')

    def _decays(name):
        # A parameter decays when its name mentions none of the markers.
        return not any(marker in name for marker in no_decay_markers)

    named_params = list(model.named_parameters())
    grouped = [
        {'params': [p for n, p in named_params if _decays(n)], 'weight_decay': 0.01},
        {'params': [p for n, p in named_params if not _decays(n)], 'weight_decay': 0.0},
    ]
    return AdamW(grouped, lr=learning_rate)
def has_exact_support(m, m_hat):
    """Return 1 when m and m_hat have identical nonzero supports, else 0."""
    m_nnz, m_hat_nnz, shared_nnz = _nonzero_intersection(m, m_hat)
    # The supports match exactly iff no nonzero entry is exclusive to either
    # matrix, i.e. the symmetric difference of the supports is empty.
    symmetric_difference = (m_nnz - shared_nnz) + (m_hat_nnz - shared_nnz)
    return int(symmetric_difference == 0)
# CCNet-style ResNet backbone for dense prediction: a three-conv stem
# (conv1-3) replacing the usual 7x7, dilated layer3/layer4 (stride 1,
# dilation 2/4) to keep spatial resolution, an RCCA criss-cross attention
# head, and an auxiliary DSN classifier branch on the layer3 features.
# forward() returns [main_logits, dsn_logits].
# 'conv3x3', 'BatchNorm2d', 'affine_par', 'RCCAModule' and 'InPlaceABNSync'
# are defined elsewhere in the file.
# NOTE(review): self.maxpool is assigned twice -- the second assignment
# (ceil_mode=True) silently overrides the first; self.relu is defined but
# never used in forward(). Presumably leftovers; confirm before cleaning up.
class ResNet(nn.Module): def __init__(self, block, layers, num_classes): self.inplanes = 128 super(ResNet, self).__init__() self.conv1 = conv3x3(3, 64, stride=2) self.bn1 = BatchNorm2d(64) self.relu1 = nn.ReLU(inplace=False) self.conv2 = conv3x3(64, 64) self.bn2 = BatchNorm2d(64) self.relu2 = nn.ReLU(inplace=False) self.conv3 = conv3x3(64, 128) self.bn3 = BatchNorm2d(128) self.relu3 = nn.ReLU(inplace=False) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.relu = nn.ReLU(inplace=False) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 1, 1)) self.head = RCCAModule(2048, 512, num_classes) self.dsn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), InPlaceABNSync(512), nn.Dropout2d(0.1), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)) def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1): downsample = None if ((stride != 1) or (self.inplanes != (planes * block.expansion))): downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), affine=affine_par)) layers = [] generate_multi_grid = (lambda index, grids: (grids[(index % len(grids))] if isinstance(grids, tuple) else 1)) layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid))) self.inplanes = (planes * block.expansion) for i in range(1, blocks): layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid))) return nn.Sequential(*layers) def forward(self, x, recurrence=1): x = 
self.relu1(self.bn1(self.conv1(x))) x = self.relu2(self.bn2(self.conv2(x))) x = self.relu3(self.bn3(self.conv3(x))) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x_dsn = self.dsn(x) x = self.layer4(x) x = self.head(x, recurrence) return [x, x_dsn]
# Build a multi-speaker SNIPS SLU dataset: for each split (test/valid/train),
# replicate every single-speaker utterance once per Polly voice, writing lines
# of the form '<spk>-snips-<split>-<n> BOS <TEXT> EOS\tO <slots> <intent>'
# into all.iob.snips.txt under output_dir. Assumes the
# single-matched-snips.<split>.w-intent files are tab-separated as:
# uttid \t text \t slot-tags \t intent, with uttids 'snips-<split>-<n>'.
def create_multispk_for_snips(output_dir): speakers = 'Aditi Amy Brian Emma Geraint Ivy Joanna Joey Justin Kendra Kimberly Matthew Nicole Raveena Russell Salli'.split(' ') dataset_info = [{'split': 'test', 'num_utts': 700}, {'split': 'valid', 'num_utts': 700}, {'split': 'train', 'num_utts': 13084}] test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w') for data in dataset_info: num_utts = data['num_utts'] split = data['split'] with open(os.path.join(output_dir, ('single-matched-snips.%s.w-intent' % split))) as f: content = f.readlines() utt2line = {x.strip().split()[0]: x.strip() for x in content} for spk in speakers: for num in range(num_utts): uttid = ('%s-snips-%s-%d' % (spk, split, num)) line = utt2line[('snips-%s-%d' % (split, num))] text = line.split('\t')[1].upper() slots = line.split('\t')[2] intent = line.split('\t')[3] test_out_f.write(('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent))) test_out_f.close()
def dataset(mode, input_name, output_dir):
    """Run clustering on a dataset.

    mode == 1 reuses a previously computed assignment ('<output_dir>/old/
    assign.out') and sweeps hyper-parameters over it; any other mode runs a
    fresh non-motif CASC clustering.
    """
    beta = 25
    number_of_clusters = CLUSTER_NUMBER
    if mode == 1:
        # Reuse the old assignment, then run the hyper-parameter sweep on it.
        previous_assignment = '%s/old/assign.out' % output_dir
        runHyperParameterTests(input_name, output_dir, number_of_clusters,
                               beta, previous_assignment)
    else:
        runNonMotifCASC(input_name, output_dir, number_of_clusters, beta, None)
class ReLU6(Hardtanh):
    """ReLU6 activation: min(max(0, x), 6), realized as Hardtanh on [0, 6]."""

    def __init__(self, inplace: bool = False):
        super(ReLU6, self).__init__(0.0, 6.0, inplace)

    def extra_repr(self) -> str:
        # Only advertise the flag when it deviates from the default.
        return 'inplace=True' if self.inplace else ''
# Apply one hardware/OS configuration row (cur_config, indexed through the
# `columns` list) to a Jetson-class board: per-core online status, CPU / GPU /
# EMC frequencies, I/O scheduler policy, and a battery of vm.* / kernel.*
# sysctl knobs. The frequency/status setters shell out to sudo helper scripts,
# then re-read the corresponding sysfs file to verify the change took effect,
# returning True on success, False on failure (and None on the
# invalid-argument / exception paths). The sysctl setters fire os.system and
# return nothing.
# NOTE(review): set_big_core_status / set_big_core_freq compare the string
# read from sysfs against the raw config value -- if cur_config carries ints
# the comparison can never be equal; confirm the config values are strings.
# NOTE(review): set_gpu_freq compares `new_freq != frequency` where new_freq
# is a string and frequency an int, so the mismatch branch may trigger even
# when the frequency was applied -- verify against actual run logs.
class ConfigParams(object): def __init__(self, cfg, cur_config, cur_sys, big_cores, columns): print('[STATUS]: Initializing ConfigParams Class') self.cfg = cfg self.cur_config = cur_config self.cur_sys = cur_sys self.big_cores = big_cores self.columns = columns self.ENABLE = '1' self.DISABLE = '0' self.set_big_core_status(self.cfg['systems'][self.cur_sys]['cpu']['cores']['core1'], self.cur_config[self.columns.index('core1_status')]) self.set_big_core_status(self.cfg['systems'][self.cur_sys]['cpu']['cores']['core2'], self.cur_config[self.columns.index('core2_status')]) self.set_big_core_status(self.cfg['systems'][self.cur_sys]['cpu']['cores']['core3'], self.cur_config[self.columns.index('core3_status')]) self.set_big_core_freq(self.cfg['systems'][self.cur_sys]['cpu']['cores']['core0'], self.cur_config[self.columns.index('core_freq')]) self.set_gpu_freq(self.cur_config[self.columns.index('gpu_freq')]) self.set_emc_freq(self.cur_config[self.columns.index('emc_freq')]) self.set_scheduler_policy(self.cur_config[self.columns.index('scheduler.policy')]) self.set_cache_pressure(self.cur_config[self.columns.index('vm.vfs_cache_pressure')]) self.set_swappiness(self.cur_config[self.columns.index('vm.swappiness')]) self.set_dirty_bg_ratio(self.cur_config[self.columns.index('vm.dirty_background_ratio')]) self.set_dirty_ratio(self.cur_config[self.columns.index('vm.dirty_ratio')]) self.set_drop_caches(self.cur_config[self.columns.index('vm.drop_caches')]) self.set_sched_rt_runtime_us(self.cur_config[self.columns.index('kernel.sched_rt_runtime_us')]) self.set_sched_child_runs_first(self.cur_config[self.columns.index('kernel.sched_child_runs_first')]) self.set_overcommit_memory(self.cur_config[self.columns.index('vm.overcommit_memory')]) self.set_overcommit_ratio(self.cur_config[self.columns.index('vm.overcommit_ratio')]) def set_big_core_status(self, cpu_name, status): if (cpu_name != 'cpu0'): filename = '{0}{1}{2}'.format('/sys/devices/system/cpu/', cpu_name, '/online') 
cur_status = subprocess.getstatusoutput('cat {0}'.format(filename))[1] if (cur_status != status): res = subprocess.call(['sudo', 'sh', './utils/change_core_status.sh', str(cpu_name), str(status)]) if (res != 0): err = 'subprocess command failed' print('[CPU STATUS ERROR]: {0}'.format(err)) return False new_status = subprocess.getstatusoutput('cat {0}'.format(filename))[1] if (new_status != status): print(((((('[CPU STATUS ERROR]: ' + cpu_name) + '\nexpected: ') + str(status)) + '\nactual: ') + str(new_status))) return False return True else: print('invalid cpu_name argument') def set_big_core_freq(self, cpu_name, frequency): frequency = int(frequency) if (frequency is not None): filename = '{0}{1}{2}'.format('/sys/devices/system/cpu/', cpu_name, '/cpufreq/scaling_cur_freq') cur_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] res = subprocess.call(['sudo', 'sh', './utils/change_core_frequency.sh', str(self.cur_sys), str(frequency), str(cur_freq)]) if (res != 0): err = traceback.print_exc() print('[CPU FREQUENCY ERROR]: {0}'.format(err)) return False new_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] if (str(new_freq) != str(frequency)): print(((((('[CPU FREQUENCY ERROR]: ' + cpu_name) + '\nexpected: ') + str(frequency)) + '\nactual: ') + str(new_freq))) return False return True def set_gpu_freq(self, frequency): frequency = int(frequency) if (frequency is not None): filename = self.cfg['systems'][self.cur_sys]['gpu']['frequency']['current'] try: if (frequency is not None): cur_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] res = subprocess.call(['sudo', 'sh', './utils/change_gpu_frequency.sh', str(self.cur_sys), str(frequency), str(cur_freq)]) if (res != 0): err = traceback.print_exc() print('[GPU FREQUENCY ERROR]: {0}'.format(err)) return False new_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] if (new_freq != frequency): print(((('[GPU FREQUENCY ERROR]: \nexpected: ' + str(frequency)) + 
'\nactual: ') + str(new_freq))) return False return True except AttributeError as e: print('[GPU FREQUENCY ERROR: {0}]'.format(e)) def set_emc_freq(self, frequency): frequency = int(frequency) if (frequency is not None): filename = self.cfg['systems'][self.cur_sys]['emc']['frequency']['current'] try: if (frequency is not None): cur_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] res = subprocess.call(['sudo', 'sh', './utils/change_emc_frequency.sh', str(self.cur_sys), str(frequency)]) if (res != 0): err = traceback.print_exc() print('[EMC FREQUENCY ERROR]: {0}'.format(err)) return False new_freq = subprocess.getstatusoutput('cat {0}'.format(filename))[1] if (new_freq != frequency): print(((('[EMC FREQUENCY ERROR]: \nexpected: ' + str(frequency)) + '\nactual: ') + str(new_freq))) return False return True except AttributeError as e: print('[EMC FREQUENCY ERROR: {0}]'.format(e)) def set_scheduler_policy(self, val): if (val == 0): os.system('echo cfq > /sys/block/mmcblk0/queue/scheduler') elif (val == 1): os.system('echo noop > /sys/block/mmcblk0/queue/scheduler') else: print('[ERROR]: Invalid policy value') def set_cache_pressure(self, val): os.system('sysctl vm.vfs_cache_pressure={0}'.format(val)) def set_swappiness(self, val): os.system('sysctl vm.swappiness={0}'.format(val)) def set_dirty_bg_ratio(self, val): os.system('sysctl vm.dirty_background_ratio={0}'.format(val)) def set_dirty_ratio(self, val): os.system('sysctl vm.dirty_ratio={0}'.format(val)) def set_drop_caches(self, val): os.system('sysctl vm.drop_caches={0}'.format(val)) def set_sched_child_runs_first(self, val): os.system('sysctl kernel.sched_child_runs_first={0}'.format(val)) def set_sched_rt_runtime_us(self, val): os.system('sysctl kernel.sched_rt_runtime_us={0}'.format(val)) def set_nr_hugepages(self, val): os.system('sysctl vm.nr_hugepages={0}'.format(val)) def set_overcommit_ratio(self, val): os.system('sysctl vm.overcommit_ratio={0}'.format(val)) def set_overcommit_memory(self, 
val): os.system('sysctl vm.overcommit_memory={0}'.format(val)) def set_overcommit_hugepages(self, val): os.system('sysctl vm.overcommit_hugepages={0}'.format(val)) def set_max_pids(self, val): os.system('sysctl user.max_pid_namespaces={0}'.format(val)) def set_sched_nr_migrate(self, val): os.system('sysctl kernel.sched_nr_migrate={0}'.format(val)) def set_sched_time_avg_ms(self, val): os.system('sysctl kernel.sched_time_avg_ms={0}'.format(val)) def set_cpu_time_max_percent(self, val): os.system('sysctl kernel.cpu_time_max_percent={0}'.format(val))
def torch_dtype_from_tpu_mlir(dtype) -> torch.dtype:
    """Translate a tpu-mlir dtype string ('f16', 'bf16', 'f32') to torch.

    Raises TypeError for any other dtype string.
    """
    dtype_map = {
        'f16': torch.float16,
        'bf16': torch.bfloat16,
        'f32': torch.float32,
    }
    result = dtype_map.get(dtype)
    if result is None:
        raise TypeError('%s is not supported by torch' % dtype)
    return result
# Optional word-embedding feature for a grapheme pipeline: when use_word_emb
# is truthy, embeds the raw text with `word_emb` and expands each word vector
# across its constituent grapheme positions via expand_to_chars (defined
# elsewhere), using the grapheme encoder's index for ' ' as the word
# separator. Returns the per-character embedding tensor, or None when word
# embeddings are disabled. The unsqueeze/squeeze pair adds and removes a
# batch dimension around the batched expand_to_chars helper.
def word_emb_pipeline(txt, grapheme_encoded, grapheme_encoded_len, grapheme_encoder=None, word_emb=None, use_word_emb=None): char_word_emb = None if use_word_emb: raw_word_emb = word_emb().embeddings(txt) word_separator_idx = grapheme_encoder.lab2ind[' '] char_word_emb = expand_to_chars(emb=raw_word_emb.unsqueeze(0), seq=grapheme_encoded.unsqueeze(0), seq_len=grapheme_encoded_len.unsqueeze(0), word_separator=word_separator_idx).squeeze(0) return char_word_emb
def run_sac_experiment(main, mode, include_folders=None, log_dir=None, exp_prefix='experiment', exp_name=None, **kwargs):
    """Launch a SAC experiment through run_experiment_lite.

    Builds a per-seed log directory ('iter<seed>'), symlinks the requested
    project folders into the run, and forwards everything else to
    run_experiment_lite. kwargs must contain 'seed'.
    """
    if exp_name is None:
        exp_name = timestamp()
    seed_subdir = 'iter' + str(kwargs['seed'])
    if log_dir is None:
        log_dir = os.path.join(DEFAULT_LOG_DIR, 'local',
                               exp_prefix.replace('_', '-'),
                               exp_name, seed_subdir)
    else:
        log_dir = os.path.join(log_dir, seed_subdir)
    if include_folders is None:
        include_folders = []
    if mode == 'ec2':
        # EC2 runs additionally need the 'mme' folder shipped along.
        include_folders.append('mme')
    symlinks = [_create_symlink(folder) for folder in include_folders]
    kwargs.update(added_project_directories=symlinks)
    run_experiment_lite(stub_method_call=main, mode=mode,
                        exp_prefix=exp_prefix, exp_name=exp_name,
                        log_dir=log_dir, **kwargs)
# ONNX symbolic for torch.unique(dim=...): lowers to the ONNX 'Unique' op
# (which yields 4 outputs) and returns (unique_values, inverse_indices,
# counts), discarding the raw first-occurrence indices.
# NOTE(review): the decorator line appears truncated in this dump --
# presumably @parse_args('v', 'i', 'i', 'i', 'i') from
# torch.onnx.symbolic_helper; confirm against upstream torch before relying
# on this text.
_args('v', 'i', 'i', 'i', 'i') def unique_dim(g, self, dim, sorted, return_inverse, return_counts): (u, indices, inverse_indices, counts) = g.op('Unique', self, axis_i=dim, sorted_i=sorted, outputs=4) return (u, inverse_indices, counts)
# Parametrized check of the Surprisal@k recommendation metric, with and
# without an explicit ground-truth user set; the expected values are
# hand-computed self-information averages (1 - 1/log2(3) terms).
# NOTE(review): the leading '.experimental .parametrize' looks like a garbled
# '@pytest.mark.parametrize' decorator, with the 'true', 'recs' and
# 'true_users' fixtures supplied elsewhere in the test module -- confirm
# against the original source.
.experimental .parametrize('gt_users, result', [(False, {1: (1 - (1 / np.log2(3))), 3: (((5 * (1 - (1 / np.log2(3)))) / 9) + (4 / 9))}), (True, {1: ((1 - (1 / np.log2(3))) / 2), 3: (((3 * (1 - (1 / np.log2(3)))) / 12) + (3 / 12))})]) def test_surprisal_at_k(true, recs, true_users, gt_users, result): users = (true_users if gt_users else None) assertDictAlmostEqual(Surprisal(true)(recs, [1, 3], ground_truth_users=users), result)
def test_DictBasic():
    """Exercise DictBasic: construction, lookup, insertion, deletion, subs."""
    x, y, z = symbols('x y z')
    d = DictBasic({x: 2, y: z})
    # Printing order is unspecified, so accept either arrangement.
    assert str(d) in ('{x: 2, y: z}', '{y: z, x: 2}')
    assert d[x] == 2
    raises(KeyError, lambda: d[(2 * z)])
    assert (2 * z) not in d
    d[(2 * z)] = x
    assert d[(2 * z)] == x
    assert (2 * z) in d
    assert set(d.items()) == {((2 * z), x), (x, Integer(2)), (y, z)}
    del d[x]
    assert set(d.keys()) == {(2 * z), y}
    assert set(d.values()) == {x, z}
    expr = y + sin(2 * z)
    assert expr.subs(d) == z + sin(x)
# Eagerly create Adafactor optimizer state for every parameter, so the
# (row/column) second-moment accumulators exist before the first step. The
# parameter tensor itself stands in for its gradient purely as a shape/dtype
# template. Mirrors the lazy state layout built inside Adafactor.step:
# factored parameters get per-row and per-column accumulators (last dim
# dropped / second-to-last dim dropped respectively), others a full
# exp_avg_sq; optimizer._get_options decides which. Only touches parameters
# whose state is still empty.
def adafactor_init(optimizer): for pg in optimizer.param_groups: for p in pg['params']: state = optimizer.state[p] grad = p grad_shape = grad.shape (factored, use_first_moment) = optimizer._get_options(pg, grad_shape) if (len(state) == 0): state['step'] = 0 if use_first_moment: state['exp_avg'] = torch.zeros_like(grad) if factored: state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).to(grad) state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0
# ns-3 tap-bridge demo: build a 2-node CSMA network running under the
# real-time simulator implementation with checksums enabled, bridge each node
# to a host tap device ('tap-left' / 'tap-right') in UseLocal mode, and
# simulate for 600 seconds. signal_check_frequency=-1 disables the Python
# signal polling during Run. Returns 0 on completion.
def main(argv): ns.core.CommandLine().Parse(argv) ns.core.GlobalValue.Bind('SimulatorImplementationType', ns.core.StringValue('ns3::RealtimeSimulatorImpl')) ns.core.GlobalValue.Bind('ChecksumEnabled', ns.core.BooleanValue('true')) nodes = ns.network.NodeContainer() nodes.Create(2) csma = ns.csma.CsmaHelper() devices = csma.Install(nodes) tapBridge = ns.tap_bridge.TapBridgeHelper() tapBridge.SetAttribute('Mode', ns.core.StringValue('UseLocal')) tapBridge.SetAttribute('DeviceName', ns.core.StringValue('tap-left')) tapBridge.Install(nodes.Get(0), devices.Get(0)) tapBridge.SetAttribute('DeviceName', ns.core.StringValue('tap-right')) tapBridge.Install(nodes.Get(1), devices.Get(1)) ns.core.Simulator.Stop(ns.core.Seconds(600)) ns.core.Simulator.Run(signal_check_frequency=(- 1)) ns.core.Simulator.Destroy() return 0
def print_utterances_of_mask(df, mask, utterance_column='utterance', worker_column='WorkerId'):
    """Print the utterances selected by a boolean mask.

    When worker_column is given, rows are sorted by worker id and printed as
    'index worker utterance'; otherwise just 'index utterance'. Also prints
    the number of selected responses first.
    """
    print('# responses:', mask.sum())
    if worker_column is not None:
        ndf = df[mask][[utterance_column, worker_column]].sort_values(worker_column)
        for index, row in ndf.iterrows():
            print(index, row[worker_column], row[utterance_column])
    else:
        # Bug fix: the original selected a single column here, producing a
        # Series (no iterrows(), and `content[utterance_column]` would not
        # index a scalar). Keep a one-column DataFrame so iteration works.
        ndf = df[mask][[utterance_column]]
        for index, row in ndf.iterrows():
            print(index, row[utterance_column])
# Tokenize one dataset row to a fixed max_length (padded + truncated).
# Training rows additionally get token masking/switching with probability
# `prob` via mask_and_switch (defined elsewhere); eval rows pass through
# unchanged. Copies the row's 'binary_labels' into the encoded features.
# Relies on a module-level `tokenizer`.
def _preprocess(instance, prob: float=0.1, max_length: int=512): encoded = tokenizer(instance['text'], max_length=max_length, padding='max_length', truncation=True) if (instance['split'] == 'train'): encoded = mask_and_switch(encoded, prob=prob) else: pass encoded['binary_labels'] = instance['binary_labels'] return encoded
def test_constructor_statement_no_args(default_test_case, variable_reference_mock, constructor_mock):
    """A constructor statement built without arguments has an empty args map."""
    statement = stmt.ConstructorStatement(default_test_case, constructor_mock)
    expected = {}
    assert statement.args == expected
# Auto-generated pybindgen registration for the ns-3 template instantiation
# SimpleRefCount<SpectrumValue, empty, DefaultDeleter<SpectrumValue>>:
# registers the default constructor and the copy constructor on the wrapped
# class. Generated binding code -- do not hand-edit.
def register_Ns3SimpleRefCount__Ns3SpectrumValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumValue__gt___methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::SimpleRefCount< ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumValue > > const &', 'o')]) return
class GP(nn.Module):
    """Gaussian-process regression with a learnable observation-noise variance.

    K is a kernel callable: K(x) gives the train Gram matrix, K(x, xstar) the
    train/test cross-covariances, K(xstar) the test Gram matrix. predict()
    returns the posterior mean and covariance; forward() returns the negative
    log marginal likelihood (plus mean/covariance when xstar is given).
    Cholesky and LOG2PI are defined elsewhere in the file.
    """

    def __init__(self, K, x, y, var0=None):
        super(GP, self).__init__()
        # Noise variance: default 1, or a scalar promoted to a 1-element tensor.
        if var0 is None:
            var0 = torch.ones(1)
        elif numpy.isscalar(var0):
            var0 = torch.FloatTensor(1).fill_(var0)
        self.var = nn.Parameter(var0)
        self.x = x
        self.y = y
        self.K = K

    def predict(self, xstar=None):
        """Posterior mean and covariance at the test inputs xstar."""
        noisy_Kxx = self.K(self.x) + (Variable(torch.eye(self.x.size(0))) * self.var)
        Kxx_inv = torch.inverse(noisy_Kxx)
        Kxxstar = self.K(self.x, xstar)
        Kxstarxstar = self.K(xstar)
        # Bug fixes vs. the original: `torch.Kxstarxstar` referenced a
        # nonexistent torch attribute, and `torch.mm(Kxx_inv), Kxxstar` had
        # misplaced parentheses. Also orient Kxxstar (n_train x n_test)
        # consistently with forward(): transpose before the train-side
        # contraction.
        mu = torch.mv(Kxxstar.t(), torch.mv(Kxx_inv, self.y))
        cov = Kxstarxstar - torch.mm(Kxxstar.t(), torch.mm(Kxx_inv, Kxxstar))
        return (mu, cov)

    def forward(self, xstar=None):
        """Negative log marginal likelihood; with xstar also (mu, cov)."""
        Kxx_noise = self.K(self.x) + (Variable(torch.eye(self.x.size(0))) * self.var)
        L = Cholesky.apply(Kxx_noise)
        Linv = L.inverse()
        alpha = torch.mv(Kxx_noise.inverse(), self.y)
        if xstar is not None:
            Kxxstar = self.K(self.x, xstar)
            Kxstarxstar = self.K(xstar)
            mu = torch.mv(Kxxstar.t(), alpha)
            v = torch.mm(Linv, Kxxstar)
            cov = Kxstarxstar - torch.mm(v.t(), v)
        neg_logp = ((0.5 * self.y.dot(alpha)) + L.diag().log().sum()
                    + ((self.y.size(0) / 2.0) * LOG2PI))
        if xstar is not None:
            return (mu, cov, neg_logp)
        return neg_logp
# nnabla PythonFunction wrapping a custom CUDA Lanczos-resampled tri-plane
# feature query: for query points of shape (B1, ..., Bn, 3) inside the
# [min_, max_] box and a tri-plane feature grid of resolution G with D
# channels, produces (B1, ..., Bn, D*3) features (one D-vector per plane).
# setup_impl validates shapes; forward_impl / backward_impl hand raw float32
# device pointers to lanczos_triplane_feature_cuda. Backward only propagates
# gradients to the feature input (propagate_down[1] / grad_depends_input_data
# confirm this); no query-point gradient is computed.
# NOTE(review): _use_ste is stored and reported by args() but never consulted
# in the forward/backward paths shown here -- confirm whether it is consumed
# by the CUDA extension or is dead.
class LanczosQueryOnTriplane(PythonFunction): def __init__(self, ctx, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], use_ste=False, boundary_check=False): super(LanczosQueryOnTriplane, self).__init__(ctx) self._min = min_ self._max = max_ self._use_ste = use_ste self._boundary_check = boundary_check def name(self): return self.__class__.__name__ def args(self): data = dict(min_=self._min, max_=self._max, use_ste=self._use_ste, boundary_check=self._boundary_check) return data def min_outputs(self): return 1 def setup_impl(self, inputs, outputs): query = inputs[0] feature = inputs[1] assert (len(query.shape) > 1), 'Query shape must be greater than 1, e.g., (B1, ..., Bn, 3).' assert (query.shape[(- 1)] == 3), 'Query shape[-1] must be 3.' assert ((len(feature.shape) > 1) and (len(feature.shape) < 5)), 'Feature must be either 1D, 2D, 3D features' batch_sizes = query.shape[:(- 1)] D = feature.shape[(- 1)] outputs[0].reset_shape((batch_sizes + ((D * 3),)), True) def forward_impl(self, inputs, outputs): query = inputs[0] feature = inputs[1] output = outputs[0] batch_sizes = query.shape[:(- 1)] G = feature.shape[1] B = np.prod(batch_sizes) D = feature.shape[(- 1)] query_ptr = query.data.data_ptr(np.float32, self.ctx) feature_ptr = feature.data.data_ptr(np.float32, self.ctx) output_ptr = output.data.data_ptr(np.float32, self.ctx) lanczos_triplane_feature_cuda.query_on_triplane(((B * D) * 3), output_ptr, query_ptr, feature_ptr, G, D, self._min, self._max, self._boundary_check) def backward_impl(self, inputs, outputs, propagate_down, accum): query = inputs[0] feature = inputs[1] output = outputs[0] batch_sizes = query.shape[:(- 1)] G = feature.shape[1] B = np.prod(batch_sizes) D = feature.shape[(- 1)] grad_query_ptr = query.grad.data_ptr(np.float32, self.ctx) grad_feature_ptr = feature.grad.data_ptr(np.float32, self.ctx) grad_output_ptr = output.grad.data_ptr(np.float32, self.ctx) query_ptr = query.data.data_ptr(np.float32, self.ctx) feature_ptr = 
feature.data.data_ptr(np.float32, self.ctx) if propagate_down[1]: lanczos_triplane_feature_cuda.grad_feature(((B * D) * 3), grad_feature_ptr, grad_output_ptr, query_ptr, G, D, self._min, self._max, self._boundary_check, accum[1]) def grad_depends_output_data(self, i, o): return False def grad_depends_input_data(self, i, j): if (i == 0): return True if ((i == 1) and (j == 0)): return True return False
def token_deletion(input_sequence, mlm_probability, mask_token, thingtalk):
    """Randomly delete tokens from a sequence.

    Masks tokens via token_masking with probability mlm_probability, then
    drops every masked position, returning the surviving tokens re-joined
    with single spaces.
    """
    masked_sequence = token_masking(input_sequence, mlm_probability, mask_token, thingtalk)
    survivors = [token for token in masked_sequence if token != mask_token]
    return ' '.join(survivors)
class WordEncoder(Encoder):
    """Word-level encoder mapping whitespace-split tokens to integer ids via
    the module-level word maps, with UNK_ID for out-of-vocabulary words."""

    def encode(self, src_sentence):
        # Source-side lookup against src_wmap.
        tokens = src_sentence.split()
        return [src_wmap.get(token, utils.UNK_ID) for token in tokens]

    def encode_trg(self, trg_sentence):
        # Target-side lookup against the reversed target word map.
        tokens = trg_sentence.split()
        return [trg_wmap_rev.get(token, utils.UNK_ID) for token in tokens]
# Python port of the Lua Torch legacy nn.Linear module: explicit weight /
# gradWeight buffers, an `addBuffer` of ones used to add the bias via an
# outer product (addr_) and to accumulate its gradient (addmv_), and
# in-place addmm_ calls using the legacy positional (beta, alpha) argument
# order. updateOutput computes x @ W^T + b; updateGradInput computes
# dL/dx = dL/dy @ W; accGradParameters accumulates scale * dL/dy^T @ x into
# gradWeight (and the bias gradient). reset() uses fan-in uniform init
# (or sqrt(3)-scaled stdv when given). clearState drops the ones buffer.
# Inputs are required to be 2-D (batch, features).
class Linear(Module): def __init__(self, inputSize, outputSize, bias=True): super(Linear, self).__init__() self.weight = torch.Tensor(outputSize, inputSize) self.gradWeight = torch.Tensor(outputSize, inputSize) self.bias = (torch.Tensor(outputSize) if bias else None) self.gradBias = (torch.Tensor(outputSize) if bias else None) self.reset() self.addBuffer = None def noBias(self): self.bias = None self.gradBias = None return self def reset(self, stdv=None): if (stdv is not None): stdv = (stdv * math.sqrt(3)) else: stdv = (1.0 / math.sqrt(self.weight.size(1))) self.weight.uniform_((- stdv), stdv) if (self.bias is not None): self.bias.uniform_((- stdv), stdv) return self def _updateAddBuffer(self, input): nframe = input.size(0) if (self.addBuffer is None): self.addBuffer = input.new() if (self.addBuffer.nelement() != nframe): self.addBuffer.resize_(nframe).fill_(1) def updateOutput(self, input): assert (input.dim() == 2) nframe = input.size(0) nelement = self.output.nelement() self.output.resize_(nframe, self.weight.size(0)) if (self.output.nelement() != nelement): self.output.zero_() self._updateAddBuffer(input) self.output.addmm_(0, 1, input, self.weight.t()) if (self.bias is not None): self.output.addr_(self.addBuffer, self.bias) return self.output def updateGradInput(self, input, gradOutput): if (self.gradInput is None): return nelement = self.gradInput.nelement() self.gradInput.resize_as_(input) if (self.gradInput.nelement() != nelement): self.gradInput.zero_() assert (input.dim() == 2) self.gradInput.addmm_(0, 1, gradOutput, self.weight) return self.gradInput def accGradParameters(self, input, gradOutput, scale=1): assert (input.dim() == 2) self.gradWeight.addmm_(scale, gradOutput.t(), input) if (self.bias is not None): self._updateAddBuffer(input) self.gradBias.addmv_(scale, gradOutput.t(), self.addBuffer) def clearState(self): clear(self, 'addBuffer') return super(Linear, self).clearState() def __repr__(self): return ((super(Linear, self).__repr__() + '({} -> 
{})'.format(self.weight.size(1), self.weight.size(0))) + (' without bias' if (self.bias is None) else ''))
def wide_resnet_mnist_bn(in_ch=1, in_dim=28):
    """Wide-ResNet (depth 10, widen factor 4) with batch norm and pooling for
    MNIST-like input.

    Bug fix: the original ignored its in_ch / in_dim arguments and always
    passed the hard-coded 1 and 28; forward them so callers can actually vary
    the input shape. Defaults preserve the previous behavior.
    """
    return Wide_ResNet(10, 4, None, 10, use_bn=True, use_pooling=True,
                       in_ch=in_ch, in_dim=in_dim)
def calc_local_total_norm(parameters, norm_type=2):
    """Total gradient norm across parameters (as in clip_grad_norm_, without
    clipping).

    Accepts a single tensor or an iterable of tensors; parameters with no
    gradient are skipped. norm_type may be any p-norm, including inf.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if norm_type == inf:
        # Infinity norm: the single largest absolute gradient entry.
        return max(g.abs().max() for g in grads)
    per_param_norms = torch.stack([torch.norm(g, norm_type) for g in grads])
    return torch.norm(per_param_norms, norm_type)
class EnvMaskedReplayBuffer(SimpleReplayBuffer):
    """Replay buffer that additionally stores a per-transition binary ensemble mask.

    Each transition carries ``ensemble_size`` {0, 1} flags selecting which
    ensemble members train on it; masks are drawn i.i.d.
    Bernoulli(``masking_probability``) when a path is added.
    """

    def __init__(self, max_replay_buffer_size, env, ensemble_size, masking_probability, env_info_sizes=None):
        self.env = env
        self.masking_probability = masking_probability
        self.ensemble_size = ensemble_size
        self._ob_space = env.observation_space
        self._action_space = env.action_space
        if env_info_sizes is None:
            # Fall back to whatever extra-info sizes the env advertises, if any.
            env_info_sizes = env.info_sizes if hasattr(env, 'info_sizes') else dict()
        super().__init__(
            max_replay_buffer_size=max_replay_buffer_size,
            observation_dim=get_dim(self._ob_space),
            action_dim=get_dim(self._action_space),
            env_info_sizes=env_info_sizes,
        )
        self._masks = np.zeros((max_replay_buffer_size, ensemble_size))

    def add_sample(self, observation, action, reward, terminal, next_observation, mask, env_info=None, **kwargs):
        """Store one transition together with its ensemble mask."""
        if isinstance(self._action_space, Discrete):
            # One-hot encode discrete actions.
            new_action = np.zeros(self._action_dim)
            new_action[action] = 1
        else:
            new_action = action
        self._observations[self._top] = observation
        self._actions[self._top] = new_action
        self._rewards[self._top] = reward
        self._terminals[self._top] = terminal
        self._next_obs[self._top] = next_observation
        self._masks[self._top] = mask
        for key in self._env_info_keys:
            self._env_infos[key][self._top] = env_info[key]
        super()._advance()

    def random_batch(self, batch_size):
        """Sample a batch (including masks); temporarily samples with replacement
        when the buffer holds fewer transitions than ``batch_size``."""
        indices = np.random.choice(self._size, size=batch_size, replace=(self._replace or (self._size < batch_size)))
        if (not self._replace) and (self._size < batch_size):
            warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')
        batch = dict(
            observations=self._observations[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            next_observations=self._next_obs[indices],
            masks=self._masks[indices],
        )
        for key in self._env_info_keys:
            assert key not in batch.keys()
            batch[key] = self._env_infos[key][indices]
        return batch

    def add_path(self, path):
        """Add every transition of ``path``, drawing one ensemble mask per transition.

        Bug fix: the stored mask is now the sampled one (with the all-zero
        fallback applied).  Previously a second, unchecked
        ``np.random.binomial`` draw was passed to ``add_sample``, which could
        be all zeros — so a transition could end up training no ensemble
        member at all.
        """
        for i, (obs, action, reward, next_obs, terminal, agent_info, env_info) in enumerate(zip(
            path['observations'],
            path['actions'],
            path['rewards'],
            path['next_observations'],
            path['terminals'],
            path['agent_infos'],
            path['env_infos'],
        )):
            mask = np.random.binomial(1, self.masking_probability, size=self.ensemble_size)
            if mask.sum() == 0:
                # Guarantee at least one ensemble member sees the transition.
                mask = np.zeros(self.ensemble_size)
                mask[np.random.randint(self.ensemble_size)] = 1
            self.add_sample(
                observation=obs,
                action=action,
                reward=reward,
                terminal=terminal,
                next_observation=next_obs,
                mask=mask,
                agent_info=agent_info,
                env_info=env_info,
            )
        self.terminate_episode()
class TrainLoop():
    """Distributed diffusion-model training loop with mixed precision, EMA and checkpointing.

    NOTE(review): relies on module-level helpers (``dist_util``, ``logger``,
    ``bf``, ``find_resume_checkpoint``, ``get_blob_logdir``, ...) defined
    outside this chunk.
    """

    def __init__(self, *, model, diffusion, data, batch_size, microbatch, lr, ema_rate, log_interval, save_interval, resume_checkpoint, use_fp16=False, fp16_scale_growth=0.001, schedule_sampler=None, weight_decay=0.0, lr_anneal_steps=0):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # microbatch <= 0 means "no gradient accumulation": use the full batch.
        self.microbatch = (microbatch if (microbatch > 0) else batch_size)
        self.lr = lr
        # ema_rate may be a single float or a comma-separated string of rates.
        self.ema_rate = ([ema_rate] if isinstance(ema_rate, float) else [float(x) for x in ema_rate.split(',')])
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = (schedule_sampler or UniformSampler(diffusion))
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.step = 0
        self.resume_step = 0
        # Effective batch size across all ranks.
        self.global_batch = (self.batch_size * dist.get_world_size())
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(model=self.model, use_fp16=self.use_fp16, fp16_scale_growth=fp16_scale_growth)
        self.opt = AdamW(self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            self._load_optimizer_state()
            # Model weights were already restored above; restore one EMA copy per rate.
            self.ema_params = [self._load_ema_parameters(rate) for rate in self.ema_rate]
        else:
            self.ema_params = [copy.deepcopy(self.mp_trainer.master_params) for _ in range(len(self.ema_rate))]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(self.model, device_ids=[dist_util.dev()], output_device=dist_util.dev(), broadcast_buffers=False, bucket_cap_mb=128, find_unused_parameters=False)
        else:
            if (dist.get_world_size() > 1):
                logger.warn('Distributed training requires CUDA. Gradients will not be synchronized properly!')
            self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (rank 0 only) and broadcast to all ranks."""
        resume_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if (dist.get_rank() == 0):
                logger.log(f'loading model from checkpoint: {resume_checkpoint}...')
                self.model.load_state_dict(dist_util.load_state_dict(resume_checkpoint, map_location=dist_util.dev()))
        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        """Return EMA master params for ``rate``, restored from a checkpoint when one exists."""
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if (dist.get_rank() == 0):
                logger.log(f'loading EMA from checkpoint: {ema_checkpoint}...')
                state_dict = dist_util.load_state_dict(ema_checkpoint, map_location=dist_util.dev())
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        # Broadcast (rank-0-loaded or fresh) EMA params to all ranks.
        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        """Restore optimizer state saved next to the main checkpoint, if present."""
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        opt_checkpoint = bf.join(bf.dirname(main_checkpoint), f'opt{self.resume_step:06}.pt')
        if bf.exists(opt_checkpoint):
            logger.log(f'loading optimizer state from checkpoint: {opt_checkpoint}')
            state_dict = dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
            self.opt.load_state_dict(state_dict)

    def run_loop(self):
        """Train until ``lr_anneal_steps`` is reached (or indefinitely when it is 0)."""
        while ((not self.lr_anneal_steps) or ((self.step + self.resume_step) < self.lr_anneal_steps)):
            (batch, cond) = next(self.data)
            self.run_step(batch, cond)
            if ((self.step % self.log_interval) == 0):
                logger.dumpkvs()
            if ((self.step % self.save_interval) == 0):
                self.save()
                # Allow tests (env flag) to exit early after one checkpointed step.
                if (os.environ.get('DIFFUSION_TRAINING_TEST', '') and (self.step > 0)):
                    return
            self.step += 1
        # Save a final checkpoint unless the last step already saved one.
        if (((self.step - 1) % self.save_interval) != 0):
            self.save()

    def run_step(self, batch, cond):
        """One optimization step: forward/backward, optimizer update, EMA and LR bookkeeping."""
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        if took_step:
            # EMA only advances when the (possibly fp16-scaled) step was actually applied.
            self._update_ema()
        self._anneal_lr()
        self.log_step()

    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of ``batch``/``cond``."""
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
            micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
            last_batch = ((i + self.microbatch) >= batch.shape[0])
            (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
            if (last_batch or (not self.use_ddp)):
                losses = compute_losses()
            else:
                # Skip DDP gradient sync on all but the last microbatch.
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                # Feed per-timestep losses back into the importance sampler.
                self.schedule_sampler.update_with_local_losses(t, losses['loss'].detach())
            loss = (losses['loss'] * weights).mean()
            log_loss_dict(self.diffusion, t, {k: (v * weights) for (k, v) in losses.items()})
            self.mp_trainer.backward(loss)

    def _update_ema(self):
        """Blend the current master params into each EMA copy."""
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)

    def _anneal_lr(self):
        """Linearly decay the learning rate to 0 over ``lr_anneal_steps``; no-op when 0."""
        if (not self.lr_anneal_steps):
            return
        frac_done = ((self.step + self.resume_step) / self.lr_anneal_steps)
        lr = (self.lr * (1 - frac_done))
        for param_group in self.opt.param_groups:
            param_group['lr'] = lr

    def log_step(self):
        """Record the global step and the cumulative number of samples seen."""
        logger.logkv('step', (self.step + self.resume_step))
        logger.logkv('samples', (((self.step + self.resume_step) + 1) * self.global_batch))

    def save(self):
        """Save model weights, all EMA copies, and (rank 0 only) optimizer state."""
        def save_checkpoint(rate, params):
            # rate == 0 denotes the raw (non-EMA) model weights.
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if (dist.get_rank() == 0):
                logger.log(f'saving model {rate}...')
                if (not rate):
                    filename = f'model{(self.step + self.resume_step):06d}.pt'
                else:
                    filename = f'ema_{rate}_{(self.step + self.resume_step):06d}.pt'
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), 'wb') as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.mp_trainer.master_params)
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if (dist.get_rank() == 0):
            with bf.BlobFile(bf.join(get_blob_logdir(), f'opt{(self.step + self.resume_step):06d}.pt'), 'wb') as f:
                th.save(self.opt.state_dict(), f)
        # Keep ranks in lockstep so no rank races ahead while rank 0 writes files.
        dist.barrier()
def ground_truth(data, poly=33800):
    """Bit-serial reflected CRC-16 over ``data``.

    Initial value 0xFFFF; the final value is complemented and byte-swapped.
    The default polynomial 33800 (0x8408) is the reflected form of 0x1021 —
    presumably the X-25/Kermit CRC family; confirm against the protocol spec.
    """
    crc = 0xFFFF
    for byte in data:
        bits = byte & 0xFF
        for _ in range(8):
            # Feed the data bit in by comparing it with the CRC's LSB.
            feedback = (crc ^ bits) & 1
            crc >>= 1
            if feedback:
                crc ^= poly
            bits >>= 1
    # Final complement, then swap the two bytes.
    crc = ~crc & 0xFFFF
    return ((crc << 8) | (crc >> 8)) & 0xFFFF
class CombinatorialObject(SageObject):
    """Wrapper around a list providing value-based comparison and hashing.

    Comparisons against another ``CombinatorialObject`` compare the wrapped
    lists; comparisons against anything else compare the wrapped list with
    the other object directly.  The hash is derived from ``str`` of the
    wrapped list and cached after the first computation.
    """

    def __init__(self, l, copy=True):
        # When copy is False the caller's list is aliased, not duplicated.
        self._list = list(l) if copy else l
        self._hash = None

    def _cmp_target(self, other):
        # The value the wrapped list should be compared against.
        return other._list if isinstance(other, CombinatorialObject) else other

    def __str__(self):
        return str(self._list)

    def _repr_(self):
        return repr(self._list)

    def __eq__(self, other):
        return self._list == self._cmp_target(other)

    def __lt__(self, other):
        return self._list < self._cmp_target(other)

    def __le__(self, other):
        return self._list <= self._cmp_target(other)

    def __gt__(self, other):
        return self._list > self._cmp_target(other)

    def __ge__(self, other):
        return self._list >= self._cmp_target(other)

    def __ne__(self, other):
        return self._list != self._cmp_target(other)

    def __add__(self, other):
        return self._list + other

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(str(self._list))
        return self._hash

    def __bool__(self) -> bool:
        return bool(self._list)

    def __len__(self) -> Integer:
        return len(self._list)

    def __getitem__(self, key):
        return self._list[key]

    def __iter__(self):
        return iter(self._list)

    def __contains__(self, item):
        return item in self._list

    def index(self, key):
        return self._list.index(key)
class LookupType(numba.types.Type):
    """Numba type for a lookup object.

    NOTE(review): the 'ak.' prefix suggests this belongs to Awkward Array's
    numba integration — confirm against that project's source.
    """
    # Shared array type: 1-D, C-contiguous, pointer-sized integers.
    arraytype = numba.types.Array(numba.intp, 1, 'C')

    def __init__(self):
        # Fixed display name used by numba's type system for this type.
        super().__init__(name='ak.LookupType()')
def __markovclient(args, network, client):
    """Build a tgen client entry for ``client`` using the markov-client rc file."""
    name = client['name']
    country = client['country_code']
    return __tgen_client(args, network, name, country, TGENRC_MARKOVCLIENT_FILENAME)
# NOTE(review): these two bare calls look like flattened
# @register_model_architecture('transformer_lm', ...) decorators from the
# original fairseq source (registering both names for the function below) —
# confirm against the upstream file before relying on them.
_model_architecture('transformer_lm', 'transformer_lm_wiki103')
_model_architecture('transformer_lm', 'transformer_lm_baevski_wiki103')


def transformer_lm_baevski_wiki103(args):
    """Architecture defaults for the adaptive-input transformer LM on WikiText-103.

    Each ``safe_getattr`` only fills in a default when the caller has not
    already set the attribute; remaining defaults come from
    ``transformer_lm_big``.
    """
    args.decoder_layers = safe_getattr(args, 'decoder_layers', 16)
    args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads', 8)
    args.dropout = safe_getattr(args, 'dropout', 0.3)
    # Adaptive input embeddings tied with the adaptive softmax output layer.
    args.adaptive_input = safe_getattr(args, 'adaptive_input', True)
    args.tie_adaptive_weights = safe_getattr(args, 'tie_adaptive_weights', True)
    args.adaptive_input_cutoff = safe_getattr(args, 'adaptive_input_cutoff', '20000,60000')
    args.adaptive_softmax_cutoff = safe_getattr(args, 'adaptive_softmax_cutoff', '20000,60000')
    args.adaptive_softmax_dropout = safe_getattr(args, 'adaptive_softmax_dropout', 0.2)
    args.attention_dropout = safe_getattr(args, 'attention_dropout', 0.1)
    args.activation_dropout = safe_getattr(args, 'activation_dropout', 0.1)
    args.no_decoder_final_norm = safe_getattr(args, 'no_decoder_final_norm', True)
    args.tie_adaptive_proj = safe_getattr(args, 'tie_adaptive_proj', True)
    transformer_lm_big(args)
def test_simple_aggr_generator(simple_aggr_generator):
    """sql_generate emits COUNT always, COUNT-DISTINCT only with categorical
    columns, and MAX/MIN/AVG only with numerical columns."""
    table_name = 'sample_table'
    # (categorical cols, numerical cols, expected number of queries)
    scenarios = [
        (['cat_col_1', 'cat_col_2'], ['num_col_1', 'num_col_2'], 9),
        ([], ['num_col_1', 'num_col_2'], 7),
        (['cat_col_1', 'cat_col_2'], [], 3),
    ]
    for cat_cols, num_cols, expected_queries in scenarios:
        with patch.object(simple_aggr_generator, '_sample_cat_num_cols', return_value=([], cat_cols, num_cols)):
            generated_sql = simple_aggr_generator.sql_generate(table_name)
        tags = generated_sql['sql_tags']
        assert 'SIMPLE-AGG-COUNT' in tags
        assert ('SIMPLE-AGG-COUNT-DISTINCT' in tags) == bool(cat_cols)
        assert ('SIMPLE-AGG-MAX' in tags) == bool(num_cols)
        assert ('SIMPLE-AGG-MIN' in tags) == bool(num_cols)
        assert ('SIMPLE-AGG-AVG' in tags) == bool(num_cols)
        assert len(generated_sql['queries']) == expected_queries
def test_phi_plus_psi_plus():
    """Repeated scenarios: result is psi+ when the measurement is 00/01,
    and -psi+ (global sign flipped) otherwise."""
    minus_psi_plus = [0, -(0.5 ** 0.5), -(0.5 ** 0.5), 0]
    for trial in range(200):
        k1, _k2, _k3, _k4, a3 = create_scenario(phi_plus, psi_plus, trial)
        state = correct_order(k1.state, k1.keys)
        meas = a3.msg_log[0][2].meas_res
        expected = psi_plus if meas in ([0, 0], [0, 1]) else minus_psi_plus
        assert numpy.array_equal(state, expected)
def is_generalized_cartan_matrix(M):
    """Return True iff ``M`` is a Borcherds–Cartan matrix whose diagonal
    entries are all equal to 2 (the generalized Cartan matrix condition)."""
    if not is_borcherds_cartan_matrix(M):
        return False
    return all(M[i, i] == 2 for i in range(M.ncols()))
class p_iter_fork():
    """Parallel iterator evaluating ``f`` on inputs in forked subprocesses.

    Results come back via pickled ``.sobj`` files (one per child pid) in a
    temporary directory; each child's stdout is captured in a ``.out`` file
    and replayed in the parent.  Yields ``(input, result)`` pairs in
    completion order, not input order.
    """

    def __init__(self, ncpus, timeout=0, verbose=False, reset_interfaces=True):
        # ncpus must be integral (2 or 2.0 is fine, 2.5 is not).
        self.ncpus = int(ncpus)
        if (self.ncpus != ncpus):
            raise TypeError('ncpus must be an integer')
        # timeout == 0 disables the per-worker wall-clock limit.
        self.timeout = float(timeout)
        self.verbose = verbose
        self.reset_interfaces = reset_interfaces

    def __call__(self, f, inputs):
        """Yield ``(v, f(*v))`` for each input ``v``, with up to ncpus children at once."""
        n = self.ncpus
        v = list(inputs)
        import os
        import sys
        import signal
        from sage.misc.persist import loads
        from sage.misc.temporary_file import tmp_dir
        dir = tmp_dir()
        timeout = self.timeout
        workers = {}  # pid -> WorkerData for currently running children
        try:
            while (v or workers):
                # Top up the worker pool from the remaining inputs.
                while (v and (len(workers) < n)):
                    v0 = v.pop(0)
                    with ContainChildren():
                        pid = os.fork()
                        if (not pid):
                            # Child process: compute and write the result; does not return here.
                            self._subprocess(f, dir, *v0)
                    workers[pid] = WorkerData(v0)
                if (len(workers) > 0):
                    T = walltime()
                    if timeout:
                        # Arm an alarm for the worker closest to its deadline (min 0.1s).
                        oldest = min((W.starttime for W in workers.values()))
                        alarm(max((timeout - (T - oldest)), 0.1))
                    try:
                        pid = os.wait()[0]
                        cancel_alarm()
                        W = workers.pop(pid)
                    except AlarmInterrupt:
                        # Deadline hit: kill every worker that exceeded the timeout.
                        # Killed workers stay in ``workers`` and are reaped by a
                        # later os.wait(), then reported as 'NO DATA (timed out)'.
                        for (pid, W) in workers.items():
                            if ((T - W.starttime) > timeout):
                                if self.verbose:
                                    print(('Killing subprocess %s with input %s which took too long' % (pid, W.input)))
                                os.kill(pid, signal.SIGKILL)
                                W.failure = ' (timed out)'
                    except KeyError:
                        # os.wait() reaped a pid we were not tracking; ignore it.
                        pass
                    else:
                        # Normal completion: read back the pickled result...
                        sobj = os.path.join(dir, ('%s.sobj' % pid))
                        try:
                            with open(sobj, 'rb') as file:
                                data = file.read()
                        except IOError:
                            answer = ('NO DATA' + W.failure)
                        else:
                            os.unlink(sobj)
                            try:
                                answer = loads(data, compress=False)
                            except Exception as E:
                                answer = 'INVALID DATA {}'.format(E)
                        # ...and replay anything the child printed.
                        out = os.path.join(dir, ('%s.out' % pid))
                        try:
                            with open(out) as file:
                                sys.stdout.write(file.read())
                            os.unlink(out)
                        except IOError:
                            pass
                        (yield (W.input, answer))
        finally:
            # Always clean up: kill and reap any stragglers, remove the temp dir.
            if workers:
                if self.verbose:
                    print('Killing any remaining workers...')
                    sys.stdout.flush()
                for pid in workers:
                    try:
                        os.kill(pid, signal.SIGKILL)
                    except OSError:
                        pass
                    else:
                        try:
                            os.waitpid(pid, 0)
                        except OSError as msg:
                            if self.verbose:
                                print(msg)
            rmtree(dir)

    def _subprocess(self, f, dir, args, kwds={}):
        """Child-process body: run ``f(*args, **kwds)`` and pickle the result to dir/<pid>.sobj.

        NOTE(review): the mutable default ``kwds={}`` is only read, never
        mutated, so it is harmless here.
        """
        import os
        import sys
        try:
            from importlib import reload
        except ImportError:
            from imp import reload
        from sage.misc.persist import save
        # Redirect this child's stdout into its own .out file for the parent to replay.
        out = os.path.join(dir, ('%s.out' % os.getpid()))
        sys.stdout = open(out, 'w')
        import sage.misc.misc
        reload(sage.misc.misc)
        if self.reset_interfaces:
            # Drop interface state inherited from the parent, when available.
            try:
                from sage.interfaces.quit import invalidate_all
            except ImportError:
                pass
            else:
                invalidate_all()
        value = f(*args, **kwds)
        sobj = os.path.join(dir, ('%s.sobj' % os.getpid()))
        save(value, sobj, compress=False)
def test_mio_archive_update_ex():
    """A solution whose execution raised test exceptions gets its test case
    chopped at the last mutatable statement during archive update."""
    fitness_function = MagicMock()
    archive = MIOArchive(OrderedSet([fitness_function]), 3)

    exec_result = MagicMock()
    exec_result.has_test_exceptions.return_value = True
    chopped_case = MagicMock()

    solution_clone = MagicMock()
    solution_clone.get_fitness_for.return_value = 1.0
    solution_clone.get_last_execution_result.return_value = exec_result
    solution_clone.get_last_mutatable_statement.return_value = 3
    solution_clone.test_case = chopped_case

    candidate = MagicMock()
    candidate.clone.return_value = solution_clone

    assert archive.update([candidate]) is True
    chopped_case.chop.assert_called_with(3)
def scalarize_dict_values(dict_with_tensors: Dict[(str, Tensor)]):
    """Reduce every non-scalar tensor value to its mean; pass all other
    values (including 0-dim tensors) through unchanged."""
    def _scalarize(value):
        if torch.is_tensor(value) and value.dim() != 0:
            return value.mean()
        return value

    return {key: _scalarize(value) for key, value in dict_with_tensors.items()}
class MJVOBJECTS(Structure):
    """ctypes binding for a MuJoCo visualization-objects struct.

    NOTE(review): field meanings are inferred from their names — confirm
    against the corresponding MuJoCo C header before relying on them.
    """
    # Field order and types must match the C struct layout exactly.
    _fields_ = [
        ('nlight', c_int),
        ('ngeom', c_int),
        ('maxgeom', c_int),
        ('lights', (MJVLIGHT * 8)),      # fixed-size inline array of 8 lights
        ('geoms', POINTER(MJVGEOM)),     # pointer to a geom buffer allocated in C
        ('geomorder', POINTER(c_int)),
    ]
def filtering(df_tags, tr_track, va_track, te_track):
    """Merge the per-source tag lists, one-hot encode them, and keep only the
    tags that pass the stats check on all three track splits.

    Returns a tuple ``(filtered_tag, df_binary)`` where ``filtered_tag`` is a
    per-track list of surviving tags (over all tracks) and ``df_binary`` is
    the binary tag matrix restricted to tracks with at least one tag.
    """
    merged = df_tags['cals'] + df_tags['lastfm'] + df_tags['msd500'] + df_tags['allmusic']
    merged = merged.apply(_remove)
    merged = merged.apply(lambda tags: [tag_normalize(t) for t in tags])
    # De-duplicate tags within each track.
    unique_tags = merged.apply(set).apply(list)

    binarizer = MultiLabelBinarizer()
    onehot = binarizer.fit_transform(unique_tags)
    df = pd.DataFrame(onehot, index=list(merged.index), columns=binarizer.classes_)

    # A tag survives only if it passes the check on every split.
    per_split = [_check_stat(df, tracks) for tracks in (tr_track, va_track, te_track)]
    kept = list(per_split[0] & per_split[1] & per_split[2])
    df_all = df[kept]

    # Drop tracks that end up with no surviving tag at all.
    df_binary = df_all.loc[df_all.sum(axis=1) > 0]

    # Per-track tag lists over ALL tracks (not just the non-empty ones).
    filtered_tag = [list(row[row == 1].index) for _, row in df_all.iterrows()]
    return (filtered_tag, df_binary)
class Trainer(): def __init__(self, amortizer, generative_model=None, configurator=None, checkpoint_path=None, max_to_keep=3, default_lr=0.0005, skip_checks=False, memory=False, **kwargs): logger = logging.getLogger() logger.setLevel(logging.INFO) self.amortizer = amortizer self.generative_model = generative_model if (self.generative_model is None): logger.info('Trainer initialization: No generative model provided. Only offline learning mode is available!') if (type(generative_model) is MultiGenerativeModel): _num_models = generative_model.num_models elif (type(amortizer) is AmortizedModelComparison): _num_models = amortizer.num_models else: _num_models = kwargs.get('num_models') self.configurator = self._manage_configurator(configurator, num_models=_num_models) self.loss_history = LossHistory() if (memory is True): self.simulation_memory = SimulationMemory(**kwargs.pop('memory_kwargs', {})) elif (type(memory) is SimulationMemory): self.simulation_memory = memory else: self.simulation_memory = None self.replay_buffer = None self.optimizer = None self.default_lr = default_lr self.max_to_keep = max_to_keep if (checkpoint_path is not None): self.checkpoint = tf.train.Checkpoint(model=self.amortizer) self.manager = tf.train.CheckpointManager(self.checkpoint, checkpoint_path, max_to_keep=max_to_keep) self.checkpoint.restore(self.manager.latest_checkpoint) self.loss_history.load_from_file(checkpoint_path) if (self.simulation_memory is not None): self.simulation_memory.load_from_file(checkpoint_path) if self.manager.latest_checkpoint: logger.info('Networks loaded from {}'.format(self.manager.latest_checkpoint)) else: logger.info('Initialized networks from scratch.') else: self.checkpoint = None self.manager = None self.checkpoint_path = checkpoint_path if (not skip_checks): self._check_consistency() def diagnose_latent2d(self, inputs=None, **kwargs): if (type(self.amortizer) is AmortizedPosterior): if (inputs is None): if (self.simulation_memory is None): raise 
ConfigurationError('You should either enable simulation memory or supply the inputs argument.') else: inputs = self.simulation_memory.get_memory() else: inputs = self.configurator(inputs, **kwargs.pop('conf_args', {})) if (type(inputs) is list): (z, _) = self.amortizer.call_loop(inputs, **kwargs.pop('net_args', {})) else: (z, _) = self.amortizer(inputs, **kwargs.pop('net_args', {})) return plot_latent_space_2d(z, **kwargs.pop('plot_args', {})) else: raise NotImplementedError('Latent space diagnostics are only available for type AmortizedPosterior!') def diagnose_sbc_histograms(self, inputs=None, n_samples=None, **kwargs): if (type(self.amortizer) is AmortizedPosterior): if (inputs is None): if (self.simulation_memory is None): raise ConfigurationError('You should either ') else: inputs = self.simulation_memory.get_memory() else: inputs = self.configurator(inputs, **kwargs.pop('conf_args', {})) if (n_samples is None): if (type(inputs) is list): n_sim = np.sum([inp['parameters'].shape[0] for inp in inputs]) n_samples = int(np.ceil((n_sim / 20))) else: n_samples = int(np.ceil((inputs['parameters'].shape[0] / 20))) if (type(inputs) is list): post_samples = self.amortizer.sample_loop(inputs, n_samples=n_samples, **kwargs.pop('net_args', {})) prior_samples = np.concatenate([inp['parameters'] for inp in inputs], axis=0) else: post_samples = self.amortizer(inputs, n_samples, n_samples, **kwargs.pop('net_args', {})) prior_samples = inputs['parameters'] plot_kwargs = kwargs.pop('plot_args', {}) if ((type(self.generative_model) is GenerativeModel) and (plot_kwargs.get('param_names') is None)): plot_kwargs['param_names'] = self.generative_model.param_names return plot_sbc_histograms(post_samples, prior_samples, **plot_kwargs) else: raise NotImplementedError('SBC diagnostics are only available for type AmortizedPosterior!') def load_pretrained_network(self): if ((self.manager is None) or (self.checkpoint is None)): return False status = 
self.checkpoint.restore(self.manager.latest_checkpoint) return status def train_online(self, epochs, iterations_per_epoch, batch_size, save_checkpoint=True, optimizer=None, reuse_optimizer=False, early_stopping=False, use_autograph=True, validation_sims=None, **kwargs): assert (self.generative_model is not None), 'No generative model found. Only offline training is possible!' if use_autograph: _backprop_step = tf.function(backprop_step, reduce_retracing=True) else: _backprop_step = backprop_step self._setup_optimizer(optimizer, epochs, iterations_per_epoch) self.loss_history.start_new_run() validation_sims = self._config_validation(validation_sims, **kwargs.pop('val_model_args', {})) early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs) for ep in range(1, (epochs + 1)): with tqdm(total=iterations_per_epoch, desc=f'Training epoch {ep}', mininterval=TQDM_MININTERVAL) as p_bar: for it in range(1, (iterations_per_epoch + 1)): loss = self._train_step(batch_size, update_step=_backprop_step, **kwargs) self.loss_history.add_entry(ep, loss) avg_dict = self.loss_history.get_running_losses(ep) lr = extract_current_lr(self.optimizer) disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr) p_bar.set_postfix_str(disp_str, refresh=False) p_bar.update(1) self._save_trainer(save_checkpoint) self._validation(ep, validation_sims, **kwargs) if self._check_early_stopping(early_stopper): break if (not reuse_optimizer): self.optimizer = None return self.loss_history.get_plottable() def train_offline(self, simulations_dict, epochs, batch_size, save_checkpoint=True, optimizer=None, reuse_optimizer=False, early_stopping=False, validation_sims=None, use_autograph=True, **kwargs): if use_autograph: _backprop_step = tf.function(backprop_step, reduce_retracing=True) else: _backprop_step = backprop_step if isinstance(self.amortizer, AmortizedModelComparison): data_set = MultiSimulationDataset(simulations_dict, batch_size) else: data_set = 
SimulationDataset(simulations_dict, batch_size) self._setup_optimizer(optimizer, epochs, data_set.num_batches) self.loss_history.start_new_run() validation_sims = self._config_validation(validation_sims, **kwargs.pop('val_model_args', {})) early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs) for ep in range(1, (epochs + 1)): with tqdm(total=data_set.num_batches, desc='Training epoch {}'.format(ep), mininterval=TQDM_MININTERVAL) as p_bar: for (bi, forward_dict) in enumerate(data_set, start=1): input_dict = self.configurator(forward_dict, **kwargs.pop('conf_args', {})) loss = self._train_step(batch_size, _backprop_step, input_dict, **kwargs) self.loss_history.add_entry(ep, loss) avg_dict = self.loss_history.get_running_losses(ep) lr = extract_current_lr(self.optimizer) disp_str = format_loss_string(ep, bi, loss, avg_dict, lr=lr, it_str='Batch') p_bar.set_postfix_str(disp_str, refresh=False) p_bar.update(1) self._save_trainer(save_checkpoint) self._validation(ep, validation_sims, **kwargs) if self._check_early_stopping(early_stopper): break if (not reuse_optimizer): self.optimizer = None return self.loss_history.get_plottable() def train_from_presimulation(self, presimulation_path, optimizer, save_checkpoint=True, max_epochs=None, reuse_optimizer=False, custom_loader=None, early_stopping=False, validation_sims=None, use_autograph=True, **kwargs): self.optimizer = optimizer if use_autograph: _backprop_step = tf.function(backprop_step, reduce_retracing=True) else: _backprop_step = backprop_step self.loss_history.start_new_run() validation_sims = self._config_validation(validation_sims, **kwargs.pop('val_model_args', {})) early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs) file_list = os.listdir(presimulation_path) if (custom_loader is None): custom_loader = self._default_loader file_list = [f for f in file_list if f.endswith('.pkl')] if (max_epochs is not None): if (len(file_list) > max_epochs): 
file_list = file_list[:max_epochs] elif (len(file_list) < max_epochs): file_list = (file_list * int(np.ceil((max_epochs / len(file_list))))) file_list = file_list[:max_epochs] for (ep, current_filename) in enumerate(file_list, start=1): file_path = os.path.join(presimulation_path, current_filename) epoch_data = custom_loader(file_path) if isinstance(epoch_data, dict): index_list = list(epoch_data.keys()) elif isinstance(epoch_data, list): index_list = np.arange(len(epoch_data)) else: raise ValueError(f'Loading a simulation file resulted in a {type(epoch_data)}. Must be a dictionary or a list!') with tqdm(total=len(index_list), desc=f'Training epoch {ep}', mininterval=TQDM_MININTERVAL) as p_bar: for (it, index) in enumerate(index_list, start=1): input_dict = self.configurator(epoch_data[index]) if isinstance(self.amortizer, AmortizedModelComparison): batch_size = input_dict[DEFAULT_KEYS['summary_conditions']].shape[0] else: batch_size = epoch_data[index][DEFAULT_KEYS['sim_data']].shape[0] loss = self._train_step(batch_size, _backprop_step, input_dict, **kwargs) self.loss_history.add_entry(ep, loss) avg_dict = self.loss_history.get_running_losses(ep) lr = extract_current_lr(self.optimizer) disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr) p_bar.set_postfix_str(disp_str, refresh=False) p_bar.update(1) self._save_trainer(save_checkpoint) self._validation(ep, validation_sims, **kwargs) if self._check_early_stopping(early_stopper): break if (not reuse_optimizer): self.optimizer = None return self.loss_history.get_plottable() def train_experience_replay(self, epochs, iterations_per_epoch, batch_size, save_checkpoint=True, optimizer=None, reuse_optimizer=False, buffer_capacity=1000, early_stopping=False, use_autograph=True, validation_sims=None, **kwargs): assert (self.generative_model is not None), 'No generative model found. Only offline training is possible!' 
# --- tail of an epoch-based training method (its `def` line is outside this chunk) ---
        # Optionally compile the backprop step into a tf.function graph for speed.
        if use_autograph:
            _backprop_step = tf.function(backprop_step, reduce_retracing=True)
        else:
            _backprop_step = backprop_step
        self._setup_optimizer(optimizer, epochs, iterations_per_epoch)
        self.loss_history.start_new_run()
        if (self.replay_buffer is None):
            self.replay_buffer = MemoryReplayBuffer(buffer_capacity)
        validation_sims = self._config_validation(validation_sims)
        early_stopper = self._config_early_stopping(early_stopping, validation_sims, **kwargs)
        for ep in range(1, (epochs + 1)):
            with tqdm(total=iterations_per_epoch, desc=f'Training epoch {ep}', mininterval=TQDM_MININTERVAL) as p_bar:
                for it in range(1, (iterations_per_epoch + 1)):
                    # NOTE(review): kwargs.pop removes 'conf_args'/'model_args' on the first
                    # iteration, so later iterations run with empty arg dicts — presumably
                    # intentional, but worth confirming.
                    input_dict = self._forward_inference(batch_size, **kwargs.pop('conf_args', {}), **kwargs.pop('model_args', {}))
                    # Train on a sample drawn from the replay buffer, not the fresh batch directly.
                    self.replay_buffer.store(input_dict)
                    input_dict = self.replay_buffer.sample()
                    loss = _backprop_step(input_dict, self.amortizer, self.optimizer, **kwargs.pop('net_args', {}))
                    self.loss_history.add_entry(ep, loss)
                    avg_dict = self.loss_history.get_running_losses(ep)
                    lr = extract_current_lr(self.optimizer)
                    disp_str = format_loss_string(ep, it, loss, avg_dict, lr=lr)
                    p_bar.set_postfix_str(disp_str, refresh=False)
                    p_bar.update(1)
            self._save_trainer(save_checkpoint)
            self._validation(ep, validation_sims, **kwargs)
            if self._check_early_stopping(early_stopper):
                break
        # Drop the optimizer unless the caller wants to continue training later.
        if (not reuse_optimizer):
            self.optimizer = None
        return self.loss_history.get_plottable()

    def train_rounds(self, rounds, sim_per_round, epochs, batch_size, save_checkpoint=True, optimizer=None, reuse_optimizer=False, early_stopping=False, use_autograph=True, validation_sims=None, **kwargs):
        """Round-based training: each round simulates `sim_per_round` new data sets,
        appends them to the accumulated pool, and runs offline training on the pool.

        Returns the plottable loss history. Requires a generative model.
        """
        assert (self.generative_model is not None), 'No generative model found. Only offline training is possible!'
        logger = logging.getLogger()
        # Total iterations across all rounds: pool grows linearly, so the sum over
        # rounds is the triangular number (rounds + rounds**2) / 2 times batches/sim.
        batches_per_sim = np.ceil((sim_per_round / batch_size))
        sum_total = ((rounds + (rounds ** 2)) / 2)
        iterations_per_epoch = int((batches_per_sim * sum_total))
        self._setup_optimizer(optimizer, epochs, iterations_per_epoch)
        validation_sims = self._config_validation(validation_sims)
        first_round = True
        for r in range(1, (rounds + 1)):
            if first_round:
                logger.info(f'Simulating initial {sim_per_round} data sets for training...')
                simulations_dict = self._forward_inference(sim_per_round, configure=False, **kwargs)
                first_round = False
            else:
                logger.info(f'Simulating new {sim_per_round} data sets and appending to previous...')
                logger.info(f'New total number of simulated data sets for training: {(sim_per_round * r)}')
                simulations_dict_r = self._forward_inference(sim_per_round, configure=False, **kwargs)
                # Concatenate new simulations onto the pool, key by key (None entries skipped).
                for k in simulations_dict.keys():
                    if (simulations_dict[k] is not None):
                        simulations_dict[k] = np.concatenate((simulations_dict[k], simulations_dict_r[k]), axis=0)
            # Reuse the optimizer across rounds so the LR schedule spans all rounds.
            _ = self.train_offline(simulations_dict, epochs, batch_size, save_checkpoint, reuse_optimizer=True, early_stopping=early_stopping, use_autograph=use_autograph, validation_sims=validation_sims, **kwargs)
        if (not reuse_optimizer):
            self.optimizer = None
        return self.loss_history.get_plottable()

    def mmd_hypothesis_test(self, observed_data, reference_data=None, num_reference_simulations=1000, num_null_samples=100, bootstrap=False):
        """MMD two-sample test in summary space.

        Builds a null distribution of MMD values (via bootstrap resampling of the
        reference summaries, or fresh simulations) and returns
        ``(mmd_null_samples, mmd_observed)``.
        """
        if (reference_data is None):
            if (self.generative_model is None):
                raise ArgumentError('If you do not provide reference data, your trainer must have a generative model!')
            reference_data = self.configurator(self.generative_model(num_reference_simulations))
        # Accept either a configured dict or raw arrays as reference/observed data.
        if ((type(reference_data) == dict) and ('summary_conditions' in reference_data.keys())):
            reference_summary = self.amortizer.summary_net(reference_data['summary_conditions'])
        else:
            reference_summary = self.amortizer.summary_net(reference_data)
        if ((type(observed_data) == dict) and ('summary_conditions' in observed_data.keys())):
            observed_summary = self.amortizer.summary_net(observed_data['summary_conditions'])
        else:
            observed_summary = self.amortizer.summary_net(observed_data)
        num_observed = observed_summary.shape[0]
        num_reference = reference_summary.shape[0]
        mmd_null_samples = np.empty(num_null_samples, dtype=np.float32)
        for i in tqdm(range(num_null_samples), mininterval=TQDM_MININTERVAL):
            if bootstrap:
                # Resample reference summaries with replacement.
                bootstrap_idx = np.random.randint(0, num_reference, size=num_observed)
                simulated_summary = tf.gather(reference_summary, bootstrap_idx, axis=0)
            else:
                # Simulate a fresh data set of the same size as the observed one.
                simulated_data = self.configurator(self.generative_model(num_observed))
                simulated_summary = self.amortizer.summary_net(simulated_data['summary_conditions'])
            mmd_null_samples[i] = np.sqrt(maximum_mean_discrepancy(reference_summary, simulated_summary).numpy())
        mmd_observed = np.sqrt(maximum_mean_discrepancy(reference_summary, observed_summary).numpy())
        return (mmd_null_samples, mmd_observed)

    def _config_validation(self, validation_sims, **kwargs):
        """Resolve `validation_sims`: pass dicts through, generate `int` many
        simulations via the generative model, otherwise warn and return None."""
        logger = logging.getLogger()
        if (validation_sims is None):
            return None
        if (type(validation_sims) is dict):
            return validation_sims
        if (type(validation_sims) is int):
            if (self.generative_model is not None):
                vals = self.generative_model(validation_sims, **kwargs)
                logger.info(f'Generated {validation_sims} simulations for validation.')
                return vals
            else:
                logger.warning(('Validation simulations can only be generated if the Trainer is initialized ' + 'with a generative model.'))
                return None
        logger.warning('Type of argument "validation_sims" not understood. No validation simulations were created.')

    def _config_early_stopping(self, early_stopping, validation_sims, **kwargs):
        """Create an EarlyStopper if requested and validation data exists; else None."""
        if early_stopping:
            if (validation_sims is not None):
                early_stopper = EarlyStopper(**kwargs.pop('early_stopping_args', {}))
            else:
                # Early stopping needs a validation signal to monitor.
                logger = logging.getLogger()
                logger.warning('No early stopping will be used, since validation_sims were not provided.')
                early_stopper = None
            return early_stopper
        return None

    def _setup_optimizer(self, optimizer, epochs, iterations_per_epoch):
        """Install the given optimizer, or lazily build a default Adam with a
        cosine-decay schedule spanning all planned iterations."""
        if (optimizer is None):
            if (self.optimizer is None):
                schedule = tf.keras.optimizers.schedules.CosineDecay(self.default_lr, (iterations_per_epoch * epochs), name='lr_decay')
                self.optimizer = tf.keras.optimizers.Adam(schedule, **OPTIMIZER_DEFAULTS)
            else:
                # Keep the existing optimizer (e.g., when resuming across rounds).
                pass
        else:
            self.optimizer = optimizer

    def _save_trainer(self, save_checkpoint):
        """Persist checkpoint, loss history, and (optionally) simulation memory."""
        if ((self.manager is not None) and save_checkpoint):
            self.manager.save()
            self.loss_history.save_to_file(file_path=self.checkpoint_path, max_to_keep=self.max_to_keep)
            if (self.simulation_memory is not None):
                self.simulation_memory.save_to_file(file_path=self.checkpoint_path)

    def _validation(self, ep, validation_sims, **kwargs):
        """Compute and log the validation loss for epoch `ep`, if validation data exists."""
        if (validation_sims is not None):
            conf = self.configurator(validation_sims, **kwargs.pop('val_conf_args', {}))
            val_loss = self.amortizer.compute_loss(conf, **kwargs.pop('net_args', {}))
            self.loss_history.add_val_entry(ep, val_loss)
            val_loss_str = loss_to_string(ep, val_loss)
            logger = logging.getLogger()
            logger.info(val_loss_str)

    def _check_early_stopping(self, early_stopper):
        """Return True when the early stopper recommends halting on the latest val loss."""
        if (early_stopper is not None):
            if early_stopper.update_and_recommend(self.loss_history.last_total_val_loss()):
                logger = logging.getLogger()
                logger.info('Early stopping triggered.')
                return True
        return False

    def _train_step(self, batch_size, update_step, input_dict=None, **kwargs):
        """Run one gradient update; simulate a fresh batch if none is given."""
        if (input_dict is None):
            input_dict = self._forward_inference(batch_size, **kwargs.pop('conf_args', {}), **kwargs.pop('model_args', {}))
        if (self.simulation_memory is not None):
            self.simulation_memory.store(input_dict)
        loss = update_step(input_dict, self.amortizer, self.optimizer, **kwargs.pop('net_args', {}))
        return loss

    def _forward_inference(self, n_sim, configure=True, **kwargs):
        """Simulate `n_sim` data sets and (by default) pass them through the configurator.

        Raises SimulationError when no generative model is available.
        """
        if (self.generative_model is None):
            raise SimulationError('No generative model specified. Only offline learning is available!')
        out_dict = self.generative_model(n_sim, **kwargs.pop('model_args', {}))
        if configure:
            out_dict = self.configurator(out_dict, **kwargs.pop('conf_args', {}))
        return out_dict

    def _manage_configurator(self, config_fun, **kwargs):
        """Return the user-supplied configurator, or pick a default matching the
        amortizer type. Raises for model comparison without `num_models`."""
        if callable(config_fun):
            return config_fun
        else:
            if isinstance(self.amortizer, AmortizedPosterior):
                default_config = DefaultPosteriorConfigurator()
            elif isinstance(self.amortizer, AmortizedLikelihood):
                default_config = DefaultLikelihoodConfigurator()
            elif isinstance(self.amortizer, AmortizedPosteriorLikelihood):
                default_config = DefaultJointConfigurator()
            elif isinstance(self.amortizer, AmortizedModelComparison):
                if (kwargs.get('num_models') is None):
                    raise ConfigurationError(('Either your generative model or amortizer should have "num_models" attribute, or ' + 'you need initialize Trainer with num_models explicitly!'))
                default_config = DefaultModelComparisonConfigurator(kwargs.get('num_models'))
            else:
                raise NotImplementedError((f'Could not initialize configurator based on ' + f'amortizer type {type(self.amortizer)}!'))
            return default_config

    def _check_consistency(self):
        """Smoke-test the generative_model -> configurator -> amortizer -> loss pipeline
        with a tiny simulation; re-raise any failure as ConfigurationError."""
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        if (self.generative_model is not None):
            _n_sim = 2
            try:
                logger.info('Performing a consistency check with provided components...')
                _ = self.amortizer.compute_loss(self.configurator(self.generative_model(_n_sim)))
                logger.info('Done.')
            except Exception as err:
                raise ConfigurationError(('Could not carry out computations of generative_model ->' + f'''configurator -> amortizer -> loss! 
Error trace: {err}'''))

    def _default_loader(self, file_path):
        """Unpickle and return the object stored at `file_path`."""
        with open(file_path, 'rb+') as f:
            loaded_file = pickle_load(f)
        return loaded_file
def train(args): print('training start') (features, labels) = prepare_data(args) print('prepared') train_data = FeatureDataset(features, labels) print(f" max_label {args['number_class']} *** ignore_label {args['ignore_label']} ") print(f' Current number data {len(features)} ') train_loader = DataLoader(dataset=train_data, batch_size=args['batch_size'], shuffle=True, drop_last=True) print(((' Current dataloader length ' + str(len(train_loader))) + ' ')) for MODEL_NUMBER in range(args['start_model_num'], args['model_num'], 1): gc.collect() classifier = pixel_classifier(numpy_class=args['number_class'], dim=args['dim'][(- 1)]) classifier.init_weights() classifier = nn.DataParallel(classifier).to(dev()) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001) classifier.train() iteration = 0 break_count = 0 best_loss = stop_sign = 0 for epoch in range(100): print('Epoch: ', epoch) for (X_batch, y_batch) in train_loader: (X_batch, y_batch) = (X_batch.to(dev()), y_batch.to(dev())) y_batch = y_batch.type(torch.long) X_batch = X_batch[(y_batch != args['ignore_label'])] y_batch = y_batch[(y_batch != args['ignore_label'])] optimizer.zero_grad() y_pred = classifier(X_batch) loss = criterion(y_pred, y_batch) acc = multi_acc(y_pred, y_batch) loss.backward() optimizer.step() iteration += 1 if ((iteration % 1000) == 0): print('Epoch : ', str(epoch), 'iteration', iteration, 'loss', loss.item(), 'acc', acc) if (epoch > 3): if (loss.item() < best_loss): best_loss = loss.item() break_count = 0 else: break_count += 1 if (break_count > 50): stop_sign = 1 print(' Break, Total iters,', iteration, ', at epoch', str(epoch), '') break if (stop_sign == 1): break model_path = os.path.join(args['exp_dir'], (('model_' + str(MODEL_NUMBER)) + '.pth')) MODEL_NUMBER += 1 print('save to:', model_path) torch.save({'model_state_dict': classifier.state_dict()}, model_path)
def check_hexapod_controller(conf): includes_check = ['/usr/local/include', '/usr/include'] libs_check = ['/usr/local/lib', '/usr/lib'] if ('RESIBOTS_DIR' in os.environ): includes_check = ([(os.environ['RESIBOTS_DIR'] + '/include')] + includes_check) libs_check = ([(os.environ['RESIBOTS_DIR'] + '/lib')] + libs_check) if conf.options.controller: includes_check = [(conf.options.controller + '/include')] libs_check = [(conf.options.controller + '/lib')] try: conf.start_msg('Checking for hexapod_controller includes') res = conf.find_file('hexapod_controller/hexapod_controller_simple.hpp', includes_check) conf.end_msg('ok') conf.start_msg('Checking for hexapod_controller libs') res = (res and conf.find_file('libhexapod_controller_simple.a', libs_check)) conf.end_msg('ok') conf.env.INCLUDES_HEXAPOD_CONTROLLER = includes_check conf.env.STLIBPATH_HEXAPOD_CONTROLLER = libs_check conf.env.STLIB_HEXAPOD_CONTROLLER = ['hexapod_controller_simple'] except: conf.end_msg('Not found', 'RED') return return 1
def load_model(log_dir, cp_num, n_classes=5994): model = background_resnet(num_classes=n_classes) print('=> loading checkpoint') checkpoint = torch.load((((log_dir + '/checkpoint_') + str(cp_num).zfill(3)) + '.pth')) model.load_state_dict(checkpoint['state_dict']) return model
def load_output(path): with open(path) as f: elems = [json.loads(l.rstrip()) for l in f] for elem in elems: elem['true'] = torch.tensor(elem['true']).long() elem['logits'] = torch.tensor(elem['logits']).float() return elems
def extract_resource_usage_plot_data(args): free_json_path = f'{args.prefix}/free_rusage.json' if (not os.path.exists(free_json_path)): free_json_path += '.xz' if (not os.path.exists(free_json_path)): logging.warning(f'Unable to find memory resource usage data at {free_json_path}.') return shadow_json_path = f'{args.prefix}/shadow_rusage.json' if (not os.path.exists(shadow_json_path)): shadow_json_path += '.xz' if (not os.path.exists(shadow_json_path)): logging.warning(f'Unable to find memory resource usage data at {shadow_json_path}.') return free_data = load_json_data(free_json_path) shadow_data = load_json_data(shadow_json_path) __extract_resource_usage(args, free_data, shadow_data)
class PegasusConfig(PretrainedConfig): model_type = 'pegasus' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs) def num_attention_heads(self) -> int: return self.encoder_attention_heads def hidden_size(self) -> int: return self.d_model
def test_unionarray():
    """PlaceholderArray buffers in a UnionArray: content lengths stay computable
    while tags+index are real, and become unknown_length otherwise."""

    def union_form():
        # Fresh form dict per call so ak.from_buffers never sees a shared object.
        return {
            'class': 'UnionArray',
            'tags': 'i8',
            'index': 'i64',
            'contents': [
                {'class': 'NumpyArray', 'primitive': 'int64', 'form_key': 'node1'},
                {'class': 'NumpyArray', 'primitive': 'datetime64[D]', 'form_key': 'node2'},
            ],
            'form_key': 'node0',
        }

    def build(buffers):
        return ak.from_buffers(union_form(), 3, buffers, highlevel=False)

    # Placeholder contents only: lengths still derivable from real tags/index.
    layout = build({
        'node0-tags': np.array([0, 0, 1], dtype=np.int8),
        'node0-index': np.array([0, 1, 0], dtype=np.int64),
        'node1-data': PlaceholderArray(numpy, (3,), np.int64),
        'node2-data': PlaceholderArray(numpy, (6,), np.dtype('datetime64[D]')),
    })
    assert layout.length == 3
    assert layout.contents[0].length == 2
    assert layout.contents[1].length == 1

    # Placeholder tags: content lengths cannot be determined.
    layout = build({
        'node0-tags': PlaceholderArray(numpy, (3,), np.int8),
        'node0-index': np.array([0, 1, 0], dtype=np.int64),
        'node1-data': np.array([0, 1, 2], np.int64),
        'node2-data': np.array([0, 1, 2, 3, 4, 5, 6], np.dtype('datetime64[D]')),
    })
    assert layout.length == 3
    assert layout.contents[0].length is unknown_length
    assert layout.contents[1].length is unknown_length

    # Placeholder index: content lengths cannot be determined either.
    layout = build({
        'node0-tags': np.array([0, 0, 1], dtype=np.int8),
        'node0-index': PlaceholderArray(numpy, (3,), np.int64),
        'node1-data': np.array([0, 1, 2], np.int64),
        'node2-data': np.array([0, 1, 2, 3, 4, 5, 6], np.dtype('datetime64[D]')),
    })
    assert layout.length == 3
    assert layout.contents[0].length is unknown_length
    assert layout.contents[1].length is unknown_length

    # Placeholder content with unknown shape: lengths still come from tags/index.
    layout = build({
        'node0-tags': np.array([0, 0, 1], dtype=np.int8),
        'node0-index': np.array([0, 1, 0], dtype=np.int64),
        'node1-data': PlaceholderArray(numpy, (unknown_length,), np.int64),
        'node2-data': PlaceholderArray(numpy, (6,), np.dtype('datetime64[D]')),
    })
    assert layout.length == 3
    assert layout.contents[0].length == 2
    assert layout.contents[1].length == 1

    # Everything placeholder: only the outer length is known.
    layout = build({
        'node0-tags': PlaceholderArray(numpy, (3,), np.int8),
        'node0-index': PlaceholderArray(numpy, (3,), np.int64),
        'node1-data': PlaceholderArray(numpy, (3,), np.int64),
        'node2-data': PlaceholderArray(numpy, (6,), np.dtype('datetime64[D]')),
    })
    assert layout.length == 3
    assert layout.contents[0].length is unknown_length
    assert layout.contents[1].length is unknown_length
def get_male_female_topicsDF(data_dict, gender): dataDF = pd.DataFrame.from_dict(data_dict[gender], orient='index') outlet_gender_topicsDF = pd.json_normalize(dataDF['topic_mean']) outlet_gender_topicsDF.index = dataDF.index outlet_gender_topicsDF = outlet_gender_topicsDF.sort_index() outlet_gender_topicsDF = outlet_gender_topicsDF.transpose() return outlet_gender_topicsDF
class AlignPreTrainedModel(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
def test_expand3(): x = Symbol('x') y = Symbol('y') assert (((((1 / (x * y)) - (x * y)) + 2) * (1 + (x * y))).expand() == (((3 + (1 / (x * y))) + (x * y)) - ((x * y) ** 2)))
def html_to_float(c): if ((not c) or (c[0] != '#')): raise ValueError(("'%s' must be a valid HTML hex color (e.g., '#f07' or '#d6e7da')" % c)) h = c[1:] if (len(h) == 3): h = ('%s%s%s%s%s%s' % (h[0], h[0], h[1], h[1], h[2], h[2])) elif (len(h) != 6): raise ValueError(("color hex string (= '%s') must have length 3 or 6" % h)) return tuple([(int(h[i:(i + 2)], base=16) / 255) for i in [0, 2, 4]])
class RandomGrayscale(object): def __init__(self, p=0.1): self.p = p def __call__(self, img): num_output_channels = (1 if (img.mode == 'L') else 3) if (random.random() < self.p): return F.to_grayscale(img, num_output_channels=num_output_channels) return img def __repr__(self): return (self.__class__.__name__ + '(p={0})'.format(self.p))
def FMA_processor(fma_path): tracks = fma_load(os.path.join(fma_path, 'fma_metadata/tracks.csv')) genres = fma_load(os.path.join(fma_path, 'fma_metadata/genres.csv')) (small_track_split, df_genre_top) = get_track_split(tracks, 'small') total_track = ((small_track_split['train_track'] + small_track_split['valid_track']) + small_track_split['test_track']) pool = multiprocessing.Pool(multiprocessing.cpu_count()) pool.map(fma_resampler, total_track) error_samples = [] error_dir = os.path.join(DATASET, 'fma', 'error') for dirs in os.listdir(error_dir): error_samples.extend(os.listdir(os.path.join(error_dir, dirs))) error_fnames = [int(i.split('.npy')[0]) for i in error_samples] tracks = tracks.drop(error_fnames, axis=0) (annotation_dict, df_filter) = get_annotation(tracks, df_genre_top, fma_path) get_tag_info(df_filter, fma_path) filtered_small = {'train_track': [track_id for track_id in small_track_split['train_track'] if (track_id in annotation_dict.keys())], 'valid_track': [track_id for track_id in small_track_split['valid_track'] if (track_id in annotation_dict.keys())], 'test_track': [track_id for track_id in small_track_split['test_track'] if (track_id in annotation_dict.keys())]} _json_dump(os.path.join(fma_path, 'track_split.json'), filtered_small) print('finish fma extraction', ((len(filtered_small['train_track']) + len(filtered_small['valid_track'])) + len(filtered_small['test_track'])))
def seq(args, sep=',', space=True): nl = line_break() if (not space): nl.space = '' r = [] r.append(args[0]) num = len(args) for i in range((num - 1)): r.append(to_format(sep)) r.append(nl) r.append(args[(i + 1)]) return compose(r)
class pAdicValuation_int(pAdicValuation_base):
    """The p-adic valuation on the ring of integers (and its fraction field)."""

    def _repr_(self):
        """Printable description, e.g. '2-adic valuation'."""
        return ('%s-adic valuation' % self.p())

    def _call_(self, x):
        """Evaluate the valuation: infinity at zero, otherwise x.valuation(p)."""
        if x.is_zero():
            return infinity
        return x.valuation(self._p)

    def uniformizer(self):
        """Return p as an element of the domain."""
        return self.domain()(self.p())

    def residue_ring(self):
        """Return the residue field GF(p)."""
        from sage.rings.finite_rings.finite_field_constructor import GF
        return GF(self.p())

    def _ge_(self, other):
        """Whether this valuation dominates `other`."""
        if other.is_trivial():
            return other.is_discrete_valuation()
        if isinstance(other, pAdicValuation_int):
            return (self.p() == other.p())
        # NOTE(review): super(pAdicValuation_base, self) skips pAdicValuation_base's
        # own _ge_ — looks deliberate to avoid recursion, but worth confirming.
        return super(pAdicValuation_base, self)._ge_(other)

    def _relative_size(self, x):
        """Rough size of x in multiples of the bit length of p (heuristic only)."""
        x = self.domain().coerce(x)
        return ((x.numerator().nbits() + x.denominator().nbits()) // self.p().nbits())

    def simplify(self, x, error=None, force=False, size_heuristic_bound=32):
        """Return an element equal to x up to valuation > `error`, preferring a
        representative with a smaller print representation.

        If `force` is False, elements below `size_heuristic_bound` (per
        _relative_size) are returned unchanged.
        """
        if ((not force) and (self._relative_size(x) <= size_heuristic_bound)):
            return x
        x = self.domain().coerce(x)
        v = self(x)
        if (error is None):
            error = v
        from sage.rings.infinity import infinity
        if (error is infinity):
            return x
        # Anything of valuation > error is indistinguishable from zero here.
        if (error < v):
            return self.domain().zero()
        from sage.rings.rational_field import QQ
        from sage.rings.padics.factory import Qp
        # Reduce x in Qp with just enough precision to preserve it up to `error`.
        precision_ring = Qp(self.p(), ((QQ(error).floor() + 1) - v))
        reduced = precision_ring(x)
        lift = (reduced >> v).lift()
        best = (self.domain()(lift) * (self.p() ** v))
        if (self._relative_size(x) < self._relative_size(best)):
            best = x
        from sage.categories.fields import Fields
        m = (self.p() ** ((QQ(error).floor() + 1) - v))
        if (self.domain() in Fields()):
            # Extended Euclid on (m, lift) to find a small rational
            # reconstruction r[1]/s[1] of the lift modulo m.
            r = (m, lift)
            s = (0, 1)
            while r[1]:
                (qq, rr) = r[0].quo_rem(r[1])
                r = (r[1], rr)
                s = (s[1], (s[0] - (qq * s[1])))
            from sage.arith.misc import GCD as gcd
            if ((s[1] != 0) and (gcd(s[1], r[1]) == 1)):
                rational = ((self.domain()(r[1]) / self.domain()(s[1])) * (self.p() ** v))
                if (self._relative_size(rational) < self._relative_size(best)):
                    best = rational
        # Sanity check: the simplification agrees with x beyond `error`.
        assert (self((x - best)) > error)
        return best

    def inverse(self, x, precision):
        """Return an approximate inverse of x: exact when 1/x lies in the domain,
        otherwise an inverse modulo p^ceil(precision).

        Raises ValueError when x has positive valuation (no inverse exists).
        """
        if (not x.is_zero()):
            y = (~ x)
            if (y in self.domain()):
                return self.domain()(y)
        if (precision <= 0):
            return self.domain().one()
        from sage.rings.infinity import infinity
        if ((self(x) > 0) or (precision is infinity)):
            raise ValueError('element has no approximate inverse in this ring')
        from sage.rings.integer_ring import ZZ
        from sage.rings.rational_field import QQ
        return self.domain()(ZZ(x).inverse_mod((self.p() ** QQ(precision).ceil())))
class Seeds(models.Model): uid = models.CharField('ID', max_length=20, unique=True, blank=False) is_crawled = models.IntegerField('', default=0) other_crawled = models.IntegerField('', default=0) home_crawled = models.IntegerField('', default=0) def __str__(self): return self.uid class Meta(): db_table = 'seed_ids' verbose_name = '' verbose_name_plural = '' app_label = 'weibo_config'
def create_one_set_of_data_for_retraining(dir_name: str, indices_to_remove: List[int]) -> None: with open(constants.MNLI_TRAIN_FILE_NAME) as f: lines = f.readlines() if (not os.path.isdir(dir_name)): os.makedirs(dir_name) else: raise ValueError with open(os.path.join(dir_name, 'train.tsv'), 'w') as f: lines_to_write = [l for (i, l) in enumerate(lines) if ((i - 1) not in indices_to_remove)] f.write(''.join(lines_to_write)) print(f'Wrote {len(lines_to_write)} to {dir_name}') shutil.copyfile(constants.MNLI_EVAL_MATCHED_FILE_NAME, os.path.join(dir_name, 'dev_matched.tsv')) shutil.copyfile(constants.MNLI_EVAL_MISMATCHED_FILE_NAME, os.path.join(dir_name, 'dev_mismatched.tsv'))
def _concat_files(in_paths: List[str], out_path: str): with open(out_path, 'w') as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r)
class LoopBasedReplacementTransformation(NodeTransformer):
    """Base transformer that rewrites a Fortran intrinsic call (named by
    func_name) into an explicit nest of parallel loops over the argument
    arrays. Subclasses fill in the abstract hooks below.
    """

    def __init__(self, ast):
        # Counter used to generate unique 'tmp_parfor_<k>' loop variables.
        self.count = 0
        ParentScopeAssigner().visit(ast)
        self.scope_vars = ScopeVarsDeclarations()
        self.scope_vars.visit(ast)
        self.rvals = []

    def func_name() -> str:
        # Abstract: name of the intrinsic this transformation replaces.
        pass

    def _initialize(self):
        # Abstract: reset per-statement state before parsing a call.
        pass

    def _parse_call_expr_node(self, node: ast_internal_classes.Call_Expr_Node):
        # Abstract: harvest the intrinsic call's arguments.
        pass

    def _summarize_args(self, exec_node: ast_internal_classes.Execution_Part_Node, node: ast_internal_classes.FNode, new_func_body: List[ast_internal_classes.FNode]):
        # Abstract: derive loop ranges / emit any preparatory statements.
        pass

    def _initialize_result(self, node: ast_internal_classes.FNode) -> Optional[ast_internal_classes.BinOp_Node]:
        # Abstract: statement initializing the reduction/result variable, if any.
        pass

    def _generate_loop_body(self, node: ast_internal_classes.FNode) -> ast_internal_classes.BinOp_Node:
        # Abstract: the innermost statement executed per loop iteration.
        pass

    def _skip_result_assignment(self):
        return False
    "\n When replacing Fortran's AST reference to an intrinsic function, we set a dummy variable with VOID type.\n The reason is that at the point, we do not know the types of arguments. For many intrinsics, the return\n type will depend on the input types.\n\n When transforming the AST, we gather all scopes and variable declarations in that scope.\n Then, we can query the types of input arguments and properly determine the return type.\n\n Both the type of the variable and its corresponding Var_Decl_node need to be updated!\n "
    def _update_result_type(self, var: ast_internal_classes.Name_Node):
        # Abstract: fix up the VOID placeholder type of the result variable
        # (see the note above this method).
        pass

    def _parse_array(self, node: ast_internal_classes.Execution_Part_Node, arg: ast_internal_classes.FNode) -> ast_internal_classes.Array_Subscript_Node:
        """Normalize an argument to an array subscript: a bare Name becomes a
        full-range subscript (ALL in every dimension); an existing subscript is
        returned as-is. Returns None implicitly for anything else."""
        if isinstance(arg, ast_internal_classes.Name_Node):
            array_node = ast_internal_classes.Array_Subscript_Node(parent=arg.parent)
            array_node.name = arg
            # Rank comes from the declared sizes of the variable in scope.
            dims = len(self.scope_vars.get_var(node.parent, arg.name).sizes)
            array_node.indices = ([ast_internal_classes.ParDecl_Node(type='ALL')] * dims)
            return array_node
        if isinstance(arg, ast_internal_classes.Array_Subscript_Node):
            return arg

    def _parse_binary_op(self, node: ast_internal_classes.Call_Expr_Node, arg: ast_internal_classes.BinOp_Node) -> Tuple[(ast_internal_classes.Array_Subscript_Node, Optional[ast_internal_classes.Array_Subscript_Node], ast_internal_classes.BinOp_Node)]:
        """Parse a binary-op argument (e.g. a mask expression) into its array
        operand(s) plus a copy of the condition with normalized operands.

        Returns False (not a tuple) when `arg` is not a BinOp_Node.
        Raises TypeError when two array operands disagree in rank.
        """
        if (not isinstance(arg, ast_internal_classes.BinOp_Node)):
            return False
        first_array = self._parse_array(node, arg.lval)
        second_array = self._parse_array(node, arg.rval)
        has_two_arrays = ((first_array is not None) and (second_array is not None))
        if (not has_two_arrays):
            # Array-vs-scalar comparison: only one side drives the loop ranges.
            dominant_array = first_array
            if (dominant_array is None):
                dominant_array = second_array
            cond = copy.deepcopy(arg)
            if (first_array is not None):
                cond.lval = dominant_array
            if (second_array is not None):
                cond.rval = dominant_array
            return (dominant_array, None, cond)
        if (len(first_array.indices) != len(second_array.indices)):
            raise TypeError("Can't parse Fortran binary op with different array ranks!")
        for (left_idx, right_idx) in zip(first_array.indices, second_array.indices):
            if (left_idx.type != right_idx.type):
                raise TypeError("Can't parse Fortran binary op with different array ranks!")
        cond = copy.deepcopy(arg)
        cond.lval = first_array
        cond.rval = second_array
        return (first_array, second_array, cond)

    def _adjust_array_ranges(self, node: ast_internal_classes.FNode, array: ast_internal_classes.Array_Subscript_Node, loop_ranges_main: list, loop_ranges_array: list):
        """Offset the subscripts of `array` so the main loop's index range maps
        onto this array's own (possibly shifted) declared range."""
        for i in range(len(array.indices)):
            idx_var = array.indices[i]
            start_loop = loop_ranges_main[i][0]
            end_loop = loop_ranges_array[i][0]
            difference = (int(end_loop.value) - int(start_loop.value))
            if (difference != 0):
                new_index = ast_internal_classes.BinOp_Node(lval=idx_var, op='+', rval=ast_internal_classes.Int_Literal_Node(value=str(difference)), line_number=node.line_number)
                array.indices[i] = new_index

    def visit_Execution_Part_Node(self, node: ast_internal_classes.Execution_Part_Node):
        """Rewrite every statement containing a matching intrinsic call into a
        nest of Map_Stmt_Node loops; other statements are visited recursively."""
        newbody = []
        for child in node.execution:
            lister = LoopBasedReplacementVisitor(self.func_name())
            lister.visit(child)
            res = lister.nodes
            if ((res is None) or (len(res) == 0)):
                # No matching intrinsic in this statement; recurse and keep it.
                newbody.append(self.visit(child))
                continue
            self.loop_ranges = []
            self._initialize()
            for i in mywalk(child.rval):
                if (isinstance(i, ast_internal_classes.Call_Expr_Node) and (i.name.name == self.func_name())):
                    self._parse_call_expr_node(i)
            self._summarize_args(node, child, newbody)
            self._update_result_type(child.lval)
            init_stm = self._initialize_result(child)
            if (init_stm is not None):
                newbody.append(init_stm)
            body = self._generate_loop_body(child)
            # Wrap the body in one Map loop per range, innermost first.
            range_index = 0
            for i in self.loop_ranges:
                initrange = i[0]
                finalrange = i[1]
                init = ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=('tmp_parfor_' + str((self.count + range_index)))), op='=', rval=initrange, line_number=child.line_number)
                cond = ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=('tmp_parfor_' + str((self.count + range_index)))), op='<=', rval=finalrange, line_number=child.line_number)
                iter = ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=('tmp_parfor_' + str((self.count + range_index)))), op='=', rval=ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=('tmp_parfor_' + str((self.count + range_index)))), op='+', rval=ast_internal_classes.Int_Literal_Node(value='1')), line_number=child.line_number)
                current_for = ast_internal_classes.Map_Stmt_Node(init=init, cond=cond, iter=iter, body=ast_internal_classes.Execution_Part_Node(execution=[body]), line_number=child.line_number)
                body = current_for
                range_index += 1
            newbody.append(body)
            self.count = (self.count + range_index)
        return ast_internal_classes.Execution_Part_Node(execution=newbody)
def narcissus_gen(dataset_path=dataset_path, lab=lab):
    """Generate a Narcissus-style backdoor trigger for CIFAR-10 class `lab`.

    Three stages: (1) train a surrogate ResNet on the target class plus a
    Tiny-ImageNet out-of-distribution pool; (2) warm up a copy on the target
    class alone; (3) optimize an L-inf-bounded additive perturbation that
    minimizes the warmed-up model's loss on the target class. Returns the
    optimized noise as a CPU tensor; also writes a surrogate checkpoint and
    shows a matplotlib preview (side effects).
    """
    noise_size = 32
    # L-inf radius of the trigger (clamped to 2*l_inf_r below, in [-1,1] image space).
    l_inf_r = (16 / 255)
    surrogate_model = ResNet18_201().cuda()
    generating_model = ResNet18_201().cuda()
    surrogate_epochs = 200
    generating_lr_warmup = 0.1
    warmup_round = 5
    generating_lr_tri = 0.01
    gen_round = 1000
    train_batch_size = 350
    patch_mode = 'add'
    transform_surrogate_train = transforms.Compose([transforms.Resize(32), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    ori_train = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=False, transform=transform_train)
    ori_test = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=False, transform=transform_test)
    # Out-of-distribution pool used only for surrogate training.
    outter_trainset = torchvision.datasets.ImageFolder(root=(dataset_path + 'tiny-imagenet-200/train/'), transform=transform_surrogate_train)
    train_label = [get_labels(ori_train)[x] for x in range(len(get_labels(ori_train)))]
    test_label = [get_labels(ori_test)[x] for x in range(len(get_labels(ori_test)))]
    # Indices of the attack's target class within the CIFAR-10 training set.
    train_target_list = list(np.where((np.array(train_label) == lab))[0])
    train_target = Subset(ori_train, train_target_list)
    concoct_train_dataset = concoct_dataset(train_target, outter_trainset)
    surrogate_loader = torch.utils.data.DataLoader(concoct_train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=16)
    poi_warm_up_loader = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    trigger_gen_loaders = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    condition = True
    noise = torch.zeros((1, 3, noise_size, noise_size), device=device)
    surrogate_model = surrogate_model
    criterion = torch.nn.CrossEntropyLoss()
    surrogate_opt = torch.optim.SGD(params=surrogate_model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)
    surrogate_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(surrogate_opt, T_max=surrogate_epochs)
    # --- Stage 1: surrogate training ---
    print('Training the surrogate model')
    for epoch in range(0, surrogate_epochs):
        surrogate_model.train()
        loss_list = []
        for (images, labels) in surrogate_loader:
            (images, labels) = (images.cuda(), labels.cuda())
            surrogate_opt.zero_grad()
            outputs = surrogate_model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            loss_list.append(float(loss.data))
            surrogate_opt.step()
        surrogate_scheduler.step()
        ave_loss = np.average(np.array(loss_list))
        print(('Epoch:%d, Loss: %.03f' % (epoch, ave_loss)))
    save_path = (('./checkpoint/surrogate_pretrain_' + str(surrogate_epochs)) + '.pth')
    torch.save(surrogate_model.state_dict(), save_path)
    # --- Stage 2: warm-up on the target class only ---
    poi_warm_up_model = generating_model
    poi_warm_up_model.load_state_dict(surrogate_model.state_dict())
    poi_warm_up_opt = torch.optim.RAdam(params=poi_warm_up_model.parameters(), lr=generating_lr_warmup)
    poi_warm_up_model.train()
    for param in poi_warm_up_model.parameters():
        param.requires_grad = True
    for epoch in range(0, warmup_round):
        poi_warm_up_model.train()
        loss_list = []
        for (images, labels) in poi_warm_up_loader:
            (images, labels) = (images.cuda(), labels.cuda())
            poi_warm_up_model.zero_grad()
            poi_warm_up_opt.zero_grad()
            outputs = poi_warm_up_model(images)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            loss_list.append(float(loss.data))
            poi_warm_up_opt.step()
        ave_loss = np.average(np.array(loss_list))
        print(('Epoch:%d, Loss: %e' % (epoch, ave_loss)))
    # --- Stage 3: optimize the trigger; model weights are frozen ---
    for param in poi_warm_up_model.parameters():
        param.requires_grad = False
    batch_pert = torch.autograd.Variable(noise.cuda(), requires_grad=True)
    batch_opt = torch.optim.RAdam(params=[batch_pert], lr=generating_lr_tri)
    for minmin in tqdm.notebook.tqdm(range(gen_round)):
        loss_list = []
        for (images, labels) in trigger_gen_loaders:
            (images, labels) = (images.cuda(), labels.cuda())
            new_images = torch.clone(images)
            clamp_batch_pert = torch.clamp(batch_pert, ((- l_inf_r) * 2), (l_inf_r * 2))
            new_images = torch.clamp(apply_noise_patch(clamp_batch_pert, new_images.clone(), mode=patch_mode), (- 1), 1)
            per_logits = poi_warm_up_model.forward(new_images)
            loss = criterion(per_logits, labels)
            loss_regu = torch.mean(loss)
            batch_opt.zero_grad()
            loss_list.append(float(loss_regu.data))
            loss_regu.backward(retain_graph=True)
            batch_opt.step()
        ave_loss = np.average(np.array(loss_list))
        ave_grad = np.sum(abs(batch_pert.grad).detach().cpu().numpy())
        print('Gradient:', ave_grad, 'Loss:', ave_loss)
        # Vanished gradient means the trigger has converged.
        if (ave_grad == 0):
            break
    noise = torch.clamp(batch_pert, ((- l_inf_r) * 2), (l_inf_r * 2))
    best_noise = noise.clone().detach().cpu()
    plt.imshow(np.transpose(noise[0].detach().cpu(), (1, 2, 0)))
    plt.show()
    print('Noise max val:', noise.max())
    return best_noise
def test_dimension_methods(): movieLensDataHandler = AEDataHandler('MovieLensSmall', train_data_path, validation_input_data_path, validation_output_data_path, test_input_data_path, test_output_data_path) assert (8936 == movieLensDataHandler.get_input_dim()) assert (8936 == movieLensDataHandler.get_output_dim())
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None, ignore_index=None, **kwargs):
    """Binary cross-entropy on the mask channel selected per ROI by its class label.

    Slices ``pred`` (per-ROI, per-class mask logits) down to each ROI's own
    class channel and scores it against ``target`` with BCE-with-logits.
    Only ``reduction='mean'`` without ``avg_factor``/``ignore_index`` is
    supported. Returns a 1-element tensor (loss wrapped with ``[None]``).
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    roi_inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Pick each ROI's predicted mask for its ground-truth class.
    pred_slice = pred[roi_inds, label].squeeze(1)
    # Optional per-pixel weights override the per-class weight when provided.
    weight = kwargs['mask_weights'] if 'mask_weights' in kwargs else class_weight
    return F.binary_cross_entropy_with_logits(pred_slice, target, weight=weight, reduction='mean')[None]
def test_default_backend():
    """Setting a backend with default=True must switch both the default backend and the active tensorlib."""
    pyhf.set_backend('jax', default=True)
    assert 'jax' == pyhf.default_backend.name
    assert 'jax' == pyhf.tensorlib.name
def cython(filename, verbose=0, compile_message=False, use_cache=False, create_local_c_file=False, annotate=True, sage_namespace=True, create_local_so_file=False):
    """Compile a ``.pyx`` file with Cython and build the extension module.

    Returns a ``(module_name, target_dir)`` pair identifying the built module.
    Builds are staged in a per-source temporary directory; ``use_cache=True``
    reuses a previous build when the source file is not newer than it.

    Parameters (semantics as exercised below):

    - ``filename`` -- path to the ``.pyx`` source file
    - ``verbose`` -- distutils/Cython verbosity; ``>= 0`` echoes captured messages
    - ``compile_message`` -- write a "Compiling ..." notice to stderr
    - ``use_cache`` -- reuse a previously built extension when still fresh
    - ``create_local_c_file`` -- copy the generated C file (and, with
      ``annotate``, the HTML annotation) into the current directory
    - ``annotate`` -- produce Cython's HTML annotation output
    - ``sage_namespace`` -- pre-import ``sage.all`` in the compiled module
    - ``create_local_so_file`` -- copy the built extension into the current
      directory (and derive the module name from the file's basename)
    """
    if (not filename.endswith('pyx')):
        # Warn only; compilation is still attempted.
        print('Warning: file (={}) should have extension .pyx'.format(filename), file=sys.stderr)
    # Module name: basename for a local .so, otherwise the full (sanitized) path,
    # so distinct source paths get distinct cache directories.
    if create_local_so_file:
        (base, ext) = os.path.splitext(os.path.basename(filename))
    else:
        base = os.path.abspath(filename)
    base = sanitize(base)
    target_dir = os.path.join(spyx_tmp(), base)
    build_dir = os.path.join(target_dir, 'build')
    if os.path.exists(target_dir):
        if use_cache:
            from importlib.machinery import EXTENSION_SUFFIXES
            # Look for an already-built extension in the cache directory.
            for f in os.listdir(target_dir):
                for suffix in EXTENSION_SUFFIXES:
                    if f.endswith(suffix):
                        # Use the first file matching an extension suffix.
                        prev_file = os.path.join(target_dir, f)
                        prev_name = f[:(- len(suffix))]
                        break
                else:
                    # No suffix matched this file; try the next one.
                    continue
                if (os.path.getmtime(filename) <= os.path.getmtime(prev_file)):
                    # Cached build is at least as new as the source: reuse it.
                    return (prev_name, target_dir)
        # Stale (or uncached) build: clear all ordinary files, keep subdirectories.
        for F in os.listdir(target_dir):
            G = os.path.join(target_dir, F)
            if os.path.isdir(G):
                continue
            try:
                os.unlink(G)
            except OSError:
                pass
    else:
        os.makedirs(target_dir, exist_ok=True)
    if create_local_so_file:
        name = base
    else:
        # Append a per-base counter so successive compiles of the same source
        # get fresh module names (Python caches imported extensions).
        global sequence_number
        if (base not in sequence_number):
            sequence_number[base] = 0
        name = ('%s_%s' % (base, sequence_number[base]))
        sequence_number[base] += 1
    if compile_message:
        sys.stderr.write('Compiling {}...\n'.format(filename))
        sys.stderr.flush()
    pyxfile = os.path.join(target_dir, (name + '.pyx'))
    shutil.copy(filename, pyxfile)
    # Library/include/alias configuration shared by all Sage Cython builds.
    (standard_libs, standard_libdirs, standard_includes, aliases) = _standard_libs_libdirs_incdirs_aliases()
    includes = ([os.getcwd()] + standard_includes)
    from Cython.Build import cythonize
    from Cython.Compiler.Errors import CompileError
    import Cython.Compiler.Options
    # Prefer setuptools; fall back to distutils on older environments.
    try:
        from setuptools.dist import Distribution
        from setuptools.extension import Extension
    except ImportError:
        from distutils.dist import Distribution
        from distutils.core import Extension
    from distutils.log import set_verbosity
    set_verbosity(verbose)
    Cython.Compiler.Options.annotate = annotate
    Cython.Compiler.Options.embed_pos_in_docstring = True
    # Pre-import sage.all so the compiled module sees the Sage namespace.
    Cython.Compiler.Options.pre_import = ('sage.all' if sage_namespace else None)
    extra_compile_args = ['-w']  # suppress C compiler warnings
    extra_link_args = []
    ext = Extension(name, sources=[pyxfile], extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=standard_libs, library_dirs=standard_libdirs)
    directives = dict(language_level=3, cdivision=True)
    try:
        # Cython writes its listing file relative to the cwd, so compile from target_dir.
        with restore_cwd(target_dir):
            try:
                from sage.misc.package_dir import cython_namespace_package_support
                with cython_namespace_package_support():
                    (ext,) = cythonize([ext], aliases=aliases, include_path=includes, compiler_directives=directives, quiet=(verbose <= 0), errors_to_stderr=False, use_listing_file=True)
            finally:
                # Capture Cython's messages from the listing file even on failure.
                try:
                    with open((name + '.lis')) as f:
                        cython_messages = f.read()
                except OSError:
                    cython_messages = 'Error compiling Cython file'
    except CompileError:
        # Surface the captured Cython messages as the error text.
        raise RuntimeError(cython_messages.strip())
    if (verbose >= 0):
        # Filter a known harmless Cython deprecation notice before echoing.
        cython_messages = re.sub("^.*The keyword 'nogil' should appear at the end of the function signature line. Placing it before 'except' or 'noexcept' will be disallowed in a future version of Cython.\n", '', cython_messages, 0, re.MULTILINE)
        sys.stderr.write(cython_messages)
        sys.stderr.flush()
    if create_local_c_file:
        shutil.copy(os.path.join(target_dir, ext.sources[0]), os.curdir)
        if annotate:
            shutil.copy(os.path.join(target_dir, (name + '.html')), os.curdir)
    # Subclass that disables option finalization during the build.
    # NOTE(review): given this file's stripped-decorator artifacts, a
    # @staticmethod (or similar) may have been lost above _removed -- confirm
    # against upstream before relying on this class.
    class Distribution_no_finalize_distribution_options(Distribution):
        def _removed(ep):
            return True
    dist = Distribution_no_finalize_distribution_options()
    dist.ext_modules = [ext]
    dist.include_dirs = includes
    buildcmd = dist.get_command_obj('build')
    buildcmd.build_base = build_dir
    buildcmd.build_lib = target_dir
    try:
        # Redirect fd 2 into a .err file so distutils' C compiler chatter is captured.
        with open(os.path.join(target_dir, (name + '.err')), 'w+') as errfile:
            try:
                sys.stderr.flush()
                with redirection(2, errfile, close=False):
                    dist.run_command('build')
            finally:
                errfile.seek(0)
                distutils_messages = errfile.read()
    except Exception as msg:
        msg = ((str(msg) + '\n') + distutils_messages)
        raise RuntimeError(msg.strip())
    if (verbose >= 0):
        sys.stderr.write(distutils_messages)
        sys.stderr.flush()
    if create_local_so_file:
        # Copy whichever built extension file exists into the current directory.
        from importlib.machinery import EXTENSION_SUFFIXES
        for ext in EXTENSION_SUFFIXES:
            path = os.path.join(target_dir, (name + ext))
            if os.path.exists(path):
                shutil.copy(path, os.curdir)
    return (name, target_dir)
def register_Ns3SpectrumWifiPhyHelper_methods(root_module, cls):
    """Register Python bindings for ns3::SpectrumWifiPhyHelper (generated pybindgen code)."""
    # SpectrumWifiPhyHelper(SpectrumWifiPhyHelper const &) -- copy constructor.
    cls.add_constructor([param('ns3::SpectrumWifiPhyHelper const &', 'arg0')])
    # SpectrumWifiPhyHelper() -- default constructor.
    cls.add_constructor([])
    # static SpectrumWifiPhyHelper Default()
    cls.add_method('Default', 'ns3::SpectrumWifiPhyHelper', [], is_static=True)
    # void SetChannel(Ptr<SpectrumChannel>) and the by-name overload.
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
    cls.add_method('SetChannel', 'void', [param('std::string', 'channelName')])
    # private virtual Ptr<WifiPhy> Create(Ptr<Node>, Ptr<NetDevice>) const
    cls.add_method('Create', 'ns3::Ptr< ns3::WifiPhy >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'device')], is_const=True, visibility='private', is_virtual=True)
    return
def build_transformer(num_classes: int, d_model: int, d_ff: int, num_heads: int, input_dim: int, num_encoder_layers: int, num_decoder_layers: int, extractor: str, dropout_p: float, device: torch.device, pad_id: int=0, sos_id: int=1, eos_id: int=2, joint_ctc_attention: bool=False, max_length: int=400) -> nn.DataParallel:
    """Validate hyper-parameters and build a SpeechTransformer wrapped in nn.DataParallel on *device*.

    Raises:
        ParameterError: if ``dropout_p`` is negative or ``input_dim`` /
            ``num_encoder_layers`` / ``num_decoder_layers`` is not strictly
            positive.
    """
    if dropout_p < 0.0:
        raise ParameterError('dropout probability should be positive')
    # BUG FIX: the checks below used '< 0', silently accepting 0 although the
    # error messages (and the model itself) require strictly positive values.
    if input_dim <= 0:
        raise ParameterError('input_size should be greater than 0')
    if num_encoder_layers <= 0:
        raise ParameterError('num_layers should be greater than 0')
    if num_decoder_layers <= 0:
        raise ParameterError('num_layers should be greater than 0')
    model = SpeechTransformer(input_dim=input_dim, num_classes=num_classes, extractor=extractor, d_model=d_model, d_ff=d_ff, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, num_heads=num_heads, encoder_dropout_p=dropout_p, decoder_dropout_p=dropout_p, pad_id=pad_id, sos_id=sos_id, eos_id=eos_id, max_length=max_length, joint_ctc_attention=joint_ctc_attention)
    return nn.DataParallel(model).to(device)
# BUG FIX: the decorator line was truncated to its argument list
# '(scope=\'session\')', which is a syntax error. Restored as a pytest
# session-scoped fixture -- the yield-then-teardown body matches the
# fixture pattern; confirm against the original repository.
@pytest.fixture(scope='session')
def bucket(region_tag):
    """Session-scoped fixture: create a bucket interface once, delete the bucket on teardown."""
    iface = setup_bucket(region_tag)
    yield iface
    iface.delete_bucket()
def load_lw_tree(dataset: str, model_name: str) -> Tuple[(Estimator, Dict[(str, Any)])]:
    """Load a pickled lightweight-tree model state and wrap it as an LWTree estimator.

    Returns the estimator together with the full pickled state dict.
    """
    model_file = MODEL_ROOT / dataset / f'{model_name}.pkl'
    L.info(f'load model from {model_file} ...')
    with open(model_file, 'rb') as fh:
        state = pickle.load(fh)
    # Rebuild the table and the Postgres baseline estimator from the saved state.
    table = load_table(dataset, state['version'])
    pg_est = Postgres(table, state['args'].bins, state['seed'])
    estimator = LWTree(state['model'], model_name, pg_est, table)
    return (estimator, state)
def export_model_v1(model, outdir):
    """Export *model* as a SavedModel whose serving signature takes a raw uint8 RGB image
    and returns an L2-normalized 'global_descriptor'.
    """
    # BUG FIX: the '@tf.function' prefix was stripped from this decorator,
    # leaving only its '(input_signature=...)' argument (a syntax error).
    # Restored -- confirm against the original repository.
    @tf.function(input_signature=[tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')])
    def serving(input_image):
        # Resize to the fixed 640x640 input, then apply ImageNet mean/std
        # normalization expressed on the 0-255 scale.
        image = tf.image.resize(input_image, [640, 640])
        image -= tf.constant([(0.485 * 255), (0.456 * 255), (0.406 * 255)])
        image /= tf.constant([(0.229 * 255), (0.224 * 255), (0.225 * 255)])
        image = tf.reshape(image, [640, 640, 3])
        outputs = model(image[tf.newaxis])
        features = tf.math.l2_normalize(outputs[0])
        return {'global_descriptor': tf.identity(features, name='global_descriptor')}
    tf.saved_model.save(obj=model, export_dir=outdir, signatures={'serving_default': serving})
def is_counted_as_token(token):
    """Return True if *token* should be counted: alphanumeric/dot characters only,
    not a single letter, and not an English stopword.
    """
    allowed_chars = (string.ascii_letters + string.digits) + '.'
    if any(ch not in allowed_chars for ch in token):
        return False
    # A lone letter is not a real token.
    if len(token) == 1 and all(ch in string.ascii_letters for ch in token):
        return False
    return token not in stopwords.words('english')
class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject):
    # Auto-generated dummy object: stands in for the real model class when the
    # 'torch' backend is not installed; instantiation raises a helpful error.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an ImportError-style message pointing at the missing backend.
        requires_backends(self, ['torch'])
def _list_of_elem_or_str_to_json(trace):
    """Serialize a non-empty trace: Element entries via to_json(), everything else unchanged."""
    assert trace
    return [entry.to_json() if isinstance(entry, Element) else entry for entry in trace]
def load_image(img_path):
    """Read an image file and return it as a CHW float32 torch tensor scaled to [0, 1]."""
    bgr = cv2.imread(img_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    # HWC -> CHW, then normalize to [0, 1].
    chw = np.transpose(rgb, (2, 0, 1)).astype(np.float32, copy=False)
    chw /= 255
    return torch.as_tensor(chw)
def _border(border): if isinstance(border, tuple): if (len(border) == 2): (left, top) = (right, bottom) = border elif (len(border) == 4): (left, top, right, bottom) = border else: left = top = right = bottom = border return (left, top, right, bottom)
class MultiResHashGrid(nn.Module):
    """Multi-resolution hash-grid positional encoding (Instant-NGP, Müller et al. 2022).

    Builds ``n_levels`` grids whose resolutions grow geometrically from
    ``base_resolution`` to ``finest_resolution``; each level contributes
    ``n_features_per_level`` features, concatenated on the last dimension.
    """

    def __init__(self, dim: int, n_levels: int=16, n_features_per_level: int=2, log2_hashmap_size: int=15, base_resolution: int=16, finest_resolution: int=512):
        super().__init__()
        self.dim = dim
        self.n_levels = n_levels
        self.n_features_per_level = n_features_per_level
        self.log2_hashmap_size = log2_hashmap_size
        self.base_resolution = base_resolution
        self.finest_resolution = finest_resolution
        # Per-level growth factor b = exp((ln N_max - ln N_min) / (L - 1)),
        # with L = n_levels (Instant-NGP eq. 3).
        # BUG FIX: the divisor was (base_resolution - 1); the paper uses
        # (n_levels - 1). For the defaults both equal 15, so default behavior
        # is unchanged; non-default configurations now follow the paper.
        if n_levels > 1:
            b = math.exp((math.log(finest_resolution) - math.log(base_resolution)) / (n_levels - 1))
        else:
            b = 1.0
        levels = []
        for level_idx in range(n_levels):
            resolution = math.floor(base_resolution * (b ** level_idx))
            # Dense storage when the grid fits; otherwise a hash table capped
            # at 2**log2_hashmap_size entries.
            hashmap_size = min(resolution ** dim, 2 ** log2_hashmap_size)
            levels.append(_HashGrid(dim=dim, n_features=n_features_per_level, hashmap_size=hashmap_size, resolution=resolution))
        self.levels = nn.ModuleList(levels)
        self.input_dim = dim
        self.output_dim = n_levels * n_features_per_level

    def forward(self, x: torch.Tensor):
        """Encode *x* by concatenating every level's features along the last dimension."""
        return torch.cat([level(x) for level in self.levels], dim=-1)
def encode_fixed_dt(Times, Xreal, length, deltaT=(1.0 / 180.0)):
    """Resample irregularly-timed sequences onto a fixed-step grid.

    For each sequence n, sample i is written to slot round((t_i - t_0)/deltaT);
    out-of-order timestamps are skipped and writing stops once a slot falls
    past *length*. Returns (X, Xobs) where Xobs marks observed slots with 1.
    """
    N = len(Times)
    assert N > 0 and len(Xreal) == N
    # Feature dimension: 1 for flat sequences, else width of each sample.
    D = 1 if len(Xreal[0].shape) == 1 else len(Xreal[0][0])
    X = np.zeros((N, length, D))
    Xobs = np.zeros((N, length, 1))
    for n, (times_n, x_n) in enumerate(zip(Times, Xreal)):
        assert len(x_n) == len(times_n)
        # The first sample always lands in slot 0.
        X[n, 0] = x_n[0]
        Xobs[n, 0] = 1
        for i in range(1, len(x_n)):
            if times_n[i] < times_n[i - 1]:
                continue  # ignore non-monotone timestamps
            slot = int(round((times_n[i] - times_n[0]) / deltaT))
            if slot >= length:
                break  # past the end of the fixed grid
            X[n, slot] = x_n[i]
            Xobs[n, slot] = 1
    return (X, Xobs)
class CmdFileLineBreakpoint(Breakpoint):
    """Breakpoint addressed by a line number in the current file, written as '::<line>'."""
    type = 'file-line(cmd)'
    # Command form: two colons followed by the line number.
    pattern = re.compile('^::[0-9]+')

    def __init__(self, text, cond=None, index=(- 1)) -> None:
        super().__init__(text, cond, index)
        # Drop the leading '::' and keep the numeric line.
        self.file_line = int(text[2:])
def test_check_is_fitted():
    """check_is_fitted rejects classes/strings, raises NotFittedError before fit,
    honours custom messages, and returns None once the estimator is fitted."""
    # A class object or a plain string is a usage error, not a fit problem.
    with pytest.raises(TypeError):
        check_is_fitted(ARDRegression)
    with pytest.raises(TypeError):
        check_is_fitted('SVR')
    ard_est = ARDRegression()
    svr_est = SVR()
    try:
        for unfitted in (ard_est, svr_est):
            with pytest.raises(NotFittedError):
                check_is_fitted(unfitted)
    except ValueError:
        assert False, 'check_is_fitted failed with ValueError'
    # Custom messages interpolate the estimator class name for %(name)s.
    with pytest.raises(ValueError, match='Random message ARDRegression, ARDRegression'):
        check_is_fitted(ard_est, msg='Random message %(name)s, %(name)s')
    with pytest.raises(AttributeError, match='Another message SVR, SVR'):
        check_is_fitted(svr_est, msg='Another message %(name)s, %(name)s')
    # After fitting, the check passes silently.
    for est in (ard_est, svr_est):
        est.fit(*make_blobs())
        assert check_is_fitted(est) is None
def test_unknown_column_type():
    """Converting an RDataFrame column of a non-primitive struct type back to
    awkward must raise TypeError."""
    example1 = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])
    data_frame = ak.to_rdataframe({'one_float': example1})
    # JIT-compile a C++ helper that defines a struct-typed column 'two_ints'.
    compiler('\n struct TwoInts {\n int a, b;\n };\n\n template<typename T>\n ROOT::RDF::RNode MyTransformation_to_TwoInts(ROOT::RDF::RNode df) {\n auto myFunc = [](T x){ return TwoInts{(int)x, (int)2*x};};\n return df.Define("two_ints", myFunc, {"one_float"});\n }\n ')
    # Instantiate the template for the actual column type and apply it.
    data_frame_transformed = ROOT.MyTransformation_to_TwoInts[data_frame.GetColumnType('one_float')](ROOT.RDF.AsRNode(data_frame))
    # TwoInts is not a type awkward can ingest, so conversion must fail.
    with pytest.raises(TypeError, match="column's type"):
        ak.from_rdataframe(data_frame_transformed, columns=('two_ints',))
# BUG FIX: the decorator was truncated to the residue '_arg_scope';
# restored as slim's '@add_arg_scope' (the conventional decorator on this
# well-known slim ResNet utility) -- confirm against the original repository.
@add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, outputs_collections=None):
    """Stack ResNet blocks, switching to atrous (dilated) convolution once the
    requested *output_stride* is reached so the nominal stride is preserved.

    Raises ValueError if *output_stride* cannot be met exactly.
    """
    current_stride = 1  # product of unit strides applied so far
    rate = 1            # current atrous rate
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for (i, unit) in enumerate(block.args):
                if (output_stride is not None) and (current_stride > output_stride):
                    raise ValueError('The target output_stride cannot be reached.')
                with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                    if (output_stride is not None) and (current_stride == output_stride):
                        # Target stride reached: replace further striding with dilation.
                        net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        net = block.unit_fn(net, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    if (output_stride is not None) and (current_stride != output_stride):
        raise ValueError('The target output_stride cannot be reached.')
    return net
class Classifier_Module2(nn.Module):
    """ASPP-style classifier head: parallel dilated conv branches concatenated,
    an (optionally SE-gated) bottleneck, and a dropout + 1x1 conv prediction head."""

    def __init__(self, inplanes, dilation_series, padding_series, num_classes, droprate=0.1, use_se=True):
        super(Classifier_Module2, self).__init__()
        # Branch 0: plain 1x1 conv; branches 1..k: 3x3 convs with the given dilations.
        self.conv2d_list = nn.ModuleList()
        self.conv2d_list.append(nn.Sequential(*[nn.Conv2d(inplanes, 256, kernel_size=1, stride=1, padding=0, dilation=1, bias=True), nn.GroupNorm(num_groups=32, num_channels=256, affine=True), nn.ReLU(inplace=True)]))
        for (dilation, padding) in zip(dilation_series, padding_series):
            self.conv2d_list.append(nn.Sequential(*[nn.Conv2d(inplanes, 256, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.GroupNorm(num_groups=32, num_channels=256, affine=True), nn.ReLU(inplace=True)]))
        # Bottleneck fuses the concatenated 256*(k+1) channels back to 256,
        # optionally through a squeeze-and-excitation block.
        if use_se:
            self.bottleneck = nn.Sequential(*[SEBlock((256 * (len(dilation_series) + 1))), nn.Conv2d((256 * (len(dilation_series) + 1)), 256, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.GroupNorm(num_groups=32, num_channels=256, affine=True)])
        else:
            self.bottleneck = nn.Sequential(*[nn.Conv2d((256 * (len(dilation_series) + 1)), 256, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.GroupNorm(num_groups=32, num_channels=256, affine=True)])
        self.head = nn.Sequential(*[nn.Dropout2d(droprate), nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=False)])
        # NOTE(review): this loop iterates the ModuleList directly, so each m is
        # an nn.Sequential and NO isinstance branch below ever matches -- the
        # branch convs keep PyTorch's default init. Iterating m.modules() was
        # probably intended; left unchanged here, confirm against upstream.
        for m in self.conv2d_list:
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                m.bias.data.zero_()
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d) or isinstance(m, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Iterating an nn.Sequential yields its children, so these branches DO
        # match for the bottleneck's Conv2d / GroupNorm layers.
        for m in self.bottleneck:
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
                m.bias.data.zero_()
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d) or isinstance(m, nn.GroupNorm) or isinstance(m, nn.LayerNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Final classifier conv gets a small-variance normal init.
        for m in self.head:
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.001)

    def forward(self, x, get_feat=False):
        # Run every branch on x and concatenate along channels.
        out = self.conv2d_list[0](x)
        for i in range((len(self.conv2d_list) - 1)):
            out = torch.cat((out, self.conv2d_list[(i + 1)](x)), 1)
        out = self.bottleneck(out)
        if get_feat:
            # Expose the pre-classifier feature map alongside the logits.
            out_dict = {}
            out = self.head[0](out)   # dropout
            out_dict['feat'] = out
            out = self.head[1](out)   # 1x1 classifier conv
            out_dict['out'] = out
            return out_dict
        else:
            out = self.head(out)
            return out
# BUG FIX: the '@dataclass' decorator was stripped by extraction -- without it
# the bare 'string: str' annotation never creates the attribute the methods
# read. Restored; assumes 'dataclass' is imported at the top of this file.
@dataclass
class AnswerString():
    """A raw answer string with SQuAD-style normalization helpers."""
    string: str

    def get_normalized_answer(self) -> str:
        """Lowercase, strip punctuation and articles (a/an/the), collapse whitespace."""
        def remove_articles(text: str):
            return re.sub('\\b(a|an|the)\\b', ' ', text)

        def white_space_fix(text: str):
            return ' '.join(text.split())

        def remove_punc(text: str):
            exclude = set(string.punctuation)
            return ''.join(ch for ch in text if ch not in exclude)

        def lower(text: str):
            return text.lower()
        return white_space_fix(remove_articles(remove_punc(lower(self.string))))

    def get_tokens(self) -> List[str]:
        """Whitespace tokens of the normalized answer."""
        return whitespace_tokenize(self.get_normalized_answer())
def load_temporarl_edgelist(fname):
    """Load a temporal edge list into a list of per-timestamp nx.Graph snapshots.

    Each line must contain at least three numbers: timestamp, u, v. Lines are
    assumed to be grouped by timestamp; a new snapshot starts whenever the
    timestamp changes.
    """
    with open(fname, 'r') as fh:
        all_lines = fh.readlines()
    snapshots = []
    current_time = 0
    snapshot = nx.Graph()
    for raw_line in all_lines:
        fields = re.findall('[-+]?\\d*\\.\\d+|[-+]?\\d+', raw_line)
        stamp = int(fields[0])
        src = int(fields[1])
        dst = int(fields[2])
        if stamp != current_time:
            # Timestamp changed: close the current snapshot and start a new one.
            snapshots.append(snapshot)
            snapshot = nx.Graph()
            current_time = stamp
        snapshot.add_edge(src, dst)
    snapshots.append(snapshot)
    print('maximum time stamp is ' + str(len(snapshots)))
    return snapshots
def predict(trainer: Trainer, model: torch.nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])]) -> Tuple[(np.ndarray, np.ndarray, Optional[float])]:
    """Run one no-grad evaluation step and return (predictions, label ids, mean loss).

    Tensors in *inputs* are moved (in place, mutating the caller's dict) to the
    trainer's device. When any label key is present, the model's first two
    outputs are taken as (loss, logits); otherwise the first output is the
    logits. label_ids and the loss are None when absent (despite the ndarray
    annotation on label_ids).

    Raises:
        ValueError: past_index >= 0 (past-state handling) is not supported here.
    """
    if trainer.args.past_index >= 0:
        raise ValueError
    # Labels may live under any of these keys depending on the task head.
    has_labels = any(inputs.get(k) is not None for k in ['labels', 'lm_labels', 'masked_lm_labels'])
    for k, v in inputs.items():
        if isinstance(v, torch.Tensor):
            inputs[k] = v.to(trainer.args.device)
    step_eval_loss = None
    with torch.no_grad():
        outputs = model(**inputs)
        if has_labels:
            step_eval_loss, logits = outputs[:2]
        else:
            logits = outputs[0]
    preds = logits.detach().cpu().numpy()
    # BUG FIX: label_ids was only assigned inside the branch below, so the
    # return raised NameError whenever no 'labels' key was present.
    label_ids = None
    if inputs.get('labels') is not None:
        label_ids = inputs['labels'].detach().cpu().numpy()
    if step_eval_loss is not None:
        step_eval_loss = step_eval_loss.mean().item()
    return (preds, label_ids, step_eval_loss)
def parse_json_config(args, config=None, first=False):
    """Parse a JSON config file, recursively following 'base_config_path' chains,
    and merge the result into *args* via add_parsed_config_to_args.

    Raises:
        ValueError: if the config file does not exist.
    """
    if config is None:
        config = args.config
    # BUG FIX: this existence check used to run AFTER open(), so it could never
    # fire (open() had already raised FileNotFoundError). Check first so a
    # missing config yields the intended ValueError.
    if not os.path.exists(config):
        raise ValueError(f'Config {config} does not exists')
    with open(config, 'r') as f:
        output = json.load(f)

    def fix_base_cfg_path(base_config_path, is_relative):
        # Relative base-config paths resolve against this config's directory.
        if is_relative:
            return os.path.join(os.path.dirname(config), base_config_path)
        return base_config_path

    # On the first call, a CLI-supplied base_config_path overrides the file's.
    if first and args.base_config_path:
        output['base_config_path'] = args.base_config_path
    if 'base_config_path' in output:
        base_config_path = output.get('base_config_path')
        is_relative = output.get('base_config_path_is_relative', True)
        if isinstance(base_config_path, list):
            for i in base_config_path:
                parse_json_config(args, config=fix_base_cfg_path(i, is_relative))
        else:
            parse_json_config(args, config=fix_base_cfg_path(base_config_path, is_relative))
        # NOTE(review): placement inferred from the flattened source -- this
        # warning fires whenever a child config changed the recorded base path.
        if args.base_config_path != base_config_path:
            warnings.warn('Config path changed by child')
    add_parsed_config_to_args(args, output)
def train(model=model):
    # CIFAR-100 training loop. NOTE(review): relies on many module-level
    # globals (num_epochs, batch_size, normalize, optimizer, device, args,
    # accuracy, test), and the default 'model=model' binds the module-level
    # model at definition time -- confirm these exist at import.
    for epoch in range(num_epochs):
        # The dataset/loader are rebuilt every 10 epochs (first build at epoch 0).
        if ((epoch % 10) == 0):
            train_set = torchvision.datasets.CIFAR100(root='../CIFAR100', train=True, transform=transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), normalize]), download=True)
            data_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
        for (i, (x, y)) in enumerate(data_loader):
            # Only printed once, at the very first step of the very first epoch.
            if ((i + epoch) == 0):
                print('load model')
            x = x.to(device)
            y = y.to(device)
            model.train()
            output = model(x)
            # NOTE(review): the loss module is re-created every step; hoisting it
            # out of the loop would be equivalent.
            criterion = nn.CrossEntropyLoss()
            criterion = criterion.to(device)
            loss = criterion(output, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            accuracy_result = accuracy(output, y)
            # Periodic progress logging, roughly 20 times per epoch.
            if (((i + 1) % int((50000 / (args.batch * 20)))) == 0):
                print('Epoch[{}/{}], Step [{}/{}], Reconst Loss: {:.4f}, acc: {:.4f}'.format((epoch + 1), num_epochs, (i + 1), len(data_loader), loss.item(), accuracy_result.item()))
        # Checkpoint the full model object after every epoch.
        torch.save(model, 'BottleNetPlusPlus_VGG.pkl')
        # Evaluate every third epoch.
        if ((epoch % 3) == 0):
            test(epoch)
class TFMobileBertForPreTraining():
    # Dummy placeholder: instantiating it just reports that TensorFlow is required.
    def __init__(self, *args, **kwargs):
        requires_tf(self)
# BUG FIX: the '@dataclass' decorator was stripped by extraction -- the
# field(...) default below is meaningless without it. Restored; assumes
# 'dataclass' and 'field' are imported at the top of this file.
@dataclass
class DataArguments():
    """Arguments for locating the training data."""
    data_path: str = field(default=None, metadata={'help': 'Path to the training data.'})
# BUG FIX: the decorator was truncated to the residue '_model'; restored as
# '@register_model' (the conventional model-registry decorator for factory
# functions like this) -- confirm against the original repository.
@register_model
def hrnet_w40(pretrained=True, **kwargs):
    """Construct the HRNet-W40 variant, optionally with pretrained weights."""
    return _create_model('hrnet_w40', pretrained, kwargs)
class AMSGrad(optimizer.Optimizer):
    """AMSGrad optimizer (Reddi et al., 'On the Convergence of Adam and Beyond'):
    Adam with an extra slot ``vhat`` that tracks the elementwise maximum of the
    second-moment estimate and replaces it in the update denominator."""

    def __init__(self, learning_rate=0.01, beta1=0.9, beta2=0.99, epsilon=1e-08, use_locking=False, name='AMSGrad'):
        super(AMSGrad, self).__init__(use_locking, name)
        # Raw (Python) hyper-parameters; tensor copies are made in _prepare().
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None
        # Running beta^t powers, advanced once per step in _finish().
        self._beta1_power = None
        self._beta2_power = None

    def _create_slots(self, var_list):
        # Colocate the shared beta-power variables with the first variable (by name).
        first_var = min(var_list, key=(lambda x: x.name))
        create_new = (self._beta1_power is None)
        if ((not create_new) and context.in_graph_mode()):
            # Recreate when the existing powers live in a different graph.
            create_new = (self._beta1_power.graph is not first_var.graph)
        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = variable_scope.variable(self._beta1, name='beta1_power', trainable=False)
                self._beta2_power = variable_scope.variable(self._beta2, name='beta2_power', trainable=False)
        # Per-variable slots: first moment m, second moment v, running max vhat.
        for v in var_list:
            self._zeros_slot(v, 'm', self._name)
            self._zeros_slot(v, 'v', self._name)
            self._zeros_slot(v, 'vhat', self._name)

    def _prepare(self):
        # Convert hyper-parameters to tensors once per apply.
        self._lr_t = ops.convert_to_tensor(self._lr)
        self._beta1_t = ops.convert_to_tensor(self._beta1)
        self._beta2_t = ops.convert_to_tensor(self._beta2)
        self._epsilon_t = ops.convert_to_tensor(self._epsilon)

    def _apply_dense(self, grad, var):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        # Bias-corrected learning rate, as in Adam.
        lr = ((lr_t * math_ops.sqrt((1 - beta2_power))) / (1 - beta1_power))
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * (1 - beta1_t))
        m_t = state_ops.assign(m, ((beta1_t * m) + m_scaled_g_values), use_locking=self._use_locking)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * (1 - beta2_t))
        v_t = state_ops.assign(v, ((beta2_t * v) + v_scaled_g_values), use_locking=self._use_locking)
        # AMSGrad core: keep the elementwise maximum of v over time.
        # NOTE(review): this assign omits use_locking, unlike the others -- confirm intended.
        vhat = self.get_slot(var, 'vhat')
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, ((lr * m_t) / (v_sqrt + epsilon_t)), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _resource_apply_dense(self, grad, var):
        # NOTE(review): the resource variable is unwrapped to its handle and
        # then fed to state_ops.assign / self.get_slot, mirroring the dense
        # path; this looks fragile for true resource variables -- confirm.
        var = var.handle
        beta1_power = math_ops.cast(self._beta1_power, grad.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, grad.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)
        lr = ((lr_t * math_ops.sqrt((1 - beta2_power))) / (1 - beta1_power))
        m = self.get_slot(var, 'm').handle
        m_scaled_g_values = (grad * (1 - beta1_t))
        m_t = state_ops.assign(m, ((beta1_t * m) + m_scaled_g_values), use_locking=self._use_locking)
        v = self.get_slot(var, 'v').handle
        v_scaled_g_values = ((grad * grad) * (1 - beta2_t))
        v_t = state_ops.assign(v, ((beta2_t * v) + v_scaled_g_values), use_locking=self._use_locking)
        vhat = self.get_slot(var, 'vhat').handle
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, ((lr * m_t) / (v_sqrt + epsilon_t)), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        # Sparse update: decay the full slots, then scatter-add only the touched rows.
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        lr = ((lr_t * math_ops.sqrt((1 - beta2_power))) / (1 - beta1_power))
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * (1 - beta1_t))
        m_t = state_ops.assign(m, (m * beta1_t), use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * (1 - beta2_t))
        v_t = state_ops.assign(v, (v * beta2_t), use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)
        vhat = self.get_slot(var, 'vhat')
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, ((lr * m_t) / (v_sqrt + epsilon_t)), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _apply_sparse(self, grad, var):
        # IndexedSlices path for graph variables.
        return self._apply_sparse_shared(grad.values, var, grad.indices, (lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking)))

    def _resource_scatter_add(self, x, i, v):
        # Scatter-add for resource variables; returns the updated value tensor.
        with ops.control_dependencies([resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
            return x.value()

    def _resource_apply_sparse(self, grad, var, indices):
        return self._apply_sparse_shared(grad, var, indices, self._resource_scatter_add)

    def _finish(self, update_ops, name_scope):
        # Advance the beta powers only after all variable updates have run.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign((self._beta1_power * self._beta1_t), use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign((self._beta2_power * self._beta2_t), use_locking=self._use_locking)
        return control_flow_ops.group(*(update_ops + [update_beta1, update_beta2]), name=name_scope)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet basic block) with optional downsample shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Submodule creation order is kept identical to preserve parameter
        # initialization order (and thus RNG state).
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main path: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut path, projected when a downsample module is supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class TimeLimit(TimeLimitBase):
    """Env wrapper tracking wall-clock start time and elapsed steps.
    (Only reset() is visible in this chunk; the class may define more members elsewhere.)"""

    def reset(self, reset_params=True):
        # Record the wall-clock episode start and zero the step counter
        # before delegating the actual reset to the wrapped env.
        self._episode_started_at = time.time()
        self._elapsed_steps = 0
        return self.env.reset(reset_params)