import numpy as np


def torch_argmin(tensor):
    # Flatten, take the argmin, then map the flat index back to multi-dim coordinates.
    flat_tensor = tensor.view(tensor.numel())
    _, argmin = flat_tensor.min(0)
    return np.unravel_index(int(argmin), tensor.shape)
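# A minimal usage sketch for torch_argmin (values are illustrative, assuming
# torch is available alongside numpy):
import torch

t = torch.tensor([[3.0, 1.0], [0.5, 2.0]])
print(torch_argmin(t))  # (1, 0): the smallest entry, 0.5, sits at row 1, col 0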
import argparse


def _get_test_opt():
    parser = argparse.ArgumentParser(description='Evaluate performance of SARPN on NYU-D v2 test set')
    parser.add_argument('--backbone', default='SENet154', help='select a network as backbone')
    parser.add_argument('--testlist_path', required=True, help='the path of testlist')
    parser.add_argument('--batch_size', type=int, default=1, help='testing batch size')
    parser.add_argument('--root_path', required=True, help='the root path of dataset')
    parser.add_argument('--loadckpt', required=True, help='the path of the loaded model')
    parser.add_argument('--threshold', type=float, default=1.0, help='threshold of the pixels on edges')
    parser.add_argument('--pretrained_dir', type=str, required=True, help='the path of pretrained models')
    return parser.parse_args()
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error):
    line = clean_lines.elided[linenum]
    if not line:
        return
    match = _RE_PATTERN_INCLUDE.search(line)
    if match:
        CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
        return
    # Reset include state on preprocessor branches.
    if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
        include_state.ResetSection()
    fullname = os.path.abspath(filename).replace('\\', '/')
    # Deprecated function-style casts such as int(x).
    match = Search(r'(\bnew\s+)?\b(int|float|double|bool|char|int32|uint32|int64|uint64)(\([^)].*)', line)
    if match:
        matched_new = match.group(1)
        matched_type = match.group(2)
        matched_funcptr = match.group(3)
        if (matched_new is None
                and not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line)
                         or Search(r'\bMockCallback<.*>', line)
                         or Search(r'\bstd::function<.*>', line))
                and not (matched_funcptr
                         and Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr))):
            if (linenum < 2
                    or not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1])
                            or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]))):
                error(filename, linenum, 'readability/casting', 4,
                      'Using deprecated casting style. Use static_cast<%s>(...) instead' % matched_type)
    CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                    'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
    if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                       'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
        pass
    else:
        CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                        'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
    match = Search(r'(?:&\(([^)]+)\)[\w(])|(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
    if match and match.group(1) != '*':
        error(filename, linenum, 'runtime/casting', 4,
              'Are you taking an address of a cast? This is dangerous: could be a temp var. '
              'Take the address before doing the cast, rather than after')
    if linenum + 1 < clean_lines.NumLines():
        extended_line = line + clean_lines.elided[linenum + 1]
    else:
        extended_line = line
    match = Match(r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line)
    if (match and not Search(r'\boperator\W', line)
            and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
        error(filename, linenum, 'runtime/string', 4,
              'For a static/global string constant, use a C style string instead: "%schar %s[]".'
              % (match.group(1), match.group(2)))
    if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
        error(filename, linenum, 'runtime/init', 4,
              'You seem to be initializing a member variable with itself.')
    if file_extension == 'h':
        pass
    if Search(r'\bshort port\b', line):
        if not Search(r'\bunsigned short port\b', line):
            error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"')
    else:
        match = Search(r'\b(short|long(?! +double)|long long)\b', line)
        if match:
            error(filename, linenum, 'runtime/int', 4,
                  'Use int16/int64/etc, rather than the C type %s' % match.group(1))
    match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
    if match and match.group(2) != '0':
        error(filename, linenum, 'runtime/printf', 3,
              'If you can, use sizeof(%s) instead of %s as the 2nd arg to snprintf.'
              % (match.group(1), match.group(2)))
    if Search(r'\bsprintf\b', line):
        error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.')
    match = Search(r'\b(strcpy|strcat)\b', line)
    if match:
        error(filename, linenum, 'runtime/printf', 4,
              'Almost always, snprintf is better than %s' % match.group(1))
    if Search(r'\boperator\s*&\s*\(\s*\)', line):
        error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.')
    if Search(r'\}\s*if\s*\(', line):
        error(filename, linenum, 'readability/braces', 4,
              'Did you mean "else if"? If not, start a new line for "if".')
    printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
    if printf_args:
        match = Match(r'([\w.\->()]+)$', printf_args)
        if match and match.group(1) != '__VA_ARGS__':
            function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1)
            error(filename, linenum, 'runtime/printf', 4,
                  'Potential format string bug. Do %s("%%s", %s) instead.'
                  % (function_name, match.group(1)))
    match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
    if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
        error(filename, linenum, 'runtime/memset', 4,
              'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2)))
    if Search(r'\busing namespace\b', line):
        error(filename, linenum, 'build/namespaces', 5,
              'Do not use namespace using-directives. Use using-declarations instead.')
    # Flag variable-length arrays whose size is not a compile-time constant.
    match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
    if (match and match.group(2) != 'return' and match.group(2) != 'delete'
            and match.group(3).find(']') == -1):
        tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
        is_const = True
        skip_next = False
        for tok in tokens:
            if skip_next:
                skip_next = False
                continue
            if Search(r'sizeof\(.+\)', tok):
                continue
            if Search(r'arraysize\(\w+\)', tok):
                continue
            tok = tok.lstrip('(')
            tok = tok.rstrip(')')
            if not tok:
                continue
            if Match(r'\d+', tok):
                continue
            if Match(r'0[xX][0-9a-fA-F]+', tok):
                continue
            if Match(r'k[A-Z0-9]\w*', tok):
                continue
            if Match(r'(.+::)?k[A-Z0-9]\w*', tok):
                continue
            if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
                continue
            if tok.startswith('sizeof'):
                skip_next = True
                continue
            is_const = False
            break
        if not is_const:
            error(filename, linenum, 'runtime/arrays', 1,
                  "Do not use variable-length arrays. Use an appropriately named "
                  "('k' followed by CamelCase) compile-time constant for the size.")
    match = Match(r'\s*(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))\(.*\);$', line)
    if match and linenum + 1 < clean_lines.NumLines():
        next_line = clean_lines.elided[linenum + 1]
        if not Search(r'^\s*}[\w\*,\s]*;', next_line):
            error(filename, linenum, 'readability/constructors', 3,
                  match.group(1) + ' should be the last thing in the class')
    if file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\':
        error(filename, linenum, 'build/namespaces', 4,
              'Do not use unnamed namespaces in header files. See for more information.')
class Clause_Rate(object):

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        # Average number of clauses per sentence, counted on the constituency parse trees.
        tot_num_clauses = 0
        for so in self.sentence_objs:
            tot_num_clauses += num_clauses(so.const_pt)
        return tot_num_clauses / len(self.sentence_objs)
import os


class _unzip_overlays(dist_build):
    description = 'Unzip downloaded overlays'
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        cmd = self.get_finalized_command('build_py')
        # The original unpacked the second tuple element into `f`, shadowing the
        # filename loop variable below; it is unused, so it is discarded here.
        for (package, _, build_dir, _) in cmd.data_files:
            for (dirpath, dirnames, filenames) in os.walk(build_dir):
                for f in filenames:
                    if f.endswith('.zip'):
                        zip_path = dirpath + '/' + f
                        print('Extracting ' + zip_path)
                        unzip_to_same_folder(zip_path)
class HardData(tx.data.DatasetBase[Example, Example]):

    def __init__(self, hparams=None, device: Optional[torch.device] = None):
        self._hparams = HParams(hparams, self.default_hparams())
        data_source = HardDataSource(self._hparams.dataset.files,
                                     compression_type=self._hparams.dataset.compression_type)
        super().__init__(data_source, hparams, device=device)

    @staticmethod
    def default_hparams():
        # The flattened source omitted a decorator and `self`; texar-pytorch
        # defines default_hparams as a staticmethod, which is assumed here.
        return {**tx.data.DatasetBase.default_hparams(),
                'dataset': {'files': 'data.txt', 'compression_type': None, 'vocab_file': 'vocab.txt'}}

    def process(self, raw_example):
        evt_a, evt_b, evt_c, evt_d = raw_example[0], raw_example[1], raw_example[2], raw_example[3]
        evt_a = map_evt_to_tokens(evt_a)
        evt_a_ids = tokenizer.map_text_to_id(evt_a)
        evt_b = map_evt_to_tokens(evt_b)
        evt_b_ids = tokenizer.map_text_to_id(evt_b)
        evt_c = map_evt_to_tokens(evt_c)
        evt_c_ids = tokenizer.map_text_to_id(evt_c)
        evt_d = map_evt_to_tokens(evt_d)
        evt_d_ids = tokenizer.map_text_to_id(evt_d)
        return {'evt_a': evt_a, 'evt_a_ids': evt_a_ids,
                'evt_b': evt_b, 'evt_b_ids': evt_b_ids,
                'evt_c': evt_c, 'evt_c_ids': evt_c_ids,
                'evt_d': evt_d, 'evt_d_ids': evt_d_ids}

    def collate(self, examples: List[Example]) -> tx.data.Batch:
        evt_a = [ex['evt_a'] for ex in examples]
        evt_a_ids, evt_a_lengths = tx.data.padded_batch(
            [ex['evt_a_ids'] for ex in examples], pad_value=pad_token_id)
        evt_b = [ex['evt_b'] for ex in examples]
        evt_b_ids, evt_b_lengths = tx.data.padded_batch(
            [ex['evt_b_ids'] for ex in examples], pad_value=pad_token_id)
        evt_c = [ex['evt_c'] for ex in examples]
        evt_c_ids, evt_c_lengths = tx.data.padded_batch(
            [ex['evt_c_ids'] for ex in examples], pad_value=pad_token_id)
        evt_d = [ex['evt_d'] for ex in examples]
        evt_d_ids, evt_d_lengths = tx.data.padded_batch(
            [ex['evt_d_ids'] for ex in examples], pad_value=pad_token_id)
        return tx.data.Batch(
            len(examples),
            evt_a=evt_a, evt_a_ids=torch.from_numpy(evt_a_ids), evt_a_lengths=torch.tensor(evt_a_lengths),
            evt_b=evt_b, evt_b_ids=torch.from_numpy(evt_b_ids), evt_b_lengths=torch.tensor(evt_b_lengths),
            evt_c=evt_c, evt_c_ids=torch.from_numpy(evt_c_ids), evt_c_lengths=torch.tensor(evt_c_lengths),
            evt_d=evt_d, evt_d_ids=torch.from_numpy(evt_d_ids), evt_d_lengths=torch.tensor(evt_d_lengths))
class NetFlowCoarse(nn.Module):

    def __init__(self, kernelSize):
        super(NetFlowCoarse, self).__init__()
        assert kernelSize % 2 == 1
        self.conv1 = conv3x3(kernelSize * kernelSize, 512)
        self.bn1 = nn.BatchNorm2d(512, eps=1e-05)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(512, 256)
        self.bn2 = nn.BatchNorm2d(256, eps=1e-05)
        self.conv3 = conv3x3(256, 128)
        self.bn3 = nn.BatchNorm2d(128, eps=1e-05)
        self.conv4 = conv3x3(128, kernelSize * kernelSize)
        self.kernelSize = kernelSize
        self.paddingSize = kernelSize // 2
        # Per-channel offset grids covering the (kernelSize x kernelSize) window.
        self.gridY = torch.arange(-self.paddingSize, self.paddingSize + 1).view(1, 1, -1, 1).expand(
            1, 1, self.kernelSize, self.kernelSize).contiguous().view(1, -1, 1, 1).type(torch.FloatTensor)
        self.gridX = torch.arange(-self.paddingSize, self.paddingSize + 1).view(1, 1, 1, -1).expand(
            1, 1, self.kernelSize, self.kernelSize).contiguous().view(1, -1, 1, 1).type(torch.FloatTensor)
        self.softmax = torch.nn.Softmax(dim=1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.in_channels != m.out_channels or m.out_channels != m.groups or m.bias is not None:
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                else:
                    print('Not initializing')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def cuda(self):
        super().cuda()
        self.gridX, self.gridY = self.gridX.cuda(), self.gridY.cuda()

    def do_forward(self, coef, up8X):
        n, c, w, h = coef.size()
        x = self.conv1(coef)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.softmax(x)
        # Expected flow = softmax-weighted sum of grid offsets, normalized to [-1, 1].
        flowX = torch.sum(x * self.gridX, dim=1, keepdim=True) / h * 2
        flowY = torch.sum(x * self.gridY, dim=1, keepdim=True) / w * 2
        flow = torch.cat((flowX, flowY), dim=1)
        flow = F.upsample_bilinear(flow, size=None, scale_factor=8) if up8X else flow
        return flow

    def forward(self, coef, up8X=True):
        if self.training:
            flow = self.do_forward(coef, up8X)
        else:
            with torch.no_grad():
                flow = self.do_forward(coef, up8X)
        return flow
class InputFeatures(object):

    def __init__(self, input_ids, input_mask, segment_ids, label_id, tokens, baseline_ids=None):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        self.baseline_ids = baseline_ids
        self.tokens = tokens
def test_digits_sqrt_modular_sparse():
    model = GraphCutSelection(100, 'precomputed', optimizer='modular', random_state=0)
    model.fit(X_digits_cosine_sparse)
    assert_array_equal(model.ranking, digits_cosine_modular_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_modular_gains, 4)
def rand_init_delta(delta, x, ord, eps, clip_min, clip_max):
    # Random initialization inside the L-p ball; batch_multiply, clamp_by_pnorm,
    # normalize_by_pnorm and clamp follow the advertorch-style utilities used
    # elsewhere in this codebase.
    if isinstance(eps, torch.Tensor):
        assert len(eps) == len(delta)
    if ord == np.inf:
        delta.data.uniform_(-1, 1)
        delta.data = batch_multiply(eps, delta.data)
    elif ord == 2:
        delta.data.uniform_(clip_min, clip_max)
        delta.data = delta.data - x
        delta.data = clamp_by_pnorm(delta.data, ord, eps)
    elif ord == 1:
        ini = laplace.Laplace(loc=delta.new_tensor(0), scale=delta.new_tensor(1))
        delta.data = ini.sample(delta.data.shape)
        delta.data = normalize_by_pnorm(delta.data, p=1)
        ray = uniform.Uniform(0, eps).sample()
        delta.data *= ray
        delta.data = clamp(x.data + delta.data, clip_min, clip_max) - x.data
    else:
        error = 'Only ord = inf, ord = 1 and ord = 2 have been implemented'
        raise NotImplementedError(error)
    delta.data = clamp(x + delta.data, min=clip_min, max=clip_max) - x
    return delta.data
def create_agent(sess, environment, summary_writer=None):
    if not FLAGS.debug_mode:
        summary_writer = None
    num_actions = environment.action_space.n
    if FLAGS.agent_name == 'dqn':
        return dqn_agent.DQNAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'rainbow':
        return rainbow_agent.RainbowAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'implicit_quantile':
        return implicit_quantile_agent.ImplicitQuantileAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'rpg':
        return rpg_agent.RPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'epg':
        return epg_agent.EPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'lpg':
        return lpg_agent.LPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'repg':
        return repg_agent.REPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'dqnrpg':
        return dqnrpg_agent.DQNRPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'rainbowrpg':
        return rainbowrpg_agent.RainbowRPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    elif FLAGS.agent_name == 'implicit_quantilerpg':
        return implicit_quantilerpg_agent.ImplicitQuantileRPGAgent(sess, num_actions=num_actions, summary_writer=summary_writer)
    else:
        raise ValueError('Unknown agent: {}'.format(FLAGS.agent_name))
def test_connector__step_blocked(connector: Connector, state: State,
                                 path0: int, path1: int, path2: int,
                                 targ0: int, targ1: int, targ2: int,
                                 posi0: int, posi1: int, posi2: int) -> None:
    step_fn = jax.jit(connector.step)
    actions = jnp.array([
        [constants.LEFT, constants.LEFT, constants.RIGHT],
        [constants.DOWN, constants.DOWN, constants.UP],
        [constants.RIGHT, constants.LEFT, constants.UP],
        [constants.NOOP, constants.DOWN, constants.LEFT],
        [constants.NOOP, constants.RIGHT, constants.LEFT],
    ])
    for action in actions:
        state, timestep = step_fn(state, action)
    expected_grid = jnp.array([
        [EMPTY, EMPTY, targ0, posi2, path2, path2],
        [EMPTY, path0, path0, path0, path0, path2],
        [EMPTY, path0, posi0, targ2, path2, path2],
        [targ1, path1, path1, EMPTY, path2, EMPTY],
        [path1, path1, path1, EMPTY, path2, EMPTY],
        [path1, posi1, path1, EMPTY, EMPTY, EMPTY],
    ])
    assert jnp.array_equal(state.grid, expected_grid)
    assert timestep.step_type == StepType.LAST
    assert jnp.array_equal(timestep.discount, jnp.asarray(0))
    assert all(is_head_on_grid(state.agents, state.grid))
    assert all(is_target_on_grid(state.agents, state.grid))
class CorpusDataset(Dataset):

    def __init__(self, corpus: List[str], tokenizer: PreTrainedTokenizer, max_seq_length: int):
        self.corpus = corpus
        self.tokenizer = tokenizer
        self.max_token_len = max_seq_length - 2  # reserve room for the special tokens
        logging.getLogger('transformers.tokenization_utils_base').setLevel(logging.ERROR)

    def __len__(self):
        return len(self.corpus)

    def __getitem__(self, item):
        if isinstance(self.corpus[item], str):
            # Tokenize lazily and cache the ids as a compact uint16 array.
            text = self.corpus[item]
            cache_input_ids = self.tokenizer(text, add_special_tokens=False,
                                             return_attention_mask=False,
                                             return_token_type_ids=False)['input_ids']
            self.corpus[item] = np.array(cache_input_ids, dtype=np.uint16)
        input_ids = self.corpus[item].tolist()
        if len(input_ids) > self.max_token_len:
            # Sample a random window of max_token_len tokens.
            start_pos = random.randint(0, len(input_ids) - self.max_token_len)
            input_ids = input_ids[start_pos:start_pos + self.max_token_len]
        batch_encoding = self.tokenizer.prepare_for_model(input_ids, add_special_tokens=True,
                                                          return_special_tokens_mask=True)
        return batch_encoding
class MergeLayer(Module):
    # No explicit super().__init__() call: this assumes `Module` is the
    # fastai-style base whose metaclass runs nn.Module.__init__ automatically.

    def __init__(self, dense: bool = False):
        self.dense = dense

    def forward(self, x):
        # Dense (concat) merge vs. residual (sum) merge with the stashed input.
        return torch.cat([x, x.orig], dim=1) if self.dense else (x + x.orig)
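# A small usage sketch for MergeLayer (assumption: an earlier layer in this
# codebase attaches the stashed input as x.orig; shapes are illustrative):
import torch

x = torch.randn(4, 8, 16, 16)
x.orig = torch.randn(4, 8, 16, 16)
print(MergeLayer(dense=True)(x).shape)   # torch.Size([4, 16, 16, 16]) - channel concat
print(MergeLayer(dense=False)(x).shape)  # torch.Size([4, 8, 16, 16]) - residual sum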
def applyGrad(losses, AIM, optim, tape):
    var_autoencoder = AIM.encoder.variables + AIM.decoder.variables  # 'AutoEencoder' typo fixed
    var_E = AIM.encoder.variables
    var_G = AIM.decoder.variables
    var_dz = AIM.dis_z.variables
    var_dimg = AIM.dis_img.variables
    var_age = AIM.age_classifier.variables
    # One variable list per loss; some lists repeat because several losses
    # update the same sub-network.
    variables = [var_autoencoder, var_dz, var_E, var_dimg, var_G, var_age, var_age, var_dz, var_G]
    grads = tape.gradient(losses, variables)
    for g, v in zip(grads, variables):
        optim.apply_gradients(M.zip_grad(g, v))
class AutoModelForImageSegmentation(_BaseAutoModelClass):
    _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
class nlvr_dataset(Dataset):

    def __init__(self, ann_file, transform, image_root):
        self.ann = []
        for f in ann_file:
            self.ann += json.load(open(f, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = 30

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        image0_path = os.path.join(self.image_root, ann['images'][0])
        image0 = Image.open(image0_path).convert('RGB')
        image0 = self.transform(image0)
        image1_path = os.path.join(self.image_root, ann['images'][1])
        image1 = Image.open(image1_path).convert('RGB')
        image1 = self.transform(image1)
        sentence = pre_caption(ann['sentence'], self.max_words)
        if ann['label'] == 'True':
            label = 1
        else:
            label = 0
        return image0, image1, sentence, label
def fast_rcnn_losses(cls_score, bbox_pred, label_int32, bbox_targets,
                     bbox_inside_weights, bbox_outside_weights):
    device_id = cls_score.get_device()
    rois_label = Variable(torch.from_numpy(label_int32.astype('int64'))).cuda(device_id)
    loss_cls = F.cross_entropy(cls_score, rois_label)
    bbox_targets = Variable(torch.from_numpy(bbox_targets)).cuda(device_id)
    bbox_inside_weights = Variable(torch.from_numpy(bbox_inside_weights)).cuda(device_id)
    bbox_outside_weights = Variable(torch.from_numpy(bbox_outside_weights)).cuda(device_id)
    sl1_loss_bbox = net_utils.smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
    iou_loss_bbox, giou_loss_bbox = net_utils.compute_giou(
        bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights,
        transform_weights=cfg.MODEL.BBOX_REG_WEIGHTS)
    if cfg.MODEL.LOSS_TYPE == 'smooth_l1':
        loss_bbox = sl1_loss_bbox
    elif cfg.MODEL.LOSS_TYPE == 'iou':
        loss_bbox = iou_loss_bbox
    elif cfg.MODEL.LOSS_TYPE == 'giou':
        loss_bbox = giou_loss_bbox
    elif cfg.MODEL.LOSS_TYPE == 'diou':
        _, diou_loss_bbox = net_utils.compute_diou(
            bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights,
            transform_weights=cfg.MODEL.BBOX_REG_WEIGHTS)
        loss_bbox = diou_loss_bbox
    elif cfg.MODEL.LOSS_TYPE == 'ciou':
        _, ciou_loss_bbox = net_utils.compute_ciou(
            bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights,
            transform_weights=cfg.MODEL.BBOX_REG_WEIGHTS)
        loss_bbox = ciou_loss_bbox
    else:
        raise ValueError('Invalid loss type: ' + cfg.MODEL.LOSS_TYPE)
    cls_preds = cls_score.max(dim=1)[1].type_as(rois_label)
    accuracy_cls = cls_preds.eq(rois_label).float().mean(dim=0)
    return loss_cls, loss_bbox, accuracy_cls, sl1_loss_bbox, iou_loss_bbox, giou_loss_bbox
def test_unet_valid():
    batch_size = 1
    in_channels = 1
    out_channels = 2
    input_spatial_dim = 572
    expected_spatial_dim = 388  # the classic 572 -> 388 valid-convolution U-Net shrinkage
    unet = UNet(in_channels=in_channels, out_channels=out_channels, n_blocks=5,
                start_filters=32, activation=ActivationFunction.RELU,
                normalization=NormalizationLayer.BATCH, conv_mode=ConvMode.VALID,
                dim=Dimensions.TWO, up_mode=UpMode.TRANSPOSED)
    inp = torch.rand(size=(batch_size, in_channels, input_spatial_dim, input_spatial_dim),
                     dtype=torch.float32)
    out = unet(inp)
    assert out.shape == (batch_size, out_channels, expected_spatial_dim, expected_spatial_dim)
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Convert benchmark model json to script')
    parser.add_argument('txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument('--run', action='store_true', help='run script directly')
    parser.add_argument('--out', type=str, help='path to save model benchmark script')
    args = parser.parse_args()
    return args
@pytest.mark.very_slow
def test_run_molecule_pmapped(mocker, tmp_path):
    # The bare '.very_slow' in the source was most likely a pytest marker
    # decorator; @pytest.mark.very_slow is assumed here.
    vmc_nchains = 3 * jax.local_device_count()
    eval_nchains = 2 * jax.local_device_count()
    mocker.patch('os.curdir', tmp_path)
    config = _get_config(vmc_nchains, eval_nchains, True)
    _run_and_check_output_files(mocker, tmp_path, config)
def _get_test_ions():
    ion_pos = jnp.array([[-4.0, 0.0], [0.0, 0.0], [2.0, 1.0]])
    ion_charges = jnp.array([1.0, 2.0, 3.0])
    return ion_pos, ion_charges
class BatchCollator(object):

    def __init__(self, dataset, append_ind=False):
        self.dataset = dataset
        self.test_mode = self.dataset.test_mode
        self.task = self.dataset.task
        self.data_names = self.dataset.data_names
        self.append_ind = append_ind

    def __call__(self, batch):
        if not isinstance(batch, list):
            batch = list(batch)
        # Per-batch maxima used to pad every field to a common shape.
        max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
        max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
        max_masks = max([data[self.data_names.index('masks')].shape[0] for data in batch])
        if self.test_mode and self.task == 'QA2R':
            max_question_length = max([len(q) for data in batch for q in data[self.data_names.index('question')]])
        else:
            max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
        if 'answer_choices' in self.data_names:
            max_answer_length = max([len(answer) for data in batch for answer in data[self.data_names.index('answer_choices')]])
        if 'answer' in self.data_names:
            max_answer_length = max([len(data[self.data_names.index('answer')]) for data in batch])
        if 'rationale_choices' in self.data_names:
            max_rationale_length = max([len(rationale) for data in batch for rationale in data[self.data_names.index('rationale_choices')]])
        if 'rationale' in self.data_names:
            max_rationale_length = max([len(data[self.data_names.index('rationale')]) for data in batch])
        if 'question_align_matrix' in self.data_names:
            if self.test_mode and self.task == 'QA2R':
                max_q_align_length = max([m.shape[0] for data in batch for m in data[self.data_names.index('question_align_matrix')]])
            else:
                max_q_align_length = max([data[self.data_names.index('question_align_matrix')].shape[0] for data in batch])
        if 'answer_align_matrix' in self.data_names:
            if (isinstance(batch[0][self.data_names.index('answer_align_matrix')], list)
                    or batch[0][self.data_names.index('answer_align_matrix')].dim() == 3):
                max_a_align_length = max([m.shape[0] for data in batch for m in data[self.data_names.index('answer_align_matrix')]])
            elif batch[0][self.data_names.index('answer_align_matrix')].dim() == 2:
                max_a_align_length = max([data[self.data_names.index('answer_align_matrix')].shape[0] for data in batch])
            else:
                raise ValueError('invalid dims of answer_align_matrix')
        if 'rationale_align_matrix' in self.data_names:
            if (isinstance(batch[0][self.data_names.index('rationale_align_matrix')], list)
                    or batch[0][self.data_names.index('rationale_align_matrix')].dim() == 3):
                max_r_align_length = max([m.shape[0] for data in batch for m in data[self.data_names.index('rationale_align_matrix')]])
            elif batch[0][self.data_names.index('rationale_align_matrix')].dim() == 2:
                max_r_align_length = max([data[self.data_names.index('rationale_align_matrix')].shape[0] for data in batch])
            else:
                raise ValueError('invalid dims of rationale_align_matrix!')
        for i, ibatch in enumerate(batch):
            out = {}
            image = ibatch[self.data_names.index('image')]
            out['image'] = clip_pad_images(image, max_shape, pad=0)
            boxes = ibatch[self.data_names.index('boxes')]
            out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-1)
            masks = ibatch[self.data_names.index('masks')]
            mask_height, mask_width = masks.shape[1:]
            out['masks'] = clip_pad_boxes(masks.view(masks.shape[0], -1), max_masks, pad=-1).view(-1, mask_height, mask_width)
            question = ibatch[self.data_names.index('question')]
            if self.test_mode and self.task == 'QA2R':
                out['question'] = torch.stack(
                    tuple(clip_pad_2d(q, (max_question_length, len(q[0])), pad=-2) for q in question), dim=0)
                if 'question_align_matrix' in self.data_names:
                    q_align_matrix = ibatch[self.data_names.index('question_align_matrix')]
                    out['question_align_matrix'] = torch.stack(
                        tuple(clip_pad_2d(m, (max_q_align_length, max_question_length), pad=0) for m in q_align_matrix), dim=0)
            else:
                out['question'] = clip_pad_2d(question, (max_question_length, len(question[0])), pad=-2)
                if 'question_align_matrix' in self.data_names:
                    q_align_matrix = ibatch[self.data_names.index('question_align_matrix')]
                    out['question_align_matrix'] = clip_pad_2d(q_align_matrix, (max_q_align_length, max_question_length), pad=0)
            if 'answer' in self.data_names:
                answer = ibatch[self.data_names.index('answer')]
                out['answer'] = clip_pad_2d(answer, (max_answer_length, len(answer[0])), pad=-2)
            if 'answer_choices' in self.data_names:
                answer_choices = ibatch[self.data_names.index('answer_choices')]
                out['answer_choices'] = torch.stack(
                    tuple(clip_pad_2d(answer, (max_answer_length, len(answer[0])), pad=-2) for answer in answer_choices), dim=0)
            if 'answer_align_matrix' in self.data_names:
                a_align_matrix = ibatch[self.data_names.index('answer_align_matrix')]
                if isinstance(a_align_matrix, list) or a_align_matrix.dim() == 3:
                    out['answer_align_matrix'] = torch.stack(
                        tuple(clip_pad_2d(m, (max_a_align_length, max_answer_length), pad=0) for m in a_align_matrix), dim=0)
                elif a_align_matrix.dim() == 2:
                    out['answer_align_matrix'] = clip_pad_2d(a_align_matrix, (max_a_align_length, max_answer_length), pad=0)
            if 'rationale' in self.data_names:
                rationale = ibatch[self.data_names.index('rationale')]
                out['rationale'] = clip_pad_2d(rationale, (max_rationale_length, len(rationale[0])), pad=-2)
            if 'rationale_choices' in self.data_names:
                rationale_choices = ibatch[self.data_names.index('rationale_choices')]
                out['rationale_choices'] = torch.stack(
                    tuple(clip_pad_2d(rationale, (max_rationale_length, len(rationale[0])), pad=-2) for rationale in rationale_choices), dim=0)
            if 'rationale_align_matrix' in self.data_names:
                r_align_matrix = ibatch[self.data_names.index('rationale_align_matrix')]
                if isinstance(r_align_matrix, list) or r_align_matrix.dim() == 3:
                    out['rationale_align_matrix'] = torch.stack(
                        tuple(clip_pad_2d(m, (max_r_align_length, max_rationale_length), pad=0) for m in r_align_matrix), dim=0)
                elif r_align_matrix.dim() == 2:
                    out['rationale_align_matrix'] = clip_pad_2d(r_align_matrix, (max_r_align_length, max_rationale_length), pad=0)
            if 'answer_label' in self.data_names:
                out['answer_label'] = ibatch[self.data_names.index('answer_label')]
            if 'rationale_label' in self.data_names:
                out['rationale_label'] = ibatch[self.data_names.index('rationale_label')]
            out['im_info'] = ibatch[self.data_names.index('im_info')]
            batch[i] = tuple(out[data_name] for data_name in self.data_names)
            if self.append_ind:
                batch[i] += (torch.tensor(i, dtype=torch.int64),)
        out_tuple = ()
        for items in zip(*batch):
            if isinstance(items[0], torch.Tensor):
                out_tuple += (torch.stack(tuple(items), dim=0),)
            else:
                out_tuple += (list(items),)
        return out_tuple
def train(net, X, lbls, train_idx, optimizer, epoch):
    net.train()
    st = time.time()
    optimizer.zero_grad()
    outs = net(X)
    outs, lbls = outs[train_idx], lbls[train_idx]
    loss = F.cross_entropy(outs, lbls)
    loss.backward()
    optimizer.step()
    print(f'Epoch: {epoch}, Time: {(time.time() - st):.5f}s, Loss: {loss.item():.5f}')
    return loss.item()
class QLinear_o(nn.Linear):

    def __init__(self, in_features, out_features, bias=True, num_bits=8,
                 num_bits_weight=8, num_bits_grad=None, biprecision=False, measure=False):
        super(QLinear_o, self).__init__(in_features, out_features, bias)
        self.num_bits = num_bits
        self.num_bits_weight = num_bits_weight or num_bits
        self.num_bits_grad = num_bits_grad
        self.biprecision = biprecision
        self.quantize_input = QuantMeasure(self.num_bits, measure=measure)
        self.measure = measure

    def forward(self, input):
        qinput = self.quantize_input(input)
        weight_qparams = calculate_qparams(self.weight, num_bits=self.num_bits_weight,
                                           flatten_dims=(1, -1), reduce_dim=None)
        qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight
        if self.bias is not None:
            qbias = (self.bias if self.measure
                     else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,
                                   flatten_dims=(0, -1)))
        else:
            qbias = None
        if not self.biprecision or self.num_bits_grad is None:
            output = F.linear(qinput, qweight, qbias)
            if self.num_bits_grad is not None:
                output = quantize_grad(output, num_bits=self.num_bits_grad)
        else:
            output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad)
        return output
def create_dataset(dataset_opt):
    mode = dataset_opt['mode']
    if mode == 'LR':
        from data.LR_dataset import LRDataset as D
    elif mode == 'LQGT':
        from data.LQGT_dataset import LQGTDataset as D
    else:
        raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))
    dataset = D(dataset_opt)
    logger = logging.getLogger('base')
    logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__, dataset_opt['name']))
    return dataset
def add_NNServiceServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'train': grpc.unary_unary_rpc_method_handler(
            servicer.train,
            request_deserializer=nn__service__pb2.TrainRequest.FromString,
            response_serializer=nn__service__pb2.TrainResponse.SerializeToString),
        'evaluate': grpc.unary_unary_rpc_method_handler(
            servicer.evaluate,
            request_deserializer=nn__service__pb2.EvaluateRequest.FromString,
            response_serializer=nn__service__pb2.EvaluateResponse.SerializeToString),
        'predict': grpc.unary_unary_rpc_method_handler(
            servicer.predict,
            request_deserializer=nn__service__pb2.PredictRequest.FromString,
            response_serializer=nn__service__pb2.PredictResponse.SerializeToString),
        'upload_meta': grpc.unary_unary_rpc_method_handler(
            servicer.upload_meta,
            request_deserializer=nn__service__pb2.UploadMetaRequest.FromString,
            response_serializer=nn__service__pb2.UploadMetaResponse.SerializeToString),
        'upload_file': grpc.stream_unary_rpc_method_handler(
            servicer.upload_file,
            request_deserializer=nn__service__pb2.ByteChunk.FromString,
            response_serializer=nn__service__pb2.UploadMetaResponse.SerializeToString),
        'save_server_model': grpc.unary_unary_rpc_method_handler(
            servicer.save_server_model,
            request_deserializer=nn__service__pb2.SaveModelRequest.FromString,
            response_serializer=nn__service__pb2.SaveModelResponse.SerializeToString),
        'load_server_model': grpc.unary_unary_rpc_method_handler(
            servicer.load_server_model,
            request_deserializer=nn__service__pb2.LoadModelRequest.FromString,
            response_serializer=nn__service__pb2.LoadModelResponse.SerializeToString),
    }
    generic_handler = grpc.method_handlers_generic_handler('nn.NNService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class FlaxViTModelTester(unittest.TestCase):

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act='gelu',
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size,
                           num_channels=self.num_channels, hidden_size=self.hidden_size,
                           num_hidden_layers=self.num_hidden_layers,
                           num_attention_heads=self.num_attention_heads,
                           intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
                           hidden_dropout_prob=self.hidden_dropout_prob,
                           attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                           is_decoder=False, initializer_range=self.initializer_range)
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape,
                                (self.batch_size, num_patches + 1, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
class TrainerMemoryTracker:
    # Map caller names to the stage names used in the reported metrics.
    stages = {'__init__': 'init', 'train': 'train', 'evaluate': 'eval', 'predict': 'test'}

    def __init__(self, skip_memory_metrics=False):
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil
        if is_torch_cuda_available():
            import torch
            self.torch = torch
            self.gpu = {}
        else:
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        # Infer the stage from the name of the caller's caller.
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(f'was called from {caller}, but only expect to be called from one of {self.stages.keys()}')

    def cpu_mem_used(self):
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            if not self.peak_monitoring:
                break

    def start(self):
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        # Track the CPU memory peak on a daemon thread.
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.peak_monitoring = False
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.empty_cache()
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(begin=self.gpu_mem_used_at_start,
                                            end=self.gpu_mem_used_now,
                                            alloc=self.gpu_mem_used_now - self.gpu_mem_used_at_start,
                                            peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now))
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(begin=self.cpu_mem_used_at_start,
                                        end=self.cpu_mem_used_now,
                                        alloc=self.cpu_mem_used_now - self.cpu_mem_used_at_start,
                                        peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now))
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        if self.skip_memory_metrics:
            return
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, 'init')
            self.init_reported = True
        for stage in stages:
            for t in ['alloc', 'peaked']:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f'{stage}_mem_cpu_{t}_delta'] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f'{stage}_mem_gpu_{t}_delta'] = self.gpu[stage][t]
        if stages[0] == 'init':
            metrics['before_init_mem_cpu'] = self.cpu['init']['begin']
            if self.torch is not None:
                metrics['before_init_mem_gpu'] = self.gpu['init']['begin']

    def stop_and_update_metrics(self, metrics=None):
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        if metrics is not None:
            self.update_metrics(stage, metrics)
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
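# Quick illustrative check of truncate_or_pad (pad_token_id=0 is an arbitrary
# choice here). Note the asymmetry: truncation returns a new slice, while
# padding mutates the input list in place before returning it.
print(truncate_or_pad([5, 6, 7, 8], block_size=3, pad_token_id=0))  # [5, 6, 7]
print(truncate_or_pad([5, 6], block_size=4, pad_token_id=0))        # [5, 6, 0, 0]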
@dataclass
class DistributedArguments:
    # The field(...) calls imply this was a dataclass; the @dataclass decorator
    # is assumed here. Typos in the help strings are also fixed.
    multinode: bool = field(default=False, metadata={'help': 'Whether to use the multinode mode.'})
    worker: str = field(default=None, metadata={'help': 'List of node ip addresses, using comma to split.'})
    task_index: int = field(default=0, metadata={'help': 'Worker index; the worker with index 0 acts as the main worker for extra work, such as saving the model.'})
class planarDissipativeForce(planarForce):
    # The garbled '_physical_input _conversion(...)' fragments in the source are
    # reconstructed as galpy's decorator pair @potential_physical_input /
    # @physical_conversion, which is how these methods are decorated in galpy.

    def __init__(self, amp, ro=None, vo=None, amp_units=None):
        planarForce.__init__(self, amp=amp, ro=ro, vo=vo)

    @potential_physical_input
    @physical_conversion('force', pop=True)
    def Rforce(self, R, phi=0.0, t=0.0, v=None):
        return self._Rforce_nodecorator(R, phi=phi, t=t, v=v)

    def _Rforce_nodecorator(self, R, phi=0.0, t=0.0, v=None):
        try:
            return self._amp * self._Rforce(R, phi=phi, t=t, v=v)
        except AttributeError:
            from .Potential import PotentialError
            raise PotentialError("'_Rforce' function not implemented for this planarDissipativeForce")

    @potential_physical_input
    @physical_conversion('force', pop=True)
    def phitorque(self, R, phi=0.0, t=0.0, v=None):
        return self._phitorque_nodecorator(R, phi=phi, t=t, v=v)

    def _phitorque_nodecorator(self, R, phi=0.0, t=0.0, v=None):
        try:
            return self._amp * self._phitorque(R, phi=phi, t=t, v=v)
        except AttributeError:
            if self.isNonAxi:
                from .Potential import PotentialError
                raise PotentialError("'_phitorque' function not implemented for this DissipativeForce")
            return 0.0
def conv_layer(inDim, outDim, ks, s, p, norm_layer='none'):
    conv = nn.Conv2d(inDim, outDim, kernel_size=ks, stride=s, padding=p)
    relu = nn.ReLU(True)
    assert norm_layer in ('batch', 'instance', 'none')
    if norm_layer == 'none':
        seq = nn.Sequential(conv, relu)
    else:
        if norm_layer == 'instance':
            norm = nn.InstanceNorm2d(outDim, affine=False, track_running_stats=False)
        else:
            momentum = 0.1
            norm = nn.BatchNorm2d(outDim, momentum=momentum, affine=True, track_running_stats=True)
        seq = nn.Sequential(conv, norm, relu)
    return seq
def quaddobl_start_diagonal_cascade(gamma=0, tasks=0):
    from phcpy.phcpy2c3 import py2c_create_quaddobl_homotopy
    from phcpy.phcpy2c3 import py2c_create_quaddobl_homotopy_with_gamma
    from phcpy.phcpy2c3 import py2c_solve_by_quaddobl_homotopy_continuation
    from phcpy.phcpy2c3 import py2c_solcon_clear_quaddobl_solutions
    from phcpy.phcpy2c3 import py2c_syscon_clear_quaddobl_system
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_solutions_to_container
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_system_to_container
    from phcpy.interface import load_quaddobl_solutions
    from phcpy.interface import load_quaddobl_system
    if gamma == 0:
        py2c_create_quaddobl_homotopy()
    else:
        py2c_create_quaddobl_homotopy_with_gamma(gamma.real, gamma.imag)
    py2c_solve_by_quaddobl_homotopy_continuation(tasks)
    py2c_solcon_clear_quaddobl_solutions()
    py2c_syscon_clear_quaddobl_system()
    py2c_copy_quaddobl_target_solutions_to_container()
    py2c_copy_quaddobl_target_system_to_container()
    tsys = load_quaddobl_system()
    sols = load_quaddobl_solutions()
    return tsys, sols
def test__init_custom():
    cnn = CNN(model_config=CustomModel(model=EfficientNet(), transform=EfficientNet.transform, name=EfficientNet.name))
    assert cnn.model_config.name == EfficientNet.name
    cnn = CNN(model_config=CustomModel(model=ViT(), transform=ViT.transform, name=ViT.name))
    assert cnn.model_config.name == ViT.name
class DoubleConv(torch.nn.Module):

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super(DoubleConv, self).__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.double_conv = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
            torch.nn.BatchNorm2d(mid_channels),
            torch.nn.ReLU(True),
            torch.nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(True))

    def forward(self, x):
        return self.double_conv(x)
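# Shape check for DoubleConv: with kernel_size=3 and padding=1 both convs
# preserve spatial size, so only the channel count changes (sizes illustrative).
import torch

block = DoubleConv(in_channels=3, out_channels=64)
x = torch.randn(1, 3, 128, 128)
print(block(x).shape)  # torch.Size([1, 64, 128, 128])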
def create_plane(location: Tuple[float, float, float] = (0.0, 0.0, 0.0),
                 rotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
                 size: float = 2.0,
                 name: Optional[str] = None) -> bpy.types.Object:
    bpy.ops.mesh.primitive_plane_add(size=size, location=location, rotation=rotation)
    current_object = bpy.context.object
    if name is not None:
        current_object.name = name
    return current_object
class CosineDistance(Distance):

    def __init__(self, reference_point: list):
        self.reference_point = reference_point

    def get_distance(self, list1: list, list2: list):
        total = sum(numpy.multiply([x - r for x, r in zip(list1, self.reference_point)],
                                   [y - r for y, r in zip(list2, self.reference_point)]))
        # scipy's cosine distance of the centered vectors is computed but never
        # used; the returned value normalizes by the sums of squared differences
        # rather than by the vector norms.
        a = distance.cosine([x - y for x, y in zip(list1, self.reference_point)],
                            [x - y for x, y in zip(list2, self.reference_point)])
        b = total / (self.__sum_of_distances_to_reference_point(list1)
                     * self.__sum_of_distances_to_reference_point(list2))
        return b

    def __sum_of_distances_to_reference_point(self, l: list):
        return sum(pow(x - y, 2.0) for x, y in zip(l, self.reference_point))
class LanguagePairDataset(FairseqDataset):

    def __init__(self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None,
                 left_pad_source=True, left_pad_target=False,
                 max_source_positions=1024, max_target_positions=1024,
                 shuffle=True, input_feeding=True,
                 remove_eos_from_source=False, append_eos_to_target=False):
        if tgt_dict is not None:
            assert src_dict.pad() == tgt_dict.pad()
            assert src_dict.eos() == tgt_dict.eos()
            assert src_dict.unk() == tgt_dict.unk()
        self.src = src
        self.tgt = tgt
        self.src_sizes = np.array(src_sizes)
        self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.shuffle = shuffle
        self.input_feeding = input_feeding
        self.remove_eos_from_source = remove_eos_from_source
        self.append_eos_to_target = append_eos_to_target

    def __getitem__(self, index):
        tgt_item = self.tgt[index] if self.tgt is not None else None
        src_item = self.src[index]
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]
        return {'id': index, 'source': src_item, 'target': tgt_item}

    def __len__(self):
        return len(self.src)

    def collater(self, samples):
        return collate(samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
                       left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
                       input_feeding=self.input_feeding)

    def num_tokens(self, index):
        return max(self.src_sizes[index],
                   self.tgt_sizes[index] if self.tgt_sizes is not None else 0)

    def size(self, index):
        return (self.src_sizes[index],
                self.tgt_sizes[index] if self.tgt_sizes is not None else 0)

    def ordered_indices(self):
        # Sort by target length first, then stably by source length.
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        if self.tgt_sizes is not None:
            indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]
        return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]

    def supports_prefetch(self):
        return (getattr(self.src, 'supports_prefetch', False)
                and (getattr(self.tgt, 'supports_prefetch', False) or self.tgt is None))

    def prefetch(self, indices):
        self.src.prefetch(indices)
        if self.tgt is not None:
            self.tgt.prefetch(indices)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
                                 n_class, do_lower_case, output_mode, is_multi_choice=True):
    print('#examples', len(examples))
    label_map = {}
    for i, label in enumerate(label_list):
        label_map[label] = i
    if is_multi_choice:
        features = [[]]
    else:
        features = []
    for ex_index, example in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info('Writing example %d of %d' % (ex_index, len(examples)))
        tokens_a = tokenizer.tokenize(example.text_a.lower() if do_lower_case else example.text_a)
        tokens_b = None
        tokens_c = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b.lower() if do_lower_case else example.text_b)
        if example.text_c:
            tokens_c = tokenizer.tokenize(example.text_c.lower() if do_lower_case else example.text_c)
        if tokens_c:
            _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
            tokens_b = tokens_c + ['[SEP]'] + tokens_b
        elif tokens_b:
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:max_seq_length - 2]
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = (len(tokens_a) + 2) * [0]
        if tokens_b:
            tokens += tokens_b + ['[SEP]']
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        pad_length = max_seq_length - len(input_ids)
        input_ids += [0] * pad_length
        input_mask += [0] * pad_length
        segment_ids += [0] * pad_length
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if output_mode in ['classification', 'multi-choice']:
            label_id = label_map[example.label]
        elif output_mode == 'regression':
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        if is_multi_choice:
            features[-1].append(InputFeatures(input_ids=input_ids, input_mask=input_mask,
                                              segment_ids=segment_ids, label_id=label_id))
            if len(features[-1]) == n_class:
                features.append([])
        else:
            features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask,
                                          segment_ids=segment_ids, label_id=label_id))
    if is_multi_choice:
        if len(features[-1]) == 0:
            features = features[:-1]
    print('#features', len(features))
    return features
def scalability():
    threads = np.array([1, 2, 4, 8, 16, 32], dtype=np.float64)
    times = np.empty_like(threads)
    for i, j in enumerate(threads):
        data = read_only_json_in_dir(f'output_parallel/mt{int(j)}')
        times[i] = data['time_solve']
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.set_xticks([1, 8, 16, 32])
    ax.plot(threads, times, '-o', label='SANM Solving Time')
    idealx = np.linspace(threads[0], threads[-1], 100)
    ax.plot(idealx, times[0] / idealx, '--', label='Ideal Parallelism')
    ax.set_ylim((0, np.max(times) * 1.1))
    ax.grid(axis='y')
    ax.set_xlabel('# threads')
    ax.set_ylabel('Time (seconds)')
    ax.legend(loc='best', fancybox=True, framealpha=0.9, borderpad=1, frameon=True)
    fig.tight_layout()
    fig.savefig('output/scalability.pdf', metadata={'CreationDate': None})
def accuracy(output, labels, batch=False):
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()
    correct = correct.sum()
    if batch:
        # Return the raw count of correct predictions for this batch.
        return correct
    return correct / len(labels)
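# Illustrative call: two of three argmax predictions match the labels, so the
# function returns 2/3 (with batch=True it would return the raw count, 2.0).
import torch

logits = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labels = torch.tensor([0, 1, 1])
print(accuracy(logits, labels))  # tensor(0.6667, dtype=torch.float64)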
@dataclass
class FashionMNIST(DATASET):
    # @dataclass is assumed since field(...) is used; Tuple[int, int] replaces
    # the one-element Tuple[int] annotation for an (H, W) image size.
    _target_: str = 'dataset_loaders.load_fashion_mnist'
    name: str = 'FashionMNIST'
    IN_CHANNEL: int = 1
    N_CLASSES: int = 10
    IMG_SIZE: Tuple[int, int] = field(default_factory=lambda: (28, 28))
def _process_image(directory, split, name):
    filename = os.path.join(directory, 'image_2', name + '.png')
    # PNG data is binary, so 'rb' is used here; the original's 'r' mode would
    # fail on Python 3 (it only worked on Python 2).
    image_data = tf.gfile.FastGFile(filename, 'rb').read()
    img = cv2.imread(filename)
    shape = np.shape(img)
    label_list = []
    type_list = []
    bbox_x1_list = []
    bbox_y1_list = []
    bbox_x2_list = []
    bbox_y2_list = []
    if re.findall('train', split):
        filename = os.path.join(directory, 'label_2', name + '.txt')
        with open(filename) as anno_file:
            objects = anno_file.readlines()
        for obj in objects:  # renamed from 'object' to avoid shadowing the builtin
            obj_anno = obj.split(' ')
            type_txt = obj_anno[0].encode('ascii')
            if type_txt in CLASSES:
                label_list.append(CLASSES[type_txt])
                type_list.append(type_txt)
                bbox_x1 = float(obj_anno[4])
                bbox_y1 = float(obj_anno[5])
                bbox_x2 = float(obj_anno[6])
                bbox_y2 = float(obj_anno[7])
                bbox_x1_list.append(bbox_x1)
                bbox_y1_list.append(bbox_y1)
                bbox_x2_list.append(bbox_x2)
                bbox_y2_list.append(bbox_y2)
    image_format = b'PNG'
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(image_data),
        'image/height': int64_feature(shape[0]),
        'image/width': int64_feature(shape[1]),
        'image/channels': int64_feature(shape[2]),
        'image/shape': int64_feature(shape),
        'image/object/bbox/xmin': float_feature(bbox_x1_list),
        'image/object/bbox/xmax': float_feature(bbox_x2_list),
        'image/object/bbox/ymin': float_feature(bbox_y1_list),
        'image/object/bbox/ymax': float_feature(bbox_y2_list),
        'image/object/bbox/label': int64_feature(label_list),
        'image/object/bbox/label_text': bytes_feature(type_list),
    }))
    return example
class Scorer(object):

    def __init__(self, args):
        self.data = {'src': self.load_text_file(args.source),
                     'tgt': self.load_text_file(args.target)}
        self.data_type = args.data_type
        self.eval_latency_unit = args.eval_latency_unit
        self.sacrebleu_tokenizer = args.sacrebleu_tokenizer
        self.no_space = args.no_space
        if self.data_type == 'speech' and self.eval_latency_unit == 'char':
            logger.error('Character level latency for speech-to-text model is not supported at the moment. We will update this feature very soon.')
            sys.exit(1)
        logger.info(f'Evaluating on {self.data_type}')
        logger.info(f'Source: {os.path.abspath(args.source)}')
        logger.info(f'Target: {os.path.abspath(args.target)}')
        logger.info(f'Number of sentences: {len(self)}')
        self.instances = {}
        if self.data_type == 'text':
            self.instance_class = TextInstance
        elif self.data_type == 'speech':
            self.instance_class = AudioInstance
        else:
            if self.data_type is None:
                logger.error('Please specify the data type (text or speech).\n')
            else:
                logger.error(f'{self.data_type} is not supported, please choose from text or speech.\n')
            sys.exit(1)
        self.reset()

    def get_info(self):
        return {'num_sentences': len(self), 'data_type': self.data_type}

    def send_src(self, instance_id, segment_size):
        dict_to_return = self.instances[instance_id].send_src(segment_size=segment_size)
        dict_to_return['instance_id'] = instance_id
        return dict_to_return

    def recv_hyp(self, instance_id, list_of_tokens):
        self.instances[instance_id].recv_hypo(list_of_tokens, self.eval_latency_unit)

    def reset(self):
        if len(self.instances) > 0:
            logger.warning('Resetting scorer')
        for i, (src, tgt) in enumerate(zip(self.data['src'], self.data['tgt'])):
            self.instances[i] = self.instance_class(i, src, tgt, self.eval_latency_unit)

    def gather_translation(self):
        not_finish_write_id = [i for i in range(len(self)) if not self.instances[i].finish_hypo]
        empty_hypo_id = [str(i) for i in range(len(self))
                         if len(self.instances[i].prediction(no_space=self.no_space)) == 0]
        if len(not_finish_write_id) > 0:
            print("Warning: these hypothesis don't have EOS in predictions", file=sys.stderr)
            print(', '.join(str(x) for x in not_finish_write_id), file=sys.stderr)
            for idx in not_finish_write_id:
                self.instances[idx].sentence_level_eval()
        if len(empty_hypo_id) > 0:
            print('Warning: these hypothesis are empty', file=sys.stderr)
            print(', '.join(empty_hypo_id), file=sys.stderr)
        translations = [self.instances[i].prediction(eos=False, no_space=self.no_space)
                        for i in range(len(self))]
        return translations

    def get_quality_score(self):
        translations = self.gather_translation()
        try:
            bleu_score = sacrebleu.corpus_bleu(translations, [self.data['tgt']],
                                               tokenize=self.sacrebleu_tokenizer).score
        except Exception as e:
            print(e, file=sys.stderr)
            bleu_score = 0
        return {'BLEU': bleu_score}

    def get_latency_score(self):
        results = {}
        for metric in ['AL', 'AP', 'DAL']:
            results[metric] = mean([seg.metrics['latency'][metric] for seg in self.instances.values()])
            if 'latency_ca' in self.instances[0].metrics:
                results[metric + '_CA'] = mean([seg.metrics['latency_ca'][metric] for seg in self.instances.values()])
        return results

    def score(self):
        return {'Quality': self.get_quality_score(), 'Latency': self.get_latency_score()}

    @staticmethod
    def load_text_file(file, split=False):
        # Defined without `self` in the source; @staticmethod is assumed so the
        # self.load_text_file(...) calls in __init__ work.
        with open(file) as f:
            if split:
                return [r.strip().split() for r in f]
            else:
                return [r.strip() for r in f]

    def __len__(self):
        return len(self.data['tgt'])
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_epsilon', default=32.0, type=float, help='Maximum size of adversarial perturbation.')
    parser.add_argument('--num_iter', default=10, type=int, help='Number of iterations.')
    parser.add_argument('--batch_size', default=256, type=int, help='How many images process at one time.')
    parser.add_argument('--momentum', default=1.0, type=float, help='Momentum.')
    parser.add_argument('--dataset', type=str, default='cifar')
    parser.add_argument('--start', type=int, default=0)
    parser.add_argument('--end', type=int, default=100)
    parser.add_argument('--n_iter', type=int, default=1000)
    parser.add_argument('--transfer', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--sweep', action='store_true')
    parser.add_argument('--wandb', action='store_true', default=False, help='Use wandb for logging')
    parser.add_argument('--ensemble_adv_trained', action='store_true')
    parser.add_argument('--test_batch_size', type=int, default=32, metavar='S')
    parser.add_argument('--train_set', default='test', choices=['train_and_test', 'test', 'train'],
                        help='add the test set in the training set')
    parser.add_argument('--modelIn', type=str, default='../pretrained_classifiers/cifar/res18/model_0.pt')
    parser.add_argument('--robust_model_path', type=str,
                        default='../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt')
    parser.add_argument('--dir_test_models', type=str, default='../',
                        help='The path to the directory containing the classifier models for evaluation.')
    parser.add_argument('--max_test_model', type=int, default=2,
                        help='The maximum number of pretrained classifiers to use for testing.')
    parser.add_argument('--train_on_madry', default=False, action='store_true', help='Train using Madry tf grad')
    parser.add_argument('--train_on_list', default=False, action='store_true', help='train on a list of classifiers')
    parser.add_argument('--attack_ball', type=str, default='Linf', choices=['L2', 'Linf'])
    parser.add_argument('--source_arch', default='res18', help='The architecture we want to attack on CIFAR.')
    parser.add_argument('--target_arch', default=None, help='The architecture we want to blackbox transfer to on CIFAR.')
    parser.add_argument('--transform_prob', type=float, default=0.5, metavar='M', help='Randomly apply input Transformation')
    parser.add_argument('--resize_factor', type=float, default=1.1, metavar='M', help='Resize Factor for Random Resizing')
    parser.add_argument('--epsilon', type=float, default=0.1, metavar='M', help='Epsilon for Delta (default: 0.1)')
    parser.add_argument('--train_with_critic_path', type=str, default=None, help='Train generator with saved critic model')
    parser.add_argument('--model', help='path to model')
    parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')
    parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')
    parser.add_argument('--namestr', type=str, default='NoBox', help='additional info in output filename to describe experiments')
    args = parser.parse_args()
    args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_loader, test_loader = create_loaders(args, root='../data')
    if os.path.isfile('../settings.json'):
        with open('../settings.json') as f:
            data = json.load(f)
        args.wandb_apikey = data.get('wandbapikey')
    if args.wandb:
        os.environ['WANDB_API_KEY'] = args.wandb_apikey
        wandb.init(project='NoBox-sweeps', name='AutoAttack-{}'.format(args.dataset))
    adv_models = None
    if args.dataset == 'cifar':
        args.nc, args.h, args.w = 3, 32, 32
        model, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.source_arch])
        model_type = args.source_arch
        if args.target_arch is not None:
            model_target, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.target_arch])
            model_type = args.target_arch
            del model_target
            torch.cuda.empty_cache()
        if args.ensemble_adv_trained:
            adv_model_names = args.adv_models
            l_test_classif_paths = []
            adv_models = [None] * len(adv_model_names)
            for i in range(len(adv_model_names)):
                adv_path = os.path.join(args.dir_test_models, 'pretrained_classifiers', args.dataset,
                                        'ensemble_adv_trained', adv_model_names[i] + '.pt')
                init_func, _ = ARCHITECTURES[adv_model_names[i]]
                temp_model = init_func().to(args.dev)
                adv_models[i] = nn.DataParallel(temp_model)
                adv_models[i].load_state_dict(torch.load(adv_path))
                l_test_classif_paths.append([adv_path])
            model_type = 'Ensemble Adversarial'
    elif args.dataset == 'mnist':
        args.nc, args.h, args.w = 1, 28, 28
        if args.source_arch == 'natural':
            model, l_test_classif_paths = load_all_classifiers(args, load_archs=['natural'])
            model_type = 'natural'
        elif args.source_arch == 'ens_adv' or args.ensemble_adv_trained:
            adv_model_names = args.adv_models
            adv_models = [None] * len(adv_model_names)
            for i in range(len(adv_model_names)):
                type = get_model_type(adv_model_names[i])
                adv_models[i] = load_model(args, adv_model_names[i], type=type).to(args.dev)
            path = os.path.join(args.dir_test_models, 'pretrained_classifiers', args.dataset,
                                'ensemble_adv_trained', args.model)
            model, l_test_classif_paths = load_all_classifiers(args, load_archs=['natural'])
            l_test_classif_paths = [path]
            model_type = 'Ensemble Adversarial'
    model.to(args.dev)
    model.eval()
    print('Testing on %d Test Classifiers with Source Model %s' % (len(l_test_classif_paths), args.source_arch))
    l = [x.unsqueeze(0) for (x, y) in test_loader.dataset]
    x_test = torch.cat(l, 0).to(args.dev)
    l = [y for (x, y) in test_loader.dataset]
    y_test = torch.Tensor(l).long().to(args.dev)
    device_count = torch.cuda.device_count()
    if device_count > 1:
        print('CUDA Device Count is %d, Error might happen. Use export CUDA_VISIBLE_DEVICES=0' % device_count)
    # Build the translation-invariant smoothing kernel, one per input channel.
    stack_kernel_list = []
    for i in range(0, args.nc):
        kernel = gkern(15, 3).astype(np.float32)
        stack_kernel = np.stack([kernel, kernel, kernel]).swapaxes(2, 0)
        stack_kernel_list.append(np.expand_dims(stack_kernel, 3))
    stack_kernel = np.stack(stack_kernel_list).squeeze()
    stack_kernel = torch.tensor(stack_kernel).permute(0, 3, 1, 2).to(args.dev)
    attacker = TIM(args, stack_kernel=stack_kernel, model=model, attack_ball=args.attack_ball,
                   eps=args.epsilon, n_iter=args.n_iter, decay_factor=args.momentum, eps_iter=0.01)
    advcorrect = 0
    with ctx_noparamgrad_and_eval(model):
        adv_complete_list = []
        if args.dataset == 'cifar':
            for batch_idx, (x_batch, y_batch) in enumerate(test_loader):
                if (batch_idx + 1) * args.test_batch_size > args.batch_size:
                    break
                x_batch, y_batch = x_batch.to(args.dev), y_batch.to(args.dev)
                adv_complete_list.append(attacker.perturb(x_batch, y_batch))
            adv_complete = torch.cat(adv_complete_list)
        else:
            adv_complete = attacker.perturb(x_test[:args.batch_size], y_test[:args.batch_size])
    if args.transfer:
        adv_img_list = []
        y_orig = y_test[:args.batch_size]
        for i in range(0, len(adv_complete)):
            adv_img_list.append([adv_complete[i].unsqueeze(0), y_orig[i]])
        del model
        torch.cuda.empty_cache()
        baseline_transfer(args, attacker, 'TI-DI-Attack', model_type, adv_img_list,
                          l_test_classif_paths, adv_models)
def get_lr_policy(lr_schedule): d = {'constant': constant_schedule, 'cosine': cosine_schedule, 'step': step_schedule} return d[lr_schedule]
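get_lr_policy is a plain dictionary dispatch: it returns the schedule function itself rather than calling it, and an unknown name raises KeyError. A usage sketch, assuming constant_schedule/cosine_schedule/step_schedule are defined as the dictionary implies:

policy = get_lr_policy('cosine')        # returns cosine_schedule, not a value
assert get_lr_policy('step') is step_schedule
# get_lr_policy('linear')               # would raise KeyError: 'linear'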
def is_private(estimator): return isinstance(estimator, (DPExplainableBoostingClassifier, DPExplainableBoostingRegressor))
class positive_odd_int_or_none(_ParseType): def __call__(self, string: str) -> (int | None): if (string.lower() == 'none'): return None num = int(string) if ((num <= 0) or (not (num % 2))): msg = f"'{string}' needs to be a positive odd integer." raise argparse.ArgumentTypeError(msg) return num
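In the snippet above, a stray `_none` token left by the extraction was dropped, and the branch returning None for the literal string 'none' is an assumed reconstruction implied by the class name and the `int | None` annotation. Since _ParseType instances are callable, an instance serves directly as an argparse `type=` converter; a usage sketch with a hypothetical flag name, assuming _ParseType needs no constructor arguments:

import argparse

parser = argparse.ArgumentParser()
# the converter is an *instance*, so argparse calls its __call__ on the raw string
parser.add_argument('--window-size', type=positive_odd_int_or_none(), default=None)
print(parser.parse_args(['--window-size', '5']).window_size)   # 5
# '--window-size 4' or '--window-size -3' would raise ArgumentTypeError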
def update_neural_insights_workload_accuracy_data(workload_uuid: str, baseline_accuracy: float, optimized_accuracy: float) -> None: try: from neural_insights import NeuralInsights from neural_insights.utils.consts import WORKDIR_LOCATION neural_insights = NeuralInsights(workdir_location=WORKDIR_LOCATION) neural_insights.update_workload_accuracy_data(workload_uuid, baseline_accuracy, optimized_accuracy) except ImportError: logger.info('Neural Insights not found.') except Exception as err: logger.warning(f'Could not update workload accuracy data: {err}.')
@pytest.fixture(name='test_batting_stats_html') def _test_batting_stats_html(get_data_file_contents: Callable[([str], str)]) -> str: return get_data_file_contents('batting_leaders.html')
class ControlClass(ABC): @abstractmethod def reset(self): pass @abstractmethod def step(self, state: np.ndarray, setpoint: np.ndarray) -> np.ndarray: pass
class CariSegmentation(BaseDataset): NUM_CLASS = 11 def __init__(self, root='dataset/cari/', split='train', mode=None, transform=None, target_transform=None): super(CariSegmentation, self).__init__(root, split, mode, transform, target_transform, base_size=256, crop_size=256) _mask_dir = os.path.join(root, 'label') _image_dir = os.path.join(root, 'image') if (self.mode == 'train'): _split_f = os.path.join(root, 'train.txt') elif (self.mode == 'val'): _split_f = os.path.join(root, 'val.txt') elif (self.mode == 'testval'): _split_f = os.path.join(root, 'val.txt') elif (self.mode == 'test'): _split_f = os.path.join(root, 'test.txt') else: raise RuntimeError('Unknown dataset split.') self.images = [] self.masks = [] self.names = [] self.crop_size_h = self.crop_size self.crop_size_w = self.crop_size with open(os.path.join(_split_f), 'r') as lines: for line in tqdm(lines): _image = os.path.join(_image_dir, ((self.split + '/') + line.rstrip('\n'))) assert os.path.isfile(_image) self.images.append(_image) self.names.append(line.rstrip('\n')) if (self.mode != 'test'): _mask = os.path.join(_mask_dir, (((self.split + '/') + line.rstrip('\n')[:(- 3)]) + 'png')) assert os.path.isfile(_mask) self.masks.append(_mask) if (self.mode != 'test'): assert (len(self.images) == len(self.masks)) def _val_sync_transform(self, img, mask): (w, h) = img.size oh = self.crop_size_h ow = int((((1.0 * w) * oh) / h)) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) (w, h) = img.size x1 = int(round(((w - self.crop_size_w) / 2.0))) y1 = int(round(((h - self.crop_size_h) / 2.0))) img = img.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h))) mask = mask.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h))) return (img, self._mask_transform(mask)) def _sync_transform(self, img, mask): if (random.random() < 0.5): img = img.transpose(Image.FLIP_LEFT_RIGHT) mask = mask.transpose(Image.FLIP_LEFT_RIGHT) mask = swap_N(mask, 2, 3) mask = swap_N(mask, 4, 5) short_size = random.randint(int((self.base_size * 0.5)), int((self.base_size * 2.0))) (w, h) = img.size oh = short_size ow = int((((1.0 * w) * oh) / h)) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) deg = random.uniform((- 10), 10) img = img.rotate(deg, resample=Image.BILINEAR) mask = mask.rotate(deg, resample=Image.NEAREST) if ((oh < self.crop_size_h) or (ow < self.crop_size_w)): padh = ((self.crop_size_h - oh) if (oh < self.crop_size_h) else 0) padw = ((self.crop_size_w - ow) if (ow < self.crop_size_w) else 0) img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0) (w, h) = img.size x1 = random.randint(0, (w - self.crop_size_w)) y1 = random.randint(0, (h - self.crop_size_h)) img = img.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h))) mask = mask.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h))) if (random.random() < 0.5): img = img.filter(ImageFilter.GaussianBlur(radius=random.random())) return (img, self._mask_transform(mask)) def __getitem__(self, index): img = Image.open(self.images[index]).convert('RGB') if (self.mode == 'test'): if (self.transform is not None): img = self.transform(img) return (img, os.path.basename(self.images[index])) target = Image.open(self.masks[index]) img = img.resize((self.crop_size_w, self.crop_size_h), Image.BILINEAR) target = target.resize((self.crop_size_w, self.crop_size_h), Image.NEAREST) if (self.mode == 'train'): (img, target) = self._sync_transform(img, target) elif
(self.mode == 'val'): (img, target) = self._val_sync_transform(img, target) else: assert (self.mode == 'testval') target = self._mask_transform(target) if (self.transform is not None): img = self.transform(img) if (self.target_transform is not None): target = self.target_transform(target) if (self.mode == 'testval'): return (img, target, self.names[index]) return (img, target) def __len__(self): return len(self.images)
class MLP(nn.Module): def __init__(self, num_classes): super(MLP, self).__init__() self.fc1 = nn.Linear(768, 100) self.act1 = nn.Tanh() self.fc2 = nn.Linear(100, num_classes) def forward(self, x): x = self.fc1(x) x = self.act1(x) x = self.fc2(x) output = torch.softmax(x, dim=1) return output
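Because forward() already applies softmax, pairing this MLP with nn.CrossEntropyLoss would apply log-softmax on top of softmax; taking the log of the output and using NLLLoss keeps the math consistent. A small smoke test:

import torch
import torch.nn as nn

model = MLP(num_classes=3)
x = torch.randn(4, 768)
probs = model(x)                                   # (4, 3); each row sums to 1
target = torch.tensor([0, 2, 1, 0])
loss = nn.NLLLoss()(probs.clamp_min(1e-12).log(), target)  # NLLLoss expects log-probs
print(float(loss))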
def _import_class_0(name): components = name.split('.') mod = __import__(components[0]) for comp in components[1:]: mod = getattr(mod, comp) return mod
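The same lookup with the standard library: importlib imports the module prefix and getattr resolves the final attribute, matching the manual loop above for dotted names whose prefix is an importable module.

import importlib

def import_class(name):
    # 'pkg.mod.Attr' -> import 'pkg.mod', then fetch 'Attr'
    module_name, _, attr = name.rpartition('.')
    return getattr(importlib.import_module(module_name), attr)

assert import_class('collections.OrderedDict') is __import__('collections').OrderedDict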
def load_waveforms_from_paths(paths, sample_rate): progress_bar = tqdm(paths, desc='Loading waveforms...') return [Waveform(path=p, sample_rate=sample_rate) for p in progress_bar]
class ASPP(nn.Module): def __init__(self, C, depth, num_classes, conv=nn.Conv2d, norm=nn.BatchNorm2d, momentum=0.0003, mult=1, phase='train'): super(ASPP, self).__init__() self._C = C self._depth = depth self._num_classes = num_classes self.phase = phase self.global_pooling = nn.AdaptiveAvgPool2d(1) self.relu = nn.ReLU(inplace=True) self.aspp1 = conv(C, depth, kernel_size=1, stride=1, bias=False) self.aspp2 = conv(C, depth, kernel_size=3, stride=1, dilation=int((6 * mult)), padding=int((6 * mult)), bias=False) self.aspp3 = conv(C, depth, kernel_size=3, stride=1, dilation=int((12 * mult)), padding=int((12 * mult)), bias=False) self.aspp4 = conv(C, depth, kernel_size=3, stride=1, dilation=int((18 * mult)), padding=int((18 * mult)), bias=False) self.aspp5 = conv(C, depth, kernel_size=1, stride=1, bias=False) self.aspp1_bn = norm(depth, momentum=momentum) self.aspp2_bn = norm(depth, momentum=momentum) self.aspp3_bn = norm(depth, momentum=momentum) self.aspp4_bn = norm(depth, momentum=momentum) self.aspp5_bn = norm(depth, momentum=momentum) self.conv2 = conv((depth * 5), depth, kernel_size=1, stride=1, bias=False) self.bn2 = norm(depth, momentum=momentum) self.dropout = nn.Dropout2d(p=0.5) self.conv3 = nn.Conv2d(depth, num_classes, kernel_size=1, stride=1) def forward(self, x): x1 = self.aspp1(x) x1 = self.aspp1_bn(x1) x1 = self.relu(x1) x2 = self.aspp2(x) x2 = self.aspp2_bn(x2) x2 = self.relu(x2) x3 = self.aspp3(x) x3 = self.aspp3_bn(x3) x3 = self.relu(x3) x4 = self.aspp4(x) x4 = self.aspp4_bn(x4) x4 = self.relu(x4) x5 = self.global_pooling(x) x5 = self.aspp5(x5) x5 = self.aspp5_bn(x5) x5 = self.relu(x5) x5 = nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear', align_corners=True)(x5) x = torch.cat((x1, x2, x3, x4, x5), 1) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) if (self.phase == 'test'): x = self.dropout(x) x = self.conv3(x) return x
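A shape smoke test for the module above: the four convolutional branches keep the spatial resolution, the global-pooling branch is upsampled back, and channels go C -> 5*depth -> depth -> num_classes. Note that as written, dropout only runs when phase == 'test', so it is inactive here. Sizes below are illustrative (e.g. a ResNet C5 feature map):

import torch

aspp = ASPP(C=2048, depth=256, num_classes=21)
feat = torch.randn(2, 2048, 33, 33)
out = aspp(feat)
assert out.shape == (2, 21, 33, 33)   # spatial size preserved, classes on channel dim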
class _ROIAlign(Function): @staticmethod def forward(ctx, input, rois, output_size, spatial_scale, sampling_ratio): ctx.save_for_backward(rois) ctx.output_size = _pair(output_size) ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio ctx.input_shape = input.size() output = C_ROIPooling.roi_align_forward(input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio) return output @staticmethod @once_differentiable def backward(ctx, grad_output): (rois,) = ctx.saved_tensors output_size = ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio (bs, ch, h, w) = ctx.input_shape grad_input = C_ROIPooling.roi_align_backward(grad_output, rois, spatial_scale, output_size[0], output_size[1], bs, ch, h, w, sampling_ratio) return (grad_input, None, None, None, None)
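This wraps a custom C_ROIPooling extension; torchvision ships a closely related operator, so for reference a sketch of the equivalent call through torchvision.ops (rois are rows of (batch_index, x1, y1, x2, y2) in the scaled coordinate frame):

import torch
from torchvision.ops import roi_align

feat = torch.randn(1, 256, 64, 64)
rois = torch.tensor([[0.0, 4.0, 4.0, 36.0, 36.0]])   # one box in image 0
pooled = roi_align(feat, rois, output_size=(7, 7), spatial_scale=1.0, sampling_ratio=2)
assert pooled.shape == (1, 256, 7, 7)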
def reduce_model(model_path): from kito import reduce_keras_model from keras.models import load_model m = load_model(model_path) m_red = reduce_keras_model(m) m_red.save((model_path[:(- 3)] + '_reduced.h5'))
class PredictorFactory(object): def __init__(self, sess, model, towers): self.sess = sess self.model = model self.towers = towers self.tower_built = False def get_predictor(self, input_names, output_names, tower): if (not self.tower_built): self._build_predict_tower() tower = self.towers[(tower % len(self.towers))] raw_input_vars = get_tensors_by_names(input_names) output_names = [('{}{}/'.format(PREDICT_TOWER, tower) + n) for n in output_names] output_vars = get_tensors_by_names(output_names) return OnlinePredictor(self.sess, raw_input_vars, output_vars) def _build_predict_tower(self): tf.get_variable_scope().reuse_variables() with tf.name_scope(None), freeze_collection(SUMMARY_BACKUP_KEYS): fn = (lambda _: self.model.build_graph(self.model.get_input_vars())) build_multi_tower_prediction_graph(fn, self.towers) self.tower_built = True
def initialize_exp(params, *args, dump_params=True): if dump_params: with open(os.path.join(params.dump_path, 'params.pkl'), 'wb') as f: pickle.dump(params, f) params.dump_checkpoints = os.path.join(params.dump_path, 'checkpoints') if ((not params.rank) and (not os.path.isdir(params.dump_checkpoints))): os.mkdir(params.dump_checkpoints) training_stats = PD_Stats(os.path.join(params.dump_path, (('stats' + str(params.rank)) + '.pkl')), args) logger = create_logger(os.path.join(params.dump_path, 'train.log'), rank=params.rank) logger.info('Initialized logger') logger.info('\n'.join((('%s: %s' % (k, str(v))) for (k, v) in sorted(dict(vars(params)).items())))) logger.info(('The experiment will be stored in %s\n' % params.dump_path)) logger.info('') return (logger, training_stats)
class modelClassifier(): def __init__(self): self.learning_rate = FIXED_PARAMETERS['learning_rate'] self.display_epoch_freq = 1 self.display_step = config.display_step self.eval_step = config.eval_step self.save_step = config.eval_step self.embedding_dim = FIXED_PARAMETERS['word_embedding_dim'] self.dim = FIXED_PARAMETERS['hidden_embedding_dim'] self.batch_size = FIXED_PARAMETERS['batch_size'] self.emb_train = FIXED_PARAMETERS['emb_train'] self.keep_rate = FIXED_PARAMETERS['keep_rate'] self.sequence_length = FIXED_PARAMETERS['seq_length'] self.config = config logger.Log(('Building model from %s.py' % model)) self.model = MyModel(self.config, seq_length=self.sequence_length, emb_dim=self.embedding_dim, hidden_dim=self.dim, embeddings=loaded_embeddings, emb_train=self.emb_train) self.global_step = self.model.global_step if config.use_lr_decay: self.learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, 1000, config.lr_decay_rate, staircase=True) tf.summary.scalar('learning_rate', self.learning_rate) if (not config.test): tvars = tf.trainable_variables() (grads, _) = tf.clip_by_global_norm(tf.gradients(self.model.total_cost, tvars), 1.0) self.gvs = list(zip(grads, tvars)) if config.use_adagrad_optimizer: opt = tf.train.AdagradOptimizer(self.learning_rate) elif config.user_adadeltaOptimizer: opt = tf.train.AdadeltaOptimizer(self.learning_rate) elif config.use_yellow_fin_optimizer: opt = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0) else: opt = tf.train.GradientDescentOptimizer(self.learning_rate) self.optimizer = opt.apply_gradients(zip(grads, tvars), global_step=self.global_step) self.tb_writer = tf.summary.FileWriter(config.tbpath) logger.Log('Initializing variables') self.init = tf.global_variables_initializer() self.sess = None self.saver = tf.train.Saver() '{"sentence1_part_of_speech_tagging": "MD PRP VB NNP NNP IN NNP .", "sentence1_binary_parse": "Can you use Vanilla Visa on Amazon ?", "sentence2_parse": "What are some problems with a Vanilla Visa ?", "sentence2_NER_feature": [], "sentence2_token_exact_match_with_s1": [6, 7, 8], "sentence2_binary_parse": "What are some problems with a Vanilla Visa ?", "pairID": "333035", "sentence2": "What are some problems with a Vanilla Visa?", "sentence1_parse": "Can you use Vanilla Visa on Amazon ?", "sentence1_NER_feature": [[6, 0]], "gold_label": "neutral", "sentence2_part_of_speech_tagging": "WP VBP DT NNS IN DT NNP NNP .", "sentence1_token_exact_match_with_s2": [3, 4, 7], "sentence1": "Can you use Vanilla Visa on Amazon?"}' def get_minibatch(self, dataset, start_index, end_index, training=False): indices = range(start_index, end_index) genres = [['quora'] for i in indices] labels = [dataset[i]['label'] for i in indices] pairIDs = np.array([dataset[i]['pairID'] for i in indices]) if (config.random_crop_or_pad_sentence_by_seqlen and training): premise_pad_crop_pair = generate_crop_pad_pairs([dataset[i]['sentence1_binary_parse_index_sequence'][:] for i in indices]) hypothesis_pad_crop_pair = generate_crop_pad_pairs([dataset[i]['sentence2_binary_parse_index_sequence'][:] for i in indices]) else: premise_pad_crop_pair = hypothesis_pad_crop_pair = ([(0, 0)] * len(indices)) premise_vectors = fill_feature_vector_with_cropping_or_padding([dataset[i]['sentence1_binary_parse_index_sequence'][:] for i in indices], premise_pad_crop_pair, 1) hypothesis_vectors = fill_feature_vector_with_cropping_or_padding([dataset[i]['sentence2_binary_parse_index_sequence'][:] for i in indices], hypothesis_pad_crop_pair, 1) premise_pos_vectors = 
generate_quora_pos_feature_tensor([dataset[i]['sentence1_part_of_speech_tagging'][:] for i in indices], premise_pad_crop_pair) hypothesis_pos_vectors = generate_quora_pos_feature_tensor([dataset[i]['sentence2_part_of_speech_tagging'][:] for i in indices], hypothesis_pad_crop_pair) premise_char_vectors = fill_feature_vector_with_cropping_or_padding([dataset[i]['sentence1_binary_parse_char_index'][:] for i in indices], premise_pad_crop_pair, 2, column_size=config.char_in_word_size) hypothesis_char_vectors = fill_feature_vector_with_cropping_or_padding([dataset[i]['sentence2_binary_parse_char_index'][:] for i in indices], hypothesis_pad_crop_pair, 2, column_size=config.char_in_word_size) premise_exact_match = construct_one_hot_feature_tensor([dataset[i]['sentence1_token_exact_match_with_s2'][:] for i in indices], premise_pad_crop_pair, 1) hypothesis_exact_match = construct_one_hot_feature_tensor([dataset[i]['sentence2_token_exact_match_with_s1'][:] for i in indices], hypothesis_pad_crop_pair, 1) premise_exact_match = np.expand_dims(premise_exact_match, 2) hypothesis_exact_match = np.expand_dims(hypothesis_exact_match, 2) premise_inverse_term_frequency = hypothesis_inverse_term_frequency = np.zeros((len(indices), config.seq_length, 1)) premise_antonym_feature = hypothesis_antonym_feature = premise_inverse_term_frequency premise_NER_feature = construct_one_hot_feature_tensor([dataset[i]['sentence1_NER_feature'][:] for i in indices], premise_pad_crop_pair, 2, 7) hypothesis_NER_feature = construct_one_hot_feature_tensor([dataset[i]['sentence2_NER_feature'][:] for i in indices], hypothesis_pad_crop_pair, 2, 7) return (premise_vectors, hypothesis_vectors, labels, genres, premise_pos_vectors, hypothesis_pos_vectors, pairIDs, premise_char_vectors, hypothesis_char_vectors, premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) def train(self, train_quora, dev_quora): sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True self.sess = tf.Session(config=sess_config) self.sess.run(self.init) self.step = 0 self.epoch = 0 self.best_dev_mat = 0.0 self.best_mtrain_acc = 0.0 self.last_train_acc = [0.001, 0.001, 0.001, 0.001, 0.001] self.best_step = 0 self.train_dev_set = False ckpt_file = (os.path.join(FIXED_PARAMETERS['ckpt_path'], modname) + '.ckpt') if os.path.isfile((ckpt_file + '.meta')): if os.path.isfile((ckpt_file + '_best.meta')): self.saver.restore(self.sess, (ckpt_file + '_best')) self.completed = False (self.best_dev_mat, dev_cost_mat, confmx) = evaluate_classifier(self.classify, dev_quora, self.batch_size) (self.best_mtrain_acc, mtrain_cost, _) = evaluate_classifier(self.classify, train_quora[0:5000], self.batch_size) logger.Log('Confusion Matrix on dev-quora\n{}'.format(confmx)) logger.Log(('Restored best Quora Validation acc: %f\n Restored best Quora train acc: %f' % (self.best_dev_mat, self.best_mtrain_acc))) self.saver.restore(self.sess, ckpt_file) logger.Log(('Model restored from file: %s' % ckpt_file)) logger.Log('Training...') while True: training_data = train_quora random.shuffle(training_data) avg_cost = 0.0 total_batch = int((len(training_data) / self.batch_size)) self.completed = False for i in range(total_batch): (minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, minibatch_pre_pos, minibatch_hyp_pos, pairIDs, premise_char_vectors, hypothesis_char_vectors, 
premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) = self.get_minibatch(training_data, (self.batch_size * i), (self.batch_size * (i + 1)), True) feed_dict = {self.model.premise_x: minibatch_premise_vectors, self.model.hypothesis_x: minibatch_hypothesis_vectors, self.model.y: minibatch_labels, self.model.keep_rate_ph: self.keep_rate, self.model.is_train: True, self.model.premise_pos: minibatch_pre_pos, self.model.hypothesis_pos: minibatch_hyp_pos, self.model.premise_char: premise_char_vectors, self.model.hypothesis_char: hypothesis_char_vectors, self.model.premise_exact_match: premise_exact_match, self.model.hypothesis_exact_match: hypothesis_exact_match} if ((self.step % self.display_step) == 0): if config.print_gradient: grads = [] varss = [] for (grad, var) in self.gvs: if (grad is not None): grads.append(grad) varss.append(var) gradients = self.sess.run(grads, feed_dict) for (i, grad) in enumerate(grads): logger.Log('Gradient for {}'.format(varss[i].name)) logger.Log(gradients[i]) (_, c, summary) = self.sess.run([self.optimizer, self.model.total_cost, self.model.summary], feed_dict) self.tb_writer.add_summary(summary, self.step) logger.Log('Step: {} completed'.format(self.step)) else: (_, c) = self.sess.run([self.optimizer, self.model.total_cost], feed_dict) if ((self.step % self.eval_step) == 0): if config.print_variables: varss = [] for (grad, var) in self.gvs: varss.append(var) variable_values = self.sess.run(varss[2:], feed_dict) for (i, grad) in enumerate(varss[2:]): logger.Log('variable value for {}'.format(varss[2:][i].name)) logger.Log(variable_values[i]) (dev_acc_mat, dev_cost_mat, confmx) = evaluate_classifier(self.classify, dev_quora, self.batch_size) logger.Log('Confusion Matrix on dev-matched\n{}'.format(confmx)) (mtrain_acc, mtrain_cost, _) = evaluate_classifier(self.classify, train_quora[0:5000], self.batch_size) logger.Log(('Step: %i\t Quora Val acc: %f\t Quora train acc: %f' % (self.step, dev_acc_mat, mtrain_acc))) logger.Log(('Step: %i\t Quora Val cost: %f\t Quora train cost: %f' % (self.step, dev_cost_mat, mtrain_cost))) if ((self.step % self.save_step) == 0): self.saver.save(self.sess, ckpt_file) best_test = (100 * (1 - (self.best_dev_mat / dev_acc_mat))) if (best_test > 0.02): self.saver.save(self.sess, (ckpt_file + '_best')) self.best_dev_mat = dev_acc_mat self.best_mtrain_acc = mtrain_acc self.best_step = self.step logger.Log(('Checkpointing with new best matched-dev accuracy: %f' % self.best_dev_mat)) if (self.best_dev_mat > 0.88): self.eval_step = 200 self.save_step = 200 self.step += 1 avg_cost += (c / total_batch) if ((self.epoch % self.display_epoch_freq) == 0): logger.Log(('Epoch: %i\t Avg. 
Cost: %f' % ((self.epoch + 1), avg_cost))) self.epoch += 1 self.last_train_acc[((self.epoch % 5) - 1)] = mtrain_acc progress = (1000 * ((sum(self.last_train_acc) / (5 * min(self.last_train_acc))) - 1)) if ((progress < 0.1) or (self.step > (self.best_step + 30000))): logger.Log(('Best matched-dev accuracy: %s' % self.best_dev_mat)) logger.Log(('MultiNLI Train accuracy: %s' % self.best_mtrain_acc)) self.completed = True break def classify(self, examples): if ((test == True) or (self.completed == True)): best_path = (os.path.join(FIXED_PARAMETERS['ckpt_path'], modname) + '.ckpt_best') self.sess = tf.Session() self.sess.run(self.init) self.saver.restore(self.sess, best_path) logger.Log(('Model restored from file: %s' % best_path)) total_batch = int((len(examples) / self.batch_size)) logits = np.empty(3) genres = [] costs = 0 for i in tqdm(range((total_batch + 1))): if (i != total_batch): (minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, minibatch_pre_pos, minibatch_hyp_pos, pairIDs, premise_char_vectors, hypothesis_char_vectors, premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) = self.get_minibatch(examples, (self.batch_size * i), (self.batch_size * (i + 1))) else: (minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, minibatch_pre_pos, minibatch_hyp_pos, pairIDs, premise_char_vectors, hypothesis_char_vectors, premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) = self.get_minibatch(examples, (self.batch_size * i), len(examples)) feed_dict = {self.model.premise_x: minibatch_premise_vectors, self.model.hypothesis_x: minibatch_hypothesis_vectors, self.model.y: minibatch_labels, self.model.keep_rate_ph: 1.0, self.model.is_train: False, self.model.premise_pos: minibatch_pre_pos, self.model.hypothesis_pos: minibatch_hyp_pos, self.model.premise_char: premise_char_vectors, self.model.hypothesis_char: hypothesis_char_vectors, self.model.premise_exact_match: premise_exact_match, self.model.hypothesis_exact_match: hypothesis_exact_match} genres += minibatch_genres (logit, cost) = self.sess.run([self.model.logits, self.model.total_cost], feed_dict) costs += cost logits = np.vstack([logits, logit]) if (test == True): logger.Log('Generating Classification error analysis script') correct_file = open(os.path.join(FIXED_PARAMETERS['log_path'], 'correctly_classified_pairs.txt'), 'w') wrong_file = open(os.path.join(FIXED_PARAMETERS['log_path'], 'wrongly_classified_pairs.txt'), 'w') pred = np.argmax(logits[1:], axis=1) LABEL = ['entailment', 'neutral', 'contradiction'] for i in tqdm(range(pred.shape[0])): if (pred[i] == examples[i]['label']): fh = correct_file else: fh = wrong_file fh.write('S1: {}\n'.format(examples[i]['sentence1'].encode('utf-8'))) fh.write('S2: {}\n'.format(examples[i]['sentence2'].encode('utf-8'))) fh.write('Label: {}\n'.format(examples[i]['gold_label'])) fh.write('Prediction: {}\n'.format(LABEL[pred[i]])) fh.write('confidence: \nentailment: {}\nneutral: {}\ncontradiction: {}\n\n'.format(logits[((1 + i), 0)], logits[((1 + i), 1)], logits[((1 + i), 2)])) correct_file.close() wrong_file.close() return (genres, np.argmax(logits[1:], axis=1), costs) def generate_predictions_with_id(self, path, 
examples): if ((test == True) or (self.completed == True)): best_path = (os.path.join(FIXED_PARAMETERS['ckpt_path'], modname) + '.ckpt_best') self.sess = tf.Session() self.sess.run(self.init) self.saver.restore(self.sess, best_path) logger.Log(('Model restored from file: %s' % best_path)) total_batch = int((len(examples) / self.batch_size)) logits = np.empty(3) costs = 0 IDs = np.empty(1) for i in tqdm(range((total_batch + 1))): if (i != total_batch): (minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, minibatch_pre_pos, minibatch_hyp_pos, pairIDs, premise_char_vectors, hypothesis_char_vectors, premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) = self.get_minibatch(examples, (self.batch_size * i), (self.batch_size * (i + 1))) else: (minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, minibatch_pre_pos, minibatch_hyp_pos, pairIDs, premise_char_vectors, hypothesis_char_vectors, premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature) = self.get_minibatch(examples, (self.batch_size * i), len(examples)) feed_dict = {self.model.premise_x: minibatch_premise_vectors, self.model.hypothesis_x: minibatch_hypothesis_vectors, self.model.y: minibatch_labels, self.model.keep_rate_ph: 1.0, self.model.is_train: False, self.model.premise_pos: minibatch_pre_pos, self.model.hypothesis_pos: minibatch_hyp_pos, self.model.premise_char: premise_char_vectors, self.model.hypothesis_char: hypothesis_char_vectors, self.model.premise_exact_match: premise_exact_match, self.model.hypothesis_exact_match: hypothesis_exact_match} logit = self.sess.run(self.model.logits, feed_dict) IDs = np.concatenate([IDs, pairIDs]) logits = np.vstack([logits, logit]) IDs = IDs[1:] logits = np.argmax(logits[1:], axis=1) save_submission(path, IDs, logits)
def _create_dummy_dict_file(dict_file): characters = list('helowrd') with open(dict_file, 'w') as fw: for char in characters: fw.write((char + '\n'))
def main(): parser = argparse.ArgumentParser() parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.') parser.add_argument('--lang_type', default=None, type=str, required=True, help='the language type') parser.add_argument('--bert_tokenizer', default=None, type=str, required=True, help='Bert pre-trained tokenizer') parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.') parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.') parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3') parser.add_argument('--trained_model_dir', default='', type=str, help='Where is the fine-tuned (with the cloze-style LM objective) BERT model?') parser.add_argument('--max_seq_length', default=150, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.') parser.add_argument('--do_train', action='store_true', help='Whether to run training.') parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.') parser.add_argument('--do_test', action='store_true', help='Whether to run eval on the test set.') parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.') parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.') parser.add_argument('--eval_batch_size', default=8, type=int, help='Total batch size for eval.') parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.') parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.') parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.') parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available') parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus') parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.') parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit') parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n') parser.add_argument('--freeze_bert', action='store_true', help='Whether to freeze BERT') parser.add_argument('--save_all_epochs', action='store_true', help='Whether to save model in each epoch') parser.add_argument('--coarse_tagset', action='store_true', help='Whether to use the coarse tagset') parser.add_argument('--supervised_training', action='store_true', help='Only use this for supervised top-line model') args = parser.parse_args() if ((args.local_rank == (- 1)) or args.no_cuda): device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu')) n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16)) if (args.gradient_accumulation_steps < 1): raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps)) args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if (n_gpu > 0): torch.cuda.manual_seed_all(args.seed) if ((not args.do_train) and (not args.do_eval) and (not args.do_test)): raise ValueError('At least one of `do_train` or `do_eval` or `do_test` must be True.') if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train): print('WARNING: Output directory already exists and is not empty.') if (not os.path.exists(args.output_dir)): os.makedirs(args.output_dir) processor = DataProcessor() label_list = processor.get_labels(args.data_dir) num_labels = len(label_list) label_map = {label: i for (i, label) in enumerate(label_list)} idx2label = {i: label for (i, label) in enumerate(label_list)} tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = None model = MyBertForTokenClassification.from_pretrained(args.trained_model_dir, num_labels=num_labels) model.to(device) if (args.do_test and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))): test_examples = processor.get_sep_tgt_test_examples(args.data_dir, args.lang_type) test_features = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer) logger.info('***** Running final test *****') logger.info(' Num examples = %d', len(test_examples)) logger.info(' Batch size = %d', args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_ids for f in test_features], dtype=torch.long) all_label_mask = torch.tensor([f.label_mask for f in test_features], dtype=torch.long) all_guids = torch.tensor([f.guid for f in test_features], dtype=torch.long) test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_label_mask, all_guids) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size) model.eval() (test_loss, 
test_accuracy) = (0, 0) (nb_test_steps, nb_test_examples) = (0, 0) test_output_dict = dict() labels_pred_lst = [] labels_true_lst = [] file_name = os.path.join(args.output_dir, 'results.txt') eval_file = open(file_name, 'w') for (input_ids, input_mask, segment_ids, label_ids, label_mask, guids) in tqdm(test_dataloader, desc='Testing'): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) label_mask = label_mask.to(device) with torch.no_grad(): tmp_test_loss = model(input_ids, segment_ids, input_mask, label_ids, label_mask) logits = model(input_ids, segment_ids, input_mask) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() label_mask = label_mask.to('cpu').numpy() (tmp_test_correct, tmp_test_total) = accuracy(logits, label_ids, label_mask) (trues_lst, preds_lst) = true_and_pred(logits, label_ids, label_mask) labels_pred_lst += preds_lst labels_true_lst += trues_lst for (guid, trues, preds) in zip(guids, trues_lst, preds_lst): test_output_dict[guid.item()] = (test_examples[guid].text, test_examples[guid].label, trues, preds, label_map) for (w, label, true, pred) in zip(test_examples[guid].text, test_examples[guid].label, trues, preds): gtag = idx2label[true] ptag = idx2label[pred] eval_file.write((' '.join([w, gtag, ptag]) + '\n')) eval_file.write('\n') test_loss += tmp_test_loss.mean().item() test_accuracy += tmp_test_correct nb_test_examples += tmp_test_total nb_test_steps += 1 test_loss = (test_loss / nb_test_steps) test_accuracy = (test_accuracy / nb_test_examples) result = {'test_loss': test_loss, 'test_accuracy': test_accuracy, 'test_f1': compute_f1(labels_pred_lst, labels_true_lst, idx2label)} output_test_file = os.path.join(args.output_dir, 'test_results.txt') with open(output_test_file, 'w') as writer: logger.info('***** Test results *****') for key in sorted(result.keys()): logger.info(' %s = %s', key, str(result[key])) writer.write(('%s = %s\n' % (key, str(result[key])))) eval_file.close() pickle.dump(test_output_dict, open(os.path.join(args.output_dir, 'test_output_dict.pkl'), 'wb'))
def _make_batches(x, y, batch_size, test=False): (sample_x, sample_y) = tf.train.slice_input_producer([x, y], shuffle=True) sample = [sample_x, sample_y] (x_batch, y_batch) = tf.train.batch(sample, batch_size) return (x_batch, y_batch)
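slice_input_producer and tf.train.batch are TF1 queue-runner APIs that were removed in TF2; a sketch of the modern tf.data equivalent of the same shuffle-and-batch pipeline:

import tensorflow as tf

def make_batches(x, y, batch_size, shuffle=True):
    # pair up x and y element-wise, shuffle across the full set, then batch
    ds = tf.data.Dataset.from_tensor_slices((x, y))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(x))
    return ds.batch(batch_size)

for x_batch, y_batch in make_batches(tf.range(10.0), tf.range(10.0), 4):
    pass  # consumes one epoch of batches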
class T2TAttention(nn.Module): def __init__(self, dim, num_heads=8, in_dim=None, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, attn_cfg=None): super().__init__() self.num_heads = num_heads self.in_dim = (in_dim if (in_dim is not None) else dim) head_dim = (dim // num_heads) self.scale = (qk_scale or (head_dim ** (- 0.5))) self.qkv = nn.Linear(dim, (self.in_dim * 3), bias=qkv_bias) self.proj = nn.Linear(self.in_dim, self.in_dim) self.proj_drop = nn.Dropout(proj_drop) if (attn_cfg is None): self.attention_layer = None self.attn_drop = nn.Dropout(attn_drop) else: self.attention_layer = hydra.utils.instantiate(attn_cfg, softmax_temp=self.scale, _recursive_=False) def forward(self, x): (B, N, C) = x.shape (q, k, v) = self.qkv(x).chunk(3, dim=(- 1)) v_og = v (q, k, v) = [rearrange(t, 'b n (n_head head_dim) -> b n n_head head_dim', n_head=self.num_heads) for t in (q, k, v)] if (self.attention_layer is None): (q, k, v) = [rearrange(t, 'b n n_head head_dim -> b n_head n head_dim') for t in (q, k, v)] attn = ((q * self.scale) @ k.transpose((- 2), (- 1))) attn = attn.softmax(dim=(- 1)) attn = self.attn_drop(attn) attn_output = (attn @ v).transpose(1, 2) else: (attn_output, _) = self.attention_layer(q, k, v) x = rearrange(attn_output, 'b n h d -> b n (h d)') x = self.proj(x) x = self.proj_drop(x) x = (v_og + x) return x
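A smoke test of the default (non-hydra) attention path above; einops.rearrange and plain torch are the only dependencies exercised, and the residual is added on the value projection (v_og), so output width equals in_dim:

import torch

attn = T2TAttention(dim=64, num_heads=8, in_dim=64, attn_cfg=None)
x = torch.randn(2, 196, 64)   # (batch, tokens, dim)
out = attn(x)
assert out.shape == (2, 196, 64)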
class MMD_DIM(Regulariser): def __init__(self): super(MMD_DIM, self).__init__(imq_dim_kernel) self.samples = True self.name = 'mmd_dim' def __call__(self, i1, i2): return self.f(i1, i2)
class HourglassBlock(nn.Module): def __init__(self, block, num_blocks, planes, depth, make_bn): super(HourglassBlock, self).__init__() self.block = block self.layernames = [] self.num_blocks = num_blocks self.planes = planes self.outputs = {} self.make_bn = make_bn self._hour_glass_layers(depth) def _make_blocks(self): layers = [] for i in range(0, self.num_blocks): layers.append(self.block((self.planes * self.block.expansion), self.planes, make_bn=self.make_bn)) return nn.Sequential(*layers) def _hour_glass_layers(self, n): self.layernames.append(('layer%d_1' % n)) setattr(self, self.layernames[(- 1)], self._make_blocks()) self.layernames.append(('mp%d' % n)) setattr(self, self.layernames[(- 1)], nn.MaxPool2d(2, stride=2)) self.layernames.append(('layer%d_2' % n)) setattr(self, self.layernames[(- 1)], self._make_blocks()) if (n == 1): self.layernames.append(('layer%d_4' % n)) setattr(self, self.layernames[(- 1)], self._make_blocks()) else: self._hour_glass_layers((n - 1)) self.layernames.append(('layer%d_3' % n)) setattr(self, self.layernames[(- 1)], self._make_blocks()) self.layernames.append(('up%d' % n)) setattr(self, self.layernames[(- 1)], nn.Upsample(scale_factor=2)) self.layernames.append(('sum%d' % n)) setattr(self, self.layernames[(- 1)], (torch.add, ('layer%d_1' % n))) def forward(self, x): outputs = {} for layer in self.layernames: layerfn = getattr(self, layer) if isinstance(layerfn, tuple): (fn, a) = layerfn x = fn(outputs[a], x) else: x = layerfn(x) if (('layer' in layer) and ('_1' in layer)): outputs[layer] = x return x
class ParallelRunner(): def __init__(self, args, logger): self.args = args self.logger = logger self.batch_size = self.args.batch_size_run (self.parent_conns, self.worker_conns) = zip(*[Pipe() for _ in range(self.batch_size)]) env_fn = env_REGISTRY[self.args.env] self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args)))) for worker_conn in self.worker_conns] for p in self.ps: p.daemon = True p.start() self.parent_conns[0].send(('get_env_info', None)) self.env_info = self.parent_conns[0].recv() self.episode_limit = self.env_info['episode_limit'] self.t = 0 self.t_env = 0 self.train_returns = [] self.test_returns = [] self.train_stats = {} self.test_stats = {} self.log_train_stats_t = (- 100000) def setup(self, scheme, groups, preprocess, mac): self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, (self.episode_limit + 1), preprocess=preprocess, device=self.args.device) self.mac = mac self.scheme = scheme self.groups = groups self.preprocess = preprocess def get_env_info(self): return self.env_info def save_replay(self): pass def close_env(self): for parent_conn in self.parent_conns: parent_conn.send(('close', None)) def reset(self): self.batch = self.new_batch() for parent_conn in self.parent_conns: parent_conn.send(('reset', None)) pre_transition_data = {'state': [], 'avail_actions': [], 'obs': []} for parent_conn in self.parent_conns: data = parent_conn.recv() pre_transition_data['state'].append(data['state']) pre_transition_data['avail_actions'].append(data['avail_actions']) pre_transition_data['obs'].append(data['obs']) self.batch.update(pre_transition_data, ts=0) self.t = 0 self.env_steps_this_run = 0 def run(self, test_mode=False): self.reset() all_terminated = False episode_returns = [0 for _ in range(self.batch_size)] episode_lengths = [0 for _ in range(self.batch_size)] self.mac.init_hidden(batch_size=self.batch_size) terminated = [False for _ in range(self.batch_size)] envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)] final_env_infos = [] while True: actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode) cpu_actions = actions.to('cpu').numpy() actions_chosen = {'actions': actions.unsqueeze(1)} self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False) action_idx = 0 for (idx, parent_conn) in enumerate(self.parent_conns): if (idx in envs_not_terminated): if (not terminated[idx]): parent_conn.send(('step', cpu_actions[action_idx])) action_idx += 1 envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)] all_terminated = all(terminated) if all_terminated: break post_transition_data = {'reward': [], 'terminated': []} pre_transition_data = {'state': [], 'avail_actions': [], 'obs': []} for (idx, parent_conn) in enumerate(self.parent_conns): if (not terminated[idx]): data = parent_conn.recv() post_transition_data['reward'].append((data['reward'],)) episode_returns[idx] += data['reward'] episode_lengths[idx] += 1 if (not test_mode): self.env_steps_this_run += 1 env_terminated = False if data['terminated']: final_env_infos.append(data['info']) if (data['terminated'] and (not data['info'].get('episode_limit', False))): env_terminated = True terminated[idx] = data['terminated'] post_transition_data['terminated'].append((env_terminated,)) pre_transition_data['state'].append(data['state']) 
pre_transition_data['avail_actions'].append(data['avail_actions']) pre_transition_data['obs'].append(data['obs']) self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False) self.t += 1 self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True) if (not test_mode): self.t_env += self.env_steps_this_run for parent_conn in self.parent_conns: parent_conn.send(('get_stats', None)) env_stats = [] for parent_conn in self.parent_conns: env_stat = parent_conn.recv() env_stats.append(env_stat) cur_stats = (self.test_stats if test_mode else self.train_stats) cur_returns = (self.test_returns if test_mode else self.train_returns) log_prefix = ('test_' if test_mode else '') infos = ([cur_stats] + final_env_infos) cur_stats.update({k: sum((d.get(k, 0) for d in infos)) for k in set.union(*[set(d) for d in infos])}) cur_stats['n_episodes'] = (self.batch_size + cur_stats.get('n_episodes', 0)) cur_stats['ep_length'] = (sum(episode_lengths) + cur_stats.get('ep_length', 0)) cur_returns.extend(episode_returns) n_test_runs = (max(1, (self.args.test_nepisode // self.batch_size)) * self.batch_size) if (test_mode and (len(self.test_returns) == n_test_runs)): self._log(cur_returns, cur_stats, log_prefix) elif ((self.t_env - self.log_train_stats_t) >= self.args.runner_log_interval): self._log(cur_returns, cur_stats, log_prefix) if hasattr(self.mac.action_selector, 'epsilon'): self.logger.log_stat('epsilon', self.mac.action_selector.epsilon, self.t_env) self.log_train_stats_t = self.t_env return self.batch def _log(self, returns, stats, prefix): self.logger.log_stat((prefix + 'return_mean'), np.mean(returns), self.t_env) self.logger.log_stat((prefix + 'return_std'), np.std(returns), self.t_env) returns.clear() for (k, v) in stats.items(): if (k != 'n_episodes'): self.logger.log_stat(((prefix + k) + '_mean'), (v / stats['n_episodes']), self.t_env) stats.clear()
def make_latest_self_attn_gnn(): return latest_self_attention_gnn(kq_dim=FLAGS.attn_kq_dim, v_dim=FLAGS.attn_v_dim, concat_heads_output_dim=FLAGS.attn_concat_heads_output_dim, make_mlp_fn=partial(make_mlp_model, FLAGS.gnn_latent_dim, (FLAGS.node_embedding_dim // 2), FLAGS.gnn_num_layers, tf.nn.relu, FLAGS.gnn_l2_regularizer_weight, FLAGS.gnn_bias_init_stddev), train_batch_size=FLAGS.train_batch_size, max_n_node=6, num_heads=FLAGS.attn_num_heads, kq_dim_division=True)
class CARAFENaiveFunction(Function): @staticmethod def symbolic(g, features, masks, kernel_size, group_size, scale_factor): return g.op('mmcv::MMCVCARAFENaive', features, masks, kernel_size_i=kernel_size, group_size_i=group_size, scale_factor_f=scale_factor) @staticmethod def forward(ctx, features, masks, kernel_size, group_size, scale_factor): assert (scale_factor >= 1) assert (masks.size(1) == ((kernel_size * kernel_size) * group_size)) assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor)) assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor)) assert ((features.size(1) % group_size) == 0) assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1)) ctx.kernel_size = kernel_size ctx.group_size = group_size ctx.scale_factor = scale_factor ctx.feature_size = features.size() ctx.mask_size = masks.size() (n, c, h, w) = features.size() output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor))) ext_module.carafe_naive_forward(features, masks, output, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor) if (features.requires_grad or masks.requires_grad): ctx.save_for_backward(features, masks) return output @staticmethod def backward(ctx, grad_output): assert grad_output.is_cuda (features, masks) = ctx.saved_tensors kernel_size = ctx.kernel_size group_size = ctx.group_size scale_factor = ctx.scale_factor grad_input = torch.zeros_like(features) grad_masks = torch.zeros_like(masks) ext_module.carafe_naive_backward(grad_output.contiguous(), features, masks, grad_input, grad_masks, kernel_size=kernel_size, group_size=group_size, scale_factor=scale_factor) return (grad_input, grad_masks, None, None, None)
def check_number(model_file, tot_num): cur_num = 0 max_ngram_order = 0 with open(model_file) as model: lines = model.readlines() for line in lines[1:]: if ('=' not in line): break cur_num += int(line.split('=')[(- 1)]) max_ngram_order = int(line.split('=')[0].split()[(- 1)]) return ((cur_num == tot_num), max_ngram_order)
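A sketch against a typical ARPA-format header: the function skips the first line, accumulates the 'ngram N=count' lines until the first line without '=', and reports whether the counts sum to the expected total, plus the highest order seen (file name and counts below are illustrative):

# write a minimal ARPA-style header to disk, then verify it
header = ['\\data\\\n', 'ngram 1=4415\n', 'ngram 2=21784\n', 'ngram 3=39884\n', '\n']
with open('header.arpa', 'w') as f:
    f.writelines(header)
ok, order = check_number('header.arpa', 4415 + 21784 + 39884)
assert ok and order == 3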
class VideoMAEForVideoClassification(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
def sp_split_paragraph(text, document_store): documents = [] for (c_index, data) in enumerate(text): data = data.replace('#', ' ') data = re.sub('\\s+', ' ', data) new_doc = SDocument(content=data, meta={'source': c_index}) documents.append(new_doc) document_store.write_documents(documents) return document_store
def ReadFileSL(x_axis, tthread, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity): (w, h) = (3, len(x_axis)) y = [[] for _ in range(w)] for isCyclic in ['true', 'false']: inputEvents = (tthread * batchInterval) op_gs_path = getPathSL('OPGSA', inputEvents, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity) lines = open(op_gs_path).readlines() throughput_opgsa = float(lines[0].split(': ')[1]) gs_path = getPathSL('GSA', inputEvents, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity) lines = open(gs_path).readlines() throughput_gsa = float(lines[0].split(': ')[1]) print(throughput_opgsa, throughput_gsa) y[0].append(max(throughput_opgsa, throughput_gsa)) for isCyclic in ['true', 'false']: inputEvents = (tthread * batchInterval) op_dfs_path = getPathSL('TStream', inputEvents, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity) lines = open(op_dfs_path).readlines() throughput = lines[0].split(': ')[1] y[1].append(float(throughput)) for isCyclic in ['true', 'false']: inputEvents = (tthread * batchInterval) op_dfs_path = getPathSL('PAT', inputEvents, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity) lines = open(op_dfs_path).readlines() throughput = lines[0].split(': ')[1] y[2].append(float(throughput)) print(y) return y
def main(): args = parse_args() assert (not args.provide_description) if args.limit: print('WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.') print(f'Selected Tasks: {args.tasks}') description_dict = {} if args.description_dict_path: with open(args.description_dict_path, 'r') as f: description_dict = json.load(f) success = [] fail = [] model_name = os.path.basename(os.path.realpath(args.pretrained)) output_path = (args.output_path if args.output_path else 'results') for prec in args.precision: prec_arg = parse_precision(prec, args.model) model_args = f'pretrained={args.pretrained},{prec_arg}' if (len(args.model_args) > 0): model_args = f'{model_args},{args.model_args}' for task in args.tasks: task_names = task_map.get(task, task).split(',') num_fewshot = task_to_n_few_shots.get(task, args.num_fewshot) log_dir = f'{output_path}/{model_name}/{args.device}/{prec}/{task}' os.makedirs(log_dir, exist_ok=True) try: results = evaluator.simple_evaluate(model=args.model, model_args=model_args, tasks=task_names, num_fewshot=num_fewshot, batch_size=args.batch_size, max_batch_size=args.max_batch_size, device=args.device, no_cache=args.no_cache, limit=args.limit, description_dict=description_dict, decontamination_ngrams_path=args.decontamination_ngrams_path, check_integrity=args.check_integrity, write_out=args.write_out, output_base_path=log_dir) if (len(results['results']) > 1): average = {} for (_, subtask) in results['results'].items(): for (metric, value) in subtask.items(): average[metric] = (average.get(metric, []) + [value]) for (k, v) in average.items(): average[k] = ((sum(v) / len(v)) if (not k.endswith('_stderr')) else 0) results['results'][task] = average results['versions'][task] = 1 dumped = json.dumps(results, indent=2) print(dumped) if args.output_path: with open(f'{log_dir}/result.json', 'w') as f: f.write(dumped) success.append(results) except Exception as e: fail.append(f'Job config of task={task}, precision={prec} failed. Error Message: {str(e)}') print(f'Job config of task={task}, precision={prec} failed. Error Message: {str(e)}') print('Here are results of all successful tasks:') for results in success: print(results['config']) print(evaluator.make_table(results)) if (len(fail) > 0): raise RuntimeError('\n'.join(fail))
def _Pool_initialize_worker(augseq, seed_start): if (seed_start is None): process_name = multiprocessing.current_process().name if ((sys.version_info[0] == 3) and (sys.version_info[1] >= 7)): seed_offset = time.time_ns() else: seed_offset = (int((time.time() * (10 ** 6))) % (10 ** 6)) seed = (hash(process_name) + seed_offset) seed_global = (ia.SEED_MIN_VALUE + ((seed - (10 ** 9)) % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE))) seed_local = (ia.SEED_MIN_VALUE + (seed % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE))) ia.seed(seed_global) augseq.reseed(seed_local) Pool._WORKER_SEED_START = seed_start Pool._WORKER_AUGSEQ = augseq Pool._WORKER_AUGSEQ.localize_random_state_()
class UnitTestSpace(unittest.TestCase): def setUp(self): p.reset_shapeid_counter() self.s = p.Space() (self.b1, self.b2) = (p.Body(1, 3), p.Body(10, 100)) self.s.add(self.b1, self.b2) self.b1.position = (10, 0) self.b2.position = (20, 0) (self.s1, self.s2) = (p.Circle(self.b1, 5), p.Circle(self.b2, 10)) self.s.add(self.s1, self.s2) def tearDown(self): del self.s del self.b1, self.b2 del self.s1, self.s2 def testProperties(self): s = p.Space(15) self.assertEqual(s.iterations, 15) s.gravity = (10, 2) self.assertEqual(s.gravity.x, 10) s.damping = 3 self.assertEqual(s.damping, 3) s.idle_speed_threshold = 4 self.assertEqual(s.idle_speed_threshold, 4) s.sleep_time_threshold = 5 self.assertEqual(s.sleep_time_threshold, 5) s.collision_slop = 6 self.assertEqual(s.collision_slop, 6) s.collision_bias = 8 self.assertEqual(s.collision_bias, 8) s.collision_persistence = 9 self.assertEqual(s.collision_persistence, 9) self.assertEqual(s.enable_contact_graph, False) s.enable_contact_graph = True self.assertEqual(s.enable_contact_graph, True) def testAddRemove(self): s = self.s s.remove(self.b1) s.add(self.b1) b = p.Body() s3 = p.Circle(b, 2) s.add(s3) b3 = p.Body(1, 1) s.add(b3) self.assertEqual(len(s.bodies), 3) self.assertEqual(len(s.shapes), 3) s.remove(self.s2, self.b1, self.s1) s.remove(s3) self.assertEqual(len(s.bodies), 2) self.assertEqual(len(s.shapes), 0) def testAddInStep(self): s = self.s b = p.Body(1, 2) c = p.Circle(b, 2) def pre_solve(space, arbiter): space.add(b, c) return True s.add_collision_handler(0, 0, pre_solve=pre_solve) s.step(0.1) self.assert_((b in s.bodies)) self.assert_((c in s.shapes)) def testRemoveInStep(self): s = self.s def pre_solve(space, arbiter): space.remove(arbiter.shapes) return True s.add_collision_handler(0, 0, pre_solve=pre_solve) s.step(0.1) self.assert_((self.s1 not in s.shapes)) self.assert_((self.s2 not in s.shapes)) def testPointQueryFirst(self): self.assertEqual(self.s.point_query_first((31, 0)), None) self.assertEqual(self.s.point_query_first((10, 0)), self.s1) self.s1.group = 1 self.assertEqual(self.s.point_query_first((10, 0)), self.s1) self.assertEqual(self.s.point_query_first((10, 0), group=1), None) def testPointQuery(self): b3 = p.Body(1, 1) b3.position = (19, 1) s3 = p.Circle(b3, 10) self.s.add(s3) hits = self.s.point_query((23, 0)) self.assert_((self.s1 not in hits)) self.assert_((self.s2 in hits)) self.assert_((s3 in hits)) def testNearestPointQuery(self): res = self.s.nearest_point_query(((- 10), 0), 20) self.assertEqual(len(res), 1) self.assertEqual(res[0]['distance'], 15) self.assertEqual(res[0]['point'], Vec2d(5, 0)) self.assertEqual(res[0]['shape'], self.s1) res = self.s.nearest_point_query(((- 10), 0), 15) self.assertEqual(len(res), 0) def testNearestPointQueryNearest(self): res = self.s.nearest_point_query_nearest(((- 10), 0), 200) self.assertEqual(res['distance'], 15) self.assertEqual(res['point'], Vec2d(5, 0)) self.assertEqual(res['shape'], self.s1) res = self.s.nearest_point_query_nearest(((- 10), 0), 15) self.assertEqual(res, None) def testBBQuery(self): bb = p.BB((- 7), (- 7), 7, 7) hits = self.s.bb_query(bb) self.assert_((self.s1 in hits)) self.assert_((self.s2 not in hits)) def testShapeQuery(self): b = p.Body() s = p.Circle(b, 2) b.position = (20, 1) hits = self.s.shape_query(s) self.assert_((self.s1 not in hits)) self.assert_((self.s2 in hits)) def testStaticPointQueries(self): b = p.Body() c = p.Circle(b, 10) b.position = ((- 50), (- 50)) self.s.add(c) hit = self.s.point_query_first(((- 50), (- 55))) self.assertEqual(hit, c) hits = 
self.s.point_query(((- 50), (- 55))) self.assertEqual(hits[0], c) def testReindexStatic(self): b = p.Body() c = p.Circle(b, 10) self.s.add(c) b.position = ((- 50), (- 50)) hit = self.s.point_query_first(((- 50), (- 55))) self.assertEqual(hit, None) self.s.reindex_static() hit = self.s.point_query_first(((- 50), (- 55))) self.assertEqual(hit, c) b.position = (50, 50) self.s.reindex_shape(c) hit = self.s.point_query_first((50, 50)) self.assertEqual(hit, c) def testReindexStaticCollision(self): b1 = p.Body(10, p.inf) c1 = p.Circle(b1, 10) b1.position = (20, 20) b2 = p.Body() s2 = p.Segment(b2, ((- 10), 0), (10, 0), 1) self.s.add(b1, c1) self.s.add(s2) s2.unsafe_set_b((100, 0)) self.s.gravity = (0, (- 100)) for x in range(10): self.s.step(0.1) self.assert_((b1.position.y < 0)) b1.position = (20, 20) b1.velocity = (0, 0) self.s.reindex_static() for x in range(10): self.s.step(0.1) self.assert_((b1.position.y > 10)) def testReindexShape(self): b = p.Body() c = p.Circle(b, 10) self.s.add(c) b.position = ((- 50), (- 50)) hit = self.s.point_query_first(((- 50), (- 55))) self.assertEqual(hit, None) self.s.reindex_shape(c) hit = self.s.point_query_first(((- 50), (- 55))) self.assertEqual(hit, c) def testSegmentQueries(self): self.assertEqual(self.s.segment_query_first((13, 11), (131.01, 12)), None) self.assertEqual(self.s.segment_query_first((13, (- 11)), (131.01, (- 11))), None) r = self.s.segment_query_first((10, (- 100)), (10, 100)) self.assertEqual(r.shape, self.s1) self.assertEqual(r.t, 0.475) self.assertEqual(r.n, Vec2d(0, (- 1))) b3 = p.Body(1, 1) b3.position = (19, 1) s3 = p.Circle(b3, 10) self.s.add(s3) hits = self.s.segment_query((16, (- 100)), (16, 100)) hit_shapes = [hit.shape for hit in hits] self.assert_((self.s1 not in hit_shapes)) self.assert_((self.s2 in hit_shapes)) self.assert_((s3 in hit_shapes)) def testStaticSegmentQueries(self): b = p.Body() c = p.Circle(b, 10) b.position = ((- 50), (- 50)) self.s.add(c) hit = self.s.segment_query_first(((- 70), (- 50)), ((- 30), (- 50))) self.assertEqual(hit.shape, c) hits = self.s.segment_query(((- 70), (- 50)), ((- 30), (- 50))) self.assertEqual(hits[0].shape, c) def testCollisionHandlerBegin(self): self.num_of_begins = 0 def begin(space, arb, data): self.num_of_begins += 1 return True self.b1.position = self.b2.position self.s.add_collision_handler(0, 0, begin, None, None, None, None) self.s.step(0.1) self.s.step(0.1) self.assertEqual(self.num_of_begins, 1) def testCollisionHandlerPreSolve(self): self.begin_shapes = None self.begin_contacts = None self.begin_space = None self.s1.collision_type = 1 self.s2.collision_type = 2 def pre_solve(space, arb, test_self): test_self.begin_shapes = arb.shapes test_self.begin_contacts = arb.contacts test_self.begin_space = space return True for x in range(100): self.s.step(0.1) self.s.add_collision_handler(1, 2, None, pre_solve, None, None, self) self.s.step(0.1) self.assertEqual(self.s1, self.begin_shapes[0]) self.assertEqual(self.s2, self.begin_shapes[1]) self.assertEqual(self.begin_space, self.s) def testCollisionHandlerPostSolve(self): self.first_contact = None def post_solve(space, arb, test_self): self.first_contact = arb.is_first_contact return True self.s.add_collision_handler(0, 0, None, None, post_solve, None, self) self.s.step(0.1) self.assert_(self.first_contact) self.s.step(0.1) self.assertFalse(self.first_contact) def testPostStepCallback(self): self.number_of_calls = 0 def f(obj, shapes, test_self): for shape in shapes: self.s.remove(shape) test_self.number_of_calls += 1 def 
pre_solve(space, arb): space.add_post_step_callback(f, arb.shapes[0], arb.shapes, test_self=self) return True self.s.add_collision_handler(0, 0, None, pre_solve, None, None) self.s.step(0.1) self.assertEqual(self.s.shapes, []) self.s.add(self.s1, self.s2) self.s.step(0.1) self.assertEqual(self.s.shapes, []) self.s.add(self.s1, self.s2) self.s.add_collision_handler(0, 0, None, None, None, None) self.s.step(0.1) self.assertEqual(self.number_of_calls, 2)
def create_runner(base_dir, create_agent_fn, random_seed, agent_name, game_name, num_iterations):
    assert base_dir is not None
    if FLAGS.schedule == 'continuous_train_and_eval':
        return run_experiment.Runner(base_dir, create_agent_fn, random_seed, agent_name, game_name, num_iterations)
    elif FLAGS.schedule == 'continuous_train':
        return run_experiment.TrainRunner(base_dir, create_agent_fn)
    else:
        raise ValueError('Unknown schedule: {}'.format(FLAGS.schedule))
def quicksave(true_images, colors, masks, schedule, file_name, quicksave_type):
    if np.max(schedule) == 2:
        schedule = schedule // 2
    recons = (colors * masks).sum(1)
    true_images = torch.cat([true_images, torch.zeros_like(true_images[:1].to(true_images.device))], dim=0)
    tmp = np.where(np.cumsum(schedule) < (true_images.shape[0] - 1), np.cumsum(schedule), -1)
    true_images = true_images[tmp]
    full_plot = torch.cat([true_images.unsqueeze(1), recons.unsqueeze(1)], dim=1)
    if quicksave_type == 'full':
        subimages = colors * masks
        masks = masks.repeat(1, 1, 3, 1, 1)
        full_plot = torch.cat([full_plot, masks, subimages], dim=1)
    elif quicksave_type == 'subimages':
        subimages = colors * masks
        full_plot = torch.cat([full_plot, subimages], dim=1)
    else:
        raise ValueError("Invalid value '{}' given to quicksave".format(quicksave_type))
    create_image_from_subimages(full_plot, file_name)
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm


class NormConv2d(nn.Module):
    """Weight-normalized Conv2d followed by a learned per-channel affine (gamma, beta)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        self.beta = nn.Parameter(torch.zeros([1, out_channels, 1, 1], dtype=torch.float32))
        self.gamma = nn.Parameter(torch.ones([1, out_channels, 1, 1], dtype=torch.float32))
        self.conv = weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding), name='weight')

    def forward(self, x):
        out = self.conv(x)
        out = (self.gamma * out) + self.beta
        return out
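# Usage sketch for NormConv2d; the input shape is an arbitrary example, not
# from the original code.
x = torch.randn(2, 3, 32, 32)                      # batch of 2 RGB images
layer = NormConv2d(3, 16, kernel_size=3, padding=1)
y = layer(x)
assert y.shape == (2, 16, 32, 32)                  # spatial size preserved by padding=1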
def cifar_model_resnet(conv_layer, linear_layer, init_type, N=5, factor=1, **kwargs):
    def block(in_filters, out_filters, k, downsample):
        if not downsample:
            k_first = 3
            skip_stride = 1
            k_skip = 1
        else:
            k_first = 4
            skip_stride = 2
            k_skip = 2
        return [Dense(conv_layer(in_filters, out_filters, k_first, stride=skip_stride, padding=1)),
                nn.ReLU(),
                Dense(conv_layer(in_filters, out_filters, k_skip, stride=skip_stride, padding=0),
                      None,
                      conv_layer(out_filters, out_filters, k, stride=1, padding=1)),
                nn.ReLU()]

    conv1 = [conv_layer(3, 16, 3, stride=1, padding=1), nn.ReLU()]
    conv2 = block(16, 16 * factor, 3, False)
    for _ in range(N):
        conv2.extend(block(16 * factor, 16 * factor, 3, False))
    conv3 = block(16 * factor, 32 * factor, 3, True)
    for _ in range(N - 1):
        conv3.extend(block(32 * factor, 32 * factor, 3, False))
    conv4 = block(32 * factor, 64 * factor, 3, True)
    for _ in range(N - 1):
        conv4.extend(block(64 * factor, 64 * factor, 3, False))
    layers = conv1 + conv2 + conv3 + conv4 + [
        Flatten(),
        linear_layer(64 * factor * 8 * 8, 1000),
        nn.ReLU(),
        linear_layer(1000, 10),
    ]
    model = DenseSequential(*layers)
    # He-style initialization for every conv layer
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / n))
            if m.bias is not None:
                m.bias.data.zero_()
    return model
class FilterVariablesTest(tf.test.TestCase):
    def _create_variables(self):
        return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),
                tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),
                tf.Variable(1.0, name='StackProposalGenerator/weights'),
                tf.Variable(1.0, name='StackProposalGenerator/biases')]

    def test_return_all_variables_when_empty_regex(self):
        variables = self._create_variables()
        out_variables = variables_helper.filter_variables(variables, [''])
        self.assertItemsEqual(out_variables, variables)

    def test_return_variables_which_do_not_match_single_regex(self):
        variables = self._create_variables()
        out_variables = variables_helper.filter_variables(variables, ['FeatureExtractor/.*'])
        self.assertItemsEqual(out_variables, variables[2:])

    def test_return_variables_which_do_not_match_any_regex_in_list(self):
        variables = self._create_variables()
        out_variables = variables_helper.filter_variables(
            variables, ['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'])
        self.assertItemsEqual(out_variables, [variables[0], variables[2]])

    def test_return_variables_matching_empty_regex_list(self):
        variables = self._create_variables()
        out_variables = variables_helper.filter_variables(variables, [''], invert=True)
        self.assertItemsEqual(out_variables, [])

    def test_return_variables_matching_some_regex_in_list(self):
        variables = self._create_variables()
        out_variables = variables_helper.filter_variables(
            variables, ['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'], invert=True)
        self.assertItemsEqual(out_variables, [variables[1], variables[3]])
def instrument(name, func, out_dir):
    name_for_path = name.replace('.', '_')

    def wrapper(*args, **kwargs):
        global is_instrumenting
        if is_instrumenting:
            return func(*args, **kwargs)
        out_api_dir = os.path.join(out_dir, name_for_path)
        os.makedirs(out_api_dir, exist_ok=True)
        if len(os.listdir(out_api_dir)) > 1000000:
            return func(*args, **kwargs)
        import pickle
        import traceback
        is_instrumenting = True
        save_record = True
        # record the inputs
        try:
            # copy positional args into a list so scalar tensors can be replaced below
            args = list(args)
            # note: subscripting __builtins__ assumes this module is imported
            # (where __builtins__ is a dict rather than the builtins module)
            arg_names = [f'arg_{i}' for i in __builtins__['range'](len(args))] + sorted(kwargs.keys())
            if 'out' in arg_names:
                save_record = False
            else:
                arg_is_pos = [True] * len(args) + [False] * len(kwargs)
                arg_is_tensor = [False] * len(arg_is_pos)
                arg_tensor_dtypes = [None] * len(arg_is_pos)
                arg_tensor_shapes = [None] * len(arg_is_pos)
                arg_values = [None] * len(arg_is_pos)
                for i_arg, arg_name in enumerate(arg_names):
                    arg = args[i_arg] if i_arg < len(args) else kwargs[arg_name]
                    if isinstance(arg, torch.Tensor) and arg.ndim == 0:
                        # unwrap scalar tensors so they are stored (and passed on) as plain numbers
                        arg = arg.item()
                        if i_arg < len(args):
                            args[i_arg] = arg
                        else:
                            kwargs[arg_name] = arg
                    if isinstance(arg, torch.Tensor):
                        arg_is_tensor[i_arg] = True
                        arg_tensor_dtypes[i_arg] = str(arg.dtype)
                        arg_tensor_shapes[i_arg] = list(arg.shape)
                        arg_values[i_arg] = arg.detach().cpu().numpy()
                    elif (isinstance(arg, (list, tuple))
                          and __builtins__['all'](isinstance(x, torch.Tensor) for x in arg)):
                        arg_is_tensor[i_arg] = True
                        arg_tensor_dtypes[i_arg] = [str(x.dtype) for x in arg]
                        arg_tensor_shapes[i_arg] = [list(x.shape) for x in arg]
                        arg_values[i_arg] = type(arg)([x.detach().cpu().numpy() for x in arg])
                    else:
                        arg_values[i_arg] = arg
                save_record = __builtins__['any'](arg_is_tensor)
        except Exception as e:
            save_record = False
            print(f'Error when saving inputs of {func}: {e}', flush=True)
            traceback.print_exc()
        # execute the function
        try:
            ret = func(*args, **kwargs)
        except Exception as e:
            is_instrumenting = False
            print(f'Error when exec {func}: {e}', flush=True)
            traceback.print_exc()
            raise e
        if not save_record:
            is_instrumenting = False
            return ret
        # save outputs
        try:
            if isinstance(ret, tuple):
                ret_list = list(ret)
            elif not isinstance(ret, list):
                ret_list = [ret]
            else:
                ret_list = ret
            out_names = [f'o_{i}' for i in __builtins__['range'](len(ret_list))]
            out_is_tensor = [False] * len(ret_list)
            out_tensor_dtypes = [None] * len(ret_list)
            out_tensor_shapes = [None] * len(ret_list)
            out_values = [None] * len(ret_list)
            for i_out, out_name in enumerate(out_names):
                out = ret_list[i_out]
                if isinstance(out, torch.Tensor) and out.ndim == 0:
                    out = out.item()
                if isinstance(out, torch.Tensor):
                    out_is_tensor[i_out] = True
                    out_tensor_dtypes[i_out] = str(out.dtype)
                    out_tensor_shapes[i_out] = list(out.shape)
                    out_values[i_out] = out.detach().cpu().numpy()
                elif (isinstance(out, (list, tuple))
                      and __builtins__['all'](isinstance(x, torch.Tensor) for x in out)):
                    out_is_tensor[i_out] = True
                    out_tensor_dtypes[i_out] = [str(x.dtype) for x in out]
                    out_tensor_shapes[i_out] = [list(x.shape) for x in out]
                    out_values[i_out] = type(out)([x.detach().cpu().numpy() for x in out])
                else:
                    out_values[i_out] = out
            save_record = __builtins__['any'](out_is_tensor)
        except Exception as e:
            save_record = False
            print(f'Error when saving outputs of {func}: {e}', flush=True)
            traceback.print_exc()
        if not save_record:
            is_instrumenting = False
            return ret
        # save the record, keyed by a hash of argument/output shapes and values
        try:
            import hashlib
            record = {'name': name,
                      'args': {'name': arg_names, 'is_pos': arg_is_pos, 'is_tensor': arg_is_tensor,
                               'tensor_dtype': arg_tensor_dtypes, 'tensor_shape': arg_tensor_shapes,
                               'value': arg_values},
                      'outputs': {'name': out_names, 'is_tensor': out_is_tensor,
                                  'tensor_dtype': out_tensor_dtypes, 'tensor_shape': out_tensor_shapes,
                                  'value': out_values}}
            list_to_hash = []
            for i_arg, arg_value in enumerate(arg_values):
                list_to_hash.append(f'{arg_names[i_arg]}:')
                if arg_is_tensor[i_arg]:
                    list_to_hash.append(str(arg_tensor_shapes[i_arg]))
                else:
                    list_to_hash.append(str(arg_value))
            list_to_hash.append('->')
            for i_out, out_value in enumerate(out_values):
                list_to_hash.append(f'{out_names[i_out]}:')
                if out_is_tensor[i_out]:
                    list_to_hash.append(str(out_tensor_shapes[i_out]))
                else:
                    list_to_hash.append(str(out_value))
            str_to_hash = ','.join(list_to_hash)
            hash_value = hashlib.md5(str_to_hash.encode()).hexdigest()
            out_file = os.path.join(out_api_dir, f'{hash_value}.pkl')
            if os.path.exists(out_file):
                print(f'File {out_file} already exists', flush=True)
            else:
                with open(out_file, 'wb') as f:
                    pickle.dump(record, f)
        except Exception as e:
            print(f'Error when saving record of {func}: {e}', flush=True)
            traceback.print_exc()
        is_instrumenting = False
        return ret

    setattr(wrapper, 'func_name', name)
    # monkey-patch the wrapper back onto the owning module/class
    last_dot_pos = name.rfind('.')
    module_obj = eval(name[:last_dot_pos])
    func_name = name[last_dot_pos + 1:]
    setattr(module_obj, func_name, wrapper)
def resnet50(**kwargs):
    model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return model
def reduce_dict(input_dict, average=True):
    """Reduce a dict of scalar tensors across all processes so that rank 0
    holds the (optionally averaged) results."""
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so the reduction order is consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
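# Usage sketch for reduce_dict (assumes the get_world_size helper above and,
# for multi-process runs, an initialized torch.distributed process group; the
# loss names are illustrative). Only rank 0 is guaranteed to hold the averaged
# values, since dist.reduce targets dst=0.
loss_dict = {'loss_cls': torch.tensor(0.7), 'loss_box': torch.tensor(0.3)}
loss_dict_reduced = reduce_dict(loss_dict, average=True)
print({k: v.item() for k, v in loss_dict_reduced.items()})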
def load_training(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    data = datasets.ImageFolder(root=os.path.join(root_path, dir), transform=transform)
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    return train_loader
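# Usage sketch for load_training; the dataset path and loader kwargs are
# placeholders, assuming an ImageFolder-style layout (root/<class>/<image>.jpg).
loader_kwargs = {'num_workers': 4, 'pin_memory': True}
train_loader = load_training('/data/office31', 'amazon', batch_size=32, kwargs=loader_kwargs)
for images, labels in train_loader:
    # images: [32, 3, 224, 224] after Resize + RandomCrop; labels: [32]
    break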
def classification_cross_entropy_softmax(y_pred, y_data):
    # clip predictions away from 0 so log() stays finite
    loss = -y_data * tf.log(tf.clip_by_value(y_pred, 1e-09, 1.0))
    return tf.reduce_mean(tf.reduce_sum(loss, 1))
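# Usage sketch (assumes the TF1-style API above, where tf.log exists; under
# TF2 one would use tf.math.log instead). y_pred holds softmax probabilities,
# y_data one-hot labels.
y_pred = tf.constant([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
y_data = tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
loss = classification_cross_entropy_softmax(y_pred, y_data)
# evaluates to the mean of -log(0.7) and -log(0.8), roughly 0.290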
def getTruthlist(lbs, f2list):
    total = 0
    res = []
    assert len(lbs) == len(f2list)
    for i in range(len(lbs)):
        if lbs[i] == f2list[i]:
            res.append(1)
            total += 1
        else:
            res.append(0)
    print('Accuracy:', total / len(f2list))
    return res
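# Usage sketch: element-wise agreement between two label sequences
# (the labels here are illustrative).
truth = getTruthlist(['B-Class', 'O', 'B-Function'], ['B-Class', 'O', 'O'])
# prints "Accuracy: 0.666..." and returns [1, 1, 0]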
@register_model
def vovnet57a(pretrained=False, **kwargs):
    return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
def debug():
    import json
    with open('data/didemo/train_data.json') as fp:
        data = json.load(fp)
    for k, v in data.items():
        print(v.keys())
    exit(0)
class SGD(Optimizer):
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, use_gc=False, gc_conv_only=False):
        if lr is not required and lr < 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if weight_decay < 0.0:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov,
                        use_gc=use_gc, gc_conv_only=gc_conv_only)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                # apply gradient centralization before the momentum update
                d_p = centralized_gradient(d_p, use_gc=group['use_gc'], gc_conv_only=group['gc_conv_only'])
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
        return loss
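# Usage sketch for the gradient-centralization SGD variant above (the model
# and data are placeholders; it assumes centralized_gradient and the
# torch.optim imports from the surrounding repo are available).
model = torch.nn.Linear(10, 2)
opt = SGD(model.parameters(), lr=0.1, momentum=0.9, use_gc=True)
loss = model(torch.randn(4, 10)).sum()
loss.backward()
opt.step()
opt.zero_grad()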
def Merge_Label(inputFile):
    # map fine-grained entity labels onto coarser categories
    merging_dict = {}
    merging_dict['Library_Function'] = 'Function'
    merging_dict['Function_Name'] = 'Function'
    merging_dict['Class_Name'] = 'Class'
    merging_dict['Library_Class'] = 'Class'
    merging_dict['Library_Variable'] = 'Variable'
    merging_dict['Variable_Name'] = 'Variable'
    merging_dict['Website'] = 'Website'
    merging_dict['Organization'] = 'Website'
    modified_file = inputFile[:-4] + '_merged_labels.txt'
    Fout = open(modified_file, 'w')
    line_count = 0
    for line in open(inputFile):
        line_count += 1
        line_values = line.strip().split()
        if len(line_values) < 2:
            Fout.write(line)
            continue
        gold_word = line_values[0]
        gold_label = line_values[1]
        raw_word = line_values[2]
        raw_label = line_values[3]
        if gold_word != raw_word:
            print('wrong mapping: ', line)
        word = gold_word
        label = gold_label
        if label == 'O':
            Fout.write(line)
            continue
        label_prefix, label_name = label.split('-', 1)
        if label_name in merging_dict:
            label_name = merging_dict[label_name]
        new_label = label_prefix + '-' + label_name
        opline = word + ' ' + new_label + ' ' + raw_word + ' ' + raw_label + '\n'
        Fout.write(opline)
    Fout.close()
    return modified_file
def pcmworker(pcmqueue):
    global enable
    global audio_format
    p = pyaudio.PyAudio()
    stream = p.open(format=audio_format,
                    channels=hl2ss.Parameters_MICROPHONE.CHANNELS,
                    rate=hl2ss.Parameters_MICROPHONE.SAMPLE_RATE,
                    output=True)
    stream.start_stream()
    while enable:
        stream.write(pcmqueue.get())
    stream.stop_stream()
    stream.close()
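# Usage sketch: feed PCM chunks to the worker from another thread. This
# assumes the module-level `enable` flag and audio settings referenced above.
import queue
import threading

pcmqueue = queue.Queue()
enable = True
threading.Thread(target=pcmworker, args=(pcmqueue,)).start()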
class Mask():
    def __init__(self, landmarks, face, channels=4):
        self.landmarks = landmarks
        self.face = face
        self.channels = channels
        mask = self.build_mask()
        self.mask = self.merge_mask(mask)

    def build_mask(self):
        raise NotImplementedError

    def merge_mask(self, mask):
        assert self.channels in (1, 3, 4), 'Channels should be 1, 3 or 4'
        assert mask.shape[2] == 1 and mask.ndim == 3, 'Input mask should be 3 dimensions with 1 channel'
        if self.channels == 3:
            retval = np.tile(mask, 3)
        elif self.channels == 4:
            # append the mask to the face image as an alpha channel
            retval = np.concatenate((self.face, mask), -1)
        else:
            retval = mask
        return retval
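# Minimal Mask subclass sketch: fills the convex hull of the landmarks.
# cv2 and the landmark/face shapes are assumptions, not from the original snippet.
import cv2
import numpy as np

class HullMask(Mask):
    def build_mask(self):
        # single-channel mask the size of the face image
        mask = np.zeros(self.face.shape[:2] + (1,), dtype='uint8')
        hull = cv2.convexHull(np.array(self.landmarks).reshape(-1, 2).astype('int32'))
        cv2.fillConvexPoly(mask, hull, 255)
        return mask

face = np.zeros((64, 64, 3), dtype='uint8')
landmarks = [(10, 10), (50, 12), (30, 50)]
m = HullMask(landmarks, face, channels=4)
# m.mask has shape (64, 64, 4): the face with the hull mask as alpha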
class KerasBasePruner(BasePruner):
    def __init__(self, config, modules):
        super().__init__(config, modules)
        for key in self.modules.keys():
            module = self.modules[key]
            # one mask per layer, shaped like the kernel weights and initialized to keep everything
            self.masks[key] = np.ones(module.get_weights()[0].shape)
        self._init()

    def mask_weights(self):
        for key in self.modules.keys():
            module = self.modules[key]
            module.set_weights([module.get_weights()[0] * self.masks[key]] + module.get_weights()[1:])
class LexicalMap(object):
    def __init__(self):
        pass

    @staticmethod
    def get(concept, vocab=None):
        cp_seq = []
        for conc in concept:
            cp_seq.append(conc)
        if vocab is None:
            return cp_seq
        # assign fresh indices to concepts that fall outside the vocabulary
        new_tokens = set(cp for cp in cp_seq if vocab.token2idx(cp) == vocab.unk_idx)
        token2idx, idx2token = dict(), dict()
        nxt = vocab.size
        for x in new_tokens:
            token2idx[x] = nxt
            idx2token[nxt] = x
            nxt += 1
        return cp_seq, token2idx, idx2token
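# Usage sketch for LexicalMap.get; _ToyVocab is a stand-in exposing the
# token2idx/unk_idx/size interface the method expects.
class _ToyVocab:
    def __init__(self, tokens):
        self._t2i = {t: i for i, t in enumerate(tokens)}
        self.unk_idx = len(tokens)    # everything unknown maps here
        self.size = len(tokens) + 1

    def token2idx(self, token):
        return self._t2i.get(token, self.unk_idx)

cp_seq, token2idx, idx2token = LexicalMap.get(['want', 'go', 'xyz'], _ToyVocab(['want', 'go']))
# 'xyz' is out of vocabulary, so it receives the next free index (vocab.size)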
class Bottleneck(_Bottleneck):
    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1,
                                      stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_sac:
            self.conv2 = build_conv_layer(self.sac, width, width, kernel_size=3,
                                          stride=self.conv2_stride, padding=self.dilation,
                                          dilation=self.dilation, groups=groups, bias=False)
        elif not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3,
                                          stride=self.conv2_stride, padding=self.dilation,
                                          dilation=self.dilation, groups=groups, bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3,
                                          stride=self.conv2_stride, padding=self.dilation,
                                          dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, self.planes * self.expansion,
                                      kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
import torch
import torch.nn as nn


class GaussPlusNoisePNGenerator(nn.Module):
    def __init__(self, device, alpha=1.0):
        super(GaussPlusNoisePNGenerator, self).__init__()
        self.device = device
        self.alpha = torch.Tensor([alpha]).to(self.device)

    def forward(self, emb):
        # two independent Gaussian perturbations of the same embedding
        z_i = torch.randn(emb.size(), device=emb.device) * self.alpha
        z_i.requires_grad = False
        z_j = torch.randn(emb.size(), device=emb.device) * self.alpha
        z_j.requires_grad = False
        return (emb + z_i), (emb + z_j)
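# Usage sketch: generate two noisy "positive" views of an embedding batch
# (batch size and embedding dimension are illustrative).
emb = torch.randn(8, 128)
gen = GaussPlusNoisePNGenerator(device='cpu', alpha=0.5)
view_i, view_j = gen(emb)
assert view_i.shape == view_j.shape == emb.shape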
class EMA():
    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new
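# Usage sketch for EMA: maintain an exponential moving average of a value
# (the same update_average is typically applied per-parameter when averaging
# model weights).
ema = EMA(beta=0.99)
avg = None
for v in [1.0, 2.0, 3.0]:
    avg = ema.update_average(avg, v)
# avg == 0.99 * (0.99 * 1.0 + 0.01 * 2.0) + 0.01 * 3.0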