code
stringlengths
101
5.91M
class PoolFormerImageProcessor(BaseImageProcessor):
    """Image processor for PoolFormer models.

    Preprocessing pipeline (each stage independently switchable):
    resize -> center crop -> rescale -> normalize.  Resizing follows the
    timm "crop pct" evaluation convention: the image is first scaled up to
    ``size / crop_pct`` so that the subsequent center crop lands exactly on
    ``size``.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, crop_pct: int=0.9, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, rescale_factor: Union[int, float]=(1 / 255), do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Defaults (224 shortest edge / 224x224 crop, ImageNet stats) mirror
        # the canonical PoolFormer checkpoint configuration.
        size = (size if (size is not None) else {'shortest_edge': 224})
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else {'height': 224, 'width': 224})
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_DEFAULT_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_DEFAULT_STD)

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """Resize ``image``; when ``crop_pct`` is given, the target is inflated
        by ``1 / crop_pct`` first (timm crop-pct protocol)."""
        size = get_size_dict(size, default_to_square=False)
        if (('shortest_edge' not in size) and (('height' not in size) or ('width' not in size))):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if (crop_pct is not None):
            if ('shortest_edge' in size):
                scale_size = int((size['shortest_edge'] / crop_pct))
            elif (('height' in size) and ('width' in size)):
                # Square targets keep aspect via shortest-edge resize; non-square
                # targets scale both dimensions independently.
                if (size['height'] == size['width']):
                    scale_size = int((size['height'] / crop_pct))
                else:
                    scale_size = (int((size['height'] / crop_pct)), int((size['width'] / crop_pct)))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        elif ('shortest_edge' in size):
            output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        elif (('height' in size) and ('width' in size)):
            output_size = (size['height'], size['width'])
        else:
            raise ValueError('Invalid size for resize: {}'.format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """Center-crop ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map uint8 to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """Standardize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, crop_pct: int=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Run the full pipeline on one image or a batch.

        Per-call arguments override the instance-level defaults; ``None``
        means "use the value set in ``__init__``".  Returns a BatchFeature
        with key 'pixel_values'.
        """
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        crop_pct = (crop_pct if (crop_pct is not None) else self.crop_pct)
        resample = (resample if (resample is not None) else self.resample)
        do_center_crop = (do_center_crop if (do_center_crop is not None) else self.do_center_crop)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else self.crop_size)
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # Validate that every enabled stage has the parameters it needs.
        if ((do_resize and (size is None)) or (resample is None)):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if (do_center_crop and (crop_pct is None)):
            raise ValueError('Crop_pct must be specified if do_center_crop is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def main():
    """CLI entry point: run the configured tracker on the local webcam."""

    def str2bool(value):
        # Bug fix: argparse `type=bool` is a trap -- bool("False") is True
        # because any non-empty string is truthy, so `--use_visdom False`
        # still enabled visdom.  Parse the common textual spellings instead.
        return str(value).lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Run the tracker on your webcam.')
    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
    parser.add_argument('tracker_param', type=str, help='Name of parameter file.')
    parser.add_argument('--debug', type=int, default=0, help='Debug level.')
    parser.add_argument('--use_visdom', type=str2bool, default=True, help='Flag to enable visdom')
    parser.add_argument('--visdom_server', type=str, default='127.0.0.1', help='Server for visdom')
    parser.add_argument('--visdom_port', type=int, default=8097, help='Port for visdom')
    args = parser.parse_args()
    visdom_info = {'use_visdom': args.use_visdom, 'server': args.visdom_server, 'port': args.visdom_port}
    run_webcam(args.tracker_name, args.tracker_param, args.debug, visdom_info)
def parse_benchmark_only_line(line):
    """Extract throughput / batch-size metrics from a benchmark-only log line.

    Returns a dict containing 'throughput' (float) and/or 'batch_size' (int)
    for whichever fields parse_benchmark_log found in `line`.
    """
    metrics = {}
    throughput = parse_benchmark_log('benchmark_only', line)
    if throughput:
        metrics['throughput'] = float(throughput)
    batch_size = parse_benchmark_log('batch_size', line)
    if batch_size:
        metrics['batch_size'] = int(batch_size)
    return metrics
def gtp_io():
    """Minimal GTP (Go Text Protocol) loop driving a board-evaluation model.

    Reads commands from stdin, dispatches them to a GoDriver built from a
    randomly chosen SGF file, and echoes GTP-formatted responses on stdout.
    All traffic is also mirrored to stderr and to a local 'output.txt' log.

    NOTE(review): 'output.txt' is opened in binary mode ('wb') but written
    with str objects, and input uses raw_input() while print() takes the
    file= keyword -- this mixes Python 2 and 3 idioms and cannot run as-is
    on either interpreter; confirm the intended runtime.
    """
    known_commands = ['boardsize', 'clear_board', 'komi', 'play', 'genmove', 'final_score', 'quit', 'name', 'version', 'known_command', 'list_commands', 'protocol_version', 'gogui-analyze_commands']
    # gogui "analyze" entries: display-type/label/command triples.
    analyze_commands = ['gfx/Predict Final Ownership/predict_ownership', 'none/Load New SGF/loadsgf']
    sgf_files = get_sgf_filelist(SGF_DIRECTORY)
    sgf_file = random.choice(sgf_files)
    driver = GoDriver(sgf_file, MODEL_PATH)
    print(('starting main.py: loading %s' % sgf_file), file=sys.stderr)
    output_file = open('output.txt', 'wb')
    output_file.write('intializing\n')
    while True:
        try:
            line = raw_input().strip()
            print(line, file=sys.stderr)
            output_file.write((line + '\n'))
        except EOFError:
            # Controller closed stdin -- end the session.
            output_file.write('Breaking!!\n')
            break
        if (line == ''):
            continue
        command = [s.lower() for s in line.split()]
        # Optional numeric command-id prefix, per the GTP spec.
        if re.match('\\d+', command[0]):
            cmdid = command[0]
            command = command[1:]
        else:
            cmdid = ''
        ret = ''
        if (command[0] == 'boardsize'):
            # Board size is fixed at N; only warn on mismatch.
            if (int(command[1]) != N):
                print(('Warning: Trying to set incompatible boardsize %s (!= %d)' % (command[1], N)), file=sys.stderr)
        elif (command[0] == 'clear_board'):
            driver.reset_board()
        elif (command[0] == 'loadsgf'):
            sgf_file = random.choice(sgf_files)
            print(('Loading new file: %s' % sgf_file), file=sys.stderr)
            print("Make sure to click 'Clear board and start new game' in the gui", file=sys.stderr)
            driver.load_sgf_file(sgf_file)
        elif (command[0] == 'komi'):
            # Komi is irrelevant to ownership prediction; accept silently.
            pass
        elif (command[0] == 'play'):
            pass
            print('play', file=sys.stderr)
        elif (command[0] == 'genmove'):
            tup = driver.gen_move()
            if (tup == 'pass'):
                ret = 'pass'
            else:
                ret = coord_to_str(tup[0], tup[1])
            print('genmove', file=sys.stderr)
        elif (command[0] == 'final_score'):
            print('final_score not implemented', file=sys.stderr)
        elif (command[0] == 'name'):
            ret = 'board_evaluator'
        elif (command[0] == 'predict_ownership'):
            ownership_prediction = driver.evaluate_current_board()
            ret = influence_str(ownership_prediction)
        elif (command[0] == 'version'):
            ret = '1.0'
        elif (command[0] == 'list_commands'):
            ret = '\n'.join(known_commands)
        elif (command[0] == 'gogui-analyze_commands'):
            ret = '\n'.join(analyze_commands)
        elif (command[0] == 'known_command'):
            ret = ('true' if (command[1] in known_commands) else 'false')
        elif (command[0] == 'protocol_version'):
            ret = '2'
        elif (command[0] == 'quit'):
            print(('=%s \n\n' % (cmdid,)), end='')
            break
        else:
            print(('Warning: Ignoring unknown command - %s' % (line,)), file=sys.stderr)
            ret = None
        # GTP responses: '=' prefix on success, '?' on failure.
        if (ret is not None):
            output_file.write(("returning: '=%s %s'\n" % (cmdid, ret)))
            print(('=%s %s\n\n' % (cmdid, ret)), end='')
        else:
            output_file.write(("returning: '=?%s ???'\n" % cmdid))
            print(('?%s ???\n\n' % (cmdid,)), end='')
        sys.stdout.flush()
    output_file.write('end of session\n')
    output_file.close()
def constant_pad_nd(g, input, padding, value=None):
    """ONNX symbolic for constant_pad_nd: lower to a 'Pad' op in constant mode."""
    fill = sym_help._maybe_get_scalar(value)
    # Cast the fill scalar to the input's element type.
    fill = sym_help._if_scalar_type_as(g, fill, input)
    onnx_pads = _prepare_onnx_paddings(g, input.type().dim(), padding)
    return g.op('Pad', input, onnx_pads, fill, mode_s='constant')
def get_embedding_folderpath(dataset: str, architecture: str, seed: int, step: int) -> pathlib.Path:
    """Return the scratch-relative directory that holds embeddings for one run/step."""
    relative = pathlib.Path('embeddings', dataset, architecture, str(seed), str(step))
    return SCRATCH_PATH / relative
def download_from_url_to_file(url, file_path):
    """Download ``url`` to ``file_path``; return True on HTTP 200.

    Bug fixed: the original wrote ``r.content`` to disk *before* checking the
    status code, so a failed download left an error page on disk, and it
    buffered the whole body in memory despite requesting ``stream=True``.
    Now the status is checked first and the body is streamed in chunks.
    """
    print(f'Download {url}')
    r = requests.get(url, stream=True)
    success = (r.status_code == 200)
    if success:
        with open(file_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    return success
def encoding(dataset, model, tokenizer, max_length, hf_args, async_args, encode_is_qry=False):
    """Encode every example in ``dataset`` into dense vectors on the GPU.

    Returns (lookup_indices, encoded): the example ids in iteration order and
    one CPU tensor of stacked representations.  ``encode_is_qry`` is forwarded
    to the model to select query vs. passage encoding.
    """
    # Effective batch size spreads per-device batches across all devices.
    encode_loader = DataLoader(dataset, batch_size=(hf_args.per_device_eval_batch_size * len(async_args.devices)), collate_fn=EncodeCollator(tokenizer, max_length=max_length, padding='max_length'), shuffle=False, drop_last=False, num_workers=hf_args.dataloader_num_workers)
    encoded = []
    lookup_indices = []
    for (batch_ids, batch) in tqdm(encode_loader, desc='Encoding'):
        lookup_indices.extend(batch_ids)
        # Mixed precision only when fp16 is requested; otherwise a no-op context.
        with (torch.cuda.amp.autocast() if hf_args.fp16 else nullcontext()):
            with torch.no_grad():
                # NOTE(review): relies on in-place .to() of a BatchEncoding-like
                # container (the return value is discarded) -- confirm.
                batch.to('cuda')
                reps = model(batch['input_ids'], batch['attention_mask'], encode_is_qry)
                # Move each batch off the GPU immediately to bound memory use.
                encoded.append(reps.cpu())
    encoded = torch.cat(encoded)
    return (lookup_indices, encoded)
_model_architecture(model_name='unity_xm_transformer', arch_name='unity_xm_transformer')
def base_architecture_unity(args):
    """Populate ``args`` with the default hyper-parameters for unity_xm_transformer."""
    # NOTE(review): the call above reads like a truncated
    # "@register_model_architecture(...)" decorator (fairseq convention)
    # mangled during extraction -- confirm against the original source.
    set_default_general_args(args)
    set_default_w2v_encoder_args(args)
    set_default_adaptor_args(args)
    set_default_transformer_decoder_args(args)
    args.layernorm_embedding = False
    args.decoder_learned_pos = False
def _is_iterable(o): try: _ = iter(o) except Exception: return False return True
class GOPSRandomStateEnumerator(RandomStateEnumerator):
    """Enumerates successor GOPS states over all not-yet-revealed prize cards."""

    def __init__(self):
        super().__init__()

    def enumerate(self, state: State):
        """Return the set of states reachable by revealing one remaining prize card.

        Bug fixed: the original rebound ``prize_cards`` inside the loop, so
        each iteration appended its action to the tuple already extended by
        the previous iteration, yielding ever-longer (wrong) prize piles.
        Each successor now extends the *original* prize pile by exactly one
        card.
        """
        starting_deck = range(1, state.num_cards + 1)
        # Cards that have not been revealed as prizes yet.
        remaining = set(starting_deck) - set(state.prize_cards)
        next_states = set()
        for action in remaining:
            new_prize_cards = tuple(state.prize_cards) + (action,)
            next_state = GOPSState(((state.state_type + 1) % 3), new_prize_cards, state.player_cards, state.opponent_cards, state.num_cards)
            next_states.add(next_state)
        return next_states
class MjvCameraWrapper(object):
    """Attribute-style wrapper around a ctypes pointer to an mjvCamera struct.

    Bug fixed: the getter/setter pairs were plain same-named methods, so each
    setter definition silently replaced its getter and attribute access was
    broken.  They are restored as @property / @<name>.setter pairs so
    ``cam.fovy`` reads and ``cam.fovy = v`` writes through to the struct.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped      # ctypes pointer to the underlying struct
        self._size_src = size_src    # optional source of viewport size info

    @property
    def ptr(self):
        """The raw ctypes pointer."""
        return self._wrapped

    @property
    def obj(self):
        """The dereferenced struct."""
        return self._wrapped.contents

    @property
    def fovy(self):
        return self._wrapped.contents.fovy

    @fovy.setter
    def fovy(self, value):
        self._wrapped.contents.fovy = value

    @property
    def camid(self):
        return self._wrapped.contents.camid

    @camid.setter
    def camid(self, value):
        self._wrapped.contents.camid = value

    @property
    def trackbodyid(self):
        return self._wrapped.contents.trackbodyid

    @trackbodyid.setter
    def trackbodyid(self, value):
        self._wrapped.contents.trackbodyid = value

    @property
    def lookat(self):
        # Read-only snapshot of the 3-vector; marked non-writable so callers
        # must go through the setter to mutate the underlying struct.
        arr = np.reshape(np.fromiter(self._wrapped.contents.lookat, dtype=np.double, count=3), (3,))
        arr.setflags(write=False)
        return arr

    @lookat.setter
    def lookat(self, value):
        # Copy the 3 doubles directly into the struct's array.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.lookat, val_ptr, (3 * sizeof(c_double)))

    @property
    def azimuth(self):
        return self._wrapped.contents.azimuth

    @azimuth.setter
    def azimuth(self, value):
        self._wrapped.contents.azimuth = value

    @property
    def elevation(self):
        return self._wrapped.contents.elevation

    @elevation.setter
    def elevation(self, value):
        self._wrapped.contents.elevation = value

    @property
    def distance(self):
        return self._wrapped.contents.distance

    @distance.setter
    def distance(self, value):
        self._wrapped.contents.distance = value

    @property
    def pose(self):
        return self._wrapped.contents.pose

    @pose.setter
    def pose(self, value):
        self._wrapped.contents.pose = value

    @property
    def VR(self):
        return self._wrapped.contents.VR

    @VR.setter
    def VR(self, value):
        self._wrapped.contents.VR = value
def _patch_file(path, content):
    """Overwrite ``path`` with ``content`` unless it already matches.

    The existing file is preserved via _rename_path before writing.
    Returns True when the file was patched, False when already up to date.

    Bug fixed: the original opened the file for reading without ever closing
    the handle; both reads and writes now use context managers.
    """
    with open(path) as f:
        existing_content = f.read()
    if existing_content == content:
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    with open(path, 'w') as f:
        f.write(content)
    return True
def _try_register_nav_task():
    """Register the navigation task, or a stand-in that re-raises the import error.

    If NavigationTask imports cleanly, importing it is the registration side
    effect; otherwise a placeholder task class is registered whose
    construction re-raises the original ImportError at use time.
    """
    try:
        from habitat.tasks.nav.nav import NavigationTask
        has_navtask = True
    except ImportError as e:
        has_navtask = False
        navtask_import_error = e
    if has_navtask:
        from habitat.tasks.nav.nav import NavigationTask
    else:
        # NOTE(review): "_task(name='Nav-v0')" reads like a truncated
        # "@registry.register_task(name='Nav-v0')" decorator lost during
        # extraction -- confirm against the original habitat source.
        _task(name='Nav-v0')
        class NavigationTaskImportError(EmbodiedTask):
            def __init__(self, *args, **kwargs):
                # Surface the deferred import failure at first use.
                raise navtask_import_error
def plot_wins(title, experiments, fig_name):
    """Plot per-experiment win rate (wins / trajectories per 100 iterations).

    experiments: iterable of (experiment_name, (label, color, linestyle)).
    Log lines are read from 'path/run_final_<experiment>.log'; the figure is
    saved to ``fig_name`` and also shown.
    """
    for (experiment, style) in experiments:
        (label, color, ls) = style
        steps = []
        means = []
        counter = 0  # NOTE(review): never used after initialization
        running_wins = 0
        running_trajs = 0
        with open((('path/run_final_' + experiment) + '.log')) as log_f:
            for line in log_f:
                if ('|' in line):
                    # Structured log line: "<prefix>|<message>".
                    msg = line.split('|')[1]
                    words = msg.split()
                    if (words[0] == 'Iteration'):
                        it = int(words[1])
                        # Flush the rolling window every 100 iterations.
                        if ((it > 0) and ((it % 100) == 0)):
                            steps.append(it)
                            if (running_wins == 0):
                                means.append(0)
                            else:
                                means.append(((1.0 * running_wins) / running_trajs))
                            running_wins = 0
                            running_trajs = 0
                    if (words[0] == 'NumTrajs'):
                        running_trajs += int(words[1])
                elif ('WIN' in line):
                    running_wins += 1
        plt.plot(steps, means, label=label, color=color, ls=ls)
        last_mean = means[(- 1)]
        # Place the label just past the end of the curve.
        plt.text((steps[(- 1)] * 1.04), (last_mean - 0.002), label, size='smaller')
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.title(title, y=1.05)
    plt.xlabel('Timestep')
    plt.ylabel('Task completion', labelpad=10)
    plt.tight_layout()
    # Leave room on the right for the in-plot curve labels.
    plt.gcf().subplots_adjust(right=0.8)
    plt.savefig(fig_name)
    plt.show()
def lights_colors_from_lights_cmd(lights_cmd: LightsCmd, acc: float, t: Timestamp) -> LightsColors:
    """Compute light colors for a command at time ``t``.

    Braking (negative acceleration) intensifies any rear light that is
    currently red from ``red`` to ``red_more``.
    """
    phases = lightscmd2phases[lights_cmd]
    colors = get_phased_lights(phases, float(t))
    braking = acc < 0
    if braking:
        if colors.back_left == red:
            colors.back_left = red_more
        if colors.back_right == red:
            colors.back_right = red_more
    return colors
def train(loader, net, crit, opt, epoch):
    """Train ``net`` for one epoch over ``loader``, tracking running meters.

    crit: loss criterion; opt: optimizer.  Prints a progress line every
    ``args.print_freq`` batches (uses the module-level ``args``).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accs = AverageMeter()
    precisions = AverageMeter()
    recalls = AverageMeter()
    net.train()
    end = time.time()
    for (i, ((feat, adj, cid, h1id), gtmat)) in enumerate(loader):
        data_time.update((time.time() - end))
        # Move the whole batch to the GPU.
        (feat, adj, cid, h1id, gtmat) = map((lambda x: x.cuda()), (feat, adj, cid, h1id, gtmat))
        pred = net(feat, adj, h1id)
        labels = make_labels(gtmat).long()
        loss = crit(pred, labels)
        (p, r, acc) = accuracy(pred, labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
        # Meters are weighted by batch size (feat.size(0)).
        losses.update(loss.item(), feat.size(0))
        accs.update(acc.item(), feat.size(0))
        precisions.update(p, feat.size(0))
        recalls.update(r, feat.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            # Format string kept verbatim (including the historical
            # "Precison" spelling) -- it is runtime output.
            print('Epoch:[{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {losses.val:.3f} ({losses.avg:.3f})\tAccuracy {accs.val:.3f} ({accs.avg:.3f})\tPrecison {precisions.val:.3f} ({precisions.avg:.3f})\tRecall {recalls.val:.3f} ({recalls.avg:.3f})'.format(epoch, i, len(loader), batch_time=batch_time, data_time=data_time, losses=losses, accs=accs, precisions=precisions, recalls=recalls))
def arange(t: TensorType, start: int, stop: Optional[int]=None, step: Optional[int]=None) -> TensorType:
    """Delegate to the backend tensor type's own arange; stop/step pass through as given."""
    return t.arange(start, stop, step)
class COGNET360KLoader(BaseLoader):
    """Dataset loader for the COGNET360K knowledge-graph benchmark."""

    def __init__(self, dataset_path, download=False):
        super().__init__(dataset_path, download, raw_data_path='COGNET360K/raw_data', processed_data_path='COGNET360K/processed_data', train_name='train.txt', valid_name='valid.txt', test_name='test.txt', data_name='COGNET360K')
        # File name of the (JSON) node lookup table in the raw data.
        self.node_lut_name = 'node_lut.json'

    def download_action(self):
        # Delegate the actual fetch to the shared downloader.
        self.downloader.COGNET360K()

    def load_node_lut(self):
        """Load the node lookup table, building and pickling it on first use."""
        preprocessed_file = os.path.join(self.processed_data_path, 'node_lut.pkl')
        if os.path.exists(preprocessed_file):
            # Fast path: reuse the cached pickle.
            node_lut = LookUpTable()
            node_lut.read_from_pickle(preprocessed_file)
        else:
            node_lut = LookUpTable()
            node_lut.add_vocab(self.node_vocab)
            node_lut.add_processed_path(self.processed_data_path)
            node_lut.save_to_pickle(preprocessed_file)
        return node_lut

    def load_all_lut(self):
        """Return (node_lut, relation_lut); the relation LUT is rebuilt, not cached."""
        node_lut = self.load_node_lut()
        relation_lut = LookUpTable()
        relation_lut.add_vocab(self.relation_vocab)
        relation_lut.add_processed_path(self.processed_data_path)
        return (node_lut, relation_lut)
class MetadataKeeper(EventSink):
    """Accumulates per-epoch scalar metrics and answers best-epoch queries.

    'scalar/*' items are flattened into (key, subkey) series; list-valued
    entries are aggregated per epoch ('avg' for loss/score dtypes, 'sum'
    otherwise).  Scores are maximized, everything else minimized.
    """

    # Aggregation label suffixes appended to flattened metric keys.
    aggregations = {'avg': '_avg.4', 'sum': '_sum.1', None: ''}

    def __init__(self, dataroot):
        # NOTE(review): `dataroot` is accepted but never stored or used in
        # this class -- confirm whether EventSink or a subclass needs it.
        self.epochs = []   # epoch indices seen, in registration order
        self.data = {}     # (key, subkey) -> per-series bookkeeping dict
        self.keys = {}     # key -> the subkey view seen on first registration

    def load_epochs_data(self, epochs, consts):
        """Bulk-register a sequence of epoch payloads into an empty keeper."""
        assert (not self.data)
        for (i, data) in enumerate(epochs):
            self.register_epoch_data(i, data, consts)
        return self

    def register_epoch_data(self, epoch, data, consts):
        """Fold one epoch's payload into the per-(key, subkey) series."""
        assert (epoch >= 0)
        self.epochs.append(epoch)
        # First pass: create a series entry per (key, subkey) the first time
        # a key is seen; on later epochs only verify the subkey set is stable.
        for (key, item) in data.items():
            if (key in self.keys):
                assert (self.keys[key] == item['data'].keys())
                continue
            self.keys[key] = item['data'].keys()
            if item['dtype'].startswith('scalar/'):
                for (subkey, subitem) in item['data'].items():
                    # Plain scalars are stored as-is; sequences get aggregated.
                    if (not isinstance(subitem, (list, np.ndarray))):
                        aggr = None
                    else:
                        aggr = ('avg' if (item['dtype'] in {'scalar/loss', 'scalar/score'}) else 'sum')
                    new_key = (((key + ':') + subkey) + self.aggregations[aggr])
                    self.data[(key, subkey)] = {'iteration_density': [], 'dtype': item['dtype'], 'aggr': aggr, 'key': new_key, 'epochs': [], 'data': []}
        # Second pass: append this epoch's (aggregated) value to each series.
        for (key, item) in self.data.items():
            if (key[0] not in data):
                continue
            value = np.array(data[key[0]]['data'][key[1]])
            iteration_density = None
            if (item['aggr'] is not None):
                # Drop NaNs before aggregating.
                value = value[(~ np.isnan(value))]
                value = {'avg': np.mean, 'sum': np.sum}[item['aggr']](value)
            if (data[key[0]]['relative_iteration'] is not None):
                # Fraction of the epoch's iterations that reported this metric.
                iteration_density = (len(data[key[0]]['relative_iteration']) / data[key[0]]['epoch_size'])
            item['iteration_density'].append(iteration_density)
            item['epochs'].append(epoch)
            item['data'].append(value)

    def metric(self, data_key, item_key):
        """Raw per-epoch value list for one (data_key, item_key) series."""
        return self.data[(data_key, item_key)]['data']

    def metadata(self):
        """Export only loss/score series, keyed by their flattened names."""
        return {y['key']: y['data'] for y in self.data.values() if (y['dtype'] in {'scalar/loss', 'scalar/score'})}

    def is_last_best(self, key):
        """True when the most recent epoch achieved the best value for ``key``.

        ``key`` may be a 'a:b' string or a tuple; the pseudo key ('epoch',)
        is always considered best.
        """
        if isinstance(key, str):
            key = tuple(key.split(':'))
        assert isinstance(key, tuple), key
        if (key == ('epoch',)):
            return True
        elif ((key not in self.data) or (self.data[key]['epochs'][(- 1)] != self.epochs[(- 1)])):
            # Unknown series, or the series has no entry for the last epoch.
            return False
        if (self.data[key]['dtype'] == 'scalar/score'):
            return (max(self.data[key]['data']) == self.data[key]['data'][(- 1)])
        return (min(self.data[key]['data']) == self.data[key]['data'][(- 1)])

    def best_epoch(self, key):
        """Return {'index', 'metric_avg.3', 'key'} for the best epoch, or None."""
        if isinstance(key, str):
            key = tuple(key.split(':'))
        assert isinstance(key, tuple)
        if (key == ('epoch',)):
            return {'index': self.epochs[(- 1)], 'metric_avg.3': self.epochs[(- 1)], 'key': 'epoch'}
        elif (key not in self.data):
            return None
        if (self.data[key]['dtype'] == 'scalar/score'):
            index = np.argmax(self.data[key]['data'])
        else:
            index = np.argmin(self.data[key]['data'])
        return {'index': self.data[key]['epochs'][index], 'metric_avg.3': self.data[key]['data'][index], 'key': self.data[key]['key']}

    def errors(self):
        """Collect consistency warnings: odd epoch sequences, partial coverage."""
        errors = []
        if (self.epochs != list(range(len(self.epochs)))):
            errors.append({'message': 'Non-standard epoch sequence used', 'data': self.epochs})
        iteration_density = {('%_%s' % (x['key'], z)): y for x in self.data.values() for (y, z) in zip(x['iteration_density'], x['epochs']) if (y != 1)}
        if iteration_density:
            errors.append({'message': 'Some keys have incomplete iteration coverage', 'data': iteration_density})
        epoch_coverage = {x['key']: x['epochs'] for x in self.data.values() if (x['epochs'] != self.epochs)}
        if epoch_coverage:
            errors.append({'message': 'Some keys have incomplete epoch coverage', 'data': epoch_coverage})
        return errors
def test_weighted_loss_forwards():
    """A 2.0/1.0-weighted pair of L1 losses on all-ones vs all-zeros sums to 3.0."""
    criterion = loss.WeightedLoss([torch.nn.L1Loss(), torch.nn.L1Loss()], weights=[2.0, 1.0])
    prediction = torch.ones(1, 1, 100)
    reference = torch.zeros(1, 1, 100)
    assert (criterion(prediction, reference) == 3.0)
_config
def rlgsn_base_resnet50():
    """Build the RL-sidetune ResNet-50 baseline learner configuration."""
    # NOTE(review): "_config" above reads like a truncated decorator (e.g.
    # "@register_config") lost during extraction, and `cfg` is built but
    # never returned -- presumably the registration mechanism captures it;
    # confirm against the original project.
    cfg = {}
    cfg['learner'] = {'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': {'extra_kwargs': {'sidetune_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}}}}}
def gen_pixel_probabilities(session_location, options, master_logger, image_filename=None):
    """Run Ilastik headless to produce per-pixel class probabilities.

    Writes 'STACKED_prediction.h5' under ``session_location`` and returns its
    path.  Exits the process if the deprecated extract-ilp-prediction option
    is set.
    """
    master_logger.info('Generating Pixel Probabilities')
    if (image_filename is None):
        image_filename = options.image_stack
    if (('extract-ilp-prediction' in options) and options.extract_ilp_prediction):
        # Deprecated path: bail out rather than silently misbehave.
        master_logger.info('Extract .ilp prediction option has been deprecated')
        sys.exit(2)
    else:
        master_logger.info('Running Ilastik in headless mode')
        pixel_prob_filename = os.path.join(session_location, 'STACKED_prediction.h5')
        ilastik_command = 'ilastik_headless --preconvert_stacks --project={project_file} --output_axis_order=xyzc --output_format=hdf5 --output_filename_format={pixel_prob_filename} --output_internal_path=/volume/predictions'.format(project_file=options.ilp_file, pixel_prob_filename=pixel_prob_filename)
        if (options.temp_dir is not None):
            temp_dir = util.make_temp_dir(options.temp_dir)
            ilastik_command += ' --sys_tmp_dir={}'.format(options.temp_dir)
        ilastik_command += ((' "' + image_filename) + '"')
        # SECURITY NOTE(review): the command is assembled by string
        # concatenation and run through os.system, so paths containing quotes
        # or shell metacharacters are interpreted by the shell.  Consider
        # subprocess.run([...]) with an argument list.
        master_logger.info(('Executing ilastik headless command for pixel classification:\n%s' % ilastik_command))
        os.system(ilastik_command)
        if (options.temp_dir is not None):
            shutil.rmtree(temp_dir)
        return pixel_prob_filename
def bin_pack_dense_reward(dummy_generator: DummyGenerator, dense_reward: DenseReward) -> BinPack:
    """Fixture: a BinPack environment (5 observed EMS) wired with a dense reward."""
    environment = BinPack(generator=dummy_generator, obs_num_ems=5, reward_fn=dense_reward)
    return environment
def batch_norm_in_place(net, axis, scope='batch_norm_in_place', is_training=None):
    """Apply fused TF batch normalization over channel axis 1 (NCHW) or 3 (NHWC).

    ``is_training`` is mandatory; updates are applied in place
    (updates_collections=None).
    """
    assert is_training is not None
    assert axis in [1, 3]
    if axis == 1:
        data_format = 'NCHW'
    else:
        data_format = 'NHWC'
    with tf.variable_scope(scope):
        return tf.contrib.layers.batch_norm(inputs=net, is_training=is_training, data_format=data_format, fused=True, updates_collections=None)
_operation
def mult(a: torch.Tensor, b: torch.Tensor):
    """Multiply tensors whose trailing dim of size 2 holds (real, imag) parts.

    If either operand is real (per ``is_real``), delegates to
    ``mult_real_cplx``; otherwise computes the complex product.
    NOTE(review): the bare "_operation" above reads like a truncated
    decorator lost during extraction -- confirm against the original source.
    """
    if is_real(a):
        # NOTE(review): raising when a.dim() >= b.dim() presumably enforces
        # that the real operand lacks the trailing complex axis -- confirm.
        if (a.dim() >= b.dim()):
            raise ValueError('Incorrect dimensions.')
        return mult_real_cplx(a, b)
    if is_real(b):
        if (b.dim() >= a.dim()):
            raise ValueError('Incorrect dimensions.')
        return mult_real_cplx(b, a)
    # (a.re + i*a.im)(b.re + i*b.im): start from a.re * b, then fold in the
    # a.im cross terms into the real and imaginary channels.
    c = mult_real_cplx(a[(..., 0)], b)
    c[(..., 0)] -= (a[(..., 1)] * b[(..., 1)])
    c[(..., 1)] += (a[(..., 1)] * b[(..., 0)])
    return c
class I3D(torch.nn.Module):
    """Inflated 3D Inception (I3D) backbone for video classification.

    Mirrors the original TensorFlow implementation layer-for-layer so that
    pretrained TF checkpoints can be imported via ``load_tf_weights``.
    """

    def __init__(self, num_classes, modality='rgb', dropout_prob=0, name='inception'):
        """num_classes: logits width; modality: 'rgb' (3-channel) or 'flow' (2-channel)."""
        super(I3D, self).__init__()
        self.name = name
        self.num_classes = num_classes
        if (modality == 'rgb'):
            in_channels = 3
        elif (modality == 'flow'):
            in_channels = 2
        else:
            raise ValueError('{} not among known modalities [rgb|flow]'.format(modality))
        self.modality = modality
        # Stem: 7x7x7 conv stride 2, then spatial-only max pools.
        conv3d_1a_7x7 = Unit3Dpy(out_channels=64, in_channels=in_channels, kernel_size=(7, 7, 7), stride=(2, 2, 2), padding='SAME')
        self.conv3d_1a_7x7 = conv3d_1a_7x7
        self.maxPool3d_2a_3x3 = MaxPool3dTFPadding(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')
        conv3d_2b_1x1 = Unit3Dpy(out_channels=64, in_channels=64, kernel_size=(1, 1, 1), padding='SAME')
        self.conv3d_2b_1x1 = conv3d_2b_1x1
        conv3d_2c_3x3 = Unit3Dpy(out_channels=192, in_channels=64, kernel_size=(3, 3, 3), padding='SAME')
        self.conv3d_2c_3x3 = conv3d_2c_3x3
        self.maxPool3d_3a_3x3 = MaxPool3dTFPadding(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')
        # Inception ("Mixed") blocks; the list gives per-branch channel widths.
        self.mixed_3b = Mixed(192, [64, 96, 128, 16, 32, 32])
        self.mixed_3c = Mixed(256, [128, 128, 192, 32, 96, 64])
        self.maxPool3d_4a_3x3 = MaxPool3dTFPadding(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding='SAME')
        self.mixed_4b = Mixed(480, [192, 96, 208, 16, 48, 64])
        self.mixed_4c = Mixed(512, [160, 112, 224, 24, 64, 64])
        self.mixed_4d = Mixed(512, [128, 128, 256, 24, 64, 64])
        self.mixed_4e = Mixed(512, [112, 144, 288, 32, 64, 64])
        self.mixed_4f = Mixed(528, [256, 160, 320, 32, 128, 128])
        self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding='SAME')
        self.mixed_5b = Mixed(832, [256, 160, 320, 32, 128, 128])
        self.mixed_5c = Mixed(832, [384, 192, 384, 48, 128, 128])
        # Head: average pool, dropout, then a 1x1x1 conv acting as classifier.
        self.avg_pool = torch.nn.AvgPool3d((2, 7, 7), (1, 1, 1))
        self.dropout = torch.nn.Dropout(dropout_prob)
        self.conv3d_0c_1x1 = Unit3Dpy(in_channels=1024, out_channels=self.num_classes, kernel_size=(1, 1, 1), activation=None, use_bias=True, use_bn=False)
        self.softmax = torch.nn.Softmax(1)

    def forward(self, inp):
        """Return (softmax probabilities, raw logits) for a batch of clips."""
        out = self.conv3d_1a_7x7(inp)
        out = self.maxPool3d_2a_3x3(out)
        out = self.conv3d_2b_1x1(out)
        out = self.conv3d_2c_3x3(out)
        out = self.maxPool3d_3a_3x3(out)
        out = self.mixed_3b(out)
        out = self.mixed_3c(out)
        out = self.maxPool3d_4a_3x3(out)
        out = self.mixed_4b(out)
        out = self.mixed_4c(out)
        out = self.mixed_4d(out)
        out = self.mixed_4e(out)
        out = self.mixed_4f(out)
        out = self.maxPool3d_5a_2x2(out)
        out = self.mixed_5b(out)
        out = self.mixed_5c(out)
        out = self.avg_pool(out)
        out = self.dropout(out)
        out = self.conv3d_0c_1x1(out)
        # Squeeze the spatial dims, then average over the remaining time dim.
        out = out.squeeze(3)
        out = out.squeeze(3)
        out = out.mean(2)
        out_logits = out
        out = self.softmax(out_logits)
        return (out, out_logits)

    def load_tf_weights(self, sess):
        """Import weights from a live TF session holding the original I3D graph."""
        state_dict = {}
        if (self.modality == 'rgb'):
            prefix = 'RGB/inception_i3d'
        elif (self.modality == 'flow'):
            prefix = 'Flow/inception_i3d'
        load_conv3d(state_dict, 'conv3d_1a_7x7', sess, os.path.join(prefix, 'Conv3d_1a_7x7'))
        load_conv3d(state_dict, 'conv3d_2b_1x1', sess, os.path.join(prefix, 'Conv3d_2b_1x1'))
        load_conv3d(state_dict, 'conv3d_2c_3x3', sess, os.path.join(prefix, 'Conv3d_2c_3x3'))
        load_mixed(state_dict, 'mixed_3b', sess, os.path.join(prefix, 'Mixed_3b'))
        load_mixed(state_dict, 'mixed_3c', sess, os.path.join(prefix, 'Mixed_3c'))
        load_mixed(state_dict, 'mixed_4b', sess, os.path.join(prefix, 'Mixed_4b'))
        load_mixed(state_dict, 'mixed_4c', sess, os.path.join(prefix, 'Mixed_4c'))
        load_mixed(state_dict, 'mixed_4d', sess, os.path.join(prefix, 'Mixed_4d'))
        load_mixed(state_dict, 'mixed_4e', sess, os.path.join(prefix, 'Mixed_4e'))
        load_mixed(state_dict, 'mixed_4f', sess, os.path.join(prefix, 'Mixed_4f'))
        # fix_typo compensates for a misnamed variable in the TF checkpoint.
        load_mixed(state_dict, 'mixed_5b', sess, os.path.join(prefix, 'Mixed_5b'), fix_typo=True)
        load_mixed(state_dict, 'mixed_5c', sess, os.path.join(prefix, 'Mixed_5c'))
        load_conv3d(state_dict, 'conv3d_0c_1x1', sess, os.path.join(prefix, 'Logits', 'Conv3d_0c_1x1'), bias=True, bn=False)
        self.load_state_dict(state_dict)
class Vocabulary():
    """Token <-> id mapping with normalization and an <unk> fallback.

    Fixes: ``normalize`` and ``load`` took no ``self`` but were plain class
    attributes -- they are now declared @staticmethod so both
    ``Vocabulary.normalize(tok)`` and ``instance.normalize(tok)`` work; the
    vocabulary file is read with a context manager instead of a manually
    closed handle.
    """

    unk_token = UNK_TOKEN

    def __init__(self):
        self.word2id = {}   # token -> id
        self.id2word = []   # id -> token
        self.counts = []    # id -> frequency/weight (1 when absent from file)
        self.unk_id = 0

    @staticmethod
    def normalize(token, lower=LOWER, digit_0=DIGIT_0):
        """Canonicalize a token: keep special tokens, map brackets, squash digits, lower-case."""
        if token in [Vocabulary.unk_token, '<s>', '</s>']:
            return token
        elif token in BRACKETS:
            token = BRACKETS[token]
        elif digit_0:
            token = re.sub('[0-9]', '0', token)
        if lower:
            return token.lower()
        else:
            return token

    @staticmethod
    def load(path):
        """Construct a Vocabulary from a tab-separated vocab file."""
        voca = Vocabulary()
        voca.load_from_file(path)
        return voca

    def load_from_file(self, path):
        """(Re)populate the tables from ``path``: one "token[\\tcount]" per line.

        Guarantees the unk token has an entry and records its id.
        Raises Exception on lines with more than two tab-separated fields.
        """
        self.word2id = {}
        self.id2word = []
        self.counts = []
        with io.open(path, 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                comps = line.strip().split('\t')
                if (len(comps) == 0) or (len(comps) > 2):
                    raise Exception('string wrong')
                token = Vocabulary.normalize(comps[0].strip())
                self.id2word.append(token)
                self.word2id[token] = (len(self.id2word) - 1)
                if len(comps) == 2:
                    self.counts.append(float(comps[1]))
                else:
                    self.counts.append(1)
        if Vocabulary.unk_token not in self.word2id:
            self.id2word.append(Vocabulary.unk_token)
            self.word2id[Vocabulary.unk_token] = (len(self.id2word) - 1)
            self.counts.append(1)
        self.unk_id = self.word2id[Vocabulary.unk_token]

    def size(self):
        """Number of known tokens (including <unk>)."""
        return len(self.id2word)

    def get_id(self, token):
        """Id of the normalized token, or ``unk_id`` when unknown."""
        tok = Vocabulary.normalize(token)
        return self.word2id.get(tok, self.unk_id)
def main():
    """Demo the progress widgets: a ProgressLine stream, a Counter, and a Progress bar."""
    from time import sleep
    for step in range(500):
        ProgressLine(str(2.379 * step))
        sleep(0.02)
    counter = Counter(5)
    for _ in range(500):
        counter.tick()
        sleep(0.005)
    counter.done()
    progress = Progress(5000)
    for _ in range(5000):
        progress.tick()
        sleep(0.0005)
    progress.done()
class SvmModel(ThundersvmBase):
    """ctypes-backed, scikit-learn-style wrapper around the ThunderSVM C library.

    The native model is held as an opaque pointer in ``self.model`` and passed
    to every ``thundersvm`` call as ``c_void_p``.  Subclasses are expected to
    set ``self._impl`` (an entry of ``SVM_TYPE``) before ``fit`` is called.
    After fitting, sklearn-compatible attributes (``support_vectors_``,
    ``dual_coef_``, ``intercept_``, ``n_support_``, ...) are populated by
    copying data back out of the C model.
    """

    def __init__(self, kernel, degree, gamma, coef0, C, nu, epsilon, tol, probability, class_weight, shrinking, cache_size, verbose, max_iter, n_jobs, max_mem_size, random_state, gpu_id):
        # Hyper-parameters are stored verbatim; validation/derivation (e.g.
        # gamma='auto') happens in fit().
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.C = C
        self.nu = nu
        self.epsilon = epsilon
        self.tol = tol
        self.probability = probability
        self.class_weight = class_weight
        self.shrinking = shrinking
        self.cache_size = cache_size
        self.verbose = verbose
        self.max_iter = max_iter
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.max_mem_size = max_mem_size
        self.gpu_id = gpu_id
        self.model = None  # opaque C handle; allocated lazily in fit()/load_*
        # Without restype=c_void_p, ctypes would truncate the returned pointer.
        thundersvm.model_new.restype = c_void_p

    def __del__(self):
        # Free the native model so C-side memory is not leaked.
        if (self.model is not None):
            thundersvm.model_free(c_void_p(self.model))

    def fit(self, X, y):
        """Train on (X, y); returns self, or None if native training failed."""
        if (self.model is not None):
            # Re-fitting: release the previous native model first.
            thundersvm.model_free(c_void_p(self.model))
            self.model = None
        sparse = sp.isspmatrix(X)
        self._sparse = (sparse and (not callable(self.kernel)))
        (X, y) = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr')
        y = column_or_1d(y, warn=True).astype(np.float64)
        solver_type = SVM_TYPE.index(self._impl)
        if (self.gamma == 'auto'):
            # sklearn-style default: 1 / n_features.
            self._gamma = (1.0 / X.shape[1])
        else:
            self._gamma = self.gamma
        if (self.kernel not in KERNEL_TYPE):
            print('The kernel parameter not recognized, please refer to the document.')
            exit()
        else:
            kernel = KERNEL_TYPE.index(self.kernel)
        fit = (self._sparse_fit if self._sparse else self._dense_fit)
        thundersvm.model_new.restype = c_void_p
        self.model = thundersvm.model_new(solver_type)
        if (self.max_mem_size != (- 1)):
            thundersvm.set_memory_size(c_void_p(self.model), self.max_mem_size)
        # _dense_fit/_sparse_fit also set self.n_features / self.n_classes
        # and self._train_succeed as side effects.
        fit(X, y, solver_type, kernel)
        if (self._train_succeed[0] == (- 1)):
            print('Training failed!')
            return
        self.n_sv = thundersvm.n_sv(c_void_p(self.model))
        # Support vectors come back as a CSR triple sized for the worst case
        # (n_sv * n_features); data_size reports the actually-used length.
        csr_row = (c_int * (self.n_sv + 1))()
        csr_col = (c_int * (self.n_sv * self.n_features))()
        csr_data = (c_float * (self.n_sv * self.n_features))()
        data_size = (c_int * 1)()
        sv_indices = (c_int * self.n_sv)()
        thundersvm.get_sv(csr_row, csr_col, csr_data, data_size, sv_indices, c_void_p(self.model))
        self.row = np.frombuffer(csr_row, dtype=np.int32)
        self.col = np.frombuffer(csr_col, dtype=np.int32)[:data_size[0]]
        self.data = np.frombuffer(csr_data, dtype=np.float32)[:data_size[0]]
        self.support_vectors_ = sp.csr_matrix((self.data, self.col, self.row))
        if (not self._sparse):
            # Mirror sklearn: dense input -> dense support vectors.
            self.support_vectors_ = self.support_vectors_.toarray(order='C')
        self.support_ = np.frombuffer(sv_indices, dtype=np.int32).astype(int)
        dual_coef = (c_float * ((self.n_classes - 1) * self.n_sv))()
        thundersvm.get_coef(dual_coef, self.n_classes, self.n_sv, c_void_p(self.model))
        self.dual_coef_ = np.frombuffer(dual_coef, dtype=np.float32).astype(float).reshape(((self.n_classes - 1), self.n_sv))
        # One rho (intercept) per one-vs-one binary sub-model.
        rho_size = int(((self.n_classes * (self.n_classes - 1)) / 2))
        self.n_binary_model = rho_size
        rho = (c_float * rho_size)()
        thundersvm.get_rho(rho, rho_size, c_void_p(self.model))
        self.intercept_ = np.frombuffer(rho, dtype=np.float32).astype(float)
        if (self.kernel == 'linear'):
            # Primal coefficients only exist for the linear kernel.
            coef = (c_float * (self.n_binary_model * self.n_features))()
            thundersvm.get_linear_coef(coef, self.n_binary_model, self.n_features, c_void_p(self.model))
            self.coef_ = np.frombuffer(coef, dtype=np.float32).astype(float).reshape((self.n_binary_model, self.n_features))
        n_support_ = (c_int * self.n_classes)()
        thundersvm.get_support_classes(n_support_, self.n_classes, c_void_p(self.model))
        self.n_support_ = np.frombuffer(n_support_, dtype=np.int32).astype(int)
        self.shape_fit_ = X.shape
        return self

    def _dense_fit(self, X, y, solver_type, kernel):
        """Train from a dense array; marshals X/y into C buffers."""
        X = np.asarray(X, dtype=np.float32, order='C')
        samples = X.shape[0]
        features = X.shape[1]
        X_1d = X.ravel()
        data = X_1d.ctypes.data_as(POINTER(c_float))
        kernel_type = kernel
        y = np.asarray(y, dtype=np.float32, order='C')
        label = y.ctypes.data_as(POINTER(c_float))
        if (self.class_weight is None):
            # No class weighting: pass empty weight arrays.
            weight_size = 0
            self.class_weight = dict()
            weight_label = (c_int * weight_size)()
            weight_label[:] = list(self.class_weight.keys())
            weight = (c_float * weight_size)()
            weight[:] = list(self.class_weight.values())
        elif (self.class_weight == 'balanced'):
            # sklearn 'balanced': n_samples / (n_classes * bincount(y)).
            y_unique = np.unique(y)
            y_count = np.bincount(y.astype(int))
            weight_label_list = []
            weight_list = []
            for n in range(0, len(y_count)):
                if (y_count[n] != 0):
                    weight_label_list.append(n)
                    weight_list.append((samples / (len(y_unique) * y_count[n])))
            weight_size = len(weight_list)
            weight_label = (c_int * weight_size)()
            weight_label[:] = weight_label_list
            weight = (c_float * weight_size)()
            weight[:] = weight_list
        else:
            # Explicit {label: weight} mapping.
            weight_size = len(self.class_weight)
            weight_label = (c_int * weight_size)()
            weight_label[:] = list(self.class_weight.keys())
            weight = (c_float * weight_size)()
            weight[:] = list(self.class_weight.values())
        # Out-parameters filled by the C call.
        n_features = (c_int * 1)()
        n_classes = (c_int * 1)()
        self._train_succeed = (c_int * 1)()
        thundersvm.dense_model_scikit(samples, features, data, label, solver_type, kernel_type, self.degree, c_float(self._gamma), c_float(self.coef0), c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol), self.probability, weight_size, weight_label, weight, self.verbose, self.max_iter, self.n_jobs, self.max_mem_size, self.gpu_id, n_features, n_classes, self._train_succeed, c_void_p(self.model))
        self.n_features = n_features[0]
        self.n_classes = n_classes[0]

    def _sparse_fit(self, X, y, solver_type, kernel):
        """Train from a CSR matrix; passes the data/indices/indptr triple."""
        X.data = np.asarray(X.data, dtype=np.float32, order='C')
        X.sort_indices()
        kernel_type = kernel
        data = X.data.ctypes.data_as(POINTER(c_float))
        indices = X.indices.ctypes.data_as(POINTER(c_int32))
        indptr = X.indptr.ctypes.data_as(POINTER(c_int32))
        y = np.asarray(y, dtype=np.float32, order='C')
        label = y.ctypes.data_as(POINTER(c_float))
        if (self.class_weight is None):
            weight_size = 0
            self.class_weight = dict()
            weight_label = (c_int * weight_size)()
            weight_label[:] = list(self.class_weight.keys())
            weight = (c_float * weight_size)()
            weight[:] = list(self.class_weight.values())
        elif (self.class_weight == 'balanced'):
            y_unique = np.unique(y)
            y_count = np.bincount(y.astype(int))
            weight_label_list = []
            weight_list = []
            for n in range(0, len(y_count)):
                if (y_count[n] != 0):
                    weight_label_list.append(n)
                    weight_list.append((X.shape[0] / (len(y_unique) * y_count[n])))
            weight_size = len(weight_list)
            weight_label = (c_int * weight_size)()
            weight_label[:] = weight_label_list
            weight = (c_float * weight_size)()
            weight[:] = weight_list
        else:
            weight_size = len(self.class_weight)
            weight_label = (c_int * weight_size)()
            weight_label[:] = list(self.class_weight.keys())
            weight = (c_float * weight_size)()
            weight[:] = list(self.class_weight.values())
        n_features = (c_int * 1)()
        n_classes = (c_int * 1)()
        self._train_succeed = (c_int * 1)()
        thundersvm.sparse_model_scikit(X.shape[0], data, indptr, indices, label, solver_type, kernel_type, self.degree, c_float(self._gamma), c_float(self.coef0), c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol), self.probability, weight_size, weight_label, weight, self.verbose, self.max_iter, self.n_jobs, self.max_mem_size, self.gpu_id, n_features, n_classes, self._train_succeed, c_void_p(self.model))
        self.n_features = n_features[0]
        self.n_classes = n_classes[0]

    def _validate_for_predict(self, X):
        """Validate/convert X for prediction, matching the training format."""
        sparse = sp.isspmatrix(X)
        self._sparse = (sparse and (not callable(self.kernel)))
        X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
        if (self._sparse and (not sp.isspmatrix(X))):
            X = sp.csr_matrix(X)
        if self._sparse:
            X.sort_indices()
        if (sp.issparse(X) and (not self._sparse) and (not callable(self.kernel))):
            raise ValueError(('cannot use sparse input in %r trained on dense data' % type(self).__name__))
        return X

    def predict(self, X):
        """Predicted labels for X (float32 array of shape (n_samples,))."""
        X = self._validate_for_predict(X)
        predict = (self._sparse_predict if self._sparse else self._dense_predict)
        return predict(X)

    def predict_proba(self, X):
        """Class-probability estimates; requires fitting with probability=1."""
        n_classes = (c_int * 1)()
        thundersvm.get_n_classes(c_void_p(self.model), n_classes)
        self.n_classes = n_classes[0]
        if (self.probability == 0):
            print('Should fit with probability = 1')
            return
        else:
            # NOTE: size uses X.shape[0] before validation; validation below
            # may convert X but does not change the number of rows.
            size = (X.shape[0] * self.n_classes)
            samples = X.shape[0]
            self.predict_pro_ptr = (c_float * size)()
            X = self._validate_for_predict(X)
            # A predict pass populates the probabilities inside the C model.
            if self._sparse:
                self._sparse_predict(X)
            else:
                self._dense_predict(X)
            thundersvm.get_pro(c_void_p(self.model), self.predict_pro_ptr)
            self.predict_prob = np.frombuffer(self.predict_pro_ptr, dtype=np.float32).reshape((samples, self.n_classes))
            return self.predict_prob

    def _dense_predict(self, X):
        """Predict labels for a dense array via the C library."""
        self.predict_label_ptr = (c_float * X.shape[0])()
        X = np.asarray(X, dtype=np.float64, order='C')
        samples = X.shape[0]
        features = X.shape[1]
        X_1d = X.ravel()
        data = (c_float * X_1d.size)()
        data[:] = X_1d  # implicit float64 -> float32 narrowing on copy
        thundersvm.dense_predict(samples, features, data, c_void_p(self.model), self.predict_label_ptr, self.verbose)
        self.predict_label = np.frombuffer(self.predict_label_ptr, dtype=np.float32)
        return self.predict_label

    def _sparse_predict(self, X):
        """Predict labels for a CSR matrix via the C library."""
        self.predict_label_ptr = (c_float * X.shape[0])()
        data = (c_float * X.data.size)()
        data[:] = X.data
        indices = (c_int * X.indices.size)()
        indices[:] = X.indices
        indptr = (c_int * X.indptr.size)()
        indptr[:] = X.indptr
        thundersvm.sparse_predict(X.shape[0], data, indptr, indices, c_void_p(self.model), self.predict_label_ptr, self.verbose)
        self.predict_label = np.frombuffer(self.predict_label_ptr, dtype=np.float32)
        return self.predict_label

    def decision_function(self, X):
        """Per-sample decision values, shape (n_samples, n_binary_model).

        Only supported for classification/one-class solvers.
        """
        X = self._validate_for_predict(X)
        n_binary_model = (c_int * 1)()
        thundersvm.get_n_binary_models(c_void_p(self.model), n_binary_model)
        self.n_binary_model = n_binary_model[0]
        if (not (self._impl in ['c_svc', 'nu_svc', 'one_class'])):
            print('Not support decision_function!')
            return
        if self._sparse:
            dec_func = self._sparse_decision_function(X)
        else:
            dec_func = self._dense_decision_function(X)
        return dec_func

    def _dense_decision_function(self, X):
        """Decision values for dense input."""
        X = check_array(X, dtype=np.float64, order='C')
        samples = X.shape[0]
        features = X.shape[1]
        X_1d = X.ravel()
        data = (c_float * X_1d.size)()
        data[:] = X_1d
        dec_size = (X.shape[0] * self.n_binary_model)
        dec_value_ptr = (c_float * dec_size)()
        thundersvm.dense_decision(samples, features, data, c_void_p(self.model), dec_size, dec_value_ptr)
        self.dec_values = np.frombuffer(dec_value_ptr, dtype=np.float32).astype(float).reshape((X.shape[0], self.n_binary_model))
        return self.dec_values

    def _sparse_decision_function(self, X):
        """Decision values for CSR input."""
        X.data = np.asarray(X.data, dtype=np.float64, order='C')
        data = (c_float * X.data.size)()
        data[:] = X.data
        indices = (c_int * X.indices.size)()
        indices[:] = X.indices
        indptr = (c_int * X.indptr.size)()
        indptr[:] = X.indptr
        dec_size = (X.shape[0] * self.n_binary_model)
        dec_value_ptr = (c_float * dec_size)()
        thundersvm.sparse_decision(X.shape[0], data, indptr, indices, c_void_p(self.model), dec_size, dec_value_ptr)
        self.dec_values = np.frombuffer(dec_value_ptr, dtype=np.float32).reshape((X.shape[0], self.n_binary_model))
        return self.dec_values

    def save_to_file(self, path):
        """Serialize the native model to *path*."""
        if (self.model is None):
            raise ValueError('Cannot serialize model before fitting')
        thundersvm.save_to_file_scikit(c_void_p(self.model), path.encode('utf-8'))

    def save_to_string(self):
        """Serialize the native model to bytes (used by pickling)."""
        if (self.model is None):
            raise ValueError('Cannot serialize model before fitting')
        thundersvm.save_to_string_scikit.restype = c_void_p
        # NOTE: local `sp` shadows the module-level scipy.sparse alias here;
        # harmless because nothing below in this method uses scipy.
        sp = thundersvm.save_to_string_scikit(c_void_p(self.model))
        retval = string_at(sp)
        # The C side allocated the string; free it to avoid a leak.
        thundersvm.free_string(cast(sp, c_void_p))
        return retval

    def load_from_file(self, path):
        """Load a serialized model from *path* and rebuild attributes."""
        if (self.model is None):
            thundersvm.model_new.restype = c_void_p
            self.model = thundersvm.model_new(SVM_TYPE.index(self._impl))
            if (self.max_mem_size != (- 1)):
                thundersvm.set_memory_size(c_void_p(self.model), self.max_mem_size)
        thundersvm.load_from_file_scikit(c_void_p(self.model), path.encode('utf-8'))
        self._post_load_init()

    def load_from_string(self, data):
        """Load a serialized model from bytes and rebuild attributes."""
        if (self.model is None):
            thundersvm.model_new.restype = c_void_p
            self.model = thundersvm.model_new(SVM_TYPE.index(self._impl))
            if (self.max_mem_size != (- 1)):
                thundersvm.set_memory_size(c_void_p(self.model), self.max_mem_size)
        thundersvm.load_from_string_scikit(c_void_p(self.model), data)
        self._post_load_init()

    def _post_load_init(self):
        """Repopulate Python-side attributes from a freshly loaded C model."""
        degree = (c_int * 1)()
        gamma = (c_float * 1)()
        coef0 = (c_float * 1)()
        probability = (c_int * 1)()
        kernel = (c_char * 20)()
        thundersvm.init_model_param(kernel, degree, gamma, coef0, probability, c_void_p(self.model))
        n_classes = (c_int * 1)()
        thundersvm.get_n_classes(c_void_p(self.model), n_classes)
        self.n_classes = n_classes[0]
        n_support_ = (c_int * self.n_classes)()
        thundersvm.get_support_classes(n_support_, self.n_classes, c_void_p(self.model))
        self.n_support_ = np.frombuffer(n_support_, dtype=np.int32).astype(int)
        self.n_sv = thundersvm.n_sv(c_void_p(self.model))
        n_feature = (c_int * 1)()
        thundersvm.get_sv_max_index(c_void_p(self.model), n_feature)
        self.n_features = n_feature[0]
        # Same support-vector extraction as in fit().
        csr_row = (c_int * (self.n_sv + 1))()
        csr_col = (c_int * (self.n_sv * self.n_features))()
        csr_data = (c_float * (self.n_sv * self.n_features))()
        data_size = (c_int * 1)()
        sv_indices = (c_int * self.n_sv)()
        thundersvm.get_sv(csr_row, csr_col, csr_data, data_size, sv_indices, c_void_p(self.model))
        self.row = np.frombuffer(csr_row, dtype=np.int32)
        self.col = np.frombuffer(csr_col, dtype=np.int32)[:data_size[0]]
        self.data = np.frombuffer(csr_data, dtype=np.float32)[:data_size[0]]
        self.support_vectors_ = sp.csr_matrix((self.data, self.col, self.row))
        self.support_ = np.frombuffer(sv_indices, dtype=np.int32)
        dual_coef = (c_float * ((self.n_classes - 1) * self.n_sv))()
        thundersvm.get_coef(dual_coef, self.n_classes, self.n_sv, c_void_p(self.model))
        self.dual_coef_ = np.frombuffer(dual_coef, dtype=np.float32).astype(float).reshape(((self.n_classes - 1), self.n_sv))
        rho_size = int(((self.n_classes * (self.n_classes - 1)) / 2))
        self.n_binary_model = rho_size
        rho = (c_float * rho_size)()
        thundersvm.get_rho(rho, rho_size, c_void_p(self.model))
        self.intercept_ = np.frombuffer(rho, dtype=np.float32).astype(float)
        if (self.kernel == 'linear'):
            coef = (c_float * (self.n_binary_model * self.n_features))()
            thundersvm.get_linear_coef(coef, self.n_binary_model, self.n_features, c_void_p(self.model))
            self.coef_ = np.frombuffer(coef, dtype=np.float32).astype(float).reshape((self.n_binary_model, self.n_features))
        # Restore the hyper-parameters stored inside the model file.
        self.kernel = kernel.value.decode()
        self.degree = degree[0]
        if (gamma[0] != 0.0):
            self.gamma = gamma[0]
        self.coef0 = coef0[0]
        self.probability = probability[0]

    def __getstate__(self):
        # ctypes buffers and the raw pointer are not picklable; replace the
        # native model with its string serialization.
        state = self.__dict__.copy()
        state['predict_label_ptr'] = None
        state['_train_succeed'] = None
        if (state['model'] is not None):
            state['_saved_as_str'] = self.save_to_string()
        state['model'] = None
        return state

    def __setstate__(self, state):
        # Rebuild the native model from the serialized string, if any.
        self.__dict__.update(state)
        if ('_saved_as_str' in state):
            self.load_from_string(state['_saved_as_str'])
def upload_file_r2(filename: str, url: str, bucket: str):
    """Upload *filename* to an R2 (S3-compatible) bucket under the same key.

    Credentials come from the CLOUDFLARE_ACCESS_KEY_ID /
    CLOUDFLARE_ACCESS_SECRET_KEY environment variables; upload progress is
    reported through R2ProgressPercentage.
    """
    client = boto3.client(
        's3',
        endpoint_url=url,
        aws_access_key_id=os.environ.get('CLOUDFLARE_ACCESS_KEY_ID'),
        aws_secret_access_key=os.environ.get('CLOUDFLARE_ACCESS_SECRET_KEY'),
        region_name='auto',
    )
    client.upload_file(filename, bucket, filename,
                       Callback=R2ProgressPercentage(filename))
def text2html_table(items: Sequence[Collection[str]]) -> str:
    """Render *items* as an HTML table.

    The first row becomes the header; remaining rows become the body.  Every
    cell is escaped/processed through ``_treat_html``.

    Fixes vs. the previous version: the ``<tbody>`` opening tag was emitted
    twice (but closed once), producing invalid HTML; the placeholder-free
    f-strings were plain literals.  The ``items`` annotation is ``Sequence``
    because the function indexes and slices it.
    """
    html_code = '<table border="1" class="dataframe">'
    html_code += '\n  <thead>\n    <tr style="text-align: right;">\n'
    for cell in items[0]:
        html_code += f'      <th>{_treat_html(cell)}</th>'
    html_code += '\n    </tr>\n  </thead>\n  <tbody>'
    for line in items[1:]:
        html_code += '    <tr>'
        for cell in line:
            html_code += f'      <td>{_treat_html(cell)}</td>'
        html_code += '    </tr>'
    html_code += '  </tbody>\n</table>'
    return html_code
class IdentityBlock(M.Model):
    """Pre-activation residual block: BN -> ReLU -> conv3x3 -> BN -> ReLU ->
    conv3x3, added back onto the input (assumes M/L are the project's model
    and layer helper modules)."""

    def initialize(self, fmap):
        self.bn0 = L.batch_norm()
        self.activ = L.activation(M.PARAM_RELU)
        self.c1 = L.conv2D(3, fmap, pad='VALID', usebias=False)
        self.bn1 = L.batch_norm()
        self.c2 = L.conv2D(3, fmap, pad='VALID', usebias=False)

    def forward(self, x):
        identity = x
        # Pre-activation, then two padded 3x3 convolutions.
        pre = self.activ(self.bn0(x))
        branch = self.c1(M.pad(pre, 1))
        branch = self.activ(self.bn1(branch))
        branch = self.c2(M.pad(branch, 1))
        return branch + identity
def convert_latex(latex_file, colors_head):
    """Apply the color-markup pipeline to *latex_file*'s contents.

    The original body was one long copy-paste chain; the rules are now
    table-driven.  The exact call order of the original is preserved, since
    each transformation is applied to the output of the previous one.
    """
    latex_contents = open_tex_file(latex_file)
    latex_contents = append_predefined_color(latex_contents, latex_file, colors_head)

    # (marker, color, inner|outer) rules for brace-delimited constructs,
    # in the original application order.
    brace_rules = [
        ('title_begin', 'MYTITLE', 'inner'),
        ('abs_begin', 'MYPARA', 'inner'),
        ('abs_begin', 'MYABS', 'outer'),
        ('author_begin', 'MYAUTHOR', 'inner'),
        ('mail_begin', 'MYMAIL', 'inner'),
        ('institute_begin', 'MYAFFILI', 'inner'),
        ('address_begin', 'MYAFFILI', 'inner'),
        ('affili_begin', 'MYAFFILI', 'inner'),
        ('sec_begin', 'MYSECTION', 'inner'),
        ('subsec_begin', 'MYSUBSECTION', 'inner'),
        ('subsubsec_begin', 'MYSUBSUBSECTION', 'inner'),
        ('footnote_begin1', 'MYFOOTER', 'inner'),
        ('footnote_begin2', 'MYFOOTER', 'inner'),
        ('thanks_begin', 'MYFOOTER', 'inner'),
        ('thanksref_begin', 'MYFOOTER', 'inner'),
        ('cap_begin', 'MYCAP', 'inner'),
        ('ref_1', 'MYREF', 'outer'),
    ]
    for marker, color, scope in brace_rules:
        latex_contents = color_brace(latex_contents, latex_file, marker, color, inner_outer=scope)

    # Display-math environments: colorize, then strip blank lines inside them.
    math_envs = ['array', 'eqnarray', 'eqnarray*', 'align', 'align*', 'equation', 'equation*']
    for env in math_envs:
        latex_contents = color_begin_end(latex_contents, latex_file, env, 'MYEQU', inner_outer='outer')
    for env in math_envs:
        latex_contents = replace_begin_end_blank_line(latex_contents, latex_file, env, '%')

    latex_contents = color_normal_eqution(latex_contents, latex_file, 'MYEQU')

    # Remaining begin/end environments, in the original order.  Note 'abstract'
    # is intentionally processed twice (inner MYPARA, then outer MYABS).
    env_rules = [
        ('thebibliography', 'MYREF', 'outer'),
        ('algorithm', 'MYALG', 'outer'),
        ('algorithm*', 'MYALG', 'outer'),
        ('algorithmic', 'MYALG', 'outer'),
        ('tabular', 'MYTAB', 'outer'),
        ('tabular*', 'MYTAB', 'outer'),
        ('abstract', 'MYPARA', 'inner'),
        ('abstract', 'MYABS', 'outer'),
    ]
    for env, color, scope in env_rules:
        latex_contents = color_begin_end(latex_contents, latex_file, env, color, inner_outer=scope)

    latex_contents = color_para(latex_contents, latex_file)
    return latex_contents
class TestTanhDistortion:
    """Unit tests for the TanhDistortion augmentation."""

    def test_single_channel(self):
        sample_rate = 16000
        samples = np.random.normal(0, 0.1, size=(2048,)).astype(np.float32)
        transform = TanhDistortion(min_distortion=0.2, max_distortion=0.6, p=1.0)
        processed = transform(samples=samples, sample_rate=sample_rate)

        # dtype/shape preserved; peak reduced; RMS approximately preserved.
        assert processed.dtype == samples.dtype
        assert processed.shape == samples.shape
        assert np.amax(processed) < np.amax(samples)
        assert calculate_rms(processed) == pytest.approx(calculate_rms(samples), abs=0.001)

    def test_multichannel(self):
        num_channels = 3
        sample_rate = 16000
        samples = np.random.normal(0, 0.1, size=(num_channels, 5555)).astype(np.float32)
        transform = TanhDistortion(min_distortion=0.05, max_distortion=0.6, p=1.0)
        processed = transform(samples=samples, sample_rate=sample_rate)

        assert processed.dtype == samples.dtype
        assert processed.shape == samples.shape
        # Each channel must actually change while keeping its RMS.
        for channel in range(num_channels):
            assert not np.allclose(samples[channel], processed[channel])
            assert calculate_rms(processed[channel]) == pytest.approx(
                calculate_rms(samples[channel]), abs=0.001)
class ImageResize(object):
    """Resize a PIL image or tensor to the size computed by
    ``get_resize_size(img, max_size)`` (presumably an aspect-preserving
    target — confirm against get_resize_size).

    BUG FIX: ``__repr__`` read ``self.size``, an attribute that was never
    set (the constructor stores ``max_size``), so every repr() raised
    AttributeError.  It also now tolerates a string interpolation mode
    (the tensor path), which would previously KeyError in the PIL lookup.
    """

    def __init__(self, max_size, interpolation=Image.BILINEAR):
        assert isinstance(max_size, int)
        self.max_size = max_size
        self.interpolation = interpolation

    def __call__(self, img):
        if isinstance(img, torch.Tensor):
            # Tensor path: F.interpolate-style resize needs a string mode.
            assert isinstance(self.interpolation, str)
            return img_tensor_resize(img, size=get_resize_size(img, self.max_size),
                                     mode=self.interpolation, align_corners=False)
        return img_resize(img, get_resize_size(img, self.max_size), self.interpolation)

    def __repr__(self):
        if isinstance(self.interpolation, str):
            interpolate_str = self.interpolation
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        return (self.__class__.__name__ +
                '(size={0}, interpolation={1})'.format(self.max_size, interpolate_str))
class Tdnn1a(AcousticModel):
    """TDNN acoustic model: a stack of 1-D convolutions over frame features.

    Maps (batch, num_features, time) to per-frame class log-probabilities of
    shape (batch, num_classes, time // subsampling_factor), the time reduction
    coming from the single strided convolution in the middle of the stack.
    """

    def __init__(self, num_features: int, num_classes: int, subsampling_factor: int=3) -> None:
        super(Tdnn1a, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.subsampling_factor = subsampling_factor
        # Each conv is followed by ReLU + affine-free BatchNorm, except the
        # final projection to class scores.  Only the 7th conv is strided
        # (by subsampling_factor); all 3-wide convs use padding=1 so the
        # time length is otherwise preserved.
        self.tdnn = nn.Sequential(
            nn.Conv1d(in_channels=num_features, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            # The only strided conv: performs the temporal subsampling.
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=self.subsampling_factor, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=500, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=500, affine=False),
            nn.Conv1d(in_channels=500, out_channels=2000, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=2000, affine=False),
            nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=2000, affine=False),
            # Final 1x1 projection to class scores (no ReLU/BN).
            nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0))

    def forward(self, x: Tensor) -> Tensor:
        """Return per-frame log-probabilities over classes (log-softmax on dim 1)."""
        x = self.tdnn(x)
        x = nn.functional.log_softmax(x, dim=1)
        return x

    def write_tensorboard_diagnostics(self, tb_writer: SummaryWriter, global_step: Optional[int]=None):
        """Log per-parameter L2 and L-inf weight norms to TensorBoard."""
        tb_writer.add_scalars('train/weight_l2_norms', measure_weight_norms(self, norm='l2'), global_step=global_step)
        tb_writer.add_scalars('train/weight_max_norms', measure_weight_norms(self, norm='linf'), global_step=global_step)
# NOTE(review): the '@' appears to have been lost in formatting — this call
# directly precedes the class and matches the registry-decorator idiom; confirm
# against the original source.
@_registry(pattern_type='LayerNormWithReduceMean')
class LayerNormWithReduceMean(Pattern):
    """Graph-rewrite pattern: LayerNorm -> ReduceMean becomes
    LayerNorm -> Reshape -> ReduceMean -> Reshape, with attributes set so the
    reshapes flatten to (-1, -1, hidden) before and (-1, hidden) after."""

    def __call__(self, model):
        # Declarative mapping consumed by util.pattern_mapping: 'in' is the
        # matched op sequence, 'out' the replacement sequence; the tensor maps
        # describe how input/output tensors are rewired onto the new nodes.
        pattern_mapping_config = {
            'LayerNormWithReduceMean': [
                {
                    'patterns': {
                        'in': [[(0, 'LayerNorm'), (1, 'ReduceMean')]],
                        'out': [[(0, 'LayerNorm'), (1, 'Reshape'), (2, 'ReduceMean'), (3, 'Reshape')]],
                    },
                    'search_mode': 'op_type',
                    'node_names': {
                        0: 0,
                        1: 'reshape_before_reducemean',
                        2: 'reducemean_after_reshape',
                        3: 1,
                    },
                    'input_tensors': {
                        0: [[{0: [0]}, {0: [1]}, {0: [2]}], [[0, 1, 2], 3]],
                        1: [[{'input_data': [0]}], [[1], 2]],
                        2: [[], [[], 1]],
                        3: [[], [[], 1]],
                    },
                    'output_tensors': {
                        0: [[], [[], 1]],
                        1: [[], [[], 1]],
                        2: [[], [[], 1]],
                        3: [[{1: [0]}], [[0], 1]],
                    },
                    'returns': [0, 1],
                }
            ]
        }

        def _set_attr(hidden_size, epsilon, reduce_mean_attr, node_names, model):
            # Assign per-node attributes of the rewritten subgraph:
            # LayerNorm epsilon, pre-reshape to (-1,-1,hidden) over dims 0,1,
            # the original ReduceMean attrs, and post-reshape to (-1,hidden).
            attr1 = OrderedDict()
            attr1['epsilon'] = float(epsilon)
            attr2 = OrderedDict()
            attr2['dst_shape'] = ('-1,-1,' + str(hidden_size))
            attr2['dims'] = '0,1'
            attr3 = reduce_mean_attr
            attr4 = OrderedDict()
            attr4['dst_shape'] = ('-1,' + str(hidden_size))
            ln_node_idx = model.get_node_id(node_names[0])
            model.nodes[ln_node_idx].attr = attr1
            reshape_1_node_idx = model.get_node_id(node_names[1])
            model.nodes[reshape_1_node_idx].attr = attr2
            reduce_mean_node_idx = model.get_node_id(node_names[2])
            model.nodes[reduce_mean_node_idx].attr = attr3
            reshape_2_node_idx = model.get_node_id(node_names[3])
            model.nodes[reshape_2_node_idx].attr = attr4

        pattern_dict = pattern_mapping_config['LayerNormWithReduceMean'][0]
        (model, new_node_names, ret_old_nodes) = util.pattern_mapping('LayerNormWithReduceMean', pattern_dict, model)
        if (len(new_node_names) != 0):
            for j in range(len(new_node_names)):
                ln_node = ret_old_nodes[j][0]
                reduce_mean_node = ret_old_nodes[j][1]
                # hidden size is taken from the last dim of LayerNorm's last
                # input tensor (its weight/shape carrier).
                hidden_size = int(ln_node.input_tensors[(- 1)].shape[(- 1)])
                epsilon = ln_node.attr['epsilon']
                _set_attr(hidden_size, epsilon, reduce_mean_node.attr, new_node_names[j], model)
            return model
        return model
def threeClassAcc(labels, preds):
    """Accuracy of boolean predictions over the non-zero-labeled entries.

    labels: array with positive / negative / zero entries (zero = unlabeled,
    excluded from the denominator via abs().sum()).
    preds:  boolean array, True meaning "predicted positive".
    """
    correct_pos = np.sum((labels > 0) & preds)
    correct_neg = np.sum((labels < 0) & (~preds))
    labeled_total = np.abs(labels).sum()
    return (correct_pos + correct_neg) / labeled_total
class TransformerSentenceEncoderLayerStd(TransformerSentenceEncoderLayer):
    """Wrapper that shares all submodules of an existing
    TransformerSentenceEncoderLayer and returns only the hidden states
    (dropping the attention weights) from forward()."""

    # Attributes copied verbatim from the wrapped layer, in registration order.
    _SHARED_ATTRS = (
        'embedding_dim', 'dropout', 'activation_dropout', 'activation_fn',
        'self_attn', 'dropout1', 'dropout2', 'dropout3', 'layer_norm_first',
        'self_attn_layer_norm', 'fc1', 'fc2', 'final_layer_norm',
    )

    def __init__(self, sent_enc_layer):
        # Deliberately skip TransformerSentenceEncoderLayer.__init__ (which
        # would build fresh submodules) and initialize the grandparent only.
        super(TransformerSentenceEncoderLayer, self).__init__()
        for name in self._SHARED_ATTRS:
            setattr(self, name, getattr(sent_enc_layer, name))

    def forward(self, x, self_attn_mask=None, self_attn_padding_mask=None, need_weights=None, att_args=None):
        hidden, _attn = super().forward(
            x, self_attn_mask, self_attn_padding_mask, need_weights, att_args)
        return hidden
def atari_match_conv(num_frames, num_inputs_per_frame):
    """Two-layer Atari-style conv trunk with orthogonal init (ReLU gain,
    zero bias).  Input channels = num_frames * num_inputs_per_frame; the
    second conv emits 8 * num_frames channels."""
    total_inputs = num_frames * num_inputs_per_frame

    def _init(module):
        return init(module,
                    nn.init.orthogonal_,
                    lambda x: nn.init.constant_(x, 0),
                    nn.init.calculate_gain('relu'))

    return nn.Sequential(
        _init(nn.Conv2d(total_inputs, 64, 8, stride=4)),
        nn.ReLU(),
        _init(nn.Conv2d(64, 8 * num_frames, 5, stride=1)),
        nn.ReLU(),
    )
class ANN_models_class(models.Model):
    """Two-layer fully-connected classifier: Dense(Nh)+ReLU -> Dense(Nout)+softmax.

    FIX: the activation layers were instantiated inside ``call()``, so new
    Keras Layer objects were created on every forward pass — defeating layer
    tracking/serialization.  They are now created once in ``__init__``.
    The math is unchanged.
    """

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        # NOTE(review): Nin is accepted but unused (Dense infers the input
        # width on first call); kept for interface compatibility.
        self.hidden = layers.Dense(Nh)
        self.last = layers.Dense(Nout)
        self.relu = layers.Activation('relu')
        self.softmax = layers.Activation('softmax')

    def call(self, x):
        h = self.relu(self.hidden(x))
        return self.softmax(self.last(h))
def vgg16Netvlad(image_batch):
    """VGG16 trunk + NetVLAD pooling (TF1 graph mode).

    image_batch: 4-D tensor (N, H, W, C) with C == 1 or 3.
    Returns (descriptor, grad_in) where grad_in is the raw conv5_3 output
    (before L2-normalization and VLAD pooling).
    """
    assert (len(image_batch.shape) == 4)
    with tf.variable_scope('vgg16_netvlad_pca'):
        if (image_batch.shape[3] == 1):
            # Grayscale input: replicate the single channel to 3 channels with
            # a fixed all-ones 1x1 convolution (stride 1 everywhere).
            x = tf.nn.conv2d(image_batch, np.ones((1, 1, 1, 3)), np.ones(4).tolist(), 'VALID')
        else:
            assert (image_batch.shape[3] == 3)
            x = image_batch
        # Mean-RGB subtraction; the value is a learned/loaded variable.
        average_rgb = tf.get_variable('average_rgb', 3, dtype=image_batch.dtype)
        x = (x - average_rgb)

        def vggConv(inputs, numbers, out_axis, with_relu):
            # 3x3 stride-1 'same' conv.  with_relu=False defers the ReLU so a
            # pooling layer can be inserted between conv and activation.
            if with_relu:
                activation = tf.nn.relu
            else:
                activation = None
            return tf.layers.conv2d(inputs, out_axis, [3, 3], 1, padding='same', activation=activation, name=('conv%s' % numbers))

        def vggPool(inputs):
            # 2x2 max-pool, stride 2.
            return tf.layers.max_pooling2d(inputs, 2, 2)

        # Standard VGG16 block structure; note each block's last conv has its
        # ReLU applied *after* the pool (conv -> pool -> relu).
        x = vggConv(x, '1_1', 64, True)
        x = vggConv(x, '1_2', 64, False)
        x = vggPool(x)
        x = tf.nn.relu(x)
        x = vggConv(x, '2_1', 128, True)
        x = vggConv(x, '2_2', 128, False)
        x = vggPool(x)
        x = tf.nn.relu(x)
        x = vggConv(x, '3_1', 256, True)
        x = vggConv(x, '3_2', 256, True)
        x = vggConv(x, '3_3', 256, False)
        x = vggPool(x)
        x = tf.nn.relu(x)
        x = vggConv(x, '4_1', 512, True)
        x = vggConv(x, '4_2', 512, True)
        x = vggConv(x, '4_3', 512, False)
        x = vggPool(x)
        x = tf.nn.relu(x)
        x = vggConv(x, '5_1', 512, True)
        x = vggConv(x, '5_2', 512, True)
        # conv5_3 kept un-activated: it is returned for gradient-based use.
        grad_in = vggConv(x, '5_3', 512, False)
        # Channel-wise L2 normalization before VLAD aggregation (64 clusters).
        x = tf.nn.l2_normalize(grad_in, axis=(- 1))
        x = layers.netVLAD(x, 64)
        return (x, grad_in)
# NOTE(review): presumably decorated with @dataclass upstream (field() defaults
# only make sense under dataclass processing) — confirm against the original.
class UnpairedAudioTextConfig(FairseqDataclass):
    """Task configuration for unpaired audio/text training (hydra dataclass)."""

    # Required paths (MISSING forces the user to supply them).
    data: str = field(default=MISSING, metadata={'help': 'path to data directory containing audio'})
    text_data: str = field(default=MISSING, metadata={'help': 'path to data directory containing text'})
    # Optional cap on example length (units defined by the dataset loader).
    max_length: Optional[int] = None
    labels: Optional[str] = field(default=None, metadata={'help': 'extension of the label file to load, used for fine-tuning'})
    aux_target_postfix: Optional[str] = field(default=None, metadata={'help': 'auxaliry target filename extension'})
    unfiltered: bool = field(default=False, metadata={'help': 'load data with _unfiltered suffix'})
    ctc_eval: bool = field(default=False, metadata={'help': 'eval UER as if computed by CTC'})
    sort_by_length: bool = field(default=True, metadata={'help': 'sort examples by length of audio timesteps'})
    shuffle: bool = field(default=True, metadata={'help': 'shuffle examples'})
    append_eos: bool = field(default=False, metadata={'help': 'append eos'})
    uppercase: Optional[bool] = field(default=False, metadata={'help': 'uppercase for LM score computation'})
    skipwords: Optional[str] = field(default='', metadata={'help': 'comma-separated words to be removed for LM score computation'})
    # Language-model scoring options.
    kenlm_path: Optional[str] = None
    vocab_usage_power: float = 2
    word_decoder_config: Optional[KaldiDecoderConfig] = None
    word_kenlm_path: Optional[str] = None
    # NOTE(review): a shared default instance; fine under @dataclass/hydra
    # (replaced per-instance), but would be shared state without it.
    decoding_config: DecodingConfig = DecodingConfig()
def normalize(word):
    """Normalize a word: coerce to str, fold diacritic variants to their base
    character (per the DIACRITICS mapping), and replace spaces with '_'.

    The previous version re-checked ``isinstance(word, str)`` after already
    converting with ``str(word)`` — that branch was unreachable (and would
    have produced ``bytes`` via ``encode`` had it ever run); it is removed.
    The inner loop also shadowed its own iterable (``for v in v``).
    """
    if not isinstance(word, str):
        word = str(word)
    # Fold every diacritic variant down to its base character.
    for base, variants in DIACRITICS.items():
        for variant in variants:
            word = word.replace(variant, base)
    return word.replace(' ', '_')
class DummyObject(type):
    """Metaclass for placeholder classes of unavailable backends: any
    class-attribute access triggers a requires_backends error instead of
    silently returning something unusable."""

    def __getattr__(cls, key):
        # Private/dunder lookups (except the whitelisted hook) fall through to
        # the parent metaclass so machinery like pickling keeps working.
        # NOTE(review): `type` does not define __getattr__, so this super()
        # call likely raises AttributeError itself — possibly relied upon to
        # signal "attribute not found"; confirm intent upstream.
        if (key.startswith('_') and (key != '_load_connected_pipes')):
            return super().__getattr__(cls, key)
        # Any other access: report the missing backend(s) for this dummy class.
        requires_backends(cls, cls._backends)
def determine_node_label_by_layertype(layer, layertype, rankdir):
    """Build a graphviz node label for a caffe layer.

    Vertical layouts (TB/BT) separate fields with spaces, horizontal ones
    with newlines.  Convolution/Pooling layers get kernel/stride/pad details.
    """
    separator = ' ' if rankdir in ('TB', 'BT') else '\n'

    if layertype == 'Convolution':
        conv = layer.convolution_param
        return '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' % (
            layer.name, separator, layertype, separator,
            conv.kernel_size, separator, conv.stride, separator, conv.pad)

    if layertype == 'Pooling':
        pool = layer.pooling_param
        pool_kind = get_pooling_types_dict()[pool.pool]
        return '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' % (
            layer.name, separator, pool_kind, layertype, separator,
            pool.kernel_size, separator, pool.stride, separator, pool.pad)

    # Everything else: just name and type.
    return '"%s%s(%s)"' % (layer.name, separator, layertype)
def quantize_items(items, ticks=120):
    """Snap each item's start (and end, by the same shift) onto the nearest
    multiple of *ticks*, in place.

    The grid is built with ``np.arange``, so it spans [0, last item's start)
    — the endpoint itself is excluded by arange semantics. Items are assumed
    to have mutable ``start``/``end`` attributes. Returns the same list.
    """
    grid = np.arange(0, items[-1].start, ticks, dtype=int)
    for item in items:
        nearest = grid[np.argmin(np.abs(grid - item.start))]
        delta = nearest - item.start
        item.start += delta
        item.end += delta
    return items
def init_logger(log_file=None, log_file_level=logging.NOTSET):
    """Configure and return the root logger.

    Replaces any existing handlers with a single console handler at INFO
    level; when *log_file* is a non-empty path, also attaches a file handler
    at *log_file_level*. Both handlers share the same timestamped format.
    """
    formatter = logging.Formatter('[%(asctime)s %(levelname)s] %(message)s')
    root = logging.getLogger()
    root.setLevel(logging.INFO)

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    # Deliberately replace (not append) so repeated calls don't duplicate output.
    root.handlers = [console]

    if log_file and log_file != '':
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(log_file_level)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
    return root
def d1_metric_np(disp_est, disp_gt, mask):
    """D1 stereo disparity error: percentage of masked pixels that are
    outliers (absolute error > 3 px AND > 5% of the ground-truth value).
    """
    if mask.sum() == 0:
        # Preserve the original's return type: a NumPy scalar zero.
        return np.mean(0.0)
    est = disp_est[mask]
    gt = disp_gt[mask]
    abs_err = np.abs(gt - est)
    outlier = (abs_err > 3) & ((abs_err / np.abs(gt)) > 0.05)
    return np.mean(outlier.astype(float)) * 100
def test_damaged_helmet():
    """Check that the DamagedHelmetModel dataset resolves to the expected
    download/extract directory layout and that its .glb file exists."""
    expected_prefix = 'DamagedHelmetModel'
    (expected_root, expected_download, expected_extract) = get_test_data_dirs(expected_prefix)
    model = o3d.data.DamagedHelmetModel()
    assert Path(expected_download).is_dir()
    assert Path(model.path) == (expected_extract / 'DamagedHelmetModel.glb')
    assert Path(model.path).is_file()
    assert model.prefix == expected_prefix
    assert Path(model.data_root) == expected_root
    assert Path(model.download_dir) == expected_download
    assert Path(model.extract_dir) == expected_extract
def ball_query_gpu(radius, nsample, xyz, new_xyz):
    """CUDA-only ball query: for each query point in *new_xyz*, gather up to
    *nsample* neighbor indices from *xyz* within *radius*.

    Raises NotImplementedError when no CUDA device is available.
    """
    if open3d.core.cuda.device_count() <= 0:
        raise NotImplementedError
    return ball_query(xyz, new_xyz, radius, nsample)
class Kinetics200DataModule(KineticsDataModule):
    """Kinetics-200 variant of :class:`KineticsDataModule` (200 action classes)."""

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, video_path_prefix: str='', decode_audio: bool=False, decoder: str='pyav', decoder_args: Optional[DictConfig]=None) -> None:
        """Forward all arguments to the base data module.

        Fix: ``decoder_args`` previously used a mutable default (``{}``),
        which is shared across all instances and calls; it now defaults to
        ``None`` and a fresh empty dict is created per instance — callers
        that relied on the old default get identical behavior.
        """
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    def num_classes(self) -> int:
        """Number of target classes in Kinetics-200."""
        return 200
def get_file_from_repo(path_or_repo: Union[(str, os.PathLike)], filename: str, cache_dir: Optional[Union[(str, os.PathLike)]]=None, force_download: bool=False, resume_download: bool=False, proxies: Optional[Dict[(str, str)]]=None, use_auth_token: Optional[Union[(bool, str)]]=None, revision: Optional[str]=None, local_files_only: bool=False):
    """Resolve *filename* either from a local directory or from a remote
    model repo, returning the local (possibly cached) path, or ``None``.

    Returns ``None`` when the file does not exist locally or cannot be
    fetched for ordinary reasons; raises ``EnvironmentError`` when the repo
    or revision itself is invalid.

    NOTE(review): the hub URLs in the error messages below appear to have
    been stripped from this copy of the source — the stray quotes inside the
    strings are part of that redaction, not intentional text.
    """
    # Offline mode forces local-only resolution regardless of the argument.
    if (is_offline_mode() and (not local_files_only)):
        logger.info('Offline mode: forcing local_files_only=True')
        local_files_only = True
    path_or_repo = str(path_or_repo)
    if os.path.isdir(path_or_repo):
        # Local directory: the file either exists there or we give up.
        resolved_file = os.path.join(path_or_repo, filename)
        return (resolved_file if os.path.isfile(resolved_file) else None)
    else:
        # Remote repo: build the hub URL, then download/resolve via the cache.
        resolved_file = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=None)
        try:
            resolved_file = cached_path(resolved_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token)
        except RepositoryNotFoundError as err:
            logger.error(err)
            raise EnvironmentError(f'''{path_or_repo} is not a local folder and is not a valid model identifier listed on '
If this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass `use_auth_token=True`.''')
        except RevisionNotFoundError as err:
            logger.error(err)
            raise EnvironmentError(f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this model name. Check the model page at ' for available revisions.")
        except EnvironmentError:
            # Missing file / connection problems are soft failures by contract.
            return None
    return resolved_file
def Solarize(img, v, max_v, bias=0):
    """RandAugment-style solarize: invert all pixels above a threshold.

    The magnitude *v* is scaled to an integer level via ``_int_parameter``
    (plus *bias*), and the PIL threshold is ``256 - level`` so larger
    magnitudes solarize more of the image.
    """
    level = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.solarize(img, 256 - level)
class SparseResNet_ImageNet(nn.Module):
    """ResNet variant for ImageNet whose activations pass through a
    configurable sparsification function (from ``models.sparse_func_dict``)
    instead of plain ReLU.

    NOTE(review): the stem uses a 3x3 stride-1 conv (not the usual 7x7/s2
    ImageNet stem) — confirm this is intentional for the target input size.
    """

    def __init__(self, block, num_blocks, sparsities, num_classes=1000, sparse_func='vol', bias=False):
        super(SparseResNet_ImageNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
        self.bn1 = nn.BatchNorm2d(64)
        # One stage per entry of num_blocks; each stage gets its own sparsity.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, sparsity=sparsities[0], sparse_func=sparse_func, bias=bias)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, sparsity=sparsities[1], sparse_func=sparse_func, bias=bias)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, sparsity=sparsities[2], sparse_func=sparse_func, bias=bias)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, sparsity=sparsities[3], sparse_func=sparse_func, bias=bias)
        self.linear = nn.Linear((512 * block.expansion), num_classes)
        # Stem sparsifier uses the first stage's sparsity.
        self.sp = models.sparse_func_dict[sparse_func](sparsities[0])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Storage for activations captured via register_layer hooks.
        self.activation = {}

    def get_activation(self, name):
        # Returns a forward hook that stashes the (detached, CPU) output under *name*.
        def hook(model, input, output):
            self.activation[name] = output.cpu().detach()
        return hook

    def register_layer(self, layer, name):
        """Attach a forward hook capturing *layer*'s output into self.activation[name]."""
        layer.register_forward_hook(self.get_activation(name))

    def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5, sparse_func='reg', bias=True):
        # First block in the stage downsamples (given stride), the rest are stride 1.
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride, sparsity, use_relu=False, sparse_func=sparse_func, bias=bias))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.sp(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def average_ari(masks, masks_gt, foreground_only=False):
    """Mean adjusted Rand index between predicted and ground-truth masks.

    Each of the B samples is flattened and scored with sklearn's
    ``adjusted_rand_score``; with *foreground_only*, pixels whose GT label
    is 0 (background) are excluded. Returns (mean as a torch scalar, list
    of per-sample scores).
    """
    assert (masks.shape[0] == masks_gt.shape[0]), f'The number of masks is not equal to the number of masks_gt'
    scores = []
    for idx in range(masks.shape[0]):
        pred = masks[idx].cpu().numpy().flatten()
        gt = masks_gt[idx].cpu().numpy().flatten()
        if foreground_only:
            keep = np.where(gt > 0)
            pred = pred[keep]
            gt = gt[keep]
        scores.append(adjusted_rand_score(pred, gt))
    return (torch.Tensor(scores).mean(), scores)
class SplitAttnConv2d(nn.Module):
    """Split-attention convolution (ResNeSt-style).

    Runs a grouped conv producing ``radix`` feature groups, then computes
    per-group attention weights from the globally pooled sum of groups and
    recombines the groups as an attention-weighted sum.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, radix=2, reduction_factor=4, act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs):
        super(SplitAttnConv2d, self).__init__()
        self.radix = radix
        self.drop_block = drop_block
        # The main conv emits radix copies of the output channels at once.
        mid_chs = (out_channels * radix)
        # Bottleneck width for the attention MLP, floored at 32 channels.
        attn_chs = max(((in_channels * radix) // reduction_factor), 32)
        self.conv = nn.Conv2d(in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, **kwargs)
        self.bn0 = (norm_layer(mid_chs) if (norm_layer is not None) else None)
        self.act0 = act_layer(inplace=True)
        # Attention MLP operates on the summed groups (out_channels wide).
        self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
        self.bn1 = (norm_layer(attn_chs) if (norm_layer is not None) else None)
        self.act1 = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
        self.rsoftmax = RadixSoftmax(radix, groups)

    def in_channels(self):
        # Accessor mirroring nn.Conv2d's attribute (method, not property, here).
        return self.conv.in_channels

    def out_channels(self):
        return self.fc1.out_channels

    def forward(self, x):
        x = self.conv(x)
        if (self.bn0 is not None):
            x = self.bn0(x)
        if (self.drop_block is not None):
            x = self.drop_block(x)
        x = self.act0(x)
        (B, RC, H, W) = x.shape
        if (self.radix > 1):
            # Split channels into radix groups and sum them for pooling.
            x = x.reshape((B, self.radix, (RC // self.radix), H, W))
            x_gap = x.sum(dim=1)
        else:
            x_gap = x
        # Global context -> attention logits per radix group.
        x_gap = F.adaptive_avg_pool2d(x_gap, 1)
        x_gap = self.fc1(x_gap)
        if (self.bn1 is not None):
            x_gap = self.bn1(x_gap)
        x_gap = self.act1(x_gap)
        x_attn = self.fc2(x_gap)
        x_attn = self.rsoftmax(x_attn).view(B, (- 1), 1, 1)
        if (self.radix > 1):
            # Attention-weighted sum over the radix groups.
            out = (x * x_attn.reshape((B, self.radix, (RC // self.radix), 1, 1))).sum(dim=1)
        else:
            out = (x * x_attn)
        return out.contiguous()
class Clip(torch.nn.Module):
    """Transform module that clamps tensor values into [min_val, max_val]."""

    def __init__(self, min_val=0.0, max_val=1.0):
        super().__init__()
        self.min_val = min_val
        self.max_val = max_val

    def forward(self, img):
        # torch.clip is an alias of torch.clamp.
        return torch.clip(img, self.min_val, self.max_val)

    def __repr__(self):
        return self.__class__.__name__ + '(min_val={0}, max_val={1})'.format(self.min_val, self.max_val)
class PreResNet20NoAug():
    """Experiment bundle: PreResNet depth-20 with NO train-time augmentation.

    Train and test pipelines are deliberately identical (resize + normalize
    with CIFAR-10 channel statistics); the class only groups the model
    constructor and transforms for the experiment runner.
    """
    base = PreResNet
    args = list()
    kwargs = {'depth': 20}
    # CIFAR-10 per-channel mean/std; same pipeline for train and test.
    transform_train = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
def test_potential_method_returnunit():
    """Check that every Potential method returns a Quantity convertible to
    the physically expected unit when the potential carries ro/vo.

    Refactor: the original repeated an identical try/except block ~30 times;
    the checks are now data-driven. Each entry is evaluated up front and
    converted in order, raising the same AssertionError message on the first
    unit mismatch.
    """
    from galpy.potential import PlummerPotential
    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0)
    # (method name for the error message, returned quantity, expected unit)
    checks = [
        ('__call__', pot(1.1, 0.1), (units.km ** 2) / (units.s ** 2)),
        ('Rforce', pot.Rforce(1.1, 0.1), units.km / (units.s ** 2)),
        ('rforce', pot.rforce(1.1, 0.1), units.km / (units.s ** 2)),
        ('zforce', pot.zforce(1.1, 0.1), units.km / (units.s ** 2)),
        ('phitorque', pot.phitorque(1.1, 0.1), (units.km ** 2) / (units.s ** 2)),
        ('dens', pot.dens(1.1, 0.1), units.kg / (units.m ** 3)),
        ('surfdens', pot.surfdens(1.1, 0.1), units.kg / (units.m ** 2)),
        ('mass', pot.mass(1.1, 0.1), units.kg),
        ('R2deriv', pot.R2deriv(1.1, 0.1), 1 / (units.s ** 2)),
        ('z2deriv', pot.z2deriv(1.1, 0.1), 1 / (units.s ** 2)),
        ('Rzderiv', pot.Rzderiv(1.1, 0.1), 1 / (units.s ** 2)),
        ('phi2deriv', pot.phi2deriv(1.1, 0.1), (units.km ** 2) / (units.s ** 2)),
        ('Rphideriv', pot.Rphideriv(1.1, 0.1), units.km / (units.s ** 2)),
        ('phizderiv', pot.phizderiv(1.1, 0.1), units.km / (units.s ** 2)),
        ('flattening', pot.flattening(1.1, 0.1), units.dimensionless_unscaled),
        ('vcirc', pot.vcirc(1.1), units.km / units.s),
        ('dvcircdR', pot.dvcircdR(1.1), 1.0 / units.s),
        ('omegac', pot.omegac(1.1), 1.0 / units.s),
        ('epifreq', pot.epifreq(1.1), 1.0 / units.s),
        ('verticalfreq', pot.verticalfreq(1.1), 1.0 / units.s),
        ('lindbladR', pot.lindbladR(0.9, m='corot'), units.km),
        ('vesc', pot.vesc(1.3), units.km / units.s),
        ('rl', pot.rl(1.3), units.km),
        ('rE', pot.rE((- 1.14)), units.km),
        ('LcE', pot.LcE((- 1.14)), (units.km / units.s) * units.kpc),
        ('vterm', pot.vterm(45.0), units.km / units.s),
        ('rtide', pot.rtide(1.0, 0.0, M=1.0), units.kpc),
        ('ttensor', pot.ttensor(1.0, 0.0), 1 / (units.s ** 2)),
        ('ttensor', pot.ttensor(1.0, 0.0, eigenval=True), 1 / (units.s ** 2)),
        ('zvc_range', pot.zvc_range((- 1.9), 0.2), units.kpc),
        ('zvc', pot.zvc(0.4, (- 1.9), 0.2), units.kpc),
        ('rhalf', pot.rhalf(), units.kpc),
        ('tdyn', pot.tdyn(1.4), units.Gyr),
    ]
    for name, quantity, unit in checks:
        try:
            quantity.to(unit)
        except units.UnitConversionError:
            raise AssertionError(f'Potential method {name} does not return Quantity with the right units')
    return None
class MyDataloader():
    """Minimal dataloader that yields one (image, label) pair at a time,
    each with a leading batch axis of size 1.

    Note: ``batch_size`` only affects the length reported by ``__len__``
    (number of batches); iteration always yields single samples.
    """

    def __init__(self, dataset, batch_size=1):
        self.dataset = dataset
        self.batch_size = batch_size
        self.length = math.ceil(len(dataset) / self.batch_size)

    def __iter__(self):
        for images, labels in self.dataset:
            yield np.expand_dims(images, axis=0), np.expand_dims(labels, axis=0)

    def __len__(self):
        return self.length
class GridWorldEnv(gym.Env):
    """Deterministic grid-world environment.

    Map legend: 'S' start, 'F' free, 'H' hole (terminal, reward 0),
    'G' goal (terminal, reward 1), 'W' wall (blocks movement). Raw map
    characters '.', 'o', 'x' are normalized to 'F', 'H', 'W'. States are
    flat integers ``row * n_col + col``; actions are 0=left, 1=down,
    2=right, 3=up.
    """

    def __init__(self, desc='4x4'):
        if isinstance(desc, str):
            desc = MAPS[desc]
        desc = np.array(list(map(list, desc)))
        # Normalize alternative map characters to the canonical legend.
        desc[(desc == '.')] = 'F'
        desc[(desc == 'o')] = 'H'
        desc[(desc == 'x')] = 'W'
        self.desc = desc
        (self.n_row, self.n_col) = desc.shape
        ((start_x,), (start_y,)) = np.nonzero((desc == 'S'))
        self.start_state = ((start_x * self.n_col) + start_y)
        self.state = None
        self.domain_fig = None

    def reset(self):
        """Reset to the start cell; return the integer state."""
        self.state = self.start_state
        return self.state

    @staticmethod
    def action_from_direction(d):
        """Map a direction name ('left'/'down'/'right'/'up') to its action index.

        Fix: the original defined this as an instance method without `self`,
        so calling it on an instance passed the instance itself as `d`.
        """
        return dict(left=0, down=1, right=2, up=3)[d]

    def step(self, action):
        """Take one step; returns a Step(observation, reward, done)."""
        possible_next_states = self.get_possible_next_states(self.state, action)
        probs = [x[1] for x in possible_next_states]
        next_state_idx = np.random.choice(len(probs), p=probs)
        next_state = possible_next_states[next_state_idx][0]
        next_x = (next_state // self.n_col)
        next_y = (next_state % self.n_col)
        next_state_type = self.desc[(next_x, next_y)]
        if (next_state_type == 'H'):
            done = True
            reward = 0
        elif (next_state_type in ['F', 'S']):
            done = False
            reward = 0
        elif (next_state_type == 'G'):
            done = True
            reward = 1
        else:
            raise NotImplementedError
        self.state = next_state
        return Step(observation=self.state, reward=reward, done=done)

    def get_possible_next_states(self, state, action):
        """Return [(next_state, probability)] — deterministic here.

        Walls block movement; moving out of a terminal cell ('H'/'G') is a
        no-op; coordinates are clipped to the grid.
        """
        x = (state // self.n_col)
        y = (state % self.n_col)
        coords = np.array([x, y])
        # Row/col deltas for actions left, down, right, up.
        increments = np.array([[0, (- 1)], [1, 0], [0, 1], [(- 1), 0]])
        next_coords = np.clip((coords + increments[action]), [0, 0], [(self.n_row - 1), (self.n_col - 1)])
        next_state = ((next_coords[0] * self.n_col) + next_coords[1])
        state_type = self.desc[(x, y)]
        next_state_type = self.desc[(next_coords[0], next_coords[1])]
        if ((next_state_type == 'W') or (state_type == 'H') or (state_type == 'G')):
            return [(state, 1.0)]
        else:
            return [(next_state, 1.0)]

    def action_space(self):
        return gym.spaces.Discrete(4)

    def observation_space(self):
        return gym.spaces.Discrete((self.n_row * self.n_col))

    def render(self, mode='human'):
        pass

    def log_diagnostics(self, paths):
        pass
def calc_overlap2(set_pred, set_gt):
    """Return (|pred ∩ gt| / |gt|, |pred ∩ gt| / |pred|), or (0, 0) when
    either set is empty or the inputs are not sets.

    Fix: the original bare ``except`` swallowed *every* exception (including
    KeyboardInterrupt); it is narrowed to the two failures this code can
    actually produce — ZeroDivisionError (empty set) and TypeError
    (non-set input such as None).
    """
    try:
        inter = len(set_gt & set_pred)
        return (inter / len(set_gt), inter / len(set_pred))
    except (ZeroDivisionError, TypeError):
        return (0, 0)
class QConv2dSamePadding(nn.Conv2d):
    """Quantized Conv2d with TensorFlow-style 'SAME' padding.

    Activations, weights, and bias are quantized (skipped in measurement
    mode) before a standard conv2d; padding is computed per forward pass so
    the output spatial size is ceil(input / stride).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False, measure=False):
        super(QConv2dSamePadding, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        # Depthwise convolutions (in_channels == groups) are pinned to 8 bits.
        if (in_channels == groups):
            num_bits = 8
            num_bits_weight = 8
            per_ch_input = False
        else:
            per_ch_input = False
        # NOTE(review): both branches set per_ch_input = False, so per-channel
        # input quantization is effectively disabled — confirm this is intended.
        self.num_bits = num_bits
        self.num_bits_weight = (num_bits_weight or num_bits)
        self.num_bits_grad = num_bits_grad
        self.measure = measure
        num_measure = (in_channels if per_ch_input else 1)
        self.quantize_input = QuantMeasure(self.num_bits, shape_measure=(num_measure, 1, 1, 1), flatten_dims=(1, (- 1)), measure=measure, per_ch_input=per_ch_input)
        self.biprecision = biprecision
        # Normalize stride to a 2-element sequence.
        self.stride = (self.stride if (len(self.stride) == 2) else ([self.stride[0]] * 2))

    def forward(self, input):
        # ---- SAME padding: pad so output size is ceil(in / stride). ----
        (ih, iw) = input.size()[(- 2):]
        (kh, kw) = self.weight.size()[(- 2):]
        (sh, sw) = self.stride
        (oh, ow) = (math.ceil((ih / sh)), math.ceil((iw / sw)))
        pad_h = max((((((oh - 1) * self.stride[0]) + ((kh - 1) * self.dilation[0])) + 1) - ih), 0)
        pad_w = max((((((ow - 1) * self.stride[1]) + ((kw - 1) * self.dilation[1])) + 1) - iw), 0)
        if ((pad_h > 0) or (pad_w > 0)):
            # Asymmetric split: extra pixel (odd padding) goes to bottom/right.
            input = F.pad(input, [(pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))])
        # ---- Quantize input, weight, and bias (pass through in measure mode). ----
        qinput = self.quantize_input(input)
        weight_qparams = calculate_qparams(self.weight, num_bits=self.num_bits_weight, flatten_dims=(1, (- 1)), reduce_dim=None)
        qweight = (quantize(self.weight, qparams=weight_qparams) if (not self.measure) else self.weight)
        if (self.bias is not None):
            # Bias uses the combined precision of weights and activations.
            qbias = (self.bias if self.measure else quantize(self.bias, num_bits=(self.num_bits_weight + self.num_bits), flatten_dims=(0, (- 1))))
        else:
            qbias = None
        # ---- Convolution, optionally with quantized gradients. ----
        if ((not self.biprecision) or (self.num_bits_grad is None)):
            output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups)
            if (self.num_bits_grad is not None):
                output = quantize_grad(output, num_bits=self.num_bits_grad, flatten_dims=(1, (- 1)))
        else:
            output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad)
        return output
def collate_fn(examples):
    """Collate a list of feature dicts into a batch for image classification.

    Stacks per-example 'pixel_values' tensors along a new batch axis and
    gathers the integer 'labels' into a single tensor.
    """
    batch_pixels = torch.stack([ex['pixel_values'] for ex in examples])
    batch_labels = torch.tensor([ex['labels'] for ex in examples])
    return {'pixel_values': batch_pixels, 'labels': batch_labels}
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """Slow integration tests for the pretrained AudioLDM pipeline.

    Downloads 'cvssp/audioldm' and compares generated audio samples against
    recorded golden values; requires network access and CUDA.
    """

    def tearDown(self):
        super().tearDown()
        # Free GPU memory between tests to avoid OOM across the suite.
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        # Fixed-seed generator and latents so outputs are reproducible.
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5}
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert (audio.ndim == 1)
        assert (len(audio) == 81920)
        # Compare a 10-sample window against the recorded reference values.
        audio_slice = audio[77230:77240]
        expected_slice = np.array([(- 0.4884), (- 0.4607), 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, (- 0.0208), (- 0.3687), (- 0.4315)])
        max_diff = np.abs((expected_slice - audio_slice)).max()
        assert (max_diff < 0.01)
def get_latest_parameter_file(folder):
    """Return the path of the most recent ``parameters_*.yaml`` file in *folder*.

    The timestamp is parsed from the filename
    (``parameters_%Y_%m_%d_%H_%M_%S.yaml``). Raises AssertionError when the
    folder contains no matching files.

    Refactor: the sort-descending-then-take-first of the original is replaced
    with a single O(n) ``max(..., key=parse_time)`` — same result.
    """
    import os.path as op
    yaml_pattern = op.join(folder, 'parameters_*.yaml')
    yaml_files = glob.glob(yaml_pattern)
    assert (len(yaml_files) > 0), folder

    def parse_time(f):
        # Extract the timestamp portion of the filename and parse it.
        m = re.search(r'.*parameters_(.*)\.yaml', f)
        return datetime.strptime(m.group(1), '%Y_%m_%d_%H_%M_%S')

    return max(yaml_files, key=parse_time)
# Sacred config scope: registers these names as the ingredient's default
# hyper-parameters. Fix: the source had the decorator garbled as a bare
# `_ingredient.config` expression on its own line, which registered nothing;
# restored the `@` so the function is actually captured.
@_ingredient.config
def config():
    optimizer_name = 'adam'
    loss_str = 'ce'
    lr = None
    max_epochs = 1000
    metrics = ['loss']
    val_metric_to_monitor = 'loss'
    epoch_per_metric = 1
    print_freq = 5
    plateau_patience = 15
    plateau_terminate = 60
    gpu_if_available = True
    gpu_idx = (- 1)
def main(args):
    """End-to-end RL training loop: build env/buffer/agent, then alternate
    environment interaction with agent updates, periodically evaluating,
    logging (optionally to wandb), and checkpointing.

    NOTE(review): this block was recovered from whitespace-collapsed source;
    the statement nesting below is a reconstruction — verify against the
    original repository before relying on exact control flow.
    """
    # Offset the seed so worker seeding differs from env seeding.
    utils.set_seed_everywhere((args.seed + 42))
    if args.use_wandb:
        wandb.init(project=args.wandb_project, name=str(args.seed), entity=args.wandb_entity, group=args.wandb_group, job_type=args.wandb_job)
        wandb.config.update(args)
    gym.logger.set_level(40)
    env = make_env(domain_name=args.domain_name, task_name=args.task_name, seed=args.seed, episode_length=args.episode_length, n_substeps=args.n_substeps, frame_stack=args.frame_stack, image_size=args.image_size, cameras='dynamic', render=args.render, observation_type=args.observation_type, action_space=args.action_space, camera_move_range=args.camera_move_range, action_repeat=args.action_repeat)
    env.seed(args.seed)
    env.observation_space.seed(args.seed)
    env.action_space.seed(args.seed)
    # Working directory layout: log_dir/<domain>_<task>/<algo>/<suffix>/<seed>
    work_dir = os.path.join(args.log_dir, ((args.domain_name + '_') + args.task_name), args.algorithm, args.exp_suffix, str(args.seed))
    print('Working directory:', work_dir)
    utils.make_dir(work_dir)
    model_dir = utils.make_dir(os.path.join(work_dir, 'model'))
    video_dir = utils.make_dir(os.path.join(work_dir, 'video'))
    video = VideoRecorder(video_dir, height=128, width=128, fps=(15 if (args.domain_name == 'robot') else 25))
    utils.write_info(args, os.path.join(work_dir, 'info.log'))
    assert torch.cuda.is_available(), 'must have cuda enabled'
    from algorithms.per import EfficientPrioritizedReplayBuffer
    replay_buffer = EfficientPrioritizedReplayBuffer(obs_shape=env.observation_space.shape, state_shape=env.state_space_shape, action_shape=env.action_space.shape, capacity=args.buffer_capacity, batch_size=args.batch_size, prioritized_replay=args.use_prioritized_buffer, alpha=args.prioritized_replay_alpha, beta=args.prioritized_replay_beta, ensemble_size=args.ensemble_size, episode_length=args.episode_length, observation_type=args.observation_type, use_single_image=(False if ('3d' in args.algorithm) else True))
    print('Observations:', env.observation_space.shape)
    print('Action space:', f'{args.action_space} ({env.action_space.shape[0]})')
    agent = make_agent(obs_shape=env.observation_space.shape, state_shape=env.state_space_shape, action_shape=env.action_space.shape, args=args)
    (start_step, episode, episode_reward, info, done, episode_success) = (0, 0, 0, {}, True, 0)
    L = Logger(work_dir)
    start_time = time.time()
    training_time = start_time
    video_tensor = list()
    for step in range(start_step, (args.train_steps + 1)):
        if done:
            # ---- episode boundary: logging, eval, checkpointing, reset ----
            if (step > start_step):
                if args.use_wandb:
                    wandb.log({'train/duration': (time.time() - start_time)}, step=(step + 1))
                start_time = time.time()
                if (((step % args.log_train_video) == 0) and (args.observation_type != 'state')):
                    if args.use_wandb:
                        wandb.log({'train/train_video': wandb.Video(np.array(video_tensor), fps=14, format='mp4')}, step=(step + 1))
                L.dump(step)
            if ((step % args.eval_freq) == 0):
                print('Evaluating:', work_dir)
                evaluate(env, agent, video, args.eval_episodes, L, step, args=args)
                L.dump(step)
                if args.train_3d:
                    # Render a 3D view-interpolation diagnostic from two camera views.
                    (obs, state, info) = env.reset()
                    a_eval = env.action_space.sample()
                    env.change_traj_idx((env.traj_len - 1))
                    (obs, state, _, _, info) = env.step(a_eval)
                    o1 = obs[:3]
                    o2 = obs[3:]
                    images_rgb = np.concatenate([np.expand_dims(o1, axis=0), np.expand_dims(o2, axis=0)], axis=0)
                    images_rgb = torch.from_numpy(images_rgb).float().cuda().unsqueeze(0).div(255)
                    agent.gen_interpolate(images_rgb, step)
            if (args.save_model and (((step % 500000) == 0) or (step == args.train_steps))):
                torch.save(agent, os.path.join(model_dir, f'{step}.pt'))
                if args.use_wandb:
                    wandb.save(os.path.join(model_dir, f'{step}.pt'))
            L.log('train/episode_reward', episode_reward, step)
            L.log('train/success_rate', (episode_success / args.episode_length), step)
            if args.use_wandb:
                wandb.log({'train/episode_reward': episode_reward, 'train/success_rate': (episode_success / args.episode_length)}, step=(step + 1))
            (obs, state, info) = env.reset()
            done = False
            video_tensor = list()
            video_tensor.append(obs[:3])
            episode_reward = 0
            episode_step = 0
            episode += 1
            episode_success = 0
            L.log('train/episode', episode, step)
        # ---- action selection: random warmup, then policy sampling ----
        if (step < args.init_steps):
            action = env.action_space.sample()
        else:
            with torch.no_grad(), utils.eval_mode(agent):
                action = agent.sample_action(obs, state)
        # Catch-up burst of updates at the end of warmup, then one per step.
        num_updates = ((args.init_steps // args.update_freq) if (step == args.init_steps) else 1)
        for i in range(num_updates):
            agent.update(replay_buffer, L, step)
        (next_obs, next_state, reward, done, info) = env.step(action)
        replay_buffer.add(obs, state, action, reward, next_obs, next_state)
        episode_reward += reward
        obs = next_obs
        state = next_state
        video_tensor.append(obs[:3])
        episode_success += float(info['is_success'])
        episode_step += 1
    print('Completed training for', work_dir)
    print('Total Training Time: ', round(((time.time() - training_time) / 3600), 2), 'hrs')
# NOTE(review): the source carried a garbled bare `_vision` token where this
# decorator belongs; restored as `@require_vision` (the standard transformers
# test gate for vision deps) — confirm against the upstream file.
@require_vision
class BlipProcessorTest(unittest.TestCase):
    """Unit tests for BlipProcessor (image processor + tokenizer composition)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random channels-first uint8 image, converted to a PIL image.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        # The processor must delegate image handling to its image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        # The processor must delegate text handling to its tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
        # Calling with neither text nor images must raise.
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
def main_worker(rank, world_size, model, teacher_model, dataset):
    """Distributed worker: initialize the gloo process group, then run
    auto-distillation NAS over the given search space.

    Fix: the bare ``except`` around the first ``distributed_init`` is
    narrowed to ``Exception`` so KeyboardInterrupt/SystemExit are no longer
    swallowed; the fallback retries on an alternate port (the first port may
    be occupied by a previous run).
    """
    try:
        distributed_init('gloo', world_size=world_size, rank=rank, init_method='tcp://127.0.0.1:23456')
    except Exception:
        distributed_init('gloo', world_size=world_size, rank=rank, init_method='tcp://127.0.0.1:12345')
    training_args = TrainingArguments(output_dir='tmp_trainer', overwrite_output_dir=True, no_cuda=True, local_rank=rank)
    trainer = NLPTrainer(model=model, train_dataset=dataset, eval_dataset=dataset, args=training_args)
    # Grid-search over hidden/intermediate sizes, with a knowledge-transfer
    # warmup stage followed by regular distillation.
    autodistillation_config = AutoDistillationConfig(search_space={'hidden_size': [128, 256], 'intermediate_size': [256, 512]}, search_algorithm='Grid', metrics=[metrics.Metric(name='eval_loss', greater_is_better=False)], knowledge_transfer=FlashDistillationConfig(block_names=['bert.encoder.layer.0'], layer_mappings_for_knowledge_transfer=[[[('bert.encoder.layer.0.output',)]]], train_steps=[3], loss_types=[['MSE']]), regular_distillation=FlashDistillationConfig(layer_mappings_for_knowledge_transfer=[[[('cls', '0')]]], loss_types=[['KL']], add_origin_loss=[True], train_steps=[5]))
    best_model_archs = trainer.autodistillation(autodistillation_config, teacher_model, model_cls=AutoModelForPreTraining)
    assert (len(best_model_archs) > 0), 'Expected at least one best model archs.'
class ListSchema(Schema):
    """Schema describing a flattened list: one sub-schema and flat size per
    original element.

    Fix: ``flatten`` named its first parameter ``cls`` and calls ``cls(...)``
    but lacked the ``@classmethod`` decorator, so ``SomeList.flatten(obj)``
    bound the *instance/first arg* incorrectly; restored the decorator.

    NOTE(review): ``_split``/``_concat`` and the two-positional constructor
    are assumed to come from the (dataclass-style) ``Schema`` base — confirm.
    """
    schemas: List[Schema]
    sizes: List[int]

    def __call__(self, values):
        """Rebuild the list of objects from the flat value tuple."""
        values = self._split(values, self.sizes)
        if (len(values) != len(self.schemas)):
            raise ValueError(f'Values has length {len(values)} but schemas has length {len(self.schemas)}!')
        values = [m(v) for (m, v) in zip(self.schemas, values)]
        return list(values)

    @classmethod
    def flatten(cls, obj):
        """Flatten each element of *obj*; return (flat values, ListSchema)."""
        res = [flatten_to_tuple(k) for k in obj]
        (values, sizes) = cls._concat([k[0] for k in res])
        return (values, cls([k[1] for k in res], sizes))
def get_equivalent_kernel_bias(rbr_dense, rbr_1x1, rbr_identity, in_channels, groups, padding_11):
    """Fuse the three RepVGG-style parallel branches (3x3, 1x1, identity) into a
    single equivalent 3x3 kernel and bias.

    Each branch is first BN-fused; the 1x1 kernel is zero-padded to 3x3 before
    the three kernels (and biases) are summed.
    """
    dense_k, dense_b = _fuse_bn_tensor(rbr_dense, in_channels, groups)
    one_k, one_b = _fuse_bn_tensor(rbr_1x1, in_channels, groups)
    id_k, id_b = _fuse_bn_tensor(rbr_identity, in_channels, groups)
    fused_kernel = dense_k + _pad_1x1_to_3x3_tensor(one_k, padding_11) + id_k
    fused_bias = dense_b + one_b + id_b
    return fused_kernel, fused_bias
class BondFeaturizer():
    """One-hot featurizer for RDKit bonds (single/double/triple/aromatic)."""

    def __init__(self):
        # Fixed one-hot slot for each supported bond type.
        self.bond_type_to_oh_loc = {
            Chem.BondType.SINGLE: 0,
            Chem.BondType.DOUBLE: 1,
            Chem.BondType.TRIPLE: 2,
            Chem.BondType.AROMATIC: 3,
        }

    def bond_to_feat(self, bnd: Chem.Bond):
        """Return (endpoint atom indices, one-hot bond-type feature) for a bond."""
        endpoints = torch.tensor([bnd.GetBeginAtomIdx(), bnd.GetEndAtomIdx()])
        one_hot = torch.zeros(len(self.bond_type_to_oh_loc), dtype=torch.float32)
        one_hot[self.bond_type_to_oh_loc[bnd.GetBondType()]] = 1.0
        return endpoints, one_hot
class SpleenDataset(DatasetBase):
    """Spleen segmentation dataset: downloads/extracts the archive if needed and
    exposes image ('img') and ground-truth ('gt') sub-folders grouped per patient."""

    # NOTE(review): the original download URL was stripped from this source,
    # leaving a broken string literal; restore the real URL before relying on
    # automatic download.
    download_link = ''
    zip_name = 'Spleen.zip'
    folder_name = 'Spleen'

    def __init__(self, *, root_dir: str, mode: str, transforms: SequentialWrapper = None) -> None:
        """Build the dataset rooted at `<root_dir>/Spleen`, downloading it first if absent.

        Args:
            root_dir: directory under which the dataset folder lives (or is created).
            mode: dataset split/mode forwarded to DatasetBase.
            transforms: optional transform pipeline applied to samples.
        """
        sub_folders = ['img', 'gt']
        sub_folder_types = ['image', 'gt']
        # Samples are grouped per patient via this regex on file names.
        group_re = 'Patient_\\d+'
        path = Path(root_dir, self.folder_name)
        downloading(path, self.folder_name, self.download_link, root_dir, self.zip_name)
        super().__init__(root_dir=str(path), mode=mode, sub_folders=sub_folders,
                         sub_folder_types=sub_folder_types, transforms=transforms,
                         group_re=group_re)
def _import_handler(config): print('[Warning] Currently we do not support recursive `_import`. If the base file you are importing from also has `_import`, it will not be correctly imported. If not, you can safely ignore this warning.') imported_configs = config.pop('_import', []) new_config = config.copy() config = {} for imported_config in imported_configs: assert set(imported_config.keys()).isdisjoint(set(config.keys())), f'Conflict between imported config fields: {set(imported_config.keys()).intersection(set(config.keys()))}' config.update(imported_config) config.update(new_config) config['_import'] = imported_configs config['_new'] = new_config return config
def UnLearningScore(tmodel, gold_model, forget_dl, batch_size, device):
    """Score how closely `tmodel`'s predictive distribution on the forget set
    matches `gold_model`'s, as 1 - JS divergence (1 = identical)."""
    target_probs = []
    gold_probs = []
    with torch.no_grad():
        for x, y, cy in forget_dl:
            inputs = x.to(device)
            target_probs.append(F.softmax(tmodel(inputs), dim=1).detach().cpu())
            gold_probs.append(F.softmax(gold_model(inputs), dim=1).detach().cpu())
    target_probs = torch.cat(target_probs, axis=0)
    gold_probs = torch.cat(gold_probs, axis=0)
    return 1 - JSDiv(target_probs, gold_probs)
def test_bottleneck():
    """Smoke-test Bottleneck output shapes for stride 1, stride 2, and the
    grouped (32x4d) variant."""
    data = torch.randn(1, 256, 56, 56)
    in_planes = 256
    out_planes = 128
    expansion = Bottleneck.expansion

    def make_downsample(stride):
        # Projection shortcut matching the main path's stride and channel count.
        return nn.Sequential(
            nn.Conv2d(in_planes, (out_planes * expansion), kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d((out_planes * expansion)))

    # stride 1: spatial size preserved
    model = Bottleneck(in_planes, out_planes, 1, make_downsample(1))
    print(model)
    outputs = model(data)
    print(outputs.shape)
    assert (outputs.shape == (1, 512, 56, 56))

    # stride 2: spatial size halved
    model = Bottleneck(in_planes, out_planes, 2, make_downsample(2))
    print(model)
    outputs = model(data)
    print(outputs.shape)
    assert (outputs.shape == (1, 512, 28, 28))

    # stride 2, grouped convolution variant (groups=32, width=4)
    model = Bottleneck(in_planes, out_planes, 2, make_downsample(2), 32, 4)
    print(model)
    outputs = model(data)
    print(outputs.shape)
    assert (outputs.shape == (1, 512, 28, 28))
class DistEvalHook(EvalHook):
    """Distributed evaluation hook: runs multi-GPU inference on `dataloader`
    every `interval` iterations or epochs (controlled by `by_epoch`) and
    evaluates the gathered results on rank 0."""

    def __init__(self, dataloader, interval=1, gpu_collect=False, by_epoch=False, **eval_kwargs):
        """
        Args:
            dataloader: pytorch DataLoader providing the evaluation set.
            interval: evaluation period, in iterations or epochs.
            gpu_collect: gather results via GPU communication instead of tmpdir files.
            by_epoch: trigger on epoch boundaries instead of iterations.
            **eval_kwargs: forwarded to the dataset's evaluate().
        """
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
        self.dataloader = dataloader
        self.interval = interval
        self.gpu_collect = gpu_collect
        self.by_epoch = by_epoch
        self.eval_kwargs = eval_kwargs

    def _do_evaluate(self, runner):
        # Shared body of the iteration/epoch triggers (was duplicated verbatim
        # in both): multi-GPU inference, then metric evaluation on rank 0 only.
        # Import is kept local to avoid a circular import with mmseg.apis.
        from mmseg.apis import multi_gpu_test
        runner.log_buffer.clear()
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)

    def after_train_iter(self, runner):
        """Evaluate every `interval` iterations when in iteration mode."""
        if self.by_epoch or not self.every_n_iters(runner, self.interval):
            return
        self._do_evaluate(runner)

    def after_train_epoch(self, runner):
        """Evaluate every `interval` epochs when in epoch mode."""
        if (not self.by_epoch) or not self.every_n_epochs(runner, self.interval):
            return
        self._do_evaluate(runner)
def scan_net_ICO_preprocess_create(loc):
    """Build a preprocessing function that uses a pretrained ScanNetICO model
    (weights loaded from `loc`) to filter each article down to the sentences
    the model scores as relevant to the (I, C, O) prompt.

    Returns:
        A callable (train_Xy, val_Xy, test_Xy, inference_vectorizer) ->
        (train_Xy, val_Xy, test_Xy) that rewrites each prompt's 'article'
        field in place.
    """

    def load_model_scan_ICO(inference_vectorizer):
        # Instantiate the scan model and overlay the (partial) checkpoint at `loc`.
        # NOTE(review): moves the model to CUDA unconditionally — requires a GPU.
        sn = ScanNetICO(inference_vectorizer, use_attention=False)
        state = sn.state_dict()
        partial = torch.load(loc)
        state.update(partial)
        sn.load_state_dict(state)
        sn = sn.cuda()
        sn.eval()
        return sn

    def get_preds_ICO(model, span, I, C, O, inference_vectorizer):
        # Score one sentence span against the (I, C, O) triple; 0 for empty spans.
        if (len(span) == 0):
            return 0
        unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])
        sentences = [torch.LongTensor(span)]
        I = [torch.LongTensor(I)]
        C = [torch.LongTensor(C)]
        O = [torch.LongTensor(O)]
        (sens, I, C, O) = [PaddedSequence.autopad(to_enc, batch_first=True, padding_value=unk_idx) for to_enc in [sentences, I, C, O]]
        (sens, I, C, O) = (sens.cuda(), I.cuda(), C.cuda(), O.cuda())
        preds = model(sens, I, C, O)
        # Single-element batch: unwrap to a scalar score.
        pred = preds[0].data.tolist()[0]
        return pred

    def reformat_ICO(Xy, model, inference_vectorizer, sent_out_prefix=''):
        # Replace each prompt's 'article' with the concatenation of sentences
        # scored above 0.5; fall back to the single best-scoring sentence.
        # NOTE(review): `all_str` is accumulated but never written out —
        # `sent_out_prefix` appears unused; possibly leftover debugging output.
        all_str = []
        for prompt in Xy:
            sen = prompt['sentence_span']
            I = prompt['I']
            C = prompt['C']
            O = prompt['O']
            new_article = []
            best_pred = 0
            back_up_sentence = []
            for s in sen:
                pred = get_preds_ICO(model, s[0], I, C, O, inference_vectorizer)
                if (best_pred < pred):
                    best_pred = pred
                    back_up_sentence = s[0]
                if (pred > 0.5):
                    str_v = ' '.join([inference_vectorizer.idx_to_str[word] for word in s[0]])
                    all_str.append([prompt['p_id'], pred, str_v])
                    new_article = np.append(new_article, s[0])
            if (len(new_article) == 0):
                new_article = back_up_sentence
            prompt['article'] = new_article

    def scan_net_preprocess_ICO(train_Xy, val_Xy, test_Xy, inference_vectorizer):
        # Load the model once, then rewrite all three splits in place.
        sn = load_model_scan_ICO(inference_vectorizer)
        reformat_ICO(train_Xy, sn, inference_vectorizer, sent_out_prefix='train')
        reformat_ICO(val_Xy, sn, inference_vectorizer, sent_out_prefix='val')
        reformat_ICO(test_Xy, sn, inference_vectorizer, sent_out_prefix='test')
        return (train_Xy, val_Xy, test_Xy)

    return scan_net_preprocess_ICO
def get_module_dependencies(module_fname):
    """Return the repo-relative files that `module_fname` imports.

    Scans the file's source for relative imports (``from .x import ...``) and
    absolute transformers imports (``from transformers.x import ...``), then
    resolves each to either a ``.py`` file or a package ``__init__.py``.
    Imports annotated with ``# tests_ignore`` are skipped.
    """
    with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), 'r', encoding='utf-8') as f:
        content = f.read()
    module_parts = module_fname.split(os.path.sep)
    imported_modules = []

    # Relative imports: each leading dot climbs one package level.
    for mod, imp in re.findall('from\\s+(\\.+\\S+)\\s+import\\s+([^\\n]+)\\n', content):
        if '# tests_ignore' in imp:
            continue
        depth = 0
        target = mod
        while target.startswith('.'):
            target = target[1:]
            depth += 1
        base = module_parts[:len(module_parts) - depth]
        if target:
            resolved = os.path.sep.join(base + target.split('.'))
        else:
            resolved = os.path.sep.join(base + ['__init__.py'])
        # The top-level transformers __init__ would make everything depend on everything.
        if not resolved.endswith('transformers/__init__.py'):
            imported_modules.append(resolved)

    # Absolute imports from the transformers package map under src/transformers.
    for mod, imp in re.findall('from\\s+transformers\\.(\\S+)\\s+import\\s+([^\\n]+)\\n', content):
        if '# tests_ignore' in imp:
            continue
        imported_modules.append(os.path.sep.join(['src', 'transformers'] + mod.split('.')))

    # Keep only imports that resolve to a real module file or package.
    dependencies = []
    for candidate in imported_modules:
        if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f'{candidate}.py')):
            dependencies.append(f'{candidate}.py')
        elif (os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, candidate))
              and os.path.isfile(os.path.sep.join([PATH_TO_TRANFORMERS, candidate, '__init__.py']))):
            dependencies.append(os.path.sep.join([candidate, '__init__.py']))
    return dependencies
class QCircuitImage():
    """Builds a LaTeX ``qcircuit`` drawing of a quantum circuit.

    The class turns a layered list of operations into a 2-D grid of LaTeX
    cells (``self._latex``, one row per wire, one column per layer) and then
    serializes that grid into a standalone LaTeX document.
    """

    def __init__(self, qregs, cregs, ops, scale, style=None, plot_barriers=True, reverse_bits=False):
        """
        Args:
            qregs: list of (register, index) qubit pairs, in wire order.
            cregs: list of (register, index) clbit pairs, in wire order.
            ops: circuit operations, grouped into layers (columns).
            scale: output scaling factor for the beamer page.
            style: dict or path to a JSON file overriding the default style.
            plot_barriers: whether barrier ops are drawn.
            reverse_bits: whether bit order is reversed (stored; used by callers).
        """
        if (not HAS_PYLATEX):
            raise ImportError('The latex and latex_source drawers need pylatexenc installed. Run "pip install pylatexenc" before using the latex or latex_source drawers.')
        self._style = _qcstyle.BWStyle()
        if style:
            if isinstance(style, dict):
                self._style.set_style(style)
            elif isinstance(style, str):
                # A string style is a path to a JSON style file.
                with open(style, 'r') as infile:
                    dic = json.load(infile)
                self._style.set_style(dic)
        self.ops = ops
        self.scale = scale
        self.qregs = {}
        self.cregs = {}
        self.ordered_regs = []
        self.img_regs = {}
        self._latex = []          # 2-D grid of LaTeX cell strings, filled later
        self.img_depth = 0        # number of columns (circuit depth)
        self.img_width = 0        # number of rows (wires)
        self.sum_column_widths = 0
        self.sum_row_heights = 0
        self.column_separation = 0.5
        self.row_separation = 0.0
        self.has_box = False      # any boxed single-qubit gate present
        self.has_target = False   # any target-style (cx/ccx) gate present
        self.reverse_bits = reverse_bits
        self.plot_barriers = plot_barriers
        self.qregs = collections.OrderedDict(_get_register_specs(qregs))
        self.qubit_list = qregs
        # Qubit wires first, then classical wires.
        self.ordered_regs = (qregs + cregs)
        self.cregs = collections.OrderedDict(_get_register_specs(cregs))
        self.clbit_list = cregs
        # Map each (register, index) pair to its row in the image.
        self.img_regs = {bit: ind for (ind, bit) in enumerate(self.ordered_regs)}
        self.img_width = len(self.img_regs)
        self.wire_type = {}
        # True -> classical wire (\cw), False -> quantum wire (\qw).
        for (key, value) in self.ordered_regs:
            self.wire_type[(key, value)] = (key in self.cregs.keys())

    def latex(self, aliases=None):
        """Render the circuit and return the complete LaTeX document source."""
        self._initialize_latex_array(aliases)
        self._build_latex_array(aliases)
        header_1 = '% \\documentclass[preview]{standalone}\n% If the image is too large to fit on this documentclass use\n\\documentclass[draft]{beamer}\n'
        beamer_line = '\\usepackage[size=custom,height=%d,width=%d,scale=%.1f]{beamerposter}\n'
        header_2 = '% instead and customize the height and width (in cm) to fit.\n% Large images may run out of memory quickly.\n% To fix this use the LuaLaTeX compiler, which dynamically\n% allocates memory.\n\\usepackage[braket, qm]{qcircuit}\n\\usepackage{amsmath}\n\\pdfmapfile{+sansmathaccent.map}\n% \\usepackage[landscape]{geometry}\n% Comment out the above line if using the beamer documentclass.\n\\begin{document}\n\\begin{equation*}'
        # NOTE(review): this format string looks garbled by extraction — the
        # qcircuit spacing markers (normally '@C=', '@R=', '@!R') appear to
        # have lost their '@' characters. Verify against the rendered output.
        qcircuit_line = '\n \\Qcircuit =%.1fem =%.1fem !R {\n'
        output = io.StringIO()
        output.write(header_1)
        output.write(('%% img_width = %d, img_depth = %d\n' % (self.img_width, self.img_depth)))
        output.write((beamer_line % self._get_beamer_page()))
        output.write(header_2)
        output.write((qcircuit_line % (self.column_separation, self.row_separation)))
        for i in range(self.img_width):
            output.write('\t \t')
            for j in range((self.img_depth + 1)):
                cell_str = self._latex[i][j]
                if ('barrier' in cell_str):
                    # Barrier cells carry offsets that must not be truncated.
                    output.write(cell_str)
                else:
                    # Truncate long floats/ints inside gate labels for readability.
                    cell_str = re.sub('[-+]?\\d*\\.\\d{2,}|\\d{2,}', _truncate_float, cell_str)
                    output.write(cell_str)
                if (j != self.img_depth):
                    output.write(' & ')
                else:
                    output.write(('\\\\' + '\n'))
        output.write('\t }\n')
        output.write('\\end{equation*}\n\n')
        output.write('\\end{document}')
        contents = output.getvalue()
        output.close()
        return contents

    def _initialize_latex_array(self, aliases=None):
        """Size the cell grid and prefill it with bare wires and \\lstick labels."""
        del aliases
        (self.img_depth, self.sum_column_widths) = self._get_image_depth()
        self.sum_row_heights = self.img_width
        # Row spacing depends on the tallest element present.
        if self.has_box:
            self.row_separation = 0.0
        elif self.has_target:
            self.row_separation = 0.2
        else:
            self.row_separation = 1.0
        # One row per wire; classical wires get \cw, quantum wires \qw.
        self._latex = [[('\\cw' if self.wire_type[self.ordered_regs[j]] else '\\qw') for _ in range((self.img_depth + 1))] for j in range(self.img_width)]
        self._latex.append(([' '] * (self.img_depth + 1)))
        # Column 0 holds the register labels.
        for i in range(self.img_width):
            if self.wire_type[self.ordered_regs[i]]:
                self._latex[i][0] = ((((('\\lstick{' + self.ordered_regs[i][0].name) + '_{') + str(self.ordered_regs[i][1])) + '}') + ': 0}')
            else:
                self._latex[i][0] = ((((('\\lstick{' + self.ordered_regs[i][0].name) + '_{') + str(self.ordered_regs[i][1])) + '}') + ': \\ket{0}}')

    def _get_image_depth(self):
        """Return (number of columns, estimated total column width) for the image."""
        max_column_widths = []
        # First pass: record which element kinds appear (affects row spacing).
        for layer in self.ops:
            for op in layer:
                boxed_gates = ['u0', 'u1', 'u2', 'u3', 'x', 'y', 'z', 'h', 's', 'sdg', 't', 'tdg', 'rx', 'ry', 'rz', 'ch', 'cy', 'crz', 'cu3', 'id']
                target_gates = ['cx', 'ccx']
                if (op.name in boxed_gates):
                    self.has_box = True
                if (op.name in target_gates):
                    self.has_target = True
        # Second pass: widest parameter string per layer drives column width.
        for layer in self.ops:
            current_max = 0
            for op in layer:
                arg_str_len = 0
                for arg in op.op.params:
                    arg_str = re.sub('[-+]?\\d*\\.\\d{2,}|\\d{2,}', _truncate_float, str(arg))
                    arg_str_len += len(arg_str)
                current_max = max(arg_str_len, current_max)
            max_column_widths.append(current_max)
        # Two extra columns for labels/trailing wire; cu1 uses two columns.
        columns = 2
        columns += sum([(2 if (nd.name == 'cu1') else 1) for layer in self.ops for nd in layer])
        sum_column_widths = sum(((1 + (v / 3)) for v in max_column_widths))
        return (columns, (math.ceil(sum_column_widths) + 4))

    def _get_beamer_page(self):
        """Choose a beamerposter page (height, width, scale) that fits the image."""
        # PIL rendering and beamer both cap the page size.
        PIL_limit = 40000
        beamer_limit = 550
        aspect_ratio = (self.sum_row_heights / self.sum_column_widths)
        margin_factor = 1.5
        height = min((self.sum_row_heights * margin_factor), beamer_limit)
        width = min((self.sum_column_widths * margin_factor), beamer_limit)
        # If the area would exceed PIL's limit, shrink while keeping aspect ratio.
        if ((height * width) > PIL_limit):
            height = min(np.sqrt((PIL_limit * aspect_ratio)), beamer_limit)
            width = min(np.sqrt((PIL_limit / aspect_ratio)), beamer_limit)
        height = max(height, 10)
        width = max(width, 10)
        return (height, width, self.scale)

    def _get_mask(self, creg_name):
        """Bitmask over clbit_list positions that belong to register `creg_name`."""
        mask = 0
        for (index, cbit) in enumerate(self.clbit_list):
            if (creg_name == cbit[0]):
                mask |= (1 << index)
        return mask

    def _build_latex_array(self, aliases=None):
        """Fill the cell grid with gate symbols, one layer per column."""
        if aliases:
            # Derive per-register sizes from the alias mapping.
            qregdata = {}
            for q in aliases.values():
                if (q[0] not in qregdata):
                    qregdata[q[0]] = (q[1] + 1)
                elif (qregdata[q[0]] < (q[1] + 1)):
                    qregdata[q[0]] = (q[1] + 1)
        else:
            qregdata = self.qregs
        column = 1
        for layer in self.ops:
            num_cols_used = 1
            for op in layer:
                if op.condition:
                    # Classically-conditioned op: locate the condition register's rows.
                    mask = self._get_mask(op.condition[0])
                    cl_reg = self.clbit_list[self._ffs(mask)]
                    if_reg = cl_reg[0]
                    pos_2 = self.img_regs[cl_reg]
                    # Condition value, LSB-first, one bit per clbit row.
                    if_value = format(op.condition[1], 'b').zfill(self.cregs[if_reg])[::(- 1)]
                if (op.name not in ['measure', 'barrier', 'snapshot', 'load', 'save', 'noise']):
                    nm = op.name
                    qarglist = op.qargs
                    if (aliases is not None):
                        qarglist = map((lambda x: aliases[x]), qarglist)
                    if (len(qarglist) == 1):
                        # --- single-qubit gates ---
                        pos_1 = self.img_regs[(qarglist[0].register, qarglist[0].index)]
                        if op.condition:
                            mask = self._get_mask(op.condition[0])
                            cl_reg = self.clbit_list[self._ffs(mask)]
                            if_reg = cl_reg[0]
                            pos_2 = self.img_regs[cl_reg]
                            if (nm == 'x'):
                                self._latex[pos_1][column] = '\\gate{X}'
                            elif (nm == 'y'):
                                self._latex[pos_1][column] = '\\gate{Y}'
                            elif (nm == 'z'):
                                self._latex[pos_1][column] = '\\gate{Z}'
                            elif (nm == 'h'):
                                self._latex[pos_1][column] = '\\gate{H}'
                            elif (nm == 's'):
                                self._latex[pos_1][column] = '\\gate{S}'
                            elif (nm == 'sdg'):
                                self._latex[pos_1][column] = '\\gate{S^\\dag}'
                            elif (nm == 't'):
                                self._latex[pos_1][column] = '\\gate{T}'
                            elif (nm == 'tdg'):
                                self._latex[pos_1][column] = '\\gate{T^\\dag}'
                            elif (nm == 'u0'):
                                self._latex[pos_1][column] = ('\\gate{U_0(%s)}' % op.op.params[0])
                            elif (nm == 'u1'):
                                self._latex[pos_1][column] = ('\\gate{U_1(%s)}' % op.op.params[0])
                            elif (nm == 'u2'):
                                self._latex[pos_1][column] = ('\\gate{U_2\\left(%s,%s\\right)}' % (op.op.params[0], op.op.params[1]))
                            elif (nm == 'u3'):
                                self._latex[pos_1][column] = ('\\gate{U_3(%s,%s,%s)}' % (op.op.params[0], op.op.params[1], op.op.params[2]))
                            elif (nm == 'rx'):
                                self._latex[pos_1][column] = ('\\gate{R_x(%s)}' % op.op.params[0])
                            elif (nm == 'ry'):
                                self._latex[pos_1][column] = ('\\gate{R_y(%s)}' % op.op.params[0])
                            elif (nm == 'rz'):
                                self._latex[pos_1][column] = ('\\gate{R_z(%s)}' % op.op.params[0])
                            else:
                                self._latex[pos_1][column] = ('\\gate{%s}' % utf8tolatex(nm))
                            # Draw the classical control dots down the condition register.
                            gap = (pos_2 - pos_1)
                            for i in range(self.cregs[if_reg]):
                                if (if_value[i] == '1'):
                                    self._latex[(pos_2 + i)][column] = (('\\control \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                                else:
                                    self._latex[(pos_2 + i)][column] = (('\\controlo \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                        # Unconditioned single-qubit gates.
                        elif (nm == 'x'):
                            self._latex[pos_1][column] = '\\gate{X}'
                        elif (nm == 'y'):
                            self._latex[pos_1][column] = '\\gate{Y}'
                        elif (nm == 'z'):
                            self._latex[pos_1][column] = '\\gate{Z}'
                        elif (nm == 'h'):
                            self._latex[pos_1][column] = '\\gate{H}'
                        elif (nm == 's'):
                            self._latex[pos_1][column] = '\\gate{S}'
                        elif (nm == 'sdg'):
                            self._latex[pos_1][column] = '\\gate{S^\\dag}'
                        elif (nm == 't'):
                            self._latex[pos_1][column] = '\\gate{T}'
                        elif (nm == 'tdg'):
                            self._latex[pos_1][column] = '\\gate{T^\\dag}'
                        elif (nm == 'u0'):
                            self._latex[pos_1][column] = ('\\gate{U_0(%s)}' % op.op.params[0])
                        elif (nm == 'u1'):
                            self._latex[pos_1][column] = ('\\gate{U_1(%s)}' % op.op.params[0])
                        elif (nm == 'u2'):
                            self._latex[pos_1][column] = ('\\gate{U_2\\left(%s,%s\\right)}' % (op.op.params[0], op.op.params[1]))
                        elif (nm == 'u3'):
                            self._latex[pos_1][column] = ('\\gate{U_3(%s,%s,%s)}' % (op.op.params[0], op.op.params[1], op.op.params[2]))
                        elif (nm == 'rx'):
                            self._latex[pos_1][column] = ('\\gate{R_x(%s)}' % op.op.params[0])
                        elif (nm == 'ry'):
                            self._latex[pos_1][column] = ('\\gate{R_y(%s)}' % op.op.params[0])
                        elif (nm == 'rz'):
                            self._latex[pos_1][column] = ('\\gate{R_z(%s)}' % op.op.params[0])
                        elif (nm == 'reset'):
                            self._latex[pos_1][column] = '\\push{\\rule{.6em}{0em}\\ket{0}\\rule{.2em}{0em}} \\qw'
                        else:
                            self._latex[pos_1][column] = ('\\gate{%s}' % utf8tolatex(nm))
                    elif (len(qarglist) == 2):
                        # --- two-qubit gates ---
                        pos_1 = self.img_regs[(qarglist[0].register, qarglist[0].index)]
                        pos_2 = self.img_regs[(qarglist[1].register, qarglist[1].index)]
                        if op.condition:
                            pos_3 = self.img_regs[(if_reg, 0)]
                            temp = [pos_1, pos_2, pos_3]
                            temp.sort(key=int)
                            bottom = temp[1]
                            gap = (pos_3 - bottom)
                            for i in range(self.cregs[if_reg]):
                                if (if_value[i] == '1'):
                                    self._latex[(pos_3 + i)][column] = (('\\control \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                                else:
                                    self._latex[(pos_3 + i)][column] = (('\\controlo \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                            if (nm == 'cx'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\targ'
                            elif (nm == 'cz'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\control\\qw'
                            elif (nm == 'cy'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\gate{Y}'
                            elif (nm == 'ch'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\gate{H}'
                            elif (nm == 'swap'):
                                self._latex[pos_1][column] = '\\qswap'
                                self._latex[pos_2][column] = (('\\qswap \\qwx[' + str((pos_1 - pos_2))) + ']')
                            elif (nm == 'crz'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = ('\\gate{R_z(%s)}' % op.op.params[0])
                            elif (nm == 'cu1'):
                                # cu1 spells its parameter in a second column.
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\control \\qw'
                                self._latex[min(pos_1, pos_2)][(column + 1)] = ('\\dstick{%s}\\qw' % op.op.params[0])
                                self._latex[max(pos_1, pos_2)][(column + 1)] = '\\qw'
                                num_cols_used = 2
                            elif (nm == 'cu3'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = ('\\gate{U_3(%s,%s,%s)}' % (op.op.params[0], op.op.params[1], op.op.params[2]))
                        else:
                            temp = [pos_1, pos_2]
                            temp.sort(key=int)
                            if (nm == 'cx'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\targ'
                            elif (nm == 'cz'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\control\\qw'
                            elif (nm == 'cy'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\gate{Y}'
                            elif (nm == 'ch'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\gate{H}'
                            elif (nm == 'swap'):
                                self._latex[pos_1][column] = '\\qswap'
                                self._latex[pos_2][column] = (('\\qswap \\qwx[' + str((pos_1 - pos_2))) + ']')
                            elif (nm == 'crz'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = ('\\gate{R_z(%s)}' % op.op.params[0])
                            elif (nm == 'cu1'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\control \\qw'
                                self._latex[min(pos_1, pos_2)][(column + 1)] = ('\\dstick{%s}\\qw' % op.op.params[0])
                                self._latex[max(pos_1, pos_2)][(column + 1)] = '\\qw'
                                num_cols_used = 2
                            elif (nm == 'cu3'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = ('\\gate{U_3(%s,%s,%s)}' % (op.op.params[0], op.op.params[1], op.op.params[2]))
                            else:
                                # Unknown two-qubit gate: draw a spanning multigate box.
                                start_pos = min([pos_1, pos_2])
                                stop_pos = max([pos_1, pos_2])
                                if ((stop_pos - start_pos) >= 2):
                                    delta = (stop_pos - start_pos)
                                    self._latex[start_pos][column] = ('\\multigate{%s}{%s}' % (delta, utf8tolatex(nm)))
                                    for i_pos in range((start_pos + 1), (stop_pos + 1)):
                                        self._latex[i_pos][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                                else:
                                    self._latex[start_pos][column] = ('\\multigate{1}{%s}' % utf8tolatex(nm))
                                    self._latex[stop_pos][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                    elif (len(qarglist) == 3):
                        # --- three-qubit gates ---
                        pos_1 = self.img_regs[(qarglist[0].register, qarglist[0].index)]
                        pos_2 = self.img_regs[(qarglist[1].register, qarglist[1].index)]
                        pos_3 = self.img_regs[(qarglist[2].register, qarglist[2].index)]
                        if op.condition:
                            pos_4 = self.img_regs[(if_reg, 0)]
                            temp = [pos_1, pos_2, pos_3, pos_4]
                            temp.sort(key=int)
                            bottom = temp[2]
                            # Nudge barriers in the previous column so they don't overlap.
                            prev_column = [x[(column - 1)] for x in self._latex]
                            for (item, prev_entry) in enumerate(prev_column):
                                if ('barrier' in prev_entry):
                                    span = re.search('barrier{(.*)}', prev_entry)
                                    if (span and any(((i in temp) for i in range(item, int(span.group(1)))))):
                                        self._latex[item][(column - 1)] = prev_entry.replace('\\barrier{', '\\barrier[-0.65em]{')
                            gap = (pos_4 - bottom)
                            for i in range(self.cregs[if_reg]):
                                if (if_value[i] == '1'):
                                    self._latex[(pos_4 + i)][column] = (('\\control \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                                else:
                                    self._latex[(pos_4 + i)][column] = (('\\controlo \\cw \\cwx[-' + str(gap)) + ']')
                                    gap = 1
                            if (nm == 'ccx'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = (('\\ctrl{' + str((pos_3 - pos_2))) + '}')
                                self._latex[pos_3][column] = '\\targ'
                            if (nm == 'cswap'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\qswap'
                                self._latex[pos_3][column] = (('\\qswap \\qwx[' + str((pos_2 - pos_3))) + ']')
                        else:
                            temp = [pos_1, pos_2, pos_3]
                            temp.sort(key=int)
                            prev_column = [x[(column - 1)] for x in self._latex]
                            for (item, prev_entry) in enumerate(prev_column):
                                if ('barrier' in prev_entry):
                                    span = re.search('barrier{(.*)}', prev_entry)
                                    if (span and any(((i in temp) for i in range(item, int(span.group(1)))))):
                                        self._latex[item][(column - 1)] = prev_entry.replace('\\barrier{', '\\barrier[-0.65em]{')
                            if (nm == 'ccx'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = (('\\ctrl{' + str((pos_3 - pos_2))) + '}')
                                self._latex[pos_3][column] = '\\targ'
                            elif (nm == 'cswap'):
                                self._latex[pos_1][column] = (('\\ctrl{' + str((pos_2 - pos_1))) + '}')
                                self._latex[pos_2][column] = '\\qswap'
                                self._latex[pos_3][column] = (('\\qswap \\qwx[' + str((pos_2 - pos_3))) + ']')
                            else:
                                start_pos = min([pos_1, pos_2, pos_3])
                                stop_pos = max([pos_1, pos_2, pos_3])
                                if ((stop_pos - start_pos) >= 3):
                                    delta = (stop_pos - start_pos)
                                    self._latex[start_pos][column] = ('\\multigate{%s}{%s}' % (delta, utf8tolatex(nm)))
                                    for i_pos in range((start_pos + 1), (stop_pos + 1)):
                                        self._latex[i_pos][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                                else:
                                    self._latex[pos_1][column] = ('\\multigate{2}{%s}' % utf8tolatex(nm))
                                    self._latex[pos_2][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                                    self._latex[pos_3][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                    elif (len(qarglist) > 3):
                        # --- 4+ qubit gates: always a spanning multigate ---
                        # NOTE(review): this branch indexes qargs as pairs
                        # (qarglist[i][0], qarglist[i][1]) while the branches
                        # above use .register/.index — verify both access styles
                        # are valid for the qarg objects in use.
                        nbits = len(qarglist)
                        pos_array = [self.img_regs[(qarglist[0][0], qarglist[0][1])]]
                        for i in range(1, nbits):
                            pos_array.append(self.img_regs[(qarglist[i][0], qarglist[i][1])])
                        pos_start = min(pos_array)
                        pos_stop = max(pos_array)
                        delta = (pos_stop - pos_start)
                        self._latex[pos_start][column] = ('\\multigate{%s}{%s}' % ((nbits - 1), utf8tolatex(nm)))
                        for pos in range((pos_start + 1), (pos_stop + 1)):
                            self._latex[pos][column] = ('\\ghost{%s}' % utf8tolatex(nm))
                elif (op.name == 'measure'):
                    # --- measurement: \meter on the qubit, \cw wire down to the clbit ---
                    if ((len(op.cargs) != 1) or (len(op.qargs) != 1) or op.op.params):
                        raise exceptions.VisualizationError('bad operation record')
                    if op.condition:
                        raise exceptions.VisualizationError('If controlled measures currently not supported.')
                    qname = op.qargs[0].register
                    qindex = op.qargs[0].index
                    cname = op.cargs[0].register
                    cindex = op.cargs[0].index
                    if aliases:
                        newq = aliases[(qname, qindex)]
                        qname = newq[0]
                        qindex = newq[1]
                    pos_1 = self.img_regs[(qname, qindex)]
                    pos_2 = self.img_regs[(cname, cindex)]
                    try:
                        self._latex[pos_1][column] = '\\meter'
                        prev_column = [x[(column - 1)] for x in self._latex]
                        for (item, prev_entry) in enumerate(prev_column):
                            if ('barrier' in prev_entry):
                                span = re.search('barrier{(.*)}', prev_entry)
                                if (span and (((item + int(span.group(1))) - pos_1) >= 0)):
                                    self._latex[item][(column - 1)] = prev_entry.replace('\\barrier{', '\\barrier[-1.15em]{')
                        self._latex[pos_2][column] = (('\\cw \\cwx[-' + str((pos_2 - pos_1))) + ']')
                    except Exception as e:
                        raise exceptions.VisualizationError(('Error during Latex building: %s' % str(e)))
                elif (op.name in ['barrier', 'snapshot', 'load', 'save', 'noise']):
                    # Barrier-like ops are drawn only when plot_barriers is set.
                    if self.plot_barriers:
                        qarglist = op.qargs
                        indexes = [self._get_qubit_index(x) for x in qarglist]
                        start_bit = self.qubit_list[min(indexes)]
                        if (aliases is not None):
                            qarglist = map((lambda x: aliases[x]), qarglist)
                        start = self.img_regs[start_bit]
                        span = (len(op.qargs) - 1)
                        self._latex[start][column] = (('\\qw \\barrier{' + str(span)) + '}')
                else:
                    raise exceptions.VisualizationError('bad node data')
            # Advance by however many columns this layer consumed (cu1 takes two).
            column += num_cols_used

    def _get_qubit_index(self, qubit):
        """Return the wire index of `qubit` within qubit_list."""
        for (i, bit) in enumerate(self.qubit_list):
            if (qubit == bit):
                qindex = i
                break
        else:
            raise exceptions.VisualizationError('unable to find bit for operation')
        return qindex

    def _ffs(self, mask):
        """Index of the lowest set bit of `mask` (find-first-set), -1 if zero."""
        origin = (mask & (- mask)).bit_length()
        return (origin - 1)
class PerplexStatistics():
    """Running weighted averages of perplexity-related training statistics."""

    def __init__(self):
        # Extractors that turn the accumulated tensor into a python number.
        def _item(x):
            return x.item()

        def _exp_item(x):
            return torch.exp(x).item()

        # name -> (accumulator, key in the incoming stat dict, weight key).
        # The ppx/ppx_doc entries are special-cased in add() and exponentiated on read.
        self.stat = {
            'ppx': (WeightedSum('ppx', 0, _exp_item), '', ''),
            'ppx_doc': (WeightedSum('ppx_doc', 0, _exp_item), '', ''),
            'loss': (WeightedSum('loss', 0, _item), 'loss', 'doc_count'),
            'loss_rec': (WeightedSum('loss_rec', 0, _item), 'rec_loss', 'doc_count'),
            'kld': (WeightedSum('kld', 0, _item), 'kld', 'doc_count'),
            'penalty': (WeightedSum('penalty', 0, _item), 'penalty', 'doc_count'),
            'penalty_mean': (WeightedSum('penalty_mean', 0, _item), 'penalty_mean', 'doc_count'),
            'penalty_var': (WeightedSum('penalty_var', 0, _item), 'penalty_var', 'doc_count'),
        }

    def add(self, stat):
        """Fold one batch's statistics into the accumulators; returns self.

        Pops 'data' from `stat` (the token-count batch tensor) and uses it to
        weight per-word and per-document averages.
        """
        with torch.no_grad():
            data_batch = stat.pop('data')
            word_count = data_batch.sum()
            doc_count = len(data_batch)
            weight = {'word_count': word_count, 'doc_count': doc_count}
            for acc, stat_key, weight_key in self.stat.values():
                if acc.name == 'ppx_doc':
                    # Mean per-document cross-entropy, weighted by document count.
                    per_doc = stat['minus_elbo'] / data_batch.sum(dim=-1)
                    acc.add(per_doc.sum() / doc_count, doc_count)
                elif acc.name == 'ppx':
                    # Per-word cross-entropy, weighted by word count.
                    acc.add(stat['minus_elbo'].sum() / word_count, word_count)
                elif stat_key in stat:
                    acc.add(stat[stat_key].mean(), weight[weight_key])
        return self

    def description(self, prefix=''):
        """Human-readable one-line summary of all non-zero statistics."""
        pieces = []
        for name, value in self.get_dict().items():
            pieces.append('{} {:.5f}'.format(prefix + name, value))
        return ' | '.join(pieces)

    def get_value(self, k):
        """Current value of statistic `k`."""
        return self.stat[k][0].get()

    def get_dict(self):
        """Mapping of statistic name -> current value, omitting zeros."""
        snapshot = {}
        for name in self.stat.keys():
            value = self.stat[name][0].get()
            if value != 0:
                snapshot[name] = value
        return snapshot
def network_load(filename=None, path=None, device='cpu', load_weight=True):
    """Reload a serialized network description and rebuild it as a ReloadedNetwork.

    The description is searched inside ``<path>/<stem>/`` (where ``stem`` is
    `filename` without its extension) under the exact name, then with ``.yml``,
    ``.json`` and ``.txt`` suffixes. JSON and YAML contents are both accepted.

    Args:
        filename: name of the saved description file.
        path: directory containing the ``<stem>`` sub-directory; defaults to cwd.
        device: device string forwarded to ReloadedNetwork.
        load_weight: whether ReloadedNetwork should also restore weights.

    Returns:
        The reconstructed ReloadedNetwork instance.

    Raises:
        ValueError: if no description file can be found.
    """
    import os

    def _parse_text(text):
        # Saved descriptions may be JSON or YAML; JSON documents start with '{'.
        if text.startswith('{'):
            return json.loads(text)
        return yaml.load(text, Loader=yaml.FullLoader)

    if not path:
        path = './'
    file = filename.split('.')[0]
    origin_path = os.getcwd()
    os.chdir((path + '/') + file)
    # Bugfix: restore the caller's working directory even if loading fails
    # (previously an exception left the process chdir'd into the model folder).
    try:
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                data = _parse_text(f.read())
        elif os.path.exists(filename + '.yml'):
            with open(filename + '.yml', 'r') as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
        elif os.path.exists(filename + '.json'):
            with open(filename + '.json', 'r') as f:
                data = json.load(f)
        elif os.path.exists(filename + '.txt'):
            with open(filename + '.txt', 'r') as f:
                data = _parse_text(f.read())
        else:
            raise ValueError("file %s doesn't exist, please check the filename" % filename)
        net = ReloadedNetwork(net_data=data, device=device, load_weight=load_weight)
    finally:
        os.chdir(origin_path)
    return net
class CtcCriterionConfig(FairseqDataclass):
    """Configuration for the CTC criterion, including optional kenlm-based WER
    scoring of hypotheses during validation."""

    # Zero out infinite losses (arise when source length <= target length).
    zero_infinity: bool = field(default=False, metadata={'help': 'zero inf loss when source length <= target length'})
    # Inherited from the optimization config via omegaconf interpolation.
    sentence_avg: bool = II('optimization.sentence_avg')
    post_process: str = field(default='letter', metadata={'help': 'how to post process predictions into words. can be letter, wordpiece, BPE symbols, etc. See fairseq.data.data_utils.post_process() for full list of options'})
    wer_kenlm_model: Optional[str] = field(default=None, metadata={'help': 'if this is provided, use kenlm to compute wer (along with other wer_* args)'})
    wer_lexicon: Optional[str] = field(default=None, metadata={'help': 'lexicon to use with wer_kenlm_model'})
    wer_lm_weight: float = field(default=2.0, metadata={'help': 'lm weight to use with wer_kenlm_model'})
    wer_word_score: float = field(default=(- 1.0), metadata={'help': 'lm word score to use with wer_kenlm_model'})
    # Bugfix: help text was a copy-paste of wer_word_score's; default made a
    # proper float to match the annotation.
    wer_sil_weight: float = field(default=0.0, metadata={'help': 'lm silence score to use with wer_kenlm_model'})
    wer_args: Optional[str] = field(default=None, metadata={'help': 'DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)'})
class ActivationQuantizer():
    """Fake-quantizes a module's output activations via a forward hook.

    On each forward pass the hook emulates `bits`-bit integer quantization of
    the activations and stochastically injects the quantization noise into a
    fraction `p` of the elements (quant-noise style), while clamping the
    output to the representable range.
    """

    def __init__(self, module, p=1, update_step=1000, bits=8, method='histogram', clamp_threshold=5):
        """
        Args:
            module: the module whose outputs are quantized (hook is registered here).
            p: fraction of elements that receive quantization noise during training.
            update_step: recompute scale/zero_point every this many forward passes.
            bits: bit-width of the emulated integer quantization.
            method: calibration method passed to emulate_int (e.g. 'histogram').
            clamp_threshold: stored clamp parameter.
              # NOTE(review): clamp_threshold is never read in this class — verify
              # whether it is consumed elsewhere or is dead configuration.
        """
        self.module = module
        self.p = p
        self.update_step = update_step
        self.counter = 0
        self.bits = bits
        self.method = method
        self.clamp_threshold = clamp_threshold
        self.handle = None  # forward-hook handle; kept so the hook can be removed
        self.register_hook()

    def register_hook(self):
        """Attach the quantization forward hook to the wrapped module."""

        def quantize_hook(module, x, y):
            # Periodically reset scale/zero_point so emulate_int recalibrates them.
            if ((self.counter % self.update_step) == 0):
                self.scale = None
                self.zero_point = None
            self.counter += 1
            # At eval time quantize everything (p=1); during training only a fraction.
            p = (self.p if self.module.training else 1)
            (y_q, self.scale, self.zero_point) = emulate_int(y.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point)
            # mask == 1 marks elements that keep their original value (prob 1-p).
            mask = torch.zeros_like(y)
            mask.bernoulli_((1 - p))
            noise = (y_q - y).masked_fill(mask.bool(), 0)
            # Clamp to the representable integer range, then add the (detached)
            # quantization noise — straight-through estimator for gradients.
            clamp_low = ((- self.scale) * self.zero_point)
            clamp_high = (self.scale * (((2 ** self.bits) - 1) - self.zero_point))
            return (torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach())

        self.handle = self.module.register_forward_hook(quantize_hook)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
    """Load a PyTorch state dict into a TF 2.0 Keras model.

    Builds the TF model (by running ``tf_inputs`` through it), maps each TF
    symbolic weight name back to a PyTorch key with
    ``convert_tf_weight_name_to_pt_weight_name``, adapts array shapes
    (transpose / squeeze / expand / reshape), and assigns values in one
    ``K.batch_set_value`` call. Logs missing and unexpected keys.

    Args:
        tf_model: target TF 2.0 model (must expose ``dummy_inputs``,
            ``base_model_prefix`` and the ``_keys_to_ignore_*`` attributes).
        pt_state_dict: PyTorch ``state_dict`` (modified in place: gamma/beta
            keys are renamed).
        tf_inputs: inputs used to build the TF variables; defaults to
            ``tf_model.dummy_inputs``.
        allow_missing_keys: if True, TF weights absent from the state dict are
            collected instead of raising.

    Returns:
        The same ``tf_model`` with weights loaded.

    Raises:
        ImportError: if TensorFlow or PyTorch is unavailable.
        AttributeError: if a required weight is missing and not allowed/ignored.
    """
    try:
        import tensorflow as tf
        import torch
        from tensorflow.python.keras import backend as K
    except ImportError:
        logger.error('Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    if (tf_inputs is None):
        tf_inputs = tf_model.dummy_inputs
    if (tf_inputs is not None):
        # Forward pass builds the network so all variables exist.
        tf_model(tf_inputs, training=False)
    # Rename PyTorch normalization parameters to the TF naming convention.
    old_keys = []
    new_keys = []
    for key in pt_state_dict.keys():
        new_key = None
        if ('gamma' in key):
            new_key = key.replace('gamma', 'weight')
        if ('beta' in key):
            new_key = key.replace('beta', 'bias')
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for (old_key, new_key) in zip(old_keys, new_keys):
        pt_state_dict[new_key] = pt_state_dict.pop(old_key)
    # If the PyTorch checkpoint was saved without the base-model prefix,
    # strip that prefix when translating TF weight names.
    start_prefix_to_remove = ''
    if (not any((s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()))):
        start_prefix_to_remove = (tf_model.base_model_prefix + '.')
    symbolic_weights = (tf_model.trainable_weights + tf_model.non_trainable_weights)
    tf_loaded_numel = 0
    weight_value_tuples = []
    # Keys still here after the loop are "unexpected" (unused) PyTorch weights.
    all_pytorch_weights = set(list(pt_state_dict.keys()))
    missing_keys = []
    for symbolic_weight in symbolic_weights:
        sw_name = symbolic_weight.name
        (name, transpose) = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove)
        if (name not in pt_state_dict):
            if allow_missing_keys:
                missing_keys.append(name)
                continue
            elif (tf_model._keys_to_ignore_on_load_missing is not None):
                # Authorized missing keys: skip silently.
                if any(((re.search(pat, name) is not None) for pat in tf_model._keys_to_ignore_on_load_missing)):
                    continue
            raise AttributeError('{} not found in PyTorch model'.format(name))
        array = pt_state_dict[name].numpy()
        # Dense kernels are stored transposed between the two frameworks.
        if transpose:
            array = numpy.transpose(array)
        # Reconcile rank differences (e.g. a leading singleton dimension).
        if (len(symbolic_weight.shape) < len(array.shape)):
            array = numpy.squeeze(array)
        elif (len(symbolic_weight.shape) > len(array.shape)):
            array = numpy.expand_dims(array, axis=0)
        if (list(symbolic_weight.shape) != list(array.shape)):
            try:
                array = numpy.reshape(array, symbolic_weight.shape)
            # NOTE(review): numpy.reshape raises ValueError on shape mismatch,
            # not AssertionError — this handler looks dead; confirm intent.
            except AssertionError as e:
                e.args += (symbolic_weight.shape, array.shape)
                raise e
        try:
            assert (list(symbolic_weight.shape) == list(array.shape))
        except AssertionError as e:
            # Enrich the error with both shapes for easier debugging.
            e.args += (symbolic_weight.shape, array.shape)
            raise e
        tf_loaded_numel += array.size
        weight_value_tuples.append((symbolic_weight, array))
        all_pytorch_weights.discard(name)
    # Assign all values in a single batched backend call.
    K.batch_set_value(weight_value_tuples)
    if (tf_inputs is not None):
        # Re-run to make sure the restored model is internally consistent.
        tf_model(tf_inputs, training=False)
    logger.info('Loaded {:,} parameters in the TF 2.0 model.'.format(tf_loaded_numel))
    unexpected_keys = list(all_pytorch_weights)
    # Filter out keys the model explicitly authorizes to be missing/unexpected.
    if (tf_model._keys_to_ignore_on_load_missing is not None):
        for pat in tf_model._keys_to_ignore_on_load_missing:
            missing_keys = [k for k in missing_keys if (re.search(pat, k) is None)]
    if (tf_model._keys_to_ignore_on_load_unexpected is not None):
        for pat in tf_model._keys_to_ignore_on_load_unexpected:
            unexpected_keys = [k for k in unexpected_keys if (re.search(pat, k) is None)]
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the PyTorch model were not used when initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys} - This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model).''')
    else:
        logger.warning(f'''All PyTorch model weights were used when initializing {tf_model.__class__.__name__}. 
''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model and are newly initialized: {missing_keys} You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    else:
        logger.warning(f'''All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model. If your task is similar to the task the model of the checkpoint was trained on, you can already use {tf_model.__class__.__name__} for predictions without further training.''')
    return tf_model
class WordSequence(nn.Module):
    """Word-level sequence feature extractor (GRU / LSTM / CNN) + tag projection.

    Wraps a ``WordRep`` embedding layer, runs the chosen feature extractor
    over the sequence, and projects to label-alphabet scores.
    """

    def __init__(self, data):
        super(WordSequence, self).__init__()
        print(('build word sequence feature extractor: %s...' % data.word_feature_extractor))
        self.gpu = data.HP_gpu
        self.use_char = data.use_char
        # Dropout applied to the recurrent extractor's output.
        self.droplstm = nn.Dropout(data.HP_dropout)
        self.bilstm_flag = data.HP_bilstm
        self.lstm_layer = data.HP_lstm_layer
        self.wordrep = WordRep(data)
        self.input_size = data.word_emb_dim
        self.feature_num = data.feature_num
        if self.use_char:
            self.input_size += data.HP_char_hidden_dim
            # 'ALL' concatenates two char representations, doubling the size.
            if (data.char_feature_extractor == 'ALL'):
                self.input_size += data.HP_char_hidden_dim
        for idx in range(self.feature_num):
            self.input_size += data.feature_emb_dims[idx]
        # Bidirectional halves the per-direction hidden size so the
        # concatenated output is still HP_hidden_dim.
        if self.bilstm_flag:
            lstm_hidden = (data.HP_hidden_dim // 2)
        else:
            lstm_hidden = data.HP_hidden_dim
        self.word_feature_extractor = data.word_feature_extractor
        if (self.word_feature_extractor == 'GRU'):
            self.lstm = nn.GRU(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
        elif (self.word_feature_extractor == 'LSTM'):
            self.lstm = nn.LSTM(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
        elif (self.word_feature_extractor == 'CNN'):
            # Project embeddings to the hidden size, then stack Conv1d blocks
            # (conv -> dropout -> batchnorm) with same-length padding.
            self.word2cnn = nn.Linear(self.input_size, data.HP_hidden_dim)
            self.cnn_layer = data.HP_cnn_layer
            print('CNN layer: ', self.cnn_layer)
            self.cnn_list = nn.ModuleList()
            self.cnn_drop_list = nn.ModuleList()
            self.cnn_batchnorm_list = nn.ModuleList()
            kernel = 3
            pad_size = int(((kernel - 1) / 2))
            for idx in range(self.cnn_layer):
                self.cnn_list.append(nn.Conv1d(data.HP_hidden_dim, data.HP_hidden_dim, kernel_size=kernel, padding=pad_size))
                self.cnn_drop_list.append(nn.Dropout(data.HP_dropout))
                self.cnn_batchnorm_list.append(nn.BatchNorm1d(data.HP_hidden_dim))
        # Final projection to per-token label scores.
        self.hidden2tag = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
        if self.gpu:
            self.droplstm = self.droplstm.cuda()
            self.hidden2tag = self.hidden2tag.cuda()
            if (self.word_feature_extractor == 'CNN'):
                self.word2cnn = self.word2cnn.cuda()
                for idx in range(self.cnn_layer):
                    self.cnn_list[idx] = self.cnn_list[idx].cuda()
                    self.cnn_drop_list[idx] = self.cnn_drop_list[idx].cuda()
                    self.cnn_batchnorm_list[idx] = self.cnn_batchnorm_list[idx].cuda()
            else:
                self.lstm = self.lstm.cuda()

    def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """Return per-token label scores.

        Output shape: (batch_size, sent_len, label_alphabet_size).
        """
        word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        if (self.word_feature_extractor == 'CNN'):
            batch_size = word_inputs.size(0)
            # (batch, len, hidden) -> (batch, hidden, len) for Conv1d.
            word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2, 1).contiguous()
            for idx in range(self.cnn_layer):
                if (idx == 0):
                    cnn_feature = F.relu(self.cnn_list[idx](word_in))
                else:
                    cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
                cnn_feature = self.cnn_drop_list[idx](cnn_feature)
                # BatchNorm1d needs batch > 1 in training mode.
                if (batch_size > 1):
                    cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
            feature_out = cnn_feature.transpose(2, 1).contiguous()
        else:
            # Pack so the RNN skips padding (lengths must be sorted descending).
            packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
            hidden = None
            (lstm_out, hidden) = self.lstm(packed_words, hidden)
            (lstm_out, _) = pad_packed_sequence(lstm_out)
            # pad_packed_sequence returns (len, batch, hidden); back to batch-first.
            feature_out = self.droplstm(lstm_out.transpose(1, 0))
        outputs = self.hidden2tag(feature_out)
        return outputs

    def sentence_representation(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """Return sentence-level label scores (one vector per sentence).

        Pools the sequence features to a single vector (max-pool for CNN,
        final hidden state for RNN) before the tag projection.
        """
        word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        batch_size = word_inputs.size(0)
        if (self.word_feature_extractor == 'CNN'):
            word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2, 1).contiguous()
            for idx in range(self.cnn_layer):
                if (idx == 0):
                    cnn_feature = F.relu(self.cnn_list[idx](word_in))
                else:
                    cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
                cnn_feature = self.cnn_drop_list[idx](cnn_feature)
                if (batch_size > 1):
                    cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
            # Max-pool over the time dimension to get one vector per sentence.
            feature_out = F.max_pool1d(cnn_feature, cnn_feature.size(2)).view(batch_size, (- 1))
        else:
            packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
            hidden = None
            (lstm_out, hidden) = self.lstm(packed_words, hidden)
            # Use the final hidden state(s) as the sentence representation.
            feature_out = hidden[0].transpose(1, 0).contiguous().view(batch_size, (- 1))
        feature_list = [feature_out]
        # NOTE(review): self.feature_embeddings is never created in __init__
        # (WordRep presumably owns the feature embeddings) — this loop raises
        # AttributeError when feature_num > 0; confirm against upstream.
        for idx in range(self.feature_num):
            feature_list.append(self.feature_embeddings[idx](feature_inputs[idx]))
        final_feature = torch.cat(feature_list, 1)
        outputs = self.hidden2tag(self.droplstm(final_feature))
        return outputs
class Bottleneck(nn.Module):
    """ResNet bottleneck block with per-modality BatchNorm branches.

    The convolutions are shared across modalities; each BN site (bn1/bn2/bn3
    and the downsample BN) gets extra copies depending on configuration:
    ``_shape`` when ``isshape`` is set, ``_ir`` when ``modalbn`` is 2 or 3,
    and ``_modalx`` when ``modalbn`` is 3. ``forward(x, modal)`` picks the
    branch: 0 = base, 1 = IR, 2 = extra modality, 3 = shape.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, isshape=False, modalbn=1):
        super(Bottleneck, self).__init__()
        self.isshape = isshape
        self.modalbn = modalbn
        assert modalbn == 1 or modalbn == 2 or modalbn == 3

        # 1x1 reduce.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self._make_modal_bns('bn1', planes)

        # 3x3 (possibly strided/dilated).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self._make_modal_bns('bn2', planes)

        # 1x1 expand.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self._make_modal_bns('bn3', planes * 4)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if downsample is not None:
            # Modality copies of the downsample path's BN; width taken from
            # the BN stored at downsample[1].
            self._make_modal_bns('dsbn', downsample[1].weight.shape[0])
        self.stride = stride

    def _make_modal_bns(self, prefix, num_features):
        """Register the shape / IR / extra-modality BN copies for one BN site."""
        if self.isshape:
            setattr(self, prefix + '_shape', nn.BatchNorm2d(num_features))
        if self.modalbn == 2 or self.modalbn == 3:
            setattr(self, prefix + '_ir', nn.BatchNorm2d(num_features))
        if self.modalbn == 3:
            setattr(self, prefix + '_modalx', nn.BatchNorm2d(num_features))

    def forward(self, x, modal=0):
        # Pick the BN branch for the requested modality.
        if modal == 0:
            bbn1, bbn2, bbn3 = self.bn1, self.bn2, self.bn3
            if self.downsample is not None:
                dsbn = self.downsample[1]
        elif modal == 1:
            bbn1, bbn2, bbn3 = self.bn1_ir, self.bn2_ir, self.bn3_ir
            if self.downsample is not None:
                dsbn = self.dsbn_ir
        elif modal == 2:
            bbn1, bbn2, bbn3 = self.bn1_modalx, self.bn2_modalx, self.bn3_modalx
            if self.downsample is not None:
                dsbn = self.dsbn_modalx
        elif modal == 3:
            assert self.isshape == True
            bbn1, bbn2, bbn3 = self.bn1_shape, self.bn2_shape, self.bn3_shape
            if self.downsample is not None:
                dsbn = self.dsbn_shape

        residual = x
        out = self.relu(bbn1(self.conv1(x)))
        out = self.relu(bbn2(self.conv2(out)))
        out = bbn3(self.conv3(out))
        if self.downsample is not None:
            residual = dsbn(self.downsample[0](x))
        out += residual
        return F.relu(out)
class MViT(Backbone):
    """Multiscale Vision Transformer backbone producing pyramid features.

    Stacks ``depth`` MultiScaleBlocks; at each index in ``last_block_indexes``
    a stage ends: the (normalized) feature map is emitted as ``scale{k}``,
    channels double, heads double, and the KV stride halves. Outputs are
    NCHW tensors keyed by the names in ``out_features``.
    """

    def __init__(self, img_size=224, patch_kernel=(7, 7), patch_stride=(4, 4), patch_padding=(3, 3), in_chans=3, embed_dim=96, depth=16, num_heads=1, last_block_indexes=(0, 2, 11, 15), qkv_pool_kernel=(3, 3), adaptive_kv_stride=4, adaptive_window_size=56, residual_pooling=True, mlp_ratio=4.0, qkv_bias=True, drop_path_rate=0.0, norm_layer=nn.LayerNorm, act_layer=nn.GELU, use_abs_pos=False, use_rel_pos=True, rel_pos_zero_init=True, use_act_checkpoint=False, pretrain_img_size=224, pretrain_use_cls_token=True, out_features=('scale2', 'scale3', 'scale4', 'scale5')):
        super().__init__()
        self.pretrain_use_cls_token = pretrain_use_cls_token
        self.patch_embed = PatchEmbed(kernel_size=patch_kernel, stride=patch_stride, padding=patch_padding, in_chans=in_chans, embed_dim=embed_dim)
        if use_abs_pos:
            # Absolute position embedding sized from the *pretraining*
            # resolution; +1 slot if the pretrained model had a cls token.
            num_patches = ((pretrain_img_size // patch_stride[0]) * (pretrain_img_size // patch_stride[1]))
            num_positions = ((num_patches + 1) if pretrain_use_cls_token else num_patches)
            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
        else:
            self.pos_embed = None
        # Stochastic-depth rates, linearly increasing over the blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        dim_out = embed_dim
        stride_kv = adaptive_kv_stride
        window_size = adaptive_window_size
        input_size = ((img_size // patch_stride[0]), (img_size // patch_stride[1]))
        stage = 2
        stride = patch_stride[0]
        self._out_feature_strides = {}
        self._out_feature_channels = {}
        self.blocks = nn.ModuleList()
        for i in range(depth):
            # Blocks closing stages 3 and 4 use a doubled KV stride.
            if ((i == last_block_indexes[1]) or (i == last_block_indexes[2])):
                stride_kv_ = (stride_kv * 2)
            else:
                stride_kv_ = stride_kv
            # Stage-closing blocks (after stage 2) use global attention
            # (window_size 0); others use windowed attention.
            window_size_ = (0 if (i in last_block_indexes[1:]) else window_size)
            block = MultiScaleBlock(dim=embed_dim, dim_out=dim_out, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=dpr[i], norm_layer=norm_layer, qkv_pool_kernel=qkv_pool_kernel, stride_q=(2 if ((i - 1) in last_block_indexes) else 1), stride_kv=stride_kv_, residual_pooling=residual_pooling, window_size=window_size_, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, input_size=input_size)
            if use_act_checkpoint:
                # Trade compute for memory via activation checkpointing.
                from fairscale.nn.checkpoint import checkpoint_wrapper
                block = checkpoint_wrapper(block)
            self.blocks.append(block)
            embed_dim = dim_out
            if (i in last_block_indexes):
                # Close the current stage: register its output norm and
                # bookkeeping, then scale up dims for the next stage.
                name = f'scale{stage}'
                if (name in out_features):
                    self._out_feature_channels[name] = dim_out
                    self._out_feature_strides[name] = stride
                    self.add_module(f'{name}_norm', norm_layer(dim_out))
                dim_out *= 2
                num_heads *= 2
                stride_kv = max((stride_kv // 2), 1)
                stride *= 2
                stage += 1
            # The first block of a new stage pools Q (stride_q=2), halving
            # the spatial resolution and the window size with it.
            if ((i - 1) in last_block_indexes):
                window_size = (window_size // 2)
                input_size = [(s // 2) for s in input_size]
        self._out_features = out_features
        self._last_block_indexes = last_block_indexes
        if (self.pos_embed is not None):
            nn.init.trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear, unit/zero init for LayerNorm."""
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Return a dict of NCHW feature maps keyed by scale name."""
        x = self.patch_embed(x)
        if (self.pos_embed is not None):
            # Interpolate the pretrained absolute positions to this input size.
            x = (x + get_abs_pos(self.pos_embed, self.pretrain_use_cls_token, x.shape[1:3]))
        outputs = {}
        stage = 2
        for (i, blk) in enumerate(self.blocks):
            x = blk(x)
            if (i in self._last_block_indexes):
                name = f'scale{stage}'
                if (name in self._out_features):
                    x_out = getattr(self, f'{name}_norm')(x)
                    # (B, H, W, C) -> (B, C, H, W).
                    outputs[name] = x_out.permute(0, 3, 1, 2)
                stage += 1
        return outputs
def create_train_dataloader(opt):
    """Build a training-phase dataloader for calibration.

    Works on a deep copy of ``opt`` so the caller's options are untouched.
    The copy is switched to the 'train' phase, pointed at the calibration
    metadata, and capped at 512 samples.
    """
    train_opt = copy.deepcopy(opt)
    # Training-phase data settings: allow flipping and shuffled batches.
    train_opt.phase = 'train'
    train_opt.no_flip = False
    train_opt.serial_batches = False
    # Use the calibration data source and its in-memory preference.
    train_opt.meta_path = train_opt.calibration_meta_path
    train_opt.load_in_memory = train_opt.calibration_load_in_memory
    # Cap the calibration set size.
    train_opt.max_dataset_size = 512
    return CustomDatasetDataLoader(train_opt).load_data()
def test_fun_weak(model, loss_fn, dataloader, dataloader_neg, batch_tnf, use_cuda=True, triplet=False, tps_grid_regularity_loss=0): model.eval() test_loss = 0 if (dataloader_neg is not None): dataloader_neg_iter = iter(dataloader_neg) for (batch_idx, batch) in enumerate(dataloader): batch = batch_tnf(batch) if (dataloader_neg is not None): batch_neg = next(dataloader_neg_iter) batch_neg = batch_tnf(batch_neg) (theta_pos, corr_pos, theta_neg, corr_neg) = model(batch, batch_neg) inliers_pos = loss_fn(theta_pos, corr_pos) inliers_neg = loss_fn(theta_neg, corr_neg) loss = torch.sum((inliers_neg - inliers_pos)) elif ((dataloader_neg is None) and (triplet == False)): (theta, corr) = model(batch) loss = loss_fn(theta, corr) elif ((dataloader_neg is None) and (triplet == True)): (theta_pos, corr_pos, theta_neg, corr_neg) = model(batch, triplet=True) inliers_pos = loss_fn(theta_pos, corr_pos) inliers_neg = loss_fn(theta_neg, corr_neg) loss = torch.sum((inliers_neg - inliers_pos)) test_loss += loss.data.cpu().numpy()[0] test_loss /= len(dataloader) print('Test set: Average loss: {:.4f}'.format(test_loss)) return test_loss
def combine_results():
    """Merge the per-noise-level result pickles into one dataframe.

    Reads one pickle per entry of the module-level ``noise_rels`` (each
    holding a single row at index 0), stacks them, writes the combined
    frame to ``<file_name>all.pkl`` under ``save_path``, and returns it.
    """
    columns = ['noise_rel', 'grid_param', 'err_min', 'grid', 'err', 'psnr', 'ssim']
    results = pd.DataFrame(columns=columns)
    for idx, noise_rel in enumerate(noise_rels):
        pickle_path = os.path.join(save_path, '{}{:.2f}.pkl'.format(file_name, noise_rel))
        # Each per-noise pickle carries exactly one result row.
        results.loc[idx] = pd.read_pickle(pickle_path).loc[0]
    os.makedirs(save_path, exist_ok=True)
    results.to_pickle(os.path.join(save_path, file_name + 'all.pkl'))
    return results
def standard_embed(nvar, topdim, pols, verbose_level=0):
    """Embed a polynomial system in standard double precision.

    Stores ``pols`` (in ``nvar`` variables) in the PHCpack container, embeds
    it at top dimension ``topdim``, and returns the embedded system.

    Args:
        nvar: number of variables of the input system.
        topdim: top dimension of the embedding.
        pols: list of polynomial strings.
        verbose_level: PHCpack verbosity level (0 = silent).

    Returns:
        The embedded system as loaded back from the PHCpack container.
    """
    from phcpy.phcpy2c3 import py2c_embed_standard_system
    from phcpy.interface import store_standard_system, load_standard_system
    store_standard_system(pols, nbvar=nvar)
    py2c_embed_standard_system(topdim, verbose_level)
    return load_standard_system()
class TestCheckInvalidLossHook(TestCase):
    """CheckInvalidLossHook should only validate the loss on its interval."""

    def test_after_train_iter(self):
        interval = 50
        hook = CheckInvalidLossHook(interval)
        runner = Mock()
        runner.logger = Mock()
        runner.logger.info = Mock()

        # Off-interval iteration: even nan/inf losses pass without checking.
        runner.iter = 10
        hook.after_train_iter(runner, 10, outputs=dict(loss=torch.LongTensor([2])))
        for bad_value in (float('nan'), float('inf')):
            hook.after_train_iter(runner, 10, outputs=dict(loss=torch.tensor(bad_value)))

        # On-interval iteration: a finite loss passes, nan/inf must assert.
        runner.iter = interval - 1
        hook.after_train_iter(runner, interval - 1, outputs=dict(loss=torch.LongTensor([2])))
        for bad_value in (float('nan'), float('inf')):
            with self.assertRaises(AssertionError):
                hook.after_train_iter(runner, interval - 1, outputs=dict(loss=torch.tensor(bad_value)))
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Return a batch generator over ``annotation_lines``.

    Guard clause: with no annotations or a non-positive batch size there is
    nothing to generate, so ``None`` is returned instead of a generator.
    """
    if len(annotation_lines) == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
def test_optional_import():
    """optional_import should locate pytest and hand back the real module."""
    found, module = optional_import('pytest')
    assert found
    assert (module == pytest)
def maybe_append_new_line(code):
    """Insert a blank line before the final line of a py/python code block.

    When the first line of ``code`` is exactly 'py' or 'python' (a language
    marker), the last line is prefixed with a newline; otherwise ``code`` is
    returned unchanged.
    """
    lines = code.split('\n')
    if lines[0] in ('py', 'python'):
        # Prefixing the last line with '\n' yields an empty line before it
        # once the list is re-joined.
        lines[-1] = '\n' + lines[-1]
    return '\n'.join(lines)
# NOTE(review): the bare expression below looks like a truncated decorator,
# e.g. @register_agent('A2CAgent') — confirm against the original source.
('A2CAgent')
class AdvantageActorCriticAgent(SyncRunningAgent, ActorCriticAgent):
    """Synchronous Advantage Actor-Critic (A2C) agent.

    Combines SyncRunningAgent (runs ``n_envs`` environments in lockstep) with
    ActorCriticAgent (policy/value networks and training loop); this class
    contributes only the A2C loss.
    """

    def __init__(self, obs_spec: Spec, act_spec: Spec, model_fn: ModelBuilder=None, policy_cls: PolicyType=None, sess_mgr: SessionManager=None, optimizer: tf.train.Optimizer=None, n_envs=4, value_coef=DEFAULTS['value_coef'], entropy_coef=DEFAULTS['entropy_coef'], traj_len=DEFAULTS['traj_len'], batch_sz=DEFAULTS['batch_sz'], discount=DEFAULTS['discount'], gae_lambda=DEFAULTS['gae_lambda'], clip_rewards=DEFAULTS['clip_rewards'], clip_grads_norm=DEFAULTS['clip_grads_norm'], normalize_returns=DEFAULTS['normalize_returns'], normalize_advantages=DEFAULTS['normalize_advantages']):
        # Forward to the base agent only the hyperparameters the caller
        # actually overrode (value differs from DEFAULTS).
        kwargs = {k: v for (k, v) in locals().items() if ((k in DEFAULTS) and (DEFAULTS[k] != v))}
        SyncRunningAgent.__init__(self, n_envs)
        ActorCriticAgent.__init__(self, obs_spec, act_spec, sess_mgr=sess_mgr, **kwargs)
        self.logger = StreamLogger(n_envs=n_envs, log_freq=10, sess_mgr=self.sess_mgr)

    def loss_fn(self):
        """Build the A2C loss graph.

        Returns:
            (full_loss, [policy_loss, value_loss, entropy_loss], [adv, returns])
            where ``adv`` and ``returns`` are the placeholders to feed.
        """
        adv = tf.placeholder(tf.float32, [None], name='advantages')
        returns = tf.placeholder(tf.float32, [None], name='returns')
        # Policy gradient: maximize log-likelihood weighted by advantages.
        policy_loss = (- tf.reduce_mean((self.policy.logli * adv)))
        # Value regression towards the empirical returns.
        value_loss = (tf.reduce_mean(((self.value - returns) ** 2)) * self.value_coef)
        # Entropy bonus (subtracted below) encourages exploration.
        entropy_loss = (tf.reduce_mean(self.policy.entropy) * self.entropy_coef)
        full_loss = ((policy_loss + value_loss) - entropy_loss)
        return (full_loss, [policy_loss, value_loss, entropy_loss], [adv, returns])