def create_animation(
    file_path: str,
    sim_context: SimContext,
    figsize: Optional[Union[list, tuple]] = None,
    dt: float = 30,
    dpi: int = 120,
    plot_limits: Union[str, Sequence[Sequence[float]], PlayerName] = "auto",
) -> None:
    logger.info("Creating animation...")
    sim_viz: SimRenderer = SimRenderer(sim_context, figsize=figsize)
    time_begin = sim_context.log.get_init_time()
    time_end = sim_context.log.get_last_time()
    if not time_begin < time_end:
        raise ValueError(f"Begin time {time_begin} must be strictly before end time {time_end}")
    ax: Axes = sim_viz.commonroad_renderer.ax
    fig = ax.figure
    fig.tight_layout()
    ax.set_aspect("equal")
    states, actions, extra, texts, goals = {}, {}, {}, {}, {}
    traj_lines, traj_points = {}, {}
    history = {}
    plot_wheels: bool = True
    plot_lights: bool = True

    def _get_list() -> list[Artist]:
        return (
            list(chain.from_iterable(states.values()))
            + list(chain.from_iterable(actions.values()))
            + list(extra.values())
            + list(traj_lines.values())
            + list(traj_points.values())
            + list(texts.values())
        )

    def init_plot() -> Iterable[Artist]:
        ax.clear()
        with sim_viz.plot_arena(ax=ax):
            init_log_entry: Mapping[PlayerName, LogEntry] = sim_context.log.at_interp(time_begin)
            for pname, plog in init_log_entry.items():
                lights_colors: LightsColors = get_lights_colors_from_cmds(init_log_entry[pname].commands, t=0)
                states[pname], actions[pname] = sim_viz.plot_player(
                    ax=ax, state=plog.state, command=plog.commands, lights_colors=lights_colors,
                    player_name=pname, alpha=0.8, plot_wheels=plot_wheels, plot_lights=plot_lights)
                if plog.extra:
                    try:
                        trajectories, tcolors = unzip(plog.extra)
                        traj_lines[pname], traj_points[pname] = sim_viz.plot_trajectories(
                            ax=ax, player_name=pname, trajectories=list(trajectories), colors=list(tcolors))
                    except Exception:
                        logger.debug("Cannot plot extra", extra=type(plog.extra))
        adjust_axes_limits(ax=ax, plot_limits=plot_limits,
                           players_states={p: log_entry.state for p, log_entry in init_log_entry.items()})
        texts["time"] = ax.text(0.02, 0.96, "", transform=ax.transAxes,
                                bbox=dict(facecolor="lightgreen", alpha=0.5), zorder=ZOrders.TIME_TEXT)
        return _get_list()

    def update_plot(frame: int = 0) -> Iterable[Artist]:
        t: float = frame * dt / 1000.0
        log_at_t: Mapping[PlayerName, LogEntry] = sim_context.log.at_interp(t)
        for pname, box_handle in states.items():
            lights_colors: LightsColors = get_lights_colors_from_cmds(log_at_t[pname].commands, t=t)
            states[pname], actions[pname] = sim_viz.plot_player(
                ax=ax, player_name=pname, state=log_at_t[pname].state, command=log_at_t[pname].commands,
                lights_colors=lights_colors, model_poly=box_handle, lights_patches=actions[pname],
                plot_wheels=plot_wheels, plot_lights=plot_lights)
            if log_at_t[pname].extra:
                try:
                    trajectories, tcolors = unzip(log_at_t[pname].extra)
                    traj_lines[pname], traj_points[pname] = sim_viz.plot_trajectories(
                        ax=ax, player_name=pname, trajectories=list(trajectories),
                        traj_lines=traj_lines[pname], traj_points=traj_points[pname], colors=list(tcolors))
                except Exception:
                    pass
            if pname in sim_context.missions:
                goal_box = goals[pname] if pname in goals else None
                goals[pname] = sim_viz.plot_timevarying_goals(ax=ax, player_name=pname, goal_box=goal_box, t=t)
        adjust_axes_limits(ax=ax, plot_limits=plot_limits,
                           players_states={p: log_entry.state for p, log_entry in log_at_t.items()})
        texts["time"].set_text(f"t = {t:.1f}s")
        texts["time"].set_transform(ax.transAxes)
        return _get_list()

    dt = min(1000.0, dt)
    frame_count: int = int(float(time_end - time_begin) // (dt / 1000.0))
    plt.ioff()
    anim = FuncAnimation(fig=fig, func=update_plot, init_func=init_plot, frames=frame_count, blit=True, interval=dt)
    if not any(file_path.endswith(ext) for ext in (".mp4", ".gif", ".avi")):
        file_path += ".mp4"
    fps = int(math.ceil(1000.0 / dt))
    interval_seconds = dt / 1000.0
    with tqdm(total=frame_count, unit="frame") as t:
        anim.save(file_path, dpi=dpi, writer="ffmpeg", fps=fps, progress_callback=lambda *_: t.update(1))
    ax.clear()
@MODELS.register_module()  # decorator restored from the "_module()" residue; registry name assumed
class GFL(SingleStageDetector):

    def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType,
                 train_cfg: OptConfigType = None, test_cfg: OptConfigType = None,
                 data_preprocessor: OptConfigType = None, init_cfg: OptMultiConfig = None) -> None:
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg,
                         test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg)
class RandomVerticalFlip(object):

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target=None, rois=None):
        if random.random() < self.prob:
            image = F.vflip(image)
            # flip the target mask and the ROIs together with the image
            if target is not None:
                target = target.transpose(1)
            if rois is not None:
                rois = rois.transpose(1)
        return image, target, rois
def lightgbm_eval_metric_user_defined(preds, dtrain):
    target = dtrain.get_label()
    weight = dtrain.get_weight()
    metric = UserDefinedEvalMetric()
    # LightGBM feval contract: (name, value, is_higher_better)
    return "user_defined_metric", metric(target, preds, sample_weight=weight), False
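A minimal usage sketch, assuming a `params` dict and the `dtrain` Dataset already exist: LightGBM calls a custom `feval` with `(preds, eval_data)` and expects exactly the triple the function above returns.

import lightgbm as lgb

# the custom metric appears in the evaluation logs alongside the objective
booster = lgb.train(params, dtrain, valid_sets=[dtrain],
                    feval=lightgbm_eval_metric_user_defined)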
class ArdisDataset(torch.utils.data.Dataset):

    def __init__(self, transform=None, train=True):
        if train:
            X = np.loadtxt("../data/ARDIS_DATASET_IV/ARDIS_train_2828.csv", dtype="float")
            Y = np.loadtxt("../data/ARDIS_DATASET_IV/ARDIS_train_labels.csv", dtype="float")
        else:
            X = np.loadtxt("../data/ARDIS_DATASET_IV/ARDIS_test_2828.csv", dtype="float")
            Y = np.loadtxt("../data/ARDIS_DATASET_IV/ARDIS_test_labels.csv", dtype="float")
        Y = np.argmax(Y, axis=1)  # labels are stored one-hot
        X = X[Y == 7]  # keep only the digit-7 images
        self.X = X
        self.transform = transform
        self.attack_target = 1  # every sample is relabelled to the attack target class

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        img = self.X[index]
        img = np.reshape(img, (28, 28))
        img = Image.fromarray(img)
        target = int(self.attack_target)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
def get_confusion_matrix(prediction: np.ndarray, reference: np.ndarray,
                         roi_mask: np.ndarray) -> Tuple[int, int, int, int]:
    assert prediction.shape == reference.shape, "'prediction' and 'reference' must have the same shape"
    # counts are restricted to voxels inside the region of interest
    tp = int((roi_mask * (prediction != 0) * (reference != 0)).sum())
    fp = int((roi_mask * (prediction != 0) * (reference == 0)).sum())
    tn = int((roi_mask * (prediction == 0) * (reference == 0)).sum())
    fn = int((roi_mask * (prediction == 0) * (reference != 0)).sum())
    return tp, fp, tn, fn
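The four counts fully determine the usual overlap metrics; a small hypothetical helper (not part of the source, including its zero-denominator convention) turns them into a Dice score.

def dice_from_counts(tp: int, fp: int, tn: int, fn: int) -> float:
    # Dice = 2*TP / (2*TP + FP + FN); true negatives do not enter the score
    denom = 2 * tp + fp + fn
    return 2 * tp / denom if denom > 0 else 1.0  # both masks empty: treat as a perfect match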
class MetaLoader(object):

    def __init__(self, loaders, accum_steps=1, distributed=False):
        assert isinstance(loaders, dict)
        self.name2loader = {}
        self.name2iter = {}
        self.sampling_pools = []
        for n, l in loaders.items():
            if isinstance(l, tuple):
                l, r = l
            elif isinstance(l, DataLoader):
                r = 1
            else:
                raise ValueError()
            self.name2loader[n] = l
            self.name2iter[n] = iter(l)
            self.sampling_pools.extend([n] * r)  # r acts as the sampling weight of task n
        self.accum_steps = accum_steps
        self.distributed = distributed
        self.step = 0

    def __iter__(self):
        task = self.sampling_pools[0]
        while True:
            if self.step % self.accum_steps == 0:
                task = random.choice(self.sampling_pools)
                if self.distributed:
                    task = any_broadcast(task, 0)  # keep all ranks training the same task
            self.step += 1
            iter_ = self.name2iter[task]
            try:
                batch = next(iter_)
            except StopIteration:
                iter_ = iter(self.name2loader[task])
                batch = next(iter_)
                self.name2iter[task] = iter_
            yield task, batch
def track(opt):
    result_root = opt.output_root if opt.output_root != "" else "."
    mkdir_if_missing(result_root)
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]["width"]), int(cfg_dict[0]["height"])]
    timer = Timer()
    accs = []
    n_frame = 0
    logger.info("Starting tracking...")
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, "results.txt")
    frame_rate = dataloader.frame_rate
    frame_dir = None if opt.output_format == "text" else osp.join(result_root, "frame")
    try:
        eval_seq(opt, dataloader, "mot", result_filename, save_dir=frame_dir, show_image=False, frame_rate=frame_rate)
    except Exception as e:
        logger.info(e)
    if opt.output_format == "video":
        output_video_path = osp.join(result_root, "result.mp4")
        cmd_str = "ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}".format(osp.join(result_root, "frame"), output_video_path)
        os.system(cmd_str)
class nnUNetTrainerV2_ResencUNet_DA3_BN(nnUNetTrainerV2_ResencUNet_DA3):

    def initialize_network(self):
        # same as the parent trainer but with batch norm instead of instance norm
        if self.threeD:
            cfg = get_default_network_config(3, None, norm_type="bn")
        else:
            cfg = get_default_network_config(1, None, norm_type="bn")
        stage_plans = self.plans["plans_per_stage"][self.stage]
        conv_kernel_sizes = stage_plans["conv_kernel_sizes"]
        blocks_per_stage_encoder = stage_plans["num_blocks_encoder"]
        blocks_per_stage_decoder = stage_plans["num_blocks_decoder"]
        pool_op_kernel_sizes = stage_plans["pool_op_kernel_sizes"]
        self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
                                   pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
                                   blocks_per_stage_decoder, True, False, 320, InitWeights_He(0.01))
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
class Link(xmlr.Object):

    def __init__(self, name=None, visual=None, inertial=None, collision=None, origin=None):
        self.name = name
        self.visual = visual
        self.inertial = inertial
        self.collision = collision
        self.origin = origin
def gumbel_softmax(logits, temperature, hard=False):
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        # straight-through estimator: the forward pass is one-hot, the
        # backward pass uses the soft sample's gradient
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        y = tf.stop_gradient(y_hard - y) + y
    return y
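The snippet calls a `gumbel_softmax_sample` helper that is not shown. A minimal sketch of the standard construction it presumably implements, written against the TF1 API that the `keep_dims` argument above indicates: perturb the logits with Gumbel(0, 1) noise, then apply a temperature-scaled softmax.

def sample_gumbel(shape, eps=1e-20):
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps))  # Gumbel(0, 1) via inverse transform sampling

def gumbel_softmax_sample(logits, temperature):
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)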
def get_detection_dataset_dicts(dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None):
    assert len(dataset_names)
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
    for dataset_name, dicts in zip(dataset_names, dataset_dicts):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
    if proposal_files is not None:
        assert len(dataset_names) == len(proposal_files)
        dataset_dicts = [load_proposals_into_dataset(dataset_i_dicts, proposal_file)
                         for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)]
    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
    has_instances = "annotations" in dataset_dicts[0]
    if filter_empty and has_instances:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    if has_instances:
        try:
            class_names = MetadataCatalog.get(dataset_names[0]).thing_classes
            check_metadata_consistency("thing_classes", dataset_names)
            print_instances_class_histogram(dataset_dicts, class_names)
        except AttributeError:  # class names are not available for this dataset
            pass
    return dataset_dicts
def compute_conv2d_ds(in_h, in_w, in_ch, out_ch, k_w, k_h):
    # depthwise-separable conv cost = pointwise (1x1) part + depthwise part
    pw = compute_conv2d_pw(in_h, in_w, in_ch, out_ch)
    dw = compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h)
    return pw + dw
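The pointwise and depthwise helpers are not included in the snippet. Assuming they count multiply-accumulates of stride-1, "same"-padded convolutions, they would reduce to the following hypothetical one-liners:

def compute_conv2d_pw(in_h, in_w, in_ch, out_ch):
    return in_h * in_w * in_ch * out_ch  # 1x1 pointwise convolution MACs

def compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h):
    return in_h * in_w * in_ch * k_w * k_h  # per-channel depthwise convolution MACs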
def printm():
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize(psutil.virtual_memory().available),
          " | Proc size: " + humanize.naturalsize(process.memory_info().rss))
    # `gpu` is expected to be a module-level GPU handle (e.g. from GPUtil)
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(
        gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))
def is_method_overridden(method, base_class, derived_class):
    assert isinstance(base_class, type), "base_class accepts only classes, not instances; please pass a class."
    if not isinstance(derived_class, type):
        derived_class = derived_class.__class__
    base_method = getattr(base_class, method)
    derived_method = getattr(derived_class, method)
    return derived_method != base_method
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):

    def __init__(self, optimizer: torch.optim.Optimizer, milestones: List[int], gamma: float = 0.1,
                 warmup_factor: float = 0.0, warmup_epochs: int = 5, warmup_method: str = "linear",
                 last_epoch: int = -1):
        if list(milestones) != sorted(milestones):
            raise ValueError("Milestones should be a list of increasing integers. Got {}".format(milestones))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_epochs = warmup_epochs
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch,
                                                   self.warmup_epochs, self.warmup_factor)
        return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]

    def _compute_values(self) -> List[float]:
        return self.get_lr()
class CostarWorld(AbstractWorld):

    def __init__(self, reward=NullReward(), namespace="/costar", observe=None, robot_config=None,
                 lfd=None, tf_listener=None, use_default_pose=False, *args, **kwargs):
        super(CostarWorld, self).__init__(reward, *args, **kwargs)
        self.trajectories = {}
        self.objs = {}
        self.trajectory_data = {}
        self.models = {}
        self.traj_pubs = {}
        self.traj_data_pubs = {}
        self.skill_pubs = {}
        self.tf_listener = tf_listener
        if self.tf_listener is None:
            self.tf_listener = tf.TransformListener()
        self.observe = observe
        self.predicates = []
        self.namespace = namespace
        self.observation = {}
        self.object_by_class = {}
        self.objects_to_track = []
        if robot_config is None:
            raise RuntimeError("Must provide a robot config!")
        elif not isinstance(robot_config, list):
            robot_config = [robot_config]
        self.tf_pub = tf.TransformBroadcaster()
        for i, robot in enumerate(robot_config):
            name = robot["name"]
            if robot["q0"] is not None:
                s0 = CostarState(i, q=robot["q0"], dq=np.zeros_like(robot["q0"]))
            else:
                s0 = CostarState(self, i, None, None)
            self.addActor(CostarActor(robot, state=s0, dynamics=self.getT(robot), policy=NullPolicy()))
        self.gripper_status_listeners = {}
        self.plan_pub = rospy.Publisher(join(self.namespace, "plan"), PoseArray, queue_size=1000)
        self.base_link = self.actors[0].base_link
        if not lfd:
            self.lfd = LfD(self.actors[0].config)
        else:
            self.lfd = lfd

    def _update_environment(self):
        if self.observe is not None:
            self.observe(self)

    def addTrajectories(self, name, trajectories, data, objs):
        self.trajectories[name] = trajectories
        self.objs[name] = objs
        self._preprocessData(data)
        self.trajectory_data[name] = data
        if name not in self.traj_pubs:
            self.traj_pubs[name] = rospy.Publisher(join(self.namespace, "trajectories", name),
                                                   PoseArray, queue_size=1000)
            self.traj_data_pubs[name] = rospy.Publisher(join(self.namespace, "trajectory_data", name),
                                                        PoseArray, queue_size=1000)

    def addGripperStatusListener(self, robot_name, listener):
        self.gripper_status_listeners[robot_name] = listener

    def execute(self, path, actor_id=0):
        for node in path:
            if node.action is None:
                rospy.logwarn("Done execution.")
                break
            policy = node.action.policy
            condition = node.action.condition
            actor = self.actors[actor_id].state  # fixed typo: was `actor__id`
            while not condition(self, self.actors[actor_id].state):
                pass

    def getT(self, robot_config, *args, **kwargs):
        if self.observe is None:
            return SimulatedDynamics()
        else:
            return self.observe.dynamics(self, robot_config)

    def visualize(self):
        for name, trajs in self.trajectories.items():
            msg = PoseArray()
            msg.header.frame_id = self.base_link
            for traj in trajs:
                for _, pose, _, _ in traj:
                    msg.poses.append(pose)
            self.traj_pubs[name].publish(msg)
        for name, data in self.trajectory_data.items():
            msg = self._dataToPose(data)
            msg.header.frame_id = self.base_link
            self.traj_data_pubs[name].publish(msg)
        for actor in self.actors:
            msg = JointState(name=actor.joints, position=actor.state.q, velocity=actor.state.dq)

    def visualizePlan(self, plan):
        actor = self.actors[plan.actor_id]
        msg = PoseArray()
        msg.header.frame_id = actor.base_link
        if plan.actor_id != 0:
            raise NotImplementedError("kdl kinematics only created for actor 0 right now")
        for node in plan.nodes:
            for state, action in node.traj:
                T = self.lfd.kdl_kin.forward(state.q)
                msg.poses.append(pm.toMsg(pm.fromMatrix(T)))
        self.plan_pub.publish(msg)

    def debugLfD(self, *args, **kwargs):
        self.lfd.debug(self, *args, **kwargs)

    def update(self, maxt=0.01):
        self.observation = {}
        for obj in self.objects_to_track:
            try:
                self.tf_listener.waitForTransform(self.base_link, obj, rospy.Time.now(), rospy.Duration(maxt))
                trans, rot = self.tf_listener.lookupTransform(self.base_link, obj, rospy.Time(0.0))
                self.observation[obj] = pm.fromTf((trans, rot))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException,
                    tf2.TransformException) as e:
                self.observation[obj] = None
        for actor in self.actors:
            actor.state = actor.getState()
            actor.state.t = rospy.Time.now().to_sec()

    def getPose(self, obj):
        return self.observation[obj]

    def addObjects(self, objects):
        for obj_class, objs in objects.items():
            for obj_name in objs:
                self.addObject(obj_name, obj_class)

    def addObject(self, obj_name, obj_class, *args):
        if obj_class not in self.object_by_class:
            self.object_by_class[obj_class] = [obj_name]
        else:
            self.object_by_class[obj_class].append(obj_name)
        self.objects_to_track.append(obj_name)

    def hasObject(self, obj):
        return obj in self.objects_to_track

    def getObjects(self, obj_class):
        if obj_class in self.object_by_class:
            return self.object_by_class[obj_class]
        else:
            return None

    def _dataToPose(self, data):
        return PoseArray()

    def _preprocessData(self, data):
        pass

    def makeRewardFunction(self, name):
        if name in self.models.keys():
            model = self.models[name]
            self.reward = DemoReward(gmm=model)
        else:
            LOGGER.warning('model "%s" does not exist' % name)

    def zeroAction(self, actor_id):
        dq = np.zeros((self.actors[actor_id].dof,))
        return CostarAction(q=self.actors[actor_id].state.q, dq=dq)

    def fitTrajectories(self):
        self.models = self.lfd.train(self.trajectories, self.trajectory_data, self.objs)

    def getArgs(self):
        return self.object_classes

    def loadModels(self, project):
        self.lfd.load(project)

    def saveModels(self, project):
        self.lfd.save(project)
def extract_program(result: str, last_only=True):
    if last_only:
        return extract_program_simple(result, last_only=True)
    program = ""
    temp_lines = []
    start = False
    output_start = False
    first_snippet = True
    error_in_output = False
    for line in result.split("\n"):
        if line.startswith("```python"):
            if not last_only:
                # flush the previous snippet unless its output contained an error
                if not error_in_output:
                    cleaned_lines = remove_prints_and_comments(temp_lines)
                    cleaned_lines = ["from sympy import *" if "from sympy import" in line else line
                                     for line in cleaned_lines]
                    if cleaned_lines:
                        if not first_snippet:
                            program += "\n# \n"
                        else:
                            first_snippet = False
                        program += "\n".join(cleaned_lines) + "\n"
                temp_lines = []
                error_in_output = False
            else:
                program = ""
            start = True
            output_start = False
        elif line.startswith("```output"):
            output_start = True
            start = False
        elif line.startswith("```"):
            output_start = False
            start = False
        elif start:
            temp_lines.append(line)
        elif output_start and ("Error:" in line):
            error_in_output = True
    # flush the last snippet
    temp_lines = ["from sympy import *" if "from sympy import" in line else line for line in temp_lines]
    if (not error_in_output) and temp_lines:
        if not first_snippet:
            program += "\n# \n"
        program += "\n".join(temp_lines) + "\n"
    return program
def create_parameter(self, attr, shape, dtype, is_bias=False, default_initializer=None):
    mp_state = mixed_precision_global_state()
    is_half = ((isinstance(dtype, str) and dtype == "float16")
               or (isinstance(dtype, core.VarDesc.VarType) and dtype == core.VarDesc.VarType.FP16))
    if is_half and mp_state is not None:
        # under mixed precision, keep the master weights in fp32
        dtype = "float32"
    param = self._create_parameter(attr, shape, dtype, is_bias, default_initializer)
    if not is_half or mp_state is None:
        return param
    # create an fp16 copy of the parameter for the forward/backward pass
    param16 = self.main_program.current_block().create_var(name=param.name + ".fp16", dtype="float16",
                                                           type=param.type, persistable=False)
    self.append_op(type="cast", inputs={"X": [param]}, outputs={"Out": [param16]},
                   attrs={"in_dtype": param.dtype, "out_dtype": param16.dtype})
    return param16
def custom_name_func(func, param_num, param):
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
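A usage sketch with a hypothetical test case: passing `name_func` to `parameterized.expand` produces readable generated names such as `test_add_1_2_3` instead of the default `test_add_0`.

import unittest
from parameterized import parameterized

class ArithmeticTest(unittest.TestCase):

    @parameterized.expand([(1, 2, 3), (2, 2, 4)], name_func=custom_name_func)
    def test_add(self, a, b, expected):
        self.assertEqual(a + b, expected)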
class DifferentiableSGD:

    def __init__(self, module, lr=0.001):
        self.module = module
        self.lr = lr

    def step(self):
        memo = set()

        def update(module):
            for child in module.children():
                if child not in memo:
                    memo.add(child)
                    update(child)
            params = list(module.named_parameters())
            for name, param in params:
                if "." not in name:  # only parameters owned directly by this module
                    if param.grad is None:
                        continue
                    # keep the update differentiable: build a new parameter tensor
                    # instead of mutating the old one in place
                    new_param = param.add(param.grad, alpha=-self.lr)
                    del module._parameters[name]
                    setattr(module, name, new_param)
                    module._parameters[name] = new_param

        update(self.module)

    def zero_grad(self):
        for param in self.module.parameters():
            if param.grad is not None:
                param.grad.detach_()
                param.grad.zero_()
@pytest.fixture(scope="module")  # decorator restored; the "(scope='module')" residue implies a pytest fixture
def synaptic_hidden_reset_zero_instance():
    return snn.Synaptic(alpha=0.5, beta=0.5, init_hidden=True, reset_mechanism="zero")
def get_runtimes(configs):
    runtime_list = []
    for model_name, model_config in configs[YAMLKeyword.models].items():
        subgraphs = model_config[YAMLKeyword.subgraphs]
        default_rt = model_config[YAMLKeyword.runtime] if YAMLKeyword.runtime in model_config else RuntimeType.cpu
        for graph_name, graph_config in subgraphs.items():
            model_runtime = graph_config.get(YAMLKeyword.runtime, default_rt)
            runtime_list.append(model_runtime)
    return runtime_list
def test_D(g1):
    assert g1.D_v[0, 0].item() == 2
    assert g1.D_v[1, 1].item() == 1
    assert g1.D_v_neg_1[1, 1].item() == 1
    assert pytest.approx(g1.D_v_neg_1[3, 3].item()) == 0
    assert g1.D_v_neg_1_2[1, 1].item() == 1
    assert pytest.approx(g1.D_v_neg_1_2[3, 3].item()) == 0
    g1.add_extra_selfloop()
    assert g1.D_v[0, 0].item() == 3
    assert g1.D_v[1, 1].item() == 2
    assert g1.D_v_neg_1[1, 1].item() == 0.5
    assert g1.D_v_neg_1[3, 3].item() == 1
    assert pytest.approx(g1.D_v_neg_1_2[1, 1].item()) == 0.0
    assert g1.D_v_neg_1_2[3, 3].item() == 1
class MergerConfig(object):
    TYPE_NONE = 0
    TYPE_MASKED = 1
    TYPE_FACE_AVATAR = 2
    TYPE_IMAGE = 3
    TYPE_IMAGE_WITH_LANDMARKS = 4

    def __init__(self, type=0, sharpen_mode=0, blursharpen_amount=0, **kwargs):
        self.type = type
        self.sharpen_dict = {0: "None", 1: "box", 2: "gaussian"}
        self.sharpen_mode = sharpen_mode
        self.blursharpen_amount = blursharpen_amount

    def copy(self):
        return copy.copy(self)

    def ask_settings(self):
        s = "Choose sharpen mode: \n"
        for key in self.sharpen_dict.keys():
            s += f"({key}) {self.sharpen_dict[key]}\n"
        io.log_info(s)
        self.sharpen_mode = io.input_int("", 0, valid_list=self.sharpen_dict.keys(),
                                         help_message="Enhance details by applying sharpen filter.")
        if self.sharpen_mode != 0:
            self.blursharpen_amount = np.clip(io.input_int("Choose blur/sharpen amount", 0, add_info="-100..100"),
                                              -100, 100)

    def toggle_sharpen_mode(self):
        a = list(self.sharpen_dict.keys())
        self.sharpen_mode = a[(a.index(self.sharpen_mode) + 1) % len(a)]

    def add_blursharpen_amount(self, diff):
        self.blursharpen_amount = np.clip(self.blursharpen_amount + diff, -100, 100)

    def get_config(self):
        d = self.__dict__.copy()
        d.pop("type")
        return d

    def __eq__(self, other):
        if isinstance(other, MergerConfig):
            return (self.sharpen_mode == other.sharpen_mode
                    and self.blursharpen_amount == other.blursharpen_amount)
        return False

    def to_string(self, filename):
        r = ""
        r += f"sharpen_mode : {self.sharpen_dict[self.sharpen_mode]}\n"
        r += f"blursharpen_amount : {self.blursharpen_amount}\n"
        return r
class RunningMeter(object):
    """Exponential-moving-average meter for a scalar statistic."""

    def __init__(self, name, val=None, smooth=0.99):
        self._name = name
        self._sm = smooth
        self._val = val

    def __call__(self, value):
        self._val = value if self._val is None else value * (1 - self._sm) + self._val * self._sm

    def __str__(self):
        return f"{self._name}: {self._val:.4f}"

    def val(self):
        return self._val

    def name(self):
        return self._name
def get_runner_status(target_runners, token):
    offline_runners = []
    # the API URL was truncated in the source; the GitHub "list self-hosted
    # runners" endpoint is assumed here, with a placeholder repository
    cmd = (
        'curl -H "Accept: application/vnd.github+json" '
        f'-H "Authorization: Bearer {token}" '
        "https://api.github.com/repos/<owner>/<repo>/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        # join the runner names (joining the dicts themselves would raise a TypeError)
        failed = "\n".join(runner["name"] for runner in offline_runners)
        raise ValueError(f"The following runners are offline:\n{failed}")
class _NonLocalBlockND(nn.Module):

    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
        super(_NonLocalBlockND, self).__init__()
        assert dimension in [1, 2, 3]
        self.dimension = dimension
        self.sub_sample = sub_sample
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1
        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
            bn = nn.BatchNorm1d
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)
        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels))
            # zero-init so the block starts out as an identity mapping
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)
        self.concat_project = nn.Sequential(nn.Conv2d(self.inter_channels * 2, 1, 1, 1, 0, bias=False), nn.ReLU())
        if sub_sample:
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x, return_nl_map=False):
        batch_size = x.size(0)
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        # concatenation-based pairwise affinity
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, 1, -1)
        h = theta_x.size(2)
        w = phi_x.size(3)
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)
        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        f = self.concat_project(concat_feature)
        b, _, h, w = f.size()
        f = f.view(b, h, w)
        N = f.size(-1)
        f_div_C = f / N
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x  # residual connection
        if return_nl_map:
            return z, f_div_C
        return z
def compute_rouge_L(pred, refs, beta=1.2):
    prec = []
    rec = []
    for ref in refs:
        lcs = my_lcs(pred, ref)
        prec.append(lcs / float(len(pred)) if len(pred) != 0 else 0.0)
        rec.append(lcs / float(len(ref)) if len(ref) != 0 else 0.0)
    prec_max = max(prec)
    rec_max = max(rec)
    if prec_max != 0 and rec_max != 0:
        score = (1 + beta ** 2) * prec_max * rec_max / float(rec_max + beta ** 2 * prec_max)
    else:
        score = 0.0
    return score
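In formula terms, with precision P = LCS/len(pred) and recall R = LCS/len(ref) each maximised over the references, the returned score is the standard ROUGE-L F-measure:

    F_beta = (1 + beta^2) * P * R / (R + beta^2 * P)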
def test_guided_anchor():
    from mmdet.models import build_head
    device = "cuda" if torch.cuda.is_available() else "cpu"
    bbox_head = dict(
        type="GARetinaHead",
        num_classes=8,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        approx_anchor_generator=dict(type="AnchorGenerator", octave_base_scale=4, scales_per_octave=3,
                                     ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(type="AnchorGenerator", ratios=[1.0], scales=[4],
                                     strides=[8, 16, 32, 64, 128]))
    ga_retina_head = build_head(bbox_head)
    assert ga_retina_head.approx_anchor_generator is not None
    featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
    expected_approxs = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137], [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594], [-16.0, -16.0, 16.0, 16.0],
                      [-20.1587, -20.1587, 20.1587, 20.1587], [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274], [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274], [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188], [-32.0, -32.0, 32.0, 32.0],
                      [-40.3175, -40.3175, 40.3175, 40.3175], [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548], [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548], [-114.035, -57.0175, 114.035, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376], [-64.0, -64.0, 64.0, 64.0],
                      [-80.6349, -80.6349, 80.6349, 80.6349], [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097], [-57.0175, -114.035, 57.0175, 114.035],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097], [-228.0701, -114.035, 228.0701, 114.035],
                      [-287.3503, -143.6751, 287.3503, 143.6751], [-128.0, -128.0, 128.0, 128.0],
                      [-161.2699, -161.2699, 161.2699, 161.2699], [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193], [-114.035, -228.0701, 114.035, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193], [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503], [-256.0, -256.0, 256.0, 256.0],
                      [-322.5398, -322.5398, 322.5398, 322.5398], [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387], [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])]
    approxs = ga_retina_head.approx_anchor_generator.base_anchors
    for i, base_anchor in enumerate(approxs):
        assert base_anchor.allclose(expected_approxs[i])
    expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
    multi_level_valid_flags = ga_retina_head.approx_anchor_generator.valid_flags(featmap_sizes, (800, 1216), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    assert ga_retina_head.approx_anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
    squares = ga_retina_head.square_anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(squares) == 5
    expected_squares = [torch.Tensor([[-16.0, -16.0, 16.0, 16.0]]),
                        torch.Tensor([[-32.0, -32.0, 32.0, 32.0]]),
                        torch.Tensor([[-64.0, -64.0, 64.0, 64.0]]),
                        torch.Tensor([[-128.0, -128.0, 128.0, 128.0]]),
                        torch.Tensor([[-256.0, -256.0, 256.0, 256.0]])]
    squares = ga_retina_head.square_anchor_generator.base_anchors
    for i, base_anchor in enumerate(squares):
        assert base_anchor.allclose(expected_squares[i])
    assert ga_retina_head.square_anchor_generator.num_base_anchors == [1, 1, 1, 1, 1]
    anchors = ga_retina_head.square_anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 5
def tf_required(func):
    # the bare "(func)" residue is assumed to be functools.wraps, which
    # preserves the wrapped function's name used in the error message below
    @wraps(func)
    def wrapper(*args, **kwargs):
        if is_tf_available():
            return func(*args, **kwargs)
        else:
            raise ImportError(f"Method `{func.__name__}` requires TF.")
    return wrapper
def batch_norm(layer, b=lasagne.init.Constant(0.0), g=lasagne.init.Constant(1.0), **kwargs):
    # move the nonlinearity out of the wrapped layer and into the BN layer
    nonlinearity = getattr(layer, "nonlinearity", None)
    if nonlinearity is not None:
        layer.nonlinearity = lasagne.nonlinearities.identity
    else:
        nonlinearity = lasagne.nonlinearities.identity
    # the bias is redundant once batch normalization adds its own shift
    if hasattr(layer, "b"):
        del layer.params[layer.b]
        layer.b = None
    return BatchNormLayer(layer, b, g, nonlinearity=nonlinearity, **kwargs)
class BinaryNode(Node):
    arity = 2
    op = None

    def __init__(self, left, right):
        super().__init__()
        self.left = left
        self.right = right

    def __str__(self):
        return f"({self.left} {self.op} {self.right})"

    def to_str(self, namer, sort=False):
        left_name = self.left.to_str(namer, sort=sort)
        right_name = self.right.to_str(namer, sort=sort)
        if (not sort) or (left_name < right_name):
            return f"({left_name} {self.op} {right_name})"
        else:
            return f"({right_name} {self.op} {left_name})"

    def to_expr(self, namer=lambda x: x):
        left_val = self.left.to_expr(namer)
        right_val = self.right.to_expr(namer)
        return self.expr_op(left_val, right_val)

    def __len__(self):
        return len(self.left) + len(self.right)

    def __hash__(self):
        return hash(str(self))

    def __repr__(self):
        return f"{self.op}({self.left}, {self.right})"

    def get_vals(self):
        vals = []
        vals.extend(self.left.get_vals())
        vals.extend(self.right.get_vals())
        return vals
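The base class leaves `op` and `expr_op` to subclasses. A hypothetical concrete node (not part of the source) makes the contract visible:

import operator

class AddNode(BinaryNode):
    op = "+"
    expr_op = staticmethod(operator.add)  # consumed by to_expr

# str(AddNode(x, y)) renders "(x + y)"; to_expr evaluates left + right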
def test_modify_order_quantity_up():
    book, agent, orders = setup_book_with_orders(
        bids=[(100, [40, 10]), (200, [10, 30, 20, 10])],
        asks=[(300, [10, 50, 20]), (400, [40, 10]), (500, [20])],
    )
    modified_order = deepcopy(orders[0])
    modified_order.quantity = 70
    book.modify_order(orders[0], modified_order)
    # increasing the quantity loses time priority: the order moves to the back of its level
    assert book.get_l3_bid_data() == [(200, [10, 30, 20, 10]), (100, [10, 70])]
    assert len(agent.messages) == 1
    assert agent.messages[0][0] == 1
    assert isinstance(agent.messages[0][1], OrderModifiedMsg)
    assert agent.messages[0][1].new_order.agent_id == 1
    assert agent.messages[0][1].new_order.side == Side.BID
    assert agent.messages[0][1].new_order.limit_price == 100
    assert agent.messages[0][1].new_order.quantity == 70
@HEADS.register_module()  # decorator restored from the "_module()" residue; registry name assumed
class APCHead(BaseDecodeHead):

    def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs):
        super(APCHead, self).__init__(**kwargs)
        assert isinstance(pool_scales, (list, tuple))
        self.pool_scales = pool_scales
        self.fusion = fusion
        acm_modules = []
        for pool_scale in self.pool_scales:
            acm_modules.append(ACM(pool_scale, self.fusion, self.in_channels, self.channels,
                                   conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
        self.acm_modules = nn.ModuleList(acm_modules)
        self.bottleneck = ConvModule(self.in_channels + len(pool_scales) * self.channels, self.channels, 3,
                                     padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg,
                                     act_cfg=self.act_cfg)

    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        acm_outs = [x]
        for acm_module in self.acm_modules:
            acm_outs.append(acm_module(x))
        acm_outs = torch.cat(acm_outs, dim=1)
        output = self.bottleneck(acm_outs)
        output = self.cls_seg(output)
        return output
class DummyDataset(Dataset):

    def __init__(self, length):
        self.length = length
        self.shapes = np.random.random((length, 2))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.shapes[idx]

    def get_data_info(self, idx):
        return dict(width=self.shapes[idx][0], height=self.shapes[idx][1])
def main(argv):
    start_time = time.time()
    print("TF Version:", tf.__version__)
    with open(FLAGS.input + "train.pkl", "rb") as ftrain:
        train_cascade, train_global, train_label = pickle.load(ftrain)
    with open(FLAGS.input + "val.pkl", "rb") as fval:
        val_cascade, val_global, val_label = pickle.load(fval)
    with open(FLAGS.input + "test.pkl", "rb") as ftest:
        test_cascade, test_global, test_label = pickle.load(ftest)
    casflow_inputs = tf.keras.layers.Input(shape=(FLAGS.max_seq, FLAGS.emb_dim))
    bn_casflow_inputs = tf.keras.layers.BatchNormalization()(casflow_inputs)
    # node-level VAE
    node_emb = tf.keras.layers.Dense(FLAGS.emb_dim)(bn_casflow_inputs)
    node_mean = tf.keras.layers.Dense(FLAGS.z_dim)(node_emb)
    node_log_var = tf.keras.layers.Dense(FLAGS.z_dim)(node_emb)
    node_z = Sampling3D()((node_mean, node_log_var))
    node_rec = tf.keras.layers.Dense(FLAGS.z_dim)(node_z)
    node_rec = tf.keras.layers.Dense(FLAGS.emb_dim)(node_rec)
    # cascade-level VAE with normalizing-flow transformations
    cas_emb = tf.keras.layers.GRU(FLAGS.rnn_units)(node_z)
    cas_mean = tf.keras.layers.Dense(FLAGS.z_dim)(cas_emb)
    cas_log_var = tf.keras.layers.Dense(FLAGS.z_dim)(cas_emb)
    cas_z = Sampling2D()((cas_mean, cas_log_var))
    zk, logD_loss = nf_transformations(cas_z, FLAGS.z_dim, FLAGS.n_flows)
    cas_recon = tf.keras.layers.RepeatVector(FLAGS.max_seq)(zk)
    cas_recon = tf.keras.layers.GRU(FLAGS.rnn_units, return_sequences=True)(cas_recon)
    cas_recon = tf.keras.layers.Dense(FLAGS.z_dim)(cas_recon)
    gru_1 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(FLAGS.rnn_units * 2, return_sequences=True))(bn_casflow_inputs)
    gru_2 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(FLAGS.rnn_units))(gru_1)
    con = tf.keras.layers.Concatenate()([zk, gru_2])
    mlp_1 = tf.keras.layers.Dense(128, activation="relu")(con)
    mlp_2 = tf.keras.layers.Dense(64, activation="relu")(mlp_1)
    outputs = tf.keras.layers.Dense(1)(mlp_2)
    casflow = tf.keras.Model(inputs=casflow_inputs, outputs=outputs)
    # reconstruction + KL losses for both VAEs, plus the flow log-determinant
    node_ce_loss = tf.reduce_mean(tf.square(bn_casflow_inputs - node_rec))
    node_kl_loss = -0.5 * tf.reduce_mean(node_log_var - tf.square(node_mean) - tf.exp(node_log_var) + 1)
    casflow.add_loss(node_ce_loss)
    casflow.add_loss(node_kl_loss)
    cas_ce_loss = tf.reduce_mean(tf.square(node_z - cas_recon))
    cas_kl_loss = -0.5 * tf.reduce_mean(cas_log_var - tf.square(cas_mean) - tf.exp(cas_log_var) + 1)
    casflow.add_loss(cas_ce_loss)
    casflow.add_loss(cas_kl_loss)
    casflow.add_loss(-0.1 * tf.reduce_mean(logD_loss))
    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.lr)
    casflow.compile(loss="msle", optimizer=optimizer, metrics=["msle"])
    train_generator = Generator(train_cascade, train_global, train_label, FLAGS.b_size, FLAGS.max_seq)
    val_generator = Generator(val_cascade, val_global, val_label, FLAGS.b_size, FLAGS.max_seq, is_train=False)
    test_generator = Generator(test_cascade, test_global, test_label, FLAGS.b_size, FLAGS.max_seq, is_train=False)
    early_stop = tf.keras.callbacks.EarlyStopping(monitor="val_msle", patience=FLAGS.patience,
                                                  restore_best_weights=True)
    casflow.fit(train_generator, validation_data=val_generator, epochs=1000, verbose=FLAGS.verbose,
                callbacks=[early_stop])
    print("Training ended!")
    # clamp predictions and labels to >= 1 so log2 is well defined
    predictions = np.array([1 if pred < 1 else pred for pred in np.squeeze(casflow.predict(test_generator))])
    test_label = np.array([1 if label < 1 else label for label in test_label])
    report_msle = np.mean(np.square(np.log2(predictions) - np.log2(test_label)))
    report_mape = np.mean(np.abs(np.log2(predictions + 1) - np.log2(test_label + 1)) / np.log2(test_label + 2))
    print("Test MSLE: {:.4f}, MAPE: {:.4f}".format(report_msle, report_mape))
    print("Finished! Time used: {:.3f}mins.".format((time.time() - start_time) / 60))
class GraphSage(nn.Module):
    """
    Vanilla GraphSAGE Model
    Code partially from
    """

    def __init__(self, num_classes, enc):
        super(GraphSage, self).__init__()
        self.enc = enc
        self.xent = nn.CrossEntropyLoss()
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        embeds = self.enc(nodes)
        scores = self.weight.mm(embeds)
        return scores.t()

    def to_prob(self, nodes):
        pos_scores = torch.sigmoid(self.forward(nodes))
        return pos_scores

    def loss(self, nodes, labels):
        scores = self.forward(nodes)
        return self.xent(scores, labels.squeeze())
@dataclass  # decorator assumed: field()/list_field() defaults only take effect on a dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
def test_one_hot():
    from lasagne.utils import one_hot
    a = np.random.randint(0, 10, 20)
    b = np.zeros((a.size, a.max() + 1))
    b[np.arange(a.size), a] = 1
    result = one_hot(a).eval()
    assert (result == b).all()
class DenseReward(RewardFn):

    def __call__(self, state: State, action: chex.Array, next_state: State,
                 is_valid: bool, is_done: bool) -> float:
        del next_state, is_done  # unused
        _, item_id = action
        chosen_item_volume = item_volume(tree_slice(state.items, item_id))
        container_volume = state.container.volume()
        reward = chosen_item_volume / container_volume
        # invalid actions receive zero reward
        reward: float = jax.lax.select(is_valid, reward, jnp.array(0, float))
        return reward
def main(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--n-episodes", type=int, default=200)
    args = parser.parse_args(argv)
    checkpoint_path = pathlib.Path("pretrained") / "invariant_official.pkl"
    assert checkpoint_path.exists()
    with checkpoint_path.open("rb") as f:
        obj = pickle.load(f)
    if len(obj) == 1:
        solution_inst = obj[0]
    elif len(obj) == 2:
        solver, solution_inst = obj
        solution_inst.set_params(solver.result.xfavorite)
    else:
        raise ValueError
    results = []
    for n_noise_features in range(0, 30, 5):
        for shuffle in [True, False]:
            print(f"n_noise_features={n_noise_features!r}, shuffle={shuffle!r}")
            task = Task(render=False, n_noise_features=n_noise_features, shuffle_on_reset=shuffle,
                        env_seed=None, feature_seed=None)
            for episode_ix in range(args.n_episodes):
                reward = task.rollout(solution_inst)
                results.append({"n_noise_features": n_noise_features, "shuffle": shuffle,
                                "episode_ix": episode_ix, "reward": reward})
    results_df = pd.DataFrame(results)
    fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=300)
    sns.violinplot(data=results_df, x="n_noise_features", y="reward", hue="shuffle", split=True,
                   inner="quart", linewidth=1, palette="muted", ax=ax, scale="count")
    sns.despine(left=True)
    ax.set_ylim(0, 1000)
    ax.grid(True)
    fig.tight_layout()
    fig.savefig("invariant_model_noise.png")
def MyDataLoader(root, name, batch_size, num_workers=1, distributed=False, rank=0, world_size=None):
    print("----Loading dataset----")
    TRAIN_TRANSFORM_IMG = torchvision.transforms.Compose([
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomVerticalFlip(),
        torchvision.transforms.RandomRotation(degrees=(-25, 25)),
    ])
    VAL_TRANSFORM_IMG = torchvision.transforms.Compose([])  # no augmentation at validation time
    files = sorted(os.listdir(root))
    training = torch.load(root + "/" + files[0])
    validation = torch.load(root + "/" + files[1])
    train_dataset = MyDataset(training, transform=TRAIN_TRANSFORM_IMG)
    eval_dataset = MyDataset(validation, transform=VAL_TRANSFORM_IMG)
    if distributed:
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank,
                                           shuffle=True, drop_last=True)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank,
                                          shuffle=False, drop_last=True)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
                                                   num_workers=0, sampler=train_sampler)
        eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size, shuffle=False,
                                                  num_workers=0, sampler=eval_sampler)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                                   num_workers=num_workers)
        eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size, shuffle=False,
                                                  num_workers=num_workers)
    print("Dataset:", name)
    print("#Training images: ", len(train_dataset))
    print("#Validation images: ", len(eval_dataset))
    print("")
    return train_loader, eval_loader
def get_detection_dataset_dicts(dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None):
    assert len(dataset_names)
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
    for dataset_name, dicts in zip(dataset_names, dataset_dicts):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
    if proposal_files is not None:
        assert len(dataset_names) == len(proposal_files)
        dataset_dicts = [load_proposals_into_dataset(dataset_i_dicts, proposal_file)
                         for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)]
    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
    has_instances = "annotations" in dataset_dicts[0]
    # keep empty images for semantic-segmentation datasets
    if filter_empty and has_instances and "sem_seg_file_name" not in dataset_dicts[0]:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    if has_instances:
        try:
            class_names = MetadataCatalog.get(dataset_names[0]).thing_classes
            check_metadata_consistency("thing_classes", dataset_names)
            print_instances_class_histogram(dataset_dicts, class_names)
        except AttributeError:  # class names are not available for this dataset
            pass
    return dataset_dicts
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        ["--task", "sentence_prediction", data_dir, "--arch", arch, "--encoder-layers", "2",
         "--num-classes", str(num_classes), "--optimizer", "adam", "--lr", "0.0001",
         "--criterion", "sentence_prediction", "--max-tokens", "500", "--max-positions", "500",
         "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar",
         "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0"]
        + (extra_flags or []),
    )
    train.main(train_args)
def get_pytorch_sut(model, preprocessed_data_dir, performance_count, folds=1,
                    checkpoint_name="model_final_checkpoint"):
    return _3DUNET_PyTorch_SUT(model, preprocessed_data_dir, performance_count, folds, checkpoint_name)
class ResNet(nn.Module):

    def __init__(self, block, num_blocks, num_classes=10, device="cpu"):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # only the first block of each stage downsamples
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
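The usual CIFAR-style factory for this family, assuming the standard `BasicBlock` (expansion = 1) is defined alongside the class:

def ResNet18(num_classes=10):
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)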
@require_torch  # decorators restored from the "_torch _vision" residue
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property  # restored: the tests below access image_processor_dict as an attribute
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # single image
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (1, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))
        # batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (1, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (1, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape,
                         (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels,
                          self.image_processor_tester.crop_size["height"],
                          self.image_processor_tester.crop_size["width"]))
def create_1d_conv_core_model(input_shape, model_name="base_model", use_standard_max_pooling=False):
    inputs = tf.keras.Input(shape=input_shape, name="input")
    x = inputs
    x = tf.keras.layers.Conv1D(32, 24, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(l=0.0001))(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    x = tf.keras.layers.Conv1D(64, 16, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(l=0.0001))(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    x = tf.keras.layers.Conv1D(96, 8, activation="relu", kernel_regularizer=tf.keras.regularizers.l2(l=0.0001))(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    if use_standard_max_pooling:
        # explicit MaxPool1D + Reshape instead of GlobalMaxPool1D
        x = tf.keras.layers.MaxPool1D(pool_size=x.shape[1], padding="valid", data_format="channels_last",
                                      name="max_pooling1d")(x)
        x = tf.keras.layers.Reshape([x.shape[-1]], name="reshape_squeeze")(x)
    else:
        x = tf.keras.layers.GlobalMaxPool1D(data_format="channels_last", name="global_max_pooling1d")(x)
    return tf.keras.Model(inputs, x, name=model_name)
class DiTPipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod  # restored: the `cls` parameter implies a classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def tidy_sequential(model):
    # collapse nn.Sequential wrappers that contain exactly one child module
    for k, m in list(model.named_children()):
        if isinstance(m, nn.Sequential):
            if len(m) == 1:
                model._modules[k] = m[0]
        tidy_sequential(m)
def process_win_streak(data: pd.DataFrame) -> pd.DataFrame:
    if data["Streak"].count() > 0:
        # streak strings become signed integers: the length of the run,
        # negated when the string starts with "-"
        data["Streak2"] = data["Streak"].str.len()
        data.loc[data["Streak"].str[0] == "-", "Streak2"] = -data["Streak2"]
        data["Streak"] = data["Streak2"]
        data = data.drop(columns="Streak2")
    return data
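A tiny worked example with hypothetical streak strings (runs of "+"/"-", which is what the length-and-sign logic implies):

df = pd.DataFrame({"Streak": ["+++", "--"]})
print(process_win_streak(df)["Streak"].tolist())  # [3, -2]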
class TestJumanjiSpecsToDmEnvSpecs:

    def test_array(self) -> None:
        jumanji_spec = specs.Array((1, 2), jnp.int32)
        dm_env_spec = dm_env.specs.Array((1, 2), jnp.int32)
        converted_spec: dm_env.specs.Array = specs.jumanji_specs_to_dm_env_specs(jumanji_spec)
        assert type(converted_spec) == type(dm_env_spec)
        assert converted_spec.shape == dm_env_spec.shape
        assert converted_spec.dtype == dm_env_spec.dtype
        assert converted_spec.name == dm_env_spec.name

    def test_bounded_array(self) -> None:
        jumanji_spec = specs.BoundedArray((1, 2), jnp.float32, minimum=0.0, maximum=1.0)
        dm_env_spec = dm_env.specs.BoundedArray((1, 2), jnp.float32, minimum=0.0, maximum=1.0)
        converted_spec: dm_env.specs.BoundedArray = specs.jumanji_specs_to_dm_env_specs(jumanji_spec)
        assert type(converted_spec) == type(dm_env_spec)
        assert converted_spec.shape == dm_env_spec.shape
        assert converted_spec.dtype == dm_env_spec.dtype
        assert converted_spec.name == dm_env_spec.name
        assert converted_spec.minimum == dm_env_spec.minimum
        assert converted_spec.maximum == dm_env_spec.maximum

    def test_discrete_array(self) -> None:
        jumanji_spec = specs.DiscreteArray(num_values=5, dtype=jnp.int32)
        dm_env_spec = dm_env.specs.DiscreteArray(num_values=5, dtype=jnp.int32)
        converted_spec: dm_env.specs.DiscreteArray = specs.jumanji_specs_to_dm_env_specs(jumanji_spec)
        assert type(converted_spec) == type(dm_env_spec)
        assert converted_spec.shape == dm_env_spec.shape
        assert converted_spec.dtype == dm_env_spec.dtype
        assert converted_spec.name == dm_env_spec.name
        assert converted_spec.minimum == dm_env_spec.minimum
        assert converted_spec.maximum == dm_env_spec.maximum
        assert converted_spec.num_values == dm_env_spec.num_values

    def test_triply_nested_spec(self, triply_nested_spec: specs.Spec) -> None:
        converted_spec = specs.jumanji_specs_to_dm_env_specs(triply_nested_spec)
        assert isinstance(converted_spec, dict)
        assert isinstance(converted_spec["doubly_nested"], dict)
        assert isinstance(converted_spec["doubly_nested"]["singly_nested"], dict)
        assert isinstance(converted_spec["doubly_nested"]["singly_nested"]["array"], dm_env.specs.Array)
        assert isinstance(converted_spec["doubly_nested"]["singly_nested"]["bounded_array"],
                          dm_env.specs.BoundedArray)
        assert isinstance(converted_spec["doubly_nested"]["singly_nested"]["multi_discrete_array"],
                          dm_env.specs.BoundedArray)
        assert isinstance(converted_spec["doubly_nested"]["discrete_array"], dm_env.specs.DiscreteArray)
        assert isinstance(converted_spec["bounded_array"], dm_env.specs.BoundedArray)
        assert isinstance(converted_spec["discrete_array"], dm_env.specs.DiscreteArray)

    def test_mixed_spec(self, mixed_spec: specs.Spec) -> None:
        converted_spec = specs.jumanji_specs_to_dm_env_specs(mixed_spec)
        assert isinstance(converted_spec, dict)
        assert isinstance(converted_spec["singly_nested"], dict)
        assert_tree_with_leaves_of_type(converted_spec["singly_nested"], dm_env.specs.Array)
        assert not converted_spec["not_jumanji_type"]  # non-jumanji leaves convert to empty dicts
        assert mixed_spec["not_jumanji_type"]

    def test_not_jumanji_type_spec(self, not_jumanji_type_spec: specs.Spec) -> None:
        converted_spec = specs.jumanji_specs_to_dm_env_specs(not_jumanji_type_spec)
        assert isinstance(converted_spec, dict)
        assert converted_spec == {}
class HashEval:

    def __init__(self, test: Dict, queries: Dict, distance_function: Callable, verbose: bool = True,
                 threshold: int = 5,
                 search_method: str = "brute_force_cython" if not sys.platform == "win32" else "bktree",
                 num_dist_workers: int = cpu_count()) -> None:
        self.test = test  # the database to search against
        self.queries = queries
        self.distance_invoker = distance_function
        self.verbose = verbose
        self.threshold = threshold
        self.query_results_map = None
        self.num_dist_workers = num_dist_workers
        if search_method == "bktree":
            self._fetch_nearest_neighbors_bktree()
        elif search_method == "brute_force":
            self._fetch_nearest_neighbors_brute_force()
        else:
            self._fetch_nearest_neighbors_brute_force_cython()

    def _searcher(self, data_tuple) -> List:
        query_key, query_val, search_method_object, thresh = data_tuple
        res = search_method_object.search(query=query_val, tol=thresh)
        res = [i for i in res if i[0] != query_key]  # drop self-matches
        return res

    def _get_query_results(self, search_method_object: Union[BruteForce, BKTree]) -> None:
        args = list(zip(list(self.queries.keys()), list(self.queries.values()),
                        [search_method_object] * len(self.queries), [self.threshold] * len(self.queries)))
        result_map_list = parallelise(self._searcher, args, self.verbose, num_workers=self.num_dist_workers)
        result_map = dict(zip(list(self.queries.keys()), result_map_list))
        # sort each query's matches by ascending distance
        self.query_results_map = {k: [i for i in sorted(v, key=lambda tup: tup[1], reverse=False)]
                                  for k, v in result_map.items()}

    def _fetch_nearest_neighbors_brute_force(self) -> None:
        logger.info("Start: Retrieving duplicates using Brute force algorithm")
        brute_force = BruteForce(self.test, self.distance_invoker)
        self._get_query_results(brute_force)
        logger.info("End: Retrieving duplicates using Brute force algorithm")

    def _fetch_nearest_neighbors_brute_force_cython(self) -> None:
        logger.info("Start: Retrieving duplicates using Cython Brute force algorithm")
        brute_force_cython = BruteForceCython(self.test, self.distance_invoker)
        self._get_query_results(brute_force_cython)
        logger.info("End: Retrieving duplicates using Cython Brute force algorithm")

    def _fetch_nearest_neighbors_bktree(self) -> None:
        logger.info("Start: Retrieving duplicates using BKTree algorithm")
        built_tree = BKTree(self.test, self.distance_invoker)
        self._get_query_results(built_tree)
        logger.info("End: Retrieving duplicates using BKTree algorithm")

    def retrieve_results(self, scores: bool = False) -> Dict:
        if scores:
            return self.query_results_map
        return {k: [i[0] for i in v] for k, v in self.query_results_map.items()}
class NodeApplyModule(nn.Module): def __init__(self, in_feats, out_feats, activation): super(NodeApplyModule, self).__init__() self.linear = nn.Linear(in_feats, out_feats) self.activation = activation def forward(self, node): h = self.linear(node.data['h']) h = self.activation(h) return {'h': h}
def plan_and_preprocess(task_string, processes_lowres=8, processes_fullres=3, no_preprocessing=False): from nnunet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string) cropped_out_dir = join(cropped_output_dir, task_string) maybe_mkdir_p(preprocessing_output_dir_this_task_train) shutil.copy(join(cropped_out_dir, 'dataset_properties.pkl'), preprocessing_output_dir_this_task_train) shutil.copy(join(splitted_4d_output_dir, task_string, 'dataset.json'), preprocessing_output_dir_this_task_train) exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train) exp_planner.plan_experiment() if (not no_preprocessing): exp_planner.run_preprocessing((processes_lowres, processes_fullres)) exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train) exp_planner.plan_experiment() if (not no_preprocessing): exp_planner.run_preprocessing(processes_fullres) if (not no_preprocessing): p = Pool(processes_lowres) stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True) if (i.split('/')[(- 1)].find('stage') != (- 1))] for s in stages: print(s.split('/')[(- 1)]) list_of_npz_files = subfiles(s, True, None, '.npz', True) list_of_pkl_files = [(i[:(- 4)] + '.pkl') for i in list_of_npz_files] all_classes = [] for pk in list_of_pkl_files: with open(pk, 'rb') as f: props = pickle.load(f) all_classes_tmp = np.array(props['classes']) all_classes.append(all_classes_tmp[(all_classes_tmp >= 0)]) p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes)) p.close() p.join()
class RRCache(Cache): def __init__(self, maxsize, choice=random.choice, getsizeof=None): Cache.__init__(self, maxsize, getsizeof) self.__choice = choice def choice(self): return self.__choice def popitem(self): try: key = self.__choice(list(self)) except IndexError: raise KeyError(('%s is empty' % type(self).__name__)) from None else: return (key, self.pop(key))
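# Behavioral sketch (added example). Assumes the cachetools-style Cache base
# class, whose __setitem__ calls popitem() on overflow; a deterministic
# `choice` makes the random-replacement policy testable.
cache = RRCache(maxsize=2, choice=min)  # always evict the smallest key
cache[1] = 'a'
cache[2] = 'b'
cache[3] = 'c'  # overflow triggers popitem(), evicting key 1 under choice=min
assert (1 not in cache) and (cache[3] == 'c')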
def convert_conllu_to_json(conllu_sents): return [convert_col_sent_to_json(sent) for sent in conllu_sents]
@mark_materialize('core') class Where(TernaryOpBase): in_dtypes = [(DType.bool, i, i) for i in DTYPE_GEN_NON_BOOL] out_dtypes = [(i,) for i in DTYPE_GEN_NON_BOOL] def __init__(self): super().__init__() self.inp_ranks = [rank_all(), rank_all(), rank_all()] self.same_inp_dtypes = True def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]: tgt_shape = broadcast_shapes(*(ish.shape for ish in input_shapes)) dtype = input_shapes[1].dtype return [AbsTensor(tgt_shape, dtype)] def requires(self, input_shapes): return (broadcast_cons(*(ish.shape for ish in input_shapes)) + [z3.BoolVal((input_shapes[1].dtype == input_shapes[2].dtype))]) def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]: (x, y, z) = bcast_rand_ndims(3, out_abs_tensor[0].ndims) return [(x, DType.bool), (y, out_abs_tensor[0].dtype), (z, out_abs_tensor[0].dtype)]
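# Concrete semantics this abstract op models (added sketch, via NumPy, not from
# the source): out = where(cond, x, y) elementwise, with all three inputs
# broadcast together and x/y sharing a non-bool dtype.
import numpy as np
cond = np.array([[True], [False]])  # shape (2, 1)
x = np.array([1.0, 2.0, 3.0])       # shape (3,)
y = np.zeros((2, 3))                # shape (2, 3)
out = np.where(cond, x, y)
assert out.shape == np.broadcast_shapes(cond.shape, x.shape, y.shape) == (2, 3)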
@NECKS.register_module() class BFP(BaseModule): def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')): super(BFP, self).__init__(init_cfg) assert (refine_type in [None, 'conv', 'non_local']) self.in_channels = in_channels self.num_levels = num_levels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.refine_level = refine_level self.refine_type = refine_type assert (0 <= self.refine_level < self.num_levels) if (self.refine_type == 'conv'): self.refine = ConvModule(self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) elif (self.refine_type == 'non_local'): self.refine = NonLocal2d(self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) def forward(self, inputs): assert (len(inputs) == self.num_levels) feats = [] gather_size = inputs[self.refine_level].size()[2:] for i in range(self.num_levels): if (i < self.refine_level): gathered = F.adaptive_max_pool2d(inputs[i], output_size=gather_size) else: gathered = F.interpolate(inputs[i], size=gather_size, mode='nearest') feats.append(gathered) bsf = (sum(feats) / len(feats)) if (self.refine_type is not None): bsf = self.refine(bsf) outs = [] for i in range(self.num_levels): out_size = inputs[i].size()[2:] if (i < self.refine_level): residual = F.interpolate(bsf, size=out_size, mode='nearest') else: residual = F.adaptive_max_pool2d(bsf, output_size=out_size) outs.append((residual + inputs[i])) return tuple(outs)
class Preprocess_LC(): def __init__(self, data, mjd, error): self.N = len(mjd) self.m = np.mean(error) self.mjd = mjd self.data = data self.error = error def Preprocess(self): mjd_out = [] data_out = [] error_out = [] for i in range(len(self.data)): if ((self.error[i] < (3 * self.m)) and ((np.absolute((self.data[i] - np.mean(self.data))) / np.std(self.data)) < 5)): mjd_out.append(self.mjd[i]) data_out.append(self.data[i]) error_out.append(self.error[i]) data_out = np.asarray(data_out) mjd_out = np.asarray(mjd_out) error_out = np.asarray(error_out) return [data_out, mjd_out, error_out]
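# Added sketch (not from the source): the filter keeps a point only if its
# error is below 3x the mean error AND its value lies within 5 sigma of the
# series mean.
import numpy as np
rng = np.random.default_rng(0)
mjd = np.arange(100.0)
data = rng.normal(15.0, 0.1, size=100)
data[7] = 25.0                 # magnitude outlier beyond 5 sigma -> dropped
error = np.full(100, 0.01)
error[3] = 1.0                 # error above 3x the mean error -> dropped
clean_data, clean_mjd, clean_error = Preprocess_LC(data, mjd, error).Preprocess()
assert len(clean_data) == 98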
def print_library_summary(configs): library_name = configs[YAMLKeyword.library_name] title = 'Library' header = ['key', 'value'] data = list() data.append(['MACE Model Path', ('%s/%s/%s' % (BUILD_OUTPUT_DIR, library_name, MODEL_OUTPUT_DIR_NAME))]) if (configs[YAMLKeyword.model_graph_format] == ModelFormat.code): data.append(['MACE Model Header Path', ('%s/%s/%s' % (BUILD_OUTPUT_DIR, library_name, MODEL_HEADER_DIR_PATH))]) MaceLogger.summary(StringFormatter.table(header, data, title))
class StyleGANRunner(BaseGANRunner): def __init__(self, config, logger): super().__init__(config, logger) self.lod = getattr(self, 'lod', None) def build_models(self): super().build_models() self.g_smooth_img = self.config.modules['generator'].get('g_smooth_img', 10000) self.models['generator_smooth'] = deepcopy(self.models['generator']) def build_loss(self): super().build_loss() self.running_stats.add(f'Gs_beta', log_format='.4f', log_strategy='CURRENT') def train_step(self, data, **train_kwargs): G = self.get_module(self.models['generator']) D = self.get_module(self.models['discriminator']) Gs = self.get_module(self.models['generator_smooth']) G.synthesis.lod.data.fill_(self.lod) D.lod.data.fill_(self.lod) Gs.synthesis.lod.data.fill_(self.lod) self.set_model_requires_grad('discriminator', True) self.set_model_requires_grad('generator', False) d_loss = self.loss.d_loss(self, data) self.optimizers['discriminator'].zero_grad() d_loss.backward() self.optimizers['discriminator'].step() beta = (0.5 ** ((self.batch_size * self.world_size) / self.g_smooth_img)) self.running_stats.update({'Gs_beta': beta}) self.moving_average_model(model=self.models['generator'], avg_model=self.models['generator_smooth'], beta=beta) if ((self._iter % self.config.get('D_repeats', 1)) == 0): self.set_model_requires_grad('discriminator', False) self.set_model_requires_grad('generator', True) g_loss = self.loss.g_loss(self, data) self.optimizers['generator'].zero_grad() g_loss.backward() self.optimizers['generator'].step() def load(self, **kwargs): super().load(**kwargs) G = self.get_module(self.models['generator']) D = self.get_module(self.models['discriminator']) Gs = self.get_module(self.models['generator_smooth']) if kwargs['running_metadata']: lod = G.synthesis.lod.cpu().tolist() assert (lod == D.lod.cpu().tolist()) assert (lod == Gs.synthesis.lod.cpu().tolist()) self.lod = lod
class ConvBnAct(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='', skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.0): super(ConvBnAct, self).__init__() self.has_residual = (skip and (stride == 1) and (in_chs == out_chs)) self.drop_path_rate = drop_path_rate self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) self.bn1 = norm_layer(out_chs) self.act1 = act_layer(inplace=True) def feature_info(self, location): if (location == 'expansion'): info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) else: info = dict(module='', hook_type='', num_chs=self.conv.out_channels) return info def forward(self, x): shortcut = x x = self.conv(x) x = self.bn1(x) x = self.act1(x) if self.has_residual: if (self.drop_path_rate > 0.0): x = drop_path(x, self.drop_path_rate, self.training) x += shortcut return x
class ImageCoder(object): def __init__(self): self._sess = tf.Session() self._png_data = tf.placeholder(dtype=tf.string) image = tf.image.decode_png(self._png_data, channels=3) self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) self._decode_jpeg_data = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) cropped = tf.image.resize_images(self._decode_jpeg, [RESIZE_HEIGHT, RESIZE_WIDTH]) cropped = tf.cast(cropped, tf.uint8) self._recoded = tf.image.encode_jpeg(cropped, format='rgb', quality=100) def png_to_jpeg(self, image_data): return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data}) def resample_jpeg(self, image_data): image = self._sess.run(self._recoded, feed_dict={self._decode_jpeg_data: image_data}) return image
def main(): mode = (argv[2] if (len(argv) > 2) else 'direct') if (mode == 'direct'): words = read_words(argv[1]) find_translations(words) elif (mode == 'collect'): table = read_table(argv[1]) find_translations_to_table(table)
class PlayerActor(Actor): def __init__(self, terminalGraphics, state, name='P'): super(PlayerActor, self).__init__(state, name) self.impatience = 0 self._tg = terminalGraphics def chooseAction(self, world): idx = (self._tg.getChar() - 49) self._tg.stdscr.addstr((self._tg.bottom_row + 2), 0, ('Input command: %d' % idx)) if ((idx >= 0) and (idx < len(self.actions))): return self.actions[idx] else: return self.actions[0]
def _add_to_tfrecord(filename, tfrecord_writer, offset=0): with tf.gfile.Open(filename, 'rb') as f: data = pickle.load(f, encoding='latin1') images = data['data'] num_images = images.shape[0] images = images.reshape((num_images, 3, 32, 32)) labels = data['labels'] with tf.Graph().as_default(): image_placeholder = tf.placeholder(dtype=tf.uint8) encoded_image = tf.image.encode_png(image_placeholder) with tf.Session('') as sess: for j in range(num_images): sys.stdout.write(('\r>> Reading file [%s] image %d/%d' % (filename, ((offset + j) + 1), (offset + num_images)))) sys.stdout.flush() image = np.squeeze(images[j]).transpose((1, 2, 0)) label = labels[j] png_string = sess.run(encoded_image, feed_dict={image_placeholder: image}) example = dataset_utils.image_to_tfexample(png_string, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label) tfrecord_writer.write(example.SerializeToString()) return (offset + num_images)
@tf.RegisterShape('AuctionMatch') def _auction_match_shape(op): shape1 = op.inputs[0].get_shape().with_rank(3) shape2 = op.inputs[1].get_shape().with_rank(3) return [tf.TensorShape([shape1.dims[0], shape1.dims[1]]), tf.TensorShape([shape2.dims[0], shape2.dims[1]])]
def _convert_sumo_coord_to_car_coord(x_in_sumo_coord, y_in_sumo_coord, a_in_sumo_coord, car_length): a_in_car_coord = ((- a_in_sumo_coord) + 90.0) x_in_car_coord = (x_in_sumo_coord - ((math.cos(((a_in_car_coord / 180.0) * math.pi)) * car_length) / 2)) y_in_car_coord = (y_in_sumo_coord - ((math.sin(((a_in_car_coord / 180.0) * math.pi)) * car_length) / 2)) return (x_in_car_coord, y_in_car_coord, deal_with_phi(a_in_car_coord))
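# Worked check (added, not from the source): SUMO conventionally reports the
# front-bumper position with a heading measured clockwise from north, while
# the car frame wants the geometric center and a counterclockwise-from-east
# angle. `deal_with_phi` is assumed to wrap the angle without changing 90.0.
import math
x, y, a = _convert_sumo_coord_to_car_coord(10.0, 20.0, 0.0, car_length=4.0)
# heading north (0 deg in SUMO) -> 90 deg in the car frame; the center sits
# half a car length behind the front bumper, i.e. 2 m lower along y.
assert math.isclose(x, 10.0, abs_tol=1e-9) and math.isclose(y, 18.0)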
class SequentialAppendList(nn.Sequential): def __init__(self, *args): super(SequentialAppendList, self).__init__(*args) def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: for (i, module) in enumerate(self): if (i == 0): concat_list.append(module(x)) else: concat_list.append(module(concat_list[(- 1)])) x = torch.cat(concat_list, dim=1) return x
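# Usage sketch (added): each submodule consumes the previous output, every
# intermediate is kept, and the final tensor is the channel-wise concatenation
# (OSA-style aggregation).
import torch
import torch.nn as nn
block = SequentialAppendList(nn.Conv2d(8, 16, 3, padding=1), nn.Conv2d(16, 16, 3, padding=1))
out = block(torch.randn(1, 8, 32, 32), concat_list=[])
assert out.shape == (1, 32, 32, 32)  # 16 + 16 concatenated channels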
def build_vocab(data_path, data_name, caption_file, threshold): counter = Counter() for path in caption_file[data_name]: full_path = os.path.join(os.path.join(data_path, data_name), path) captions = from_txt(full_path) for (i, caption) in enumerate(captions): tokens = nltk.tokenize.word_tokenize(caption.lower().decode('utf-8')) counter.update(tokens) if ((i % 1000) == 0): print(('[%d/%d] tokenized the captions.' % (i, len(captions)))) words = [word for (word, cnt) in counter.items() if (cnt >= threshold)] vocab = Vocabulary() vocab.add_word('<pad>') vocab.add_word('<start>') vocab.add_word('<end>') vocab.add_word('<unk>') for (i, word) in enumerate(words): vocab.add_word(word) return vocab
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False): size = max((v.size(0) for v in values)) res = values[0].new(len(values), size).fill_(pad_idx) def copy_tensor(src, dst): assert (dst.numel() == src.numel()) if move_eos_to_beginning: dst[0] = eos_idx dst[1:] = src[:(- 1)] else: dst.copy_(src) for (i, v) in enumerate(values): copy_tensor(v, (res[i][(size - len(v)):] if left_pad else res[i][:len(v)])) return res
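# Padding sketch (added): variable-length sequences packed into one batch
# tensor, right-padded by default or left-padded on request.
import torch
seqs = [torch.tensor([4, 5, 2]), torch.tensor([6, 2])]
assert collate_tokens(seqs, pad_idx=1).tolist() == [[4, 5, 2], [6, 2, 1]]
assert collate_tokens(seqs, pad_idx=1, left_pad=True).tolist() == [[4, 5, 2], [1, 6, 2]]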
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--lr', type=float, default=0.0002) parser.add_argument('--beta1', type=float, default=0.5) parser.add_argument('--beta2', type=float, default=0.999) parser.add_argument('--lambda1', type=int, default=100) parser.add_argument('--batch_size', type=int, default=16) parser.add_argument('--max_epochs', type=int, default=100) parser.add_argument('--cuda', default=True) parser.add_argument('--dataset_dir', type=str, default='train/disentanglement/emotion_length/M030/') parser.add_argument('--model_dir', type=str, default='train/disentanglement/model_M030/') parser.add_argument('--image_dir', type=str, default='train/disentanglement/image_M030/') parser.add_argument('--log_dir', type=str, default='train/disentanglement/log_M030/') parser.add_argument('--device_ids', type=str, default='0') parser.add_argument('--triplet_margin', type=int, default=1) parser.add_argument('--triplet_weight', type=int, default=10) parser.add_argument('--pretrain', type=bool, default=True) parser.add_argument('--resume', type=bool, default=False) parser.add_argument('--resume_dir', type=str, default='/media/thea/Data/New_exp/3_intensity_M030/SER_intensity_3/model/81_pretrain.pth') parser.add_argument('--num_thread', type=int, default=0) parser.add_argument('--weight_decay', type=float, default=0.0004) parser.add_argument('--use_triplet', type=bool, default=False) parser.add_argument('--atpretrained_dir', type=str, default='train/disentanglement/atnet_lstm_18.pth') parser.add_argument('--serpretrained_dir', type=str, default='train/emotion_pretrain/model_M030/SER_99.pkl') parser.add_argument('--pretrained_epoch', type=int) parser.add_argument('--start_epoch', type=int, default=0, help='start from 0') parser.add_argument('--rnn', type=bool, default=True) return parser.parse_args()
@register_model_architecture('lra', 'flash_lra_imdb') def flash_lra_imdb(args): args.apply_bert_init = getattr(args, 'apply_bert_init', False) args.layer_type = getattr(args, 'layer_type', 'flash') args.encoder_hidden_dim = getattr(args, 'encoder_hidden_dim', 256) args.z_dim = getattr(args, 'z_dim', 64) args.encoder_layers = getattr(args, 'encoder_layers', 4) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 128) args.max_positions = getattr(args, 'max_positions', 4002) base_architecture(args)
class MaxCounter(): __slots__ = ('_c', '_max_element') def __init__(self, it=None): self._c = collections.Counter(it) if self._c: self._max_element = max(self._c) else: self._max_element = (- float('inf')) def copy(self): new = object.__new__(MaxCounter) new._max_element = self._max_element new._c = self._c.copy() return new def discard(self, x): cnt = self._c[x] if (cnt == 0): return if (cnt <= 1): del self._c[x] if (x == self._max_element): try: self._max_element = max(self._c) except ValueError: self._max_element = (- float('inf')) else: self._c[x] = (cnt - 1) def add(self, x): self._c[x] += 1 self._max_element = max(self._max_element, x) def max(self): return self._max_element
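# Behavioral sketch (added): a multiset with O(1) max tracking on add; a
# rescan happens only when the last copy of the current maximum is discarded.
mc = MaxCounter([3, 1, 3])
assert mc.max() == 3
mc.discard(3)          # one copy of 3 remains
assert mc.max() == 3
mc.discard(3)          # maximum falls back to 1
assert mc.max() == 1
mc.add(7)
assert mc.max() == 7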
def convert(src, dst, depth): if (depth not in arch_settings): raise ValueError('Only support ResNet-50 and ResNet-101 currently') block_nums = arch_settings[depth] caffe_model = load(src, encoding='latin1') blobs = (caffe_model['blobs'] if ('blobs' in caffe_model) else caffe_model) state_dict = OrderedDict() converted_names = set() convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names) convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names) for i in range(1, (len(block_nums) + 1)): for j in range(block_nums[(i - 1)]): if (j == 0): convert_conv_fc(blobs, state_dict, f'res{(i + 1)}_{j}_branch1', f'layer{i}.{j}.downsample.0', converted_names) convert_bn(blobs, state_dict, f'res{(i + 1)}_{j}_branch1_bn', f'layer{i}.{j}.downsample.1', converted_names) for (k, letter) in enumerate(['a', 'b', 'c']): convert_conv_fc(blobs, state_dict, f'res{(i + 1)}_{j}_branch2{letter}', f'layer{i}.{j}.conv{(k + 1)}', converted_names) convert_bn(blobs, state_dict, f'res{(i + 1)}_{j}_branch2{letter}_bn', f'layer{i}.{j}.bn{(k + 1)}', converted_names) for key in blobs: if (key not in converted_names): print(f'Not Convert: {key}') checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst)
def normalityTestF(resid, k_error, t_error): k = kurtosis(resid) t = skew(resid) if ((k > ((- 1) * k_error)) and (k < k_error) and (t > ((- 1) * t_error)) and (t < t_error)): return True return False
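# Added check: with scipy's default (Fisher) definitions, a normal sample has
# kurtosis ~= 0 and skew ~= 0, so it passes a symmetric +/-0.5 window, while a
# strongly skewed exponential sample does not.
import numpy as np
from scipy.stats import kurtosis, skew
rng = np.random.default_rng(0)
assert normalityTestF(rng.normal(size=10000), k_error=0.5, t_error=0.5)
assert not normalityTestF(rng.exponential(size=10000), k_error=0.5, t_error=0.5)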
def load_single_genre_data(directory, filename_template, genre, filename_test_template=None): lambda_concepts = (lambda d: {'premise': d[0], 'hypothesis': d[1], 'label': d[2], 'premise_concepts': d[3], 'hypothesis_concepts': d[4]}) lambda_no_concepts = (lambda d: {'premise': d[0], 'hypothesis': d[1], 'label': d[2], 'premise_concepts': [], 'hypothesis_concepts': []}) filename = format_processed_filename(directory, filename_template, genre=genre) (data_train, data_dev) = load_pickle(filename) if (len(data_train) > 3): tuple_to_dict = lambda_concepts else: tuple_to_dict = lambda_no_concepts data_train = tuple_to_dict(data_train) data_dev = tuple_to_dict(data_dev) data_test = None if (filename_test_template is not None): filename_test = format_processed_filename(directory, filename_test_template, genre=genre) data_test = load_pickle(filename_test) if (len(data_test) <= 3): tuple_to_dict = lambda_no_concepts data_test = tuple_to_dict(data_test) if (filename_test_template is None): return (data_train, data_dev) else: return (data_train, data_dev, data_test)
class Tensorboard(EventStreamer, EventSink): folder_name = 'tensorboard' def __init__(self, dataroot): from tensorboardX import SummaryWriter self.writer = SummaryWriter(os.path.join(dataroot, self.folder_name)) self.absolute_iteration_counters = {} def _add_row(self, key, data, dtype, iteration): if dtype.startswith('scalar/'): for (subkey, value) in data.items(): self.writer.add_scalar(('%s/%s' % (key, subkey)), value, iteration) elif dtype.startswith('weight/'): for (subkey, value) in data.items(): self.writer.add_histogram(('%s/%s' % (key, subkey)), value, iteration, 'auto') elif (dtype == 'blob'): for (subkey, value) in data.items(): if (value['dtype'] == 'gray'): self.writer.add_image(('%s/%s' % (key, subkey)), np.repeat(np.expand_dims(value['data'], axis=2), 3, axis=2), iteration) if ((value['dtype'] == 'rgb') or (value['dtype'] == 'rgba')): self.writer.add_image(('%s/%s' % (key, subkey)), value['data'], iteration) def add_row(self, epoch, timestamp, relative_iteration, epoch_size, key, data, dtype): if (epoch is None): iteration = 0 elif (relative_iteration is None): iteration = epoch else: iteration = (self.absolute_iteration_counters.get(key, 0) + relative_iteration) self._add_row(key, data, dtype, iteration) def register_epoch_data(self, epoch, data, consts): for (key, item) in data.items(): if (item['relative_iteration'] is None): continue if (key not in self.absolute_iteration_counters): self.absolute_iteration_counters[key] = 0 self.absolute_iteration_counters[key] += item['epoch_size'] if (item['dtype'] in {'scalar/loss', 'scalar/score'}): for (subkey, value) in item['data'].items(): self.writer.add_scalar(('%s/%s_avg' % (key, subkey)), np.mean(value), epoch) elif (item['dtype'] == 'scalar/time'): for (subkey, value) in item['data'].items(): self.writer.add_scalar(('%s/%s_sum' % (key, subkey)), np.sum(value), epoch) def load_epochs_data(self, epochs, consts): for epoch in epochs: for (key, item) in epoch.items(): if (item['relative_iteration'] is None): continue if (key not in self.absolute_iteration_counters): self.absolute_iteration_counters[key] = 0 self.absolute_iteration_counters[key] += item['epoch_size'] return self
def get_class_weights(dataset: WideDeepDataset) -> Tuple[(np.ndarray, int, int)]: weights = (1 / np.unique(dataset.Y, return_counts=True)[1]) minor_class_count = min(np.unique(dataset.Y, return_counts=True)[1]) num_classes = len(np.unique(dataset.Y)) return (weights, minor_class_count, num_classes)
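# Added sketch with a stand-in for WideDeepDataset (only the `.Y` attribute is
# used at runtime): inverse-frequency weights for an imbalanced 2-class problem.
import numpy as np
class _FakeDataset:
    Y = np.array([0, 0, 0, 1])
weights, minor_count, n_classes = get_class_weights(_FakeDataset())
assert np.allclose(weights, [1 / 3, 1.0]) and minor_count == 1 and n_classes == 2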
class Weather(object): def __init__(self, weather): self.weather = weather self._sun = Sun(weather.sun_azimuth_angle, weather.sun_altitude_angle) self._storm = Storm(weather.precipitation) def tick(self, delta_seconds): self._sun.tick(delta_seconds) self._storm.tick(delta_seconds) self.weather.cloudiness = self._storm.clouds self.weather.precipitation = self._storm.rain self.weather.precipitation_deposits = self._storm.puddles self.weather.wind_intensity = self._storm.wind self.weather.fog_density = self._storm.fog self.weather.wetness = self._storm.wetness self.weather.sun_azimuth_angle = self._sun.azimuth self.weather.sun_altitude_angle = self._sun.altitude def __str__(self): return ('%s %s' % (self._sun, self._storm))
class AutoModelForDepthEstimation(_BaseAutoModelClass): _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
@DENSEPOSE_PREDICTOR_REGISTRY.register() class DensePoseChartPredictor(nn.Module): def __init__(self, cfg: CfgNode, input_channels: int): super().__init__() dim_in = input_channels n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS dim_out_patches = (cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1) kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL self.ann_index_lowres = ConvTranspose2d(dim_in, n_segm_chan, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1))) self.index_uv_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1))) self.u_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1))) self.v_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1))) self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE initialize_module_params(self) def interp2d(self, tensor_nchw: torch.Tensor): return interpolate(tensor_nchw, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) def forward(self, head_outputs: torch.Tensor): return DensePoseChartPredictorOutput(coarse_segm=self.interp2d(self.ann_index_lowres(head_outputs)), fine_segm=self.interp2d(self.index_uv_lowres(head_outputs)), u=self.interp2d(self.u_lowres(head_outputs)), v=self.interp2d(self.v_lowres(head_outputs)))
def modelA(): model = Sequential() model.add(Conv2D(64, (5, 5), padding='valid', input_shape=(gv.IMAGE_ROWS, gv.IMAGE_COLS, gv.NUM_CHANNELS))) model.add(Activation('relu')) model.add(Conv2D(64, (5, 5))) model.add(Activation('relu')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(gv.NUM_CLASSES)) return model
class LeNetBase(nn.Module): def __init__(self): super(LeNetBase, self).__init__() self.conv_params = nn.Sequential(nn.Conv2d(1, 20, kernel_size=5), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(20, 50, kernel_size=5), nn.Dropout2d(p=0.5), nn.MaxPool2d(2), nn.ReLU()) self.in_features = ((50 * 4) * 4) def forward(self, x): x = self.conv_params(x) x = x.view(x.size(0), (- 1)) return x
class GaussianCNNBaseline(Baseline): def __init__(self, env_spec, subsample_factor=1.0, regressor_args=None, name='GaussianCNNBaseline'): if ((not isinstance(env_spec.observation_space, akro.Box)) or (not (len(env_spec.observation_space.shape) in (2, 3)))): raise ValueError('{} can only process 2D, 3D akro.Image or akro.Box observations, but received an env_spec with observation_space of type {} and shape {}'.format(type(self).__name__, type(env_spec.observation_space).__name__, env_spec.observation_space.shape)) super().__init__(env_spec) if (regressor_args is None): regressor_args = dict() self._regressor = GaussianCNNRegressor(input_shape=env_spec.observation_space.shape, output_dim=1, subsample_factor=subsample_factor, name=name, **regressor_args) self.name = name self.env_spec = env_spec def fit(self, paths): observations = np.concatenate([p['observations'] for p in paths]) if isinstance(self.env_spec.observation_space, akro.Image): observations = normalize_pixel_batch(observations) returns = np.concatenate([p['returns'] for p in paths]) self._regressor.fit(observations, returns.reshape(((- 1), 1))) def predict(self, path): observations = path['observations'] if isinstance(self.env_spec.observation_space, akro.Image): observations = normalize_pixel_batch(observations) return self._regressor.predict(observations).flatten() def get_param_values(self): return self._regressor.get_param_values() def set_param_values(self, flattened_params): self._regressor.set_param_values(flattened_params) def get_params_internal(self): return self._regressor.get_params_internal()
def get_scene_layout(carla_map): def _lateral_shift(transform, shift): transform.rotation.yaw += 90 return (transform.location + (shift * transform.get_forward_vector())) topology = [x[0] for x in carla_map.get_topology()] topology = sorted(topology, key=(lambda w: w.transform.location.z)) map_dict = dict() precision = 0.05 for waypoint in topology: waypoints = [waypoint] nxt = waypoint.next(precision) if (len(nxt) > 0): nxt = nxt[0] while (nxt.road_id == waypoint.road_id): waypoints.append(nxt) nxt = nxt.next(precision) if (len(nxt) > 0): nxt = nxt[0] else: break left_marking = [_lateral_shift(w.transform, ((- w.lane_width) * 0.5)) for w in waypoints] right_marking = [_lateral_shift(w.transform, (w.lane_width * 0.5)) for w in waypoints] lane = {'waypoints': waypoints, 'left_marking': left_marking, 'right_marking': right_marking} if (map_dict.get(waypoint.road_id) is None): map_dict[waypoint.road_id] = {} map_dict[waypoint.road_id][waypoint.lane_id] = lane waypoints_graph = dict() for road_key in map_dict: for lane_key in map_dict[road_key]: lane = map_dict[road_key][lane_key] for i in range(0, len(lane['waypoints'])): next_ids = [w.id for w in lane['waypoints'][(i + 1):len(lane['waypoints'])]] left_lane_key = ((lane_key - 1) if ((lane_key - 1) != 0) else (lane_key - 2)) right_lane_key = ((lane_key + 1) if ((lane_key + 1) != 0) else (lane_key + 2)) left_lane_waypoint_id = (- 1) if (left_lane_key in map_dict[road_key]): left_lane_waypoints = map_dict[road_key][left_lane_key]['waypoints'] if (i < len(left_lane_waypoints)): left_lane_waypoint_id = left_lane_waypoints[i].id right_lane_waypoint_id = (- 1) if (right_lane_key in map_dict[road_key]): right_lane_waypoints = map_dict[road_key][right_lane_key]['waypoints'] if (i < len(right_lane_waypoints)): right_lane_waypoint_id = right_lane_waypoints[i].id lm = carla_map.transform_to_geolocation(lane['left_marking'][i]) rm = carla_map.transform_to_geolocation(lane['right_marking'][i]) wl = carla_map.transform_to_geolocation(lane['waypoints'][i].transform.location) wo = lane['waypoints'][i].transform.rotation waypoint_dict = {'road_id': road_key, 'lane_id': lane_key, 'position': [wl.latitude, wl.longitude, wl.altitude], 'orientation': [wo.roll, wo.pitch, wo.yaw], 'left_margin_position': [lm.latitude, lm.longitude, lm.altitude], 'right_margin_position': [rm.latitude, rm.longitude, rm.altitude], 'next_waypoints_ids': next_ids, 'left_lane_waypoint_id': left_lane_waypoint_id, 'right_lane_waypoint_id': right_lane_waypoint_id} waypoints_graph[map_dict[road_key][lane_key]['waypoints'][i].id] = waypoint_dict return waypoints_graph
def get_world_size(): if (torch.distributed.is_available() and torch.distributed.is_initialized()): world_size = torch.distributed.get_world_size() else: world_size = 1 return world_size
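# Typical use (added sketch): recover the global effective batch size (or
# scale a learning rate) regardless of whether torch.distributed is initialized.
per_process_batch_size = 32
global_batch_size = per_process_batch_size * get_world_size()  # 32 when single-process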
class InceptionResNetV2(nn.Module): def __init__(self, num_classes=1001): super(InceptionResNetV2, self).__init__() self.input_space = None self.input_size = (299, 299, 3) self.mean = None self.std = None self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2) self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b() self.repeat = nn.Sequential(Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17)) self.mixed_6a = Mixed_6a() self.repeat_1 = nn.Sequential(Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1), Block17(scale=0.1)) self.mixed_7a = Mixed_7a() self.repeat_2 = nn.Sequential(Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2), Block8(scale=0.2)) self.block8 = Block8(noReLU=True) self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1) self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False) self.classif = nn.Linear(1536, num_classes) def forward(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) x = self.avgpool_1a(x) x = x.view(x.size(0), (- 1)) x = self.classif(x) return x
def mnasnet0_5(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> MNASNet: print('Converting MNASNet 0.5 to {} mode'.format(MODE_STRING)) return create_torchvision_biomodel(models.mnasnet0_5, MODE, layer_config, pretrained, progress, num_classes)
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() last_checkpoint = None if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)): last_checkpoint = get_last_checkpoint(training_args.output_dir) if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)): raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.') elif (last_checkpoint is not None): logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)) logger.warning(f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}') if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s', training_args) set_seed(training_args.seed) raw_datasets = DatasetDict() if training_args.do_train: raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=data_args.use_auth_token) if (data_args.audio_column_name not in raw_datasets['train'].column_names): raise ValueError(f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(raw_datasets['train'].column_names)}.") if (data_args.text_column_name not in raw_datasets['train'].column_names): raise ValueError(f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. Make sure to set `--text_column_name` to the correct text column - one of {', '.join(raw_datasets['train'].column_names)}.") if (data_args.max_train_samples is not None): raw_datasets['train'] = raw_datasets['train'].select(range(data_args.max_train_samples)) if training_args.do_eval: raw_datasets['eval'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=data_args.use_auth_token) if (data_args.max_eval_samples is not None): raw_datasets['eval'] = raw_datasets['eval'].select(range(data_args.max_eval_samples)) chars_to_ignore_regex = (f"[{''.join(data_args.chars_to_ignore)}]" if (data_args.chars_to_ignore is not None) else None) text_column_name = data_args.text_column_name def remove_special_characters(batch): if (chars_to_ignore_regex is not None): batch['target_text'] = (re.sub(chars_to_ignore_regex, '', batch[text_column_name]).lower() + ' ') else: batch['target_text'] = (batch[text_column_name].lower() + ' ') return batch with training_args.main_process_first(desc='dataset map special characters removal'): raw_datasets = raw_datasets.map(remove_special_characters, remove_columns=[text_column_name], desc='remove special characters from datasets') word_delimiter_token = data_args.word_delimiter_token unk_token = data_args.unk_token pad_token = data_args.pad_token config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token) tokenizer_name_or_path = model_args.tokenizer_name_or_path tokenizer_kwargs = {} if (tokenizer_name_or_path is None): tokenizer_name_or_path = training_args.output_dir vocab_file = os.path.join(tokenizer_name_or_path, 'vocab.json') with training_args.main_process_first(): if (training_args.overwrite_output_dir and os.path.isfile(vocab_file)): os.remove(vocab_file) with training_args.main_process_first(desc='dataset map vocabulary creation'): if (not os.path.isfile(vocab_file)): os.makedirs(tokenizer_name_or_path, exist_ok=True) vocab_dict = create_vocabulary_from_data(raw_datasets, word_delimiter_token=word_delimiter_token, unk_token=unk_token, pad_token=pad_token) with open(vocab_file, 'w') as file: json.dump(vocab_dict, file) tokenizer_kwargs = {'config': (config if (config.tokenizer_class is not None) else None), 'tokenizer_type': (config.model_type if (config.tokenizer_class is None) else None), 'unk_token': unk_token, 'pad_token': pad_token, 'word_delimiter_token': word_delimiter_token} tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_auth_token=data_args.use_auth_token, **tokenizer_kwargs) feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token) config.update({'feat_proj_dropout': model_args.feat_proj_dropout, 'attention_dropout': model_args.attention_dropout, 'hidden_dropout': model_args.hidden_dropout, 'final_dropout': model_args.final_dropout, 'mask_time_prob': model_args.mask_time_prob, 'mask_time_length': model_args.mask_time_length, 'mask_feature_prob': model_args.mask_feature_prob, 'mask_feature_length': model_args.mask_feature_length, 'gradient_checkpointing': training_args.gradient_checkpointing, 'layerdrop': model_args.layerdrop, 'ctc_loss_reduction': model_args.ctc_loss_reduction, 'pad_token_id': tokenizer.pad_token_id, 'vocab_size': len(tokenizer), 'activation_dropout': model_args.activation_dropout}) model = AutoModelForCTC.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, use_auth_token=data_args.use_auth_token) if model_args.freeze_feature_encoder: model.freeze_feature_encoder() dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate if (dataset_sampling_rate != feature_extractor.sampling_rate): raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_input_length = (data_args.max_duration_in_seconds * feature_extractor.sampling_rate) min_input_length = (data_args.min_duration_in_seconds * feature_extractor.sampling_rate) audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers phoneme_language = data_args.phoneme_language def prepare_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch['input_values'] = inputs.input_values[0] batch['input_length'] = len(batch['input_values']) additional_kwargs = {} if (phoneme_language is not None): additional_kwargs['phonemizer_lang'] = phoneme_language batch['labels'] = tokenizer(batch['target_text'], **additional_kwargs).input_ids return batch with training_args.main_process_first(desc='dataset map preprocessing'): vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, desc='preprocess datasets') def is_audio_in_length_range(length): return ((length > min_input_length) and (length < max_input_length)) vectorized_datasets = vectorized_datasets.filter(is_audio_in_length_range, num_proc=num_workers, input_columns=['input_length']) eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics} if data_args.preprocessing_only: logger.info(f'Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}') return def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=(- 1)) pred.label_ids[(pred.label_ids == (- 100))] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(pred_ids) label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) metrics = {k: v.compute(predictions=pred_str, references=label_str) for (k, v) in eval_metrics.items()} return metrics if is_main_process(training_args.local_rank): feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) try: processor = AutoProcessor.from_pretrained(training_args.output_dir) except (OSError, KeyError): warnings.warn("Loading a processor from a feature extractor config that does not include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following attribute to your `preprocessor_config.json` file to suppress this warning: `'processor_class': 'Wav2Vec2Processor'`", FutureWarning) processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir) data_collator = DataCollatorCTCWithPadding(processor=processor) decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm]) decay_parameters = [name for name in decay_parameters if ('bias' not in name)] optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (n in decay_parameters)], 'weight_decay': training_args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if (n not in decay_parameters)], 'weight_decay': 0.0}] optimizer = bnb.optim.Adam8bit(params=optimizer_grouped_parameters, lr=training_args.learning_rate, betas=(training_args.adam_beta1, training_args.adam_beta2), eps=training_args.adam_epsilon) optimizers = (optimizer, None) trainer = Trainer(model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=(vectorized_datasets['train'] if training_args.do_train else None), eval_dataset=(vectorized_datasets['eval'] if training_args.do_eval else None), tokenizer=feature_extractor, optimizers=optimizers) if training_args.do_train: if (last_checkpoint is not None): checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(vectorized_datasets['train'])) metrics['train_samples'] = min(max_train_samples, len(vectorized_datasets['train'])) trainer.log_metrics('train', metrics) trainer.save_metrics('train', metrics) trainer.save_state() results = {} if training_args.do_eval: logger.info('*** Evaluate ***') metrics = trainer.evaluate() max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(vectorized_datasets['eval'])) metrics['eval_samples'] = min(max_eval_samples, len(vectorized_datasets['eval'])) trainer.log_metrics('eval', metrics) trainer.save_metrics('eval', metrics) config_name = (data_args.dataset_config_name if (data_args.dataset_config_name is not None) else 'na') kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'automatic-speech-recognition', 'tags': ['automatic-speech-recognition', data_args.dataset_name], 'dataset_args': f'Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}', 'dataset': f'{data_args.dataset_name.upper()} - {config_name.upper()}'} if ('common_voice' in data_args.dataset_name): kwargs['language'] = config_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results
class Adamax(OptimMethod): def __init__(self, learningrate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-38, bigdl_type='float'): super(Adamax, self).__init__(None, bigdl_type, learningrate, beta1, beta2, epsilon)
class BaseBenchmark(): def __init__(self, max_iter: int, log_interval: int, num_warmup: int, logger: Optional[MMLogger]=None): self.max_iter = max_iter self.log_interval = log_interval self.num_warmup = num_warmup self.logger = logger def run(self, repeat_num: int=1) -> dict: assert (repeat_num >= 1) results = [] for _ in range(repeat_num): results.append(self.run_once()) results = self.average_multiple_runs(results) return results def run_once(self) -> dict: raise NotImplementedError() def average_multiple_runs(self, results: List[dict]) -> dict: raise NotImplementedError()
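# Subclassing sketch (added, names illustrative, not from the source):
# implement run_once() and average_multiple_runs(); run() supplies the repeat loop.
import time
class SleepBenchmark(BaseBenchmark):
    def run_once(self) -> dict:
        start = time.perf_counter()
        for _ in range(self.max_iter):
            time.sleep(0.001)
        return {'fps': self.max_iter / (time.perf_counter() - start)}
    def average_multiple_runs(self, results):
        return {'fps': sum(r['fps'] for r in results) / len(results)}

print(SleepBenchmark(max_iter=10, log_interval=5, num_warmup=2).run(repeat_num=2))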
def main(): opt = parse_args() init_logger(opt.log_file) logger.info('Extracting features...') src_nfeats = inputters.get_num_features(opt.data_type, opt.train_dir, 'src') qa_nfeats = inputters.get_num_features(opt.data_type, opt.train_dir, 'qa') tgt_nfeats = inputters.get_num_features(opt.data_type, opt.train_dir, 'tgt') logger.info((' * number of source features: %d.' % src_nfeats)) logger.info((' * number of qa features: %d.' % qa_nfeats)) logger.info((' * number of target features: %d.' % tgt_nfeats)) logger.info('Building `Fields` object...') fields = inputters.get_fields(src_nfeats, qa_nfeats, tgt_nfeats, opt.data_type) logger.info('Building & saving training data...') train_dataset_files = build_save_dataset('train', fields, opt) logger.info('Building & saving validation data...') build_save_dataset('valid', fields, opt) logger.info('Building & saving vocabulary...') build_save_vocab(train_dataset_files, opt.data_type, fields, opt)
def process_isolate(func: Callable, project: sf.Project, **kwargs) -> bool: ctx = multiprocessing.get_context('spawn') passed = ctx.Manager().Value(bool, True) verbosity = sf.getLoggingLevel() process = ctx.Process(target=func, args=(project, verbosity, passed), kwargs=kwargs) process.start() process.join() return passed.value
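# Added sketch of the target-function contract (names illustrative): the
# spawned callable receives the project, the logging verbosity, and the shared
# boolean proxy, which it flips to False on failure.
def run_checks(project, verbosity, passed, strict=False):
    try:
        assert project is not None  # stand-in for the real per-project checks
    except Exception:
        passed.value = False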
def gptneox_sample_softmax(ctx: gptneox_context_p, candidates): return _lib.gptneox_sample_softmax(ctx, candidates)
def get_teacher_predictions(model_path: str, examples: List[str], class_names: List[str], hypothesis_template: str, batch_size: int, temperature: float, multi_label: bool, use_fast_tokenizer: bool, no_cuda: bool, fp16: bool): model = AutoModelForSequenceClassification.from_pretrained(model_path) model_config = model.config if ((not no_cuda) and torch.cuda.is_available()): model = nn.DataParallel(model.cuda()) batch_size *= len(model.device_ids) tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=use_fast_tokenizer) (premises, hypotheses) = get_premise_hypothesis_pairs(examples, class_names, hypothesis_template) logits = [] for i in tqdm(range(0, len(premises), batch_size)): batch_premises = premises[i:(i + batch_size)] batch_hypotheses = hypotheses[i:(i + batch_size)] encodings = tokenizer(batch_premises, batch_hypotheses, padding=True, truncation='only_first', return_tensors='pt') with torch.cuda.amp.autocast(enabled=fp16): with torch.no_grad(): outputs = model(**encodings) logits.append(outputs.logits.detach().cpu().float()) entail_id = get_entailment_id(model_config) contr_id = ((- 1) if (entail_id == 0) else 0) logits = torch.cat(logits, dim=0) nli_logits = logits.reshape(len(examples), len(class_names), (- 1))[(..., [contr_id, entail_id])] if multi_label: nli_prob = (nli_logits / temperature).softmax((- 1)) else: nli_prob = (nli_logits / temperature).softmax(1) return nli_prob[(..., 1)]
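# `get_premise_hypothesis_pairs` is not shown above; a plausible reconstruction
# (an assumption, not the source): pair each example with one hypothesis per
# class name, example-major, which matches the later reshape to
# (n_examples, n_classes, -1).
def get_premise_hypothesis_pairs(examples, class_names, hypothesis_template):
    premises = [ex for ex in examples for _ in class_names]
    hypotheses = [hypothesis_template.format(name) for ex in examples for name in class_names]
    return premises, hypotheses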
def load_ResNet18Model(): model = ResNet(Bottleneck, [2, 2, 2, 2]) copy_parameter_from_resnet(model, torchvision.models.resnet18(pretrained=True).state_dict()) return model
def get_view_select_parser(): parser = argparse.ArgumentParser() parser.add_argument('--seed', default=0, type=int) parser.add_argument('--phase', default='train') parser.add_argument('--dataset', default='nyu') parser.add_argument('--num_epoch', default=20, type=int) parser.add_argument('--batch_size', default=16, type=int) parser.add_argument('--learning_rate', default=0.001, type=float) parser.add_argument('--split', default=1, type=int, help='Divide the train dataset into s parts, each as an epoch.') parser.add_argument('--gpus', type=int, nargs='+') parser.add_argument('--log_dir', default='./logs/base') parser.add_argument('--model_saved_path', default='./checkpoint/base') parser.add_argument('--pre_pose_predictor', default='./checkpoint/nyu/pc_net/model.pth') parser.add_argument('--pre_wls', default='./checkpoint/nyu/wls/model.pth') parser.add_argument('--pre_model_path', default=None) parser.add_argument('--resume_training', default=False, type=bool, help='If resume_training is False, log dir will not be remove.') parser.add_argument('--reg_weight', default=0.001, type=float, help='Regularization weight.') parser.add_argument('--learning_decay_rate', default=0.8, help='Learning decay rate each epoch.') parser.add_argument('--d_feature', default=16, type=int) parser.add_argument('--level', default=5, type=int, help='Specify the number of virtual views. Levels 1, 2, 3, 4, 5 have 3, 9, 15, 25, 81 views, respectively.') parser.add_argument('--n_head', default=1, type=int) parser.add_argument('--d_attn', default=256, type=int) parser.add_argument('--d_k', default=64, type=int) parser.add_argument('--d_v', default=64, type=int) parser.add_argument('--d_inner', default=256, type=int) parser.add_argument('--nstack', default=2, type=int, help='The number of stacked hourglass network.') parser.add_argument('--hourglass_features', default=256, type=int, help='The number of features of stacked hourglass network.') parser.add_argument('--light_nstack', default=1, type=int) parser.add_argument('--light_hourglass_features', default=128, type=int, help='The number of features of stacked hourglass network.') parser.add_argument('--dropout_rate', default=0.5, type=float) parser.add_argument('--num_select', default=15, type=float) parser.add_argument('--threshold', default=0.04, type=float) parser.add_argument('--alpha', default=0.5, type=float) parser.add_argument('--num_worker', default=4, type=int, help='Number worker of Dataloader.') parser.add_argument('--noise_sigma', default=10.0, type=float, help='Sigma of Gaussian noise.') parser.add_argument('--max_jitter', default=10.0, type=float, help='Sigma of jittering center of mass.') parser.add_argument('--depth_sigma', default=1.0, type=float, help='Sigma of jittering depth of pixel.') parser.add_argument('--cube_len', default=None, type=float) parser.add_argument('--min_scale', default=1.0, type=float) parser.add_argument('--max_scale', default=1.0, type=float) parser.add_argument('--offset', default=20.0, type=float, help='Offset of bounding box.') parser.add_argument('--hand_thickness', default=20.0, type=float) parser.add_argument('--random_flip', default=False, type=bool, help='Whether flip image randomly.') parser.add_argument('--use_joint', default=False, type=bool) parser.add_argument('--save_result', default=False, type=bool) parser.add_argument('--config', default='./config/nyu/train_pc_net.yaml') return parser