code stringlengths 17 6.64M |
|---|
def main():
utils.setup_default_logging()
(args, args_text) = _parse_args()
args.prefetcher = (not args.no_prefetcher)
args.distributed = False
if ('WORLD_SIZE' in os.environ):
args.distributed = (int(os.environ['WORLD_SIZE']) > 1)
args.device = 'cuda:0'
args.world_size = 1
arg... |
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, grad_accum_steps=1, num_training_steps_per_epoch=None):
if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
... |
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
top1_m = utils.AverageMeter()
top5_m = utils.AverageMeter()
model.eval()
end = time.time()
last_idx = (len(loader) - 1)
with torch.no_gr... |
class ApexScalerAccum():
state_dict_key = 'amp'
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, update_grad=True):
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(create_graph=create_graph)
if upd... |
class NativeScalerAccum():
state_dict_key = 'amp_scaler'
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_g... |
def get_config(game_name: str) -> Dict:
'Get experiment configurations.'
config = deepcopy(CONFIG)
config['seed'] = FLAGS.seed
config['benchmark'] = 'atari'
config['sampling'] = FLAGS.sampling
config['mcts'] = (FLAGS.algo == 'mzu')
config['game_name'] = game_name
config['num_simulation... |
def get_learner(config, networks, data_iterator, logger) -> RosmoLearner:
    """Build the ROSMO learner that trains on the demonstration iterator.

    Args:
        config: Experiment configuration dict.
        networks: Agent networks.
        data_iterator: Iterator over demonstration data.
        logger: Logger for learner metrics.

    Returns:
        RosmoLearner: The constructed learner.
    """
    return RosmoLearner(
        networks,
        demonstrations=data_iterator,
        config=config,
        logger=logger,
    )
|
def get_actor_env_eval_loop(config, networks, environment, observers, logger) -> Tuple[(RosmoEvalActor, EnvironmentLoop)]:
'Get actor, env and evaluation loop.'
actor = RosmoEvalActor(networks, config)
eval_loop = EvaluationLoop(environment=environment, actor=actor, logger=logger, should_update=False, obs... |
def get_env_loop_observers() -> List[ExtendedEnvLoopObserver]:
    """Build the environment-loop observers (currently only the learning-step one).

    Returns:
        List[ExtendedEnvLoopObserver]: Observers to attach to the env loop.
    """
    return [LearningStepObserver()]
|
def get_env_data_loader(config) -> Tuple[(dm_env.Environment, Iterator)]:
'Get environment and trajectory data loader.'
trajectory_length = ((config['unroll_steps'] + config['td_steps']) + 1)
(environment, dataset) = atari_env_loader(env_name=config['game_name'], run_number=config['run_number'], dataset_d... |
def get_networks(config, environment) -> Networks:
'Get environment-specific networks.'
environment_spec = make_environment_spec(environment)
logging.info(environment_spec)
networks = make_atari_networks(env_spec=environment_spec, channels=config['channels'], num_bins=config['num_bins'], output_init_s... |
def get_logger_fn(exp_full_name: str, job_name: str, is_eval: bool=False, config: Optional[Dict]=None) -> Logger:
'Get logger function.'
save_data = is_eval
return logger_fn(exp_name=exp_full_name, label=job_name, save_data=(save_data and (not FLAGS.debug)), use_tb=False, use_wb=(FLAGS.use_wb and (not FLA... |
def main(_):
'Main program.'
platform = jax.lib.xla_bridge.get_backend().platform
num_devices = jax.device_count()
logging.warn(f'Compute platform: {platform} with {num_devices} devices.')
logging.info(f'Debug mode: {FLAGS.debug}')
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
cfg... |
def get_config(game_name: str) -> Dict:
'Get experiment configurations.'
config = deepcopy(CONFIG)
config['seed'] = FLAGS.seed
config['benchmark'] = 'bsuite'
config['mcts'] = (FLAGS.algo == 'mzu')
config['game_name'] = game_name
config['batch_size'] = (16 if FLAGS.debug else config.batch_s... |
def get_learner(config, networks, data_iterator, logger) -> RosmoLearner:
    """Create the ROSMO learner from networks and offline demonstrations.

    Args:
        config: Experiment configuration dict.
        networks: Agent networks.
        data_iterator: Iterator over demonstration data.
        logger: Logger for learner metrics.

    Returns:
        RosmoLearner: The constructed learner.
    """
    learner_kwargs = dict(
        demonstrations=data_iterator,
        config=config,
        logger=logger,
    )
    return RosmoLearner(networks, **learner_kwargs)
|
def get_actor_env_eval_loop(config, networks, environment, observers, logger) -> Tuple[(RosmoEvalActor, EnvironmentLoop)]:
'Get actor, env and evaluation loop.'
actor = RosmoEvalActor(networks, config)
eval_loop = EvaluationLoop(environment=environment, actor=actor, logger=logger, should_update=False, obs... |
def get_env_loop_observers() -> List[ExtendedEnvLoopObserver]:
    """Assemble the observers attached to the environment loop.

    Returns:
        List[ExtendedEnvLoopObserver]: Observers (a single LearningStepObserver).
    """
    learning_step_observer = LearningStepObserver()
    return [learning_step_observer]
|
def get_logger_fn(exp_full_name: str, job_name: str, is_eval: bool=False, config: Optional[Dict]=None) -> Logger:
'Get logger function.'
save_data = is_eval
return logger_fn(exp_name=exp_full_name, label=job_name, save_data=(save_data and (not FLAGS.debug)), use_tb=False, use_wb=(FLAGS.use_wb and (not FLA... |
def main(_):
'Main program.'
logging.info(f'Debug mode: {FLAGS.debug}')
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
platform = jax.lib.xla_bridge.get_backend().platform
num_devices = jax.device_count()
logging.warn(f'Compute platform: {platform} with {num_devices} devices.')
cfg... |
class RosmoEvalActor(acme.core.Actor):
'ROSMO evaluation actor.'
def __init__(self, networks: Networks, config: Dict) -> None:
'Init ROSMO evaluation actor.'
self._networks = networks
self._environment_specs = networks.environment_specs
self._rng_key = jax.random.PRNGKey(confi... |
def model_simulate(networks: Networks, params: Params, num_bins: int, state: Array, actions_to_simulate: Array) -> AgentOutput:
'Simulate the learned model using one-step look-ahead.'
def fn(state: Array, action: Array) -> Array:
'Dynamics fun for vmap.'
next_state = networks.transition_netwo... |
def one_step_improve(networks: Networks, rng_key: networks_lib.PRNGKey, params: Params, model_root: AgentOutput, num_bins: int, discount_factor: float, num_simulations: int=(- 1), sampling: bool=False) -> Tuple[(Array, Array)]:
'Obtain the one-step look-ahead target policy.'
environment_specs = networks.envir... |
def mcts_improve(networks: Networks, rng_key: networks_lib.PRNGKey, params: Params, model_root: AgentOutput, num_bins: int, discount_factor: float, num_simulations: int, search_depth: int) -> mctx.PolicyOutput:
'Obtain the Monte-Carlo Tree Search target policy.'
def recurrent_fn(params: Params, rng_key: netw... |
class Params(NamedTuple):
    'Agent parameters, grouped as one parameter tree per sub-network.'
    # Parameters of the representation (encoder) network.
    representation: networks_lib.Params
    # Parameters of the transition (dynamics) network used for model rollouts.
    transition: networks_lib.Params
    # Parameters of the prediction network — presumably the policy/value/reward
    # heads whose outputs appear in AgentOutput; confirm against the network defs.
    prediction: networks_lib.Params
|
class AgentOutput(NamedTuple):
    'Agent prediction output.'
    # Latent state produced by the agent's model.
    state: Array
    # Unnormalized log-probabilities over actions.
    policy_logits: Array
    # Categorical (distributional) value logits.
    value_logits: Array
    # Scalar value — presumably decoded from value_logits (cf. logits_to_scalar).
    value: Array
    # Categorical (distributional) reward logits.
    reward_logits: Array
    # Scalar reward — presumably decoded from reward_logits.
    reward: Array
|
def scale_gradient(g: Array, scale: float) -> Array:
'Scale the gradient.\n\n Args:\n g (_type_): Parameters that contain gradients.\n scale (float): Scale.\n\n Returns:\n Array: Parameters with scaled gradients.\n '
return ((g * scale) + (jax.lax.stop_gradient(g) * (1.0 - scale)... |
def scalar_to_two_hot(x: Array, num_bins: int) -> Array:
'A categorical representation of real values.\n\n Ref: https://www.nature.com/articles/s41586-020-03051-4.pdf.\n\n Args:\n x (Array): Scalar data.\n num_bins (int): Number of bins.\n\n Returns:\n Array: Distributional data.\n ... |
def logits_to_scalar(logits: Array, num_bins: int) -> Array:
'The inverse of the scalar_to_two_hot function above.\n\n Args:\n logits (Array): Distributional logits.\n num_bins (int): Number of bins.\n\n Returns:\n Array: Scalar data.\n '
chex.assert_equal(num_bins, logits.shape[... |
def value_transform(x: Array, epsilon: float=0.001) -> Array:
'A non-linear value transformation for variance reduction.\n\n Ref: https://arxiv.org/abs/1805.11593.\n\n Args:\n x (Array): Data.\n epsilon (float, optional): Epsilon. Defaults to 1e-3.\n\n Returns:\n Array: Transformed d... |
def inv_value_transform(x: Array, epsilon: float=0.001) -> Array:
'The inverse of the non-linear value transformation above.\n\n Args:\n x (Array): Data.\n epsilon (float, optional): Epsilon. Defaults to 1e-3.\n\n Returns:\n Array: Inversely transformed data.\n '
return (jnp.sign... |
class Cartpole(_Cartpole):
'Carpole environment.'
def __init__(self, *args: Any, **kwargs: Any) -> None:
'Init env.'
super().__init__(*args, **kwargs)
self.episode_id = 0
self.episode_return = 0
self.bsuite_id = 'cartpole/0'
def reset(self) -> dm_env.TimeStep:
... |
class Catch(_Catch):
'Catch environment.'
def __init__(self, *args: Any, **kwargs: Any) -> None:
'Init env.'
super().__init__(*args, **kwargs)
self.episode_id = 0
self.episode_return = 0
self.bsuite_id = 'catch/0'
def _reset(self) -> dm_env.TimeStep:
self.... |
class MountainCar(_MountainCar):
'Mountain Car environment.'
def __init__(self, *args: Any, **kwargs: Any) -> None:
'Init env.'
super().__init__(*args, **kwargs)
self.episode_id = 0
self.episode_return = 0
self.bsuite_id = 'mountain_car/0'
def _reset(self) -> dm_e... |
def create_bsuite_ds_loader(env_name: str, dataset_name: str, dataset_percentage: int) -> tf.data.Dataset:
'Create BSuite dataset loader.\n\n Args:\n env_name (str): Environment name.\n dataset_name (str): Dataset name.\n dataset_percentage (int): Fraction of data to be used\n\n Returns... |
def env_loader(env_name: str, dataset_dir: str, data_percentage: int=100, batch_size: int=8, trajectory_length: int=1, **_: Any) -> Tuple[(dm_env.Environment, tf.data.Dataset)]:
'Get the environment and dataset.\n\n Args:\n env_name (str): Name of the environment.\n dataset_dir (str): Directory s... |
class _BatchToTransition():
'Creates (s,a,r,f,l) transitions.'
@staticmethod
def create_transitions(batch: Dict[(str, tf.Tensor)]) -> Dict[(str, tf.Tensor)]:
'Create stacked transitions.\n\n Args:\n batch (Dict[str, tf.Tensor]): Data batch\n\n Returns:\n Dict[s... |
def _get_trajectory_dataset_fn(stack_size: int, trajectory_length: int=1) -> Callable[([tf.data.Dataset], tf.data.Dataset)]:
batch_fn = _BatchToTransition().create_transitions
def make_trajectory_dataset(episode: tf.data.Dataset) -> tf.data.Dataset:
"Converts an episode of steps to a dataset of custo... |
def _uniformly_subsampled_atari_data(dataset_name: str, data_percent: int, data_dir: str) -> tf.data.Dataset:
ds_builder = tfds.builder(dataset_name)
data_splits = []
total_num_episode = 0
for (split, info) in ds_builder.info.splits.items():
num_episodes = int(((data_percent / 100) * info.num_... |
def create_atari_ds_loader(env_name: str, run_number: int, dataset_dir: str, stack_size: int=4, data_percentage: int=10, trajectory_fn: Optional[Callable]=None, shuffle_num_episodes: int=1000, shuffle_num_steps: int=50000, trajectory_length: int=10, **_: Any) -> tf.data.Dataset:
'Create Atari dataset loader.\n\n ... |
class _AtariDopamineWrapper(dm_env.Environment):
'Wrapper for Atari Dopamine environmnet.'
def __init__(self, env: gym.Env, max_episode_steps: int=108000):
self._env = env
self._max_episode_steps = max_episode_steps
self._episode_steps = 0
self._reset_next_episode = True
... |
def environment(game: str, stack_size: int) -> dm_env.Environment:
'Atari environment.'
env = atari_lib.create_atari_environment(game_name=game, sticky_actions=True)
env = _AtariDopamineWrapper(env, max_episode_steps=20000)
env = wrappers.FrameStackingWrapper(env, num_frames=stack_size)
return wra... |
def env_loader(env_name: str, run_number: int, dataset_dir: str, stack_size: int=4, data_percentage: int=10, trajectory_fn: Optional[Callable]=None, shuffle_num_episodes: int=1000, shuffle_num_steps: int=50000, trajectory_length: int=10, **_: Any) -> Tuple[(dm_env.Environment, tf.data.Dataset)]:
'Get the environm... |
class EvaluationLoop(acme.EnvironmentLoop):
'Evaluation env-actor loop.'
def run(self, num_episodes: Optional[int]=None, num_steps: Optional[int]=None) -> None:
'Run the evaluation loop.'
if (not ((num_episodes is None) or (num_steps is None))):
raise ValueError('Either "num_episo... |
class ExtendedEnvLoopObserver(observers_lib.EnvLoopObserver):
    'Env-loop observer extended with stepping and state-restoration hooks.'
    @abstractmethod
    def step(self) -> None:
        'Advance the observer by one step.'
    @abstractmethod
    def restore(self, learning_step: int) -> None:
        'Restore the observer state to the given learning step.'
|
class LearningStepObserver(ExtendedEnvLoopObserver):
'Observer to record the learning steps.'
def __init__(self) -> None:
'Init observer.'
super().__init__()
self._learning_step = 0
self._eval_step = 0
self._status = 1
self._train_elapsed = 0.0
self._la... |
class WBLogger(base.Logger):
'Logger for W&B.'
def __init__(self, scope: Optional[str]=None) -> None:
'Init WB logger.'
self._lock = threading.Lock()
self._scope = scope
def write(self, data: Dict[(str, Any)]) -> None:
'Log the data.'
step = data.pop('step', None)... |
class ResultFilter(base.Logger):
'Postprocessing for normalized score.'
def __init__(self, to: base.Logger, game_name: str):
'Init result filter.'
self._to = to
game_name = re.sub('(?<!^)(?=[A-Z])', '_', game_name).lower()
if (game_name in BASELINES):
random_score ... |
def make_sail_logger(exp_name: str, label: str, save_data: bool=True, save_dir: str='./logs', use_tb: bool=False, tb_dir: Optional[str]=None, use_wb: bool=False, config: Optional[dict]=None, time_delta: float=1.0, asynchronous: bool=False, print_fn: Optional[Callable[([str], None)]]=None, serialize_fn: Optional[Calla... |
def logger_fn(exp_name: str, label: str, save_data: bool=False, use_tb: bool=True, use_wb: bool=True, config: Optional[dict]=None, time_delta: float=15.0) -> Logger:
'Get logger function.\n\n Args:\n exp_name (str): Experiment name.\n label (str): Experiment label.\n save_data (bool, optio... |
class Profiler():
'Profiler for python and jax (optional).'
def __init__(self, folder: str, name: str, with_jax: bool=False) -> None:
'Init.'
super().__init__()
self._name = name
self._folder = folder
self._with_jax = with_jax
self._vistracer = VizTracer(output... |
class ActorOutput(NamedTuple):
    'Actor output parsed from the dataset.'
    # Environment observation.
    observation: Array
    # Reward received at this step.
    reward: Array
    # Flag marking the first step of an episode.
    is_first: Array
    # Flag marking the last step of an episode.
    is_last: Array
    # Action taken by the actor.
    action: Array
|
class RLUAtari(parameterized.TestCase):
'Test RL Unplugged Atari data loader.'
@staticmethod
def test_data_loader():
'Test data loader.'
dataset_dir = os.path.join(_DATASET_DIR, 'atari')
(_, dataloader) = atari_env_loader(env_name='Asterix', run_number=1, dataset_dir=dataset_dir)
... |
class BSuite(parameterized.TestCase):
'Test BSuite data loader.'
@staticmethod
def test_data_loader():
'Test data loader.'
dataset_dir = os.path.join(_DATASET_DIR, 'bsuite')
(_, dataloader) = bsuite_env_loader(env_name='catch', dataset_dir=dataset_dir)
iterator = iter(data... |
def _update_dict(k, v):
if (k == 'DATASET'):
if (('MEAN' in v) and v['MEAN']):
v['MEAN'] = np.array([(eval(x) if isinstance(x, str) else x) for x in v['MEAN']])
if (('STD' in v) and v['STD']):
v['STD'] = np.array([(eval(x) if isinstance(x, str) else x) for x in v['STD']])
... |
def update_config(config_file):
exp_config = None
with open(config_file) as f:
exp_config = edict(yaml.load(f, Loader=yaml.FullLoader))
for (k, v) in exp_config.items():
if (k in config):
if isinstance(v, dict):
_update_dict(k, v)
... |
def gen_config(config_file):
    """Serialize the current global config to *config_file* as YAML.

    Top-level edict values are converted to plain dicts so the YAML output
    contains only standard mappings.
    """
    plain = {
        key: dict(value) if isinstance(value, edict) else value
        for key, value in dict(config).items()
    }
    with open(config_file, 'w') as f:
        yaml.dump(plain, f, default_flow_style=False)
|
def update_dir(model_dir, log_dir, data_dir):
if model_dir:
config.OUTPUT_DIR = model_dir
if log_dir:
config.LOG_DIR = log_dir
if data_dir:
config.DATA_DIR = data_dir
config.DATASET.ROOT = os.path.join(config.DATA_DIR, config.DATASET.ROOT)
config.TEST.BBOX_FILE = os.path.jo... |
def get_model_name(cfg):
name = '{model}_{num_layers}'.format(model=cfg.MODEL, num_layers=cfg.POSE_RESNET.NUM_LAYERS)
deconv_suffix = ''.join(('d{}'.format(num_filters) for num_filters in cfg.POSE_RESNET.NUM_DECONV_FILTERS))
full_name = '{height}x{width}_{name}_{deconv_suffix}'.format(height=cfg.NETWORK.I... |
class DistributedSampler(Sampler):
'Sampler that restricts data loading to a subset of the dataset.\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and loa... |
class NodeDistributedSampler(Sampler):
'Sampler that restricts data loading to a subset of the dataset.\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and... |
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, 'src')
main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
source_cuda = glob.glob(os.path.join(extensio... |
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with 1-pixel padding and no bias.

    Args:
        in_planes: Number of input channels.
        out_planes: Number of output channels.
        stride: Convolution stride (default 1).
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=Tru... |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2... |
class PoseResNet(nn.Module):
def __init__(self, block, layers, cfg, **kwargs):
self.inplanes = 64
self.deconv_with_bias = cfg.POSE_RESNET.DECONV_WITH_BIAS
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.... |
def get_pose_net(cfg, is_train, **kwargs):
    """Build a PoseResNet for the depth given by cfg.POSE_RESNET.NUM_LAYERS.

    When *is_train* is true, weights are initialized from the pretrained
    checkpoint referenced by cfg.NETWORK.PRETRAINED.
    """
    block_type, layer_counts = resnet_spec[cfg.POSE_RESNET.NUM_LAYERS]
    network = PoseResNet(block_type, layer_counts, cfg, **kwargs)
    if is_train:
        network.init_weights(cfg.NETWORK.PRETRAINED)
    return network
|
def unfold_camera_param(camera):
    """Unpack a camera dict into its calibration components.

    Focal length and principal point are returned as 2x1 column vectors;
    the remaining entries are passed through unchanged.

    Args:
        camera: Dict with keys 'R', 'T', 'fx', 'fy', 'cx', 'cy', 'k', 'p'.

    Returns:
        Tuple (R, T, f, c, k, p) where f and c have shape (2, 1).
    """
    focal = np.array([[camera['fx']], [camera['fy']]]).reshape(-1, 1)
    center = np.array([[camera['cx']], [camera['cy']]]).reshape(-1, 1)
    return (
        camera['R'],
        camera['T'],
        focal,
        center,
        camera['k'],
        camera['p'],
    )
|
def project_point_radial(x, R, T, f, c, k, p):
'\n Args\n x: Nx3 points in world coordinates\n R: 3x3 Camera rotation matrix\n T: 3x1 Camera translation parameters\n f: (scalar) Camera focal length\n c: 2x1 Camera center\n k: 3x1 Camera radial distortion coefficients\n... |
def project_pose(x, camera):
    """Project Nx3 world points into the image using the camera's radial model."""
    params = unfold_camera_param(camera)
    return project_point_radial(x, *params)
|
def world_to_camera_frame(x, R, T):
    """Transform points from the world frame into the camera frame.

    Args
        x: Nx3 3d points in world coordinates
        R: 3x3 Camera rotation matrix
        T: 3x1 Camera translation parameters
    Returns
        Nx3 3d points in camera coordinates
    """
    return R.dot(x.T - T).T
|
def camera_to_world_frame(x, R, T):
    """Transform points from the camera frame back to the world frame.

    Inverse of world_to_camera_frame for the same (R, T).

    Args
        x: Nx3 points in camera coordinates
        R: 3x3 Camera rotation matrix
        T: 3x1 Camera translation parameters
    Returns
        Nx3 points in world coordinates
    """
    world_points = R.T.dot(x.T) + T
    return world_points.T
|
def imread(filename, flags=cv2.IMREAD_COLOR):
global _im_zfile
path = filename
pos_at = path.index('@')
if (pos_at == (- 1)):
print(("character '@' is not found from the given path '%s'" % path))
assert 0
path_zip = path[0:pos_at]
path_img = path[(pos_at + 2):]
if (not os.p... |
def xmlread(filename):
global _xml_path_zip
global _xml_zfile
path = filename
pos_at = path.index('@')
if (pos_at == (- 1)):
print(("character '@' is not found from the given path '%s'" % path))
assert 0
path_zip = path[0:pos_at]
path_xml = path[(pos_at + 2):]
if (not o... |
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
|
def get_host_info():
    """Return a 'user@hostname' identifier for the current process's machine."""
    return f'{getuser()}@{gethostname()}'
|
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--see... |
def match_name_keywords(n, name_keywords):
    """Return True if any keyword occurs as a substring of *n*.

    Replaces the manual flag-and-break loop with the idiomatic any().

    Args:
        n (str): Name to test (e.g. a parameter name).
        name_keywords (Iterable[str]): Substrings to look for.

    Returns:
        bool: True when at least one keyword is contained in *n*.
    """
    return any(keyword in n for keyword in name_keywords)
|
def get_optimizer(model_without_ddp, weight_decay, optim_type):
lr = config.TRAIN.LR
if (model_without_ddp.backbone is not None):
for params in model_without_ddp.backbone.parameters():
params.requires_grad = False
lr_linear_proj_mult = config.DECODER.lr_linear_proj_mult
lr_linear_p... |
def main():
args = parse_args()
utils.init_distributed_mode(args)
print('git:\n {}\n'.format(utils.get_sha()))
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
(logger, final_output_dir, tb_log_... |
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--see... |
def main():
args = parse_args()
(logger, final_output_dir, tb_log_dir) = create_logger(config, args.cfg, 'validate')
device = torch.device(args.device)
utils.init_distributed_mode(args)
print('git:\n {}\n'.format(utils.get_sha()))
if is_main_process():
logger.info(pprint.pformat(args)... |
def create_dataset(dataset, config, min_scale=0.5):
print(config)
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
transform_train = transforms.Compose([transforms.RandomResizedCrop(config['image_size'], scale=(min_scale, 1.0)), transforms.RandomH... |
def create_sampler(datasets, shuffles, num_tasks, global_rank):
    """Build one DistributedSampler per (dataset, shuffle) pair.

    Args:
        datasets: Datasets to sample from.
        shuffles: Per-dataset shuffle flags, zipped with *datasets*.
        num_tasks: Total number of distributed replicas.
        global_rank: Rank of the current process.

    Returns:
        List of DistributedSampler instances.
    """
    return [
        torch.utils.data.DistributedSampler(
            ds, num_replicas=num_tasks, rank=global_rank, shuffle=do_shuffle
        )
        for ds, do_shuffle in zip(datasets, shuffles)
    ]
|
def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
loaders = []
for (dataset, sampler, bs, n_worker, is_train, collate_fn) in zip(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
if is_train:
shuffle = (sampler is None)
... |
class coco_karpathy_train(Dataset):
def __init__(self, transform, image_root, ann_root, max_words=30, prompt=''):
'\n image_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n '
url = 'https://storage.googl... |
class coco_karpathy_caption_eval(Dataset):
def __init__(self, transform, image_root, ann_root, split):
'\n image_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n split (string): val or test\n '
ur... |
class coco_karpathy_retrieval_eval(Dataset):
def __init__(self, transform, image_root, ann_root, split, max_words=30):
'\n image_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n split (string): val or test\n ... |
class flickr30k_train(Dataset):
def __init__(self, transform, image_root, ann_root, max_words=30, prompt=''):
'\n image_root (string): Root directory of images (e.g. flickr30k/)\n ann_root (string): directory to store the annotation file\n '
url = 'https://storage.googleapis.... |
class flickr30k_retrieval_eval(Dataset):
def __init__(self, transform, image_root, ann_root, split, max_words=30):
'\n image_root (string): Root directory of images (e.g. flickr30k/)\n ann_root (string): directory to store the annotation file\n split (string): val or test\n '
... |
def create_dataset(dataset, config, min_scale=0.5):
print(config)
transform_train = transforms.Compose([transforms.RandomResizedCrop(config['image_size'], scale=(min_scale, 1.0)), transforms.RandomHorizontalFlip(), RandomAugment(2, 5, isPIL=True, augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'E... |
def create_sampler(datasets, shuffles, num_tasks, global_rank):
    """Return DistributedSampler instances, one per dataset, sharded across ranks.

    Args:
        datasets: Datasets to sample from.
        shuffles: Per-dataset shuffle flags, zipped with *datasets*.
        num_tasks: Total number of distributed replicas.
        global_rank: Rank of the current process.

    Returns:
        List of DistributedSampler instances.
    """
    make_sampler = torch.utils.data.DistributedSampler
    return [
        make_sampler(ds, num_replicas=num_tasks, rank=global_rank, shuffle=shuf)
        for ds, shuf in zip(datasets, shuffles)
    ]
|
def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
loaders = []
for (dataset, sampler, bs, n_worker, is_train, collate_fn) in zip(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
if is_train:
shuffle = (sampler is None)
... |
def create_dataset(dataset, config, min_scale=0.5):
print(config)
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
transform_train = transforms.Compose([transforms.RandomResizedCrop(config['image_size'], scale=(min_scale, 1.0)), RandomAugment(2, 5... |
def create_sampler(datasets, shuffles, num_tasks, global_rank):
    """For every dataset build a DistributedSampler restricted to this rank's shard.

    Args:
        datasets: Datasets to sample from.
        shuffles: Per-dataset shuffle flags, zipped with *datasets*.
        num_tasks: Total number of distributed replicas.
        global_rank: Rank of the current process.

    Returns:
        List of DistributedSampler instances.
    """
    paired = zip(datasets, shuffles)
    return [
        torch.utils.data.DistributedSampler(
            dataset,
            num_replicas=num_tasks,
            rank=global_rank,
            shuffle=flag,
        )
        for dataset, flag in paired
    ]
|
def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
loaders = []
for (dataset, sampler, bs, n_worker, is_train, collate_fn) in zip(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
if is_train:
shuffle = (sampler is None)
... |
class nlvr_dataset(Dataset):
def __init__(self, transform, image_root, ann_root, split):
'\n image_root (string): Root directory of images \n ann_root (string): directory to store the annotation file\n split (string): train, val or test\n '
urls = {'train': 'https://st... |
class pretrain_dataset(Dataset):
def __init__(self, ann_file, laion_path, transform):
self.img_root = '/dataset'
self.ann_pretrain = None
for f in ann_file:
ann_temp = pd.read_csv(f, sep='\t', header=None)
if (self.ann_pretrain is None):
self.ann_pr... |
class vqa_dataset(Dataset):
def __init__(self, transform, ann_root, vqa_root, vg_root, train_files=[], split='train'):
self.split = split
self.transform = transform
self.vqa_root = vqa_root
self.vg_root = vg_root
if (split == 'train'):
urls = {'vqa_train': 'htt... |
def vqa_collate_fn(batch):
(image_list, question_list, answer_list, weight_list, n) = ([], [], [], [], [])
for (image, question, answer, weights) in batch:
image_list.append(image)
question_list.append(question)
weight_list += weights
answer_list += answer
n.append(len(... |
@torch.no_grad()
def evaluate(model, data_loader, device, config):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Evaluation:'
print_freq = 10
result = []
for (image, image_id) in metric_logger.log_every(data_loader, print_freq, header):
image = image.to(devi... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.