code stringlengths 101 5.91M |
|---|
def get_transform(opt):
    """Build the torchvision preprocessing pipeline selected by ``opt.resize_or_crop``.

    Every variant ends with ToTensor followed by normalization to [-1, 1].
    """
    mode = opt.resize_or_crop
    steps = []
    if mode == 'resize_and_crop':
        # Scale to a loadSize square, then take a random fineSize crop.
        steps.append(transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC))
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        steps.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        steps.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize)))
        steps.append(transforms.RandomCrop(opt.fineSize))
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(steps)
def get_all_videos(dir, extension='mp4'):
    """Walk *dir* recursively and return every file path whose name ends with *extension*.

    Matching is a plain suffix test on the file name (no dot is implied).
    """
    found = []
    for dirpath, _dirnames, filenames in os.walk(dir):
        found.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(extension)
        )
    return found
def _test_products_sign_covariance(dout: int, use_weights: bool):
    """Check sign covariance of ProductsSignCovariance.

    Negating one spin sector's inputs must negate the output; negating both
    sectors must leave it unchanged.
    """
    nbatch, d = 5, 2
    nelec_per_spin = (2, 5)
    key = jax.random.PRNGKey(0)
    key, subkey = jax.random.split(key)
    inputs = [jax.random.normal(key, (nbatch, nelec, d)) for nelec in nelec_per_spin]
    flip_sign_inputs = [inputs[0], -inputs[1]]
    same_sign_inputs = [-inputs[0], -inputs[1]]
    model = sign_sym.ProductsSignCovariance(
        dout, weights.get_kernel_initializer('orthogonal'), use_weights=use_weights
    )
    result, params = model.init_with_output(subkey, inputs)
    chex.assert_shape(result, (nbatch, dout))
    assert_pytree_allclose(model.apply(params, flip_sign_inputs), -result)
    assert_pytree_allclose(model.apply(params, same_sign_inputs), result)
def save_scores(experiment: str, index: str, values: dict) -> None:
    """Write (or update) one row of scores in ``outputs/csv/<experiment>.csv``.

    If the CSV exists, the row for *index* is overwritten in place. Otherwise a
    fresh frame is created whose index is the LLM list or the classic-model
    list, depending on which family *index* belongs to, with one column per
    SCORING metric plus timing columns.
    """
    llms = ['BERT', 'RoBERTa', 'SetFit-MiniLM', 'SetFit-mpnet', 'FLAN-T5-small', 'FLAN-T5-base']
    models = ['NB', 'LR', 'KNN', 'SVM', 'XGBoost', 'LightGBM']
    # Reuse one Path for mkdir, read and write instead of repeating the literal.
    out_dir = Path('outputs/csv')
    out_dir.mkdir(parents=True, exist_ok=True)
    file = out_dir / f'{experiment}.csv'
    if file.is_file():
        scores = pd.read_csv(file, index_col=0)
    else:
        rows = llms if index in llms else models
        columns = list(SCORING.keys()) + ['training_time', 'inference_time']
        scores = pd.DataFrame(index=rows, columns=columns)
    scores.loc[index] = values
    scores.to_csv(file)
class TestMetaUtils(unittest.TestCase):
    """Tests for meta-device model init, reload, disk offload and TP reload."""

    def tearDown(self):
        # Tests may create a (tensor-)parallel process group; always clean up
        # so test cases stay independent.
        destroy_parallel_group()
        return super().tearDown()

    # NOTE(review): the three skip decorators below survived in the source only
    # as bare argument tuples (no-op expressions); restored as unittest.skipIf
    # based on the (condition, reason) shape — confirm against upstream history.
    @unittest.skipIf(torch.cuda.is_available(), 'cpu test')
    def test_init_and_reload(self):
        with init_empty_weights_with_disk_offload(ignore_tie_weights=False):
            model = MyModule(8, 4)
        # Tied input/output embeddings must share one tensor before and after reload.
        assert id(model.emb_in.weight) == id(model.emb_out.weight)
        reload_meta_module(model)
        assert id(model.emb_in.weight) == id(model.emb_out.weight)

    @unittest.skipIf(torch.cuda.is_available() or GPTNeoXForCausalLM is None, 'cpu test')
    def test_offload_reload(self):
        with init_empty_weights_with_disk_offload(ignore_tie_weights=False):
            config = {'hidden_size': 32, 'intermediate_size': 128, 'num_attention_heads': 2, 'num_hidden_layers': 2, 'vocab_size': 512}
            config = GPTNeoXConfig(**config)
            model = GPTNeoXForCausalLM(config)
        input_ids = torch.randint(low=0, high=512, size=(16, 24), dtype=torch.long)
        labels = torch.rand(16, 24).long()
        input_batch = {'input_ids': input_ids, 'labels': labels}
        materialize_modules_to_device(model)
        model(**input_batch)

    @unittest.skipIf(not torch.cuda.is_available() or torch.cuda.device_count() < 2, 'Must have at least 2 GPUs for tp test')
    def test_tp_reload(self):
        world_size = 2
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(find_free_port())
        mp.spawn(run_tp_materialize, args=(world_size,), nprocs=world_size, join=True)
        os.environ['MASTER_ADDR'] = ''
        os.environ['MASTER_PORT'] = ''
class MDSR(nn.Module):
    """Multi-scale super-resolution network: one shared body with per-scale
    pre-process and upsample branches, selected at runtime via set_scale()."""

    def __init__(self, args, conv=common.default_conv):
        super(MDSR, self).__init__()
        num_blocks = args.n_resblocks
        width = args.n_feats
        ksize = 3
        act = nn.ReLU(True)
        self.scale_idx = 0
        self.url = url['r{}f{}'.format(num_blocks, width)]
        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)
        # Build modules in the same order as they are registered below so
        # parameter initialization and state_dict ordering are unchanged.
        head_modules = [conv(args.n_colors, width, ksize)]
        self.pre_process = nn.ModuleList(
            nn.Sequential(
                common.ResBlock(conv, width, 5, act=act),
                common.ResBlock(conv, width, 5, act=act),
            )
            for _ in args.scale
        )
        body_modules = [common.ResBlock(conv, width, ksize, act=act) for _ in range(num_blocks)]
        body_modules.append(conv(width, width, ksize))
        self.upsample = nn.ModuleList(
            common.Upsampler(conv, s, width, act=False) for s in args.scale
        )
        tail_modules = [conv(width, args.n_colors, ksize)]
        self.head = nn.Sequential(*head_modules)
        self.body = nn.Sequential(*body_modules)
        self.tail = nn.Sequential(*tail_modules)

    def forward(self, x):
        feat = self.head(self.sub_mean(x))
        feat = self.pre_process[self.scale_idx](feat)
        residual = self.body(feat)
        residual += feat  # global skip connection around the body
        out = self.upsample[self.scale_idx](residual)
        return self.add_mean(self.tail(out))

    def set_scale(self, scale_idx):
        """Select which per-scale branch forward() will use."""
        self.scale_idx = scale_idx
def load_swav_teacher_encoder(args, model, logger, distributed=True):
    """Copy SwAV checkpoint weights (minus prototype heads) into *model*'s teacher branch.

    Checkpoint keys have their 'module' prefix remapped to the teacher submodule
    ('module.teacher' under DDP, plain 'teacher' otherwise) and are merged into
    the model's current state dict before loading.

    Returns the model with the merged state dict loaded.
    """
    checkpoint = torch.load(args.distill)
    model_checkpoint = model.state_dict()
    # The two original branches were identical except for the replacement
    # prefix; collapse them into one loop.
    replacement = 'module.teacher' if distributed else 'teacher'
    for key in checkpoint:
        if key.startswith('module.prototypes'):
            continue  # prototype heads are not part of the distilled encoder
        # NOTE(review): str.replace substitutes every occurrence of 'module',
        # not only the leading prefix — same as the original code.
        model_key = key.replace('module', replacement)
        model_checkpoint[model_key] = checkpoint[key]
        logger.info('{} loaded.'.format(model_key))
    model.load_state_dict(model_checkpoint)
    return model
def start_namespace(namespace):
    """Set the module-wide type-name prefix to '<namespace>.'."""
    global value_type_prefix
    value_type_prefix = namespace + '.'
class NonNegativeParametrizer(nn.Module):
    """Reparametrize a non-negative quantity through a squared, lower-bounded proxy."""

    pedestal: Tensor

    def __init__(self, minimum: float = 0, reparam_offset: float = 2 ** (-18)):
        super().__init__()
        self.minimum = float(minimum)
        self.reparam_offset = float(reparam_offset)
        offset_sq = self.reparam_offset ** 2
        # Buffer so the pedestal moves with the module across devices/dtypes.
        self.register_buffer('pedestal', torch.Tensor([offset_sq]))
        self.lower_bound = LowerBound((self.minimum + offset_sq) ** 0.5)

    def init(self, x: Tensor) -> Tensor:
        """Map a target value *x* into the internal parameter domain (inverse of forward)."""
        return torch.sqrt(torch.max(x + self.pedestal, self.pedestal))

    def forward(self, x: Tensor) -> Tensor:
        bounded = self.lower_bound(x)
        return bounded ** 2 - self.pedestal
class Entity(xmlr.Object):
    """Named, posed scene element; (de)serialization is handled by xmlr."""

    def __init__(self, name=None, pose=None):
        # Both fields are optional and default to None.
        self.name = name
        self.pose = pose
def main():
    """Compute MBR (minimum Bayes risk) statistics over an n-best results JSON.

    Usage: ``script <results.json>``. Prints the CER plus the mean/standard
    deviation of per-utterance Bayes risk, accumulated hypothesis probability,
    and reference-hypothesis probability.
    """
    # Close the file deterministically instead of leaking the handle.
    with open(sys.argv[1], 'rb') as f:
        results_json = json.load(f)['utts']
    num_err, num_tot = 0, 0
    risk_stat, sum_prob_stat, ref_prob_stat = [], [], []
    for uttid, info in results_json.items():
        try:
            hypotheses = info['output']
            # First hypothesis carries the reference transcription.
            ref_token = hypotheses[0]['token']
            texts, probs, find_ref = [], [], False
            for h in hypotheses:
                text = h['rec_token'].replace('<eos>', '').strip()
                texts.append(text)
                probs.append(math.exp(h['score']))
                if ref_token == text:
                    ref_prob_stat.append(math.exp(h['score']))
                    find_ref = True
            if not find_ref:
                ref_prob_stat.append(0.0)
            # Expected edit distance under the (normalized) hypothesis distribution.
            edit_dists = [editdistance.eval(ref_token, rec_token) for rec_token in texts]
            weighted_probs = [a * b for a, b in zip(edit_dists, probs)]
            risk = sum(weighted_probs) / (sum(probs) + 1e-10)
            risk_stat.append(risk)
            sum_prob_stat.append(sum(probs))
            num_err += edit_dists[0]
            num_tot += len(ref_token.strip().split())
        except Exception:
            # Best-effort: skip malformed utterances. (Was a bare `except`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
    print('### MBR statistics on {} ###'.format(sys.argv[1]))
    cer = (num_err / num_tot) * 100
    print('CER: {:.4f}% {}/{}'.format(cer, num_err, num_tot))
    br_mean, br_deviation = np.mean(risk_stat), np.sqrt(np.var(risk_stat))
    print('Mean and Deviation of Bayesian Risk: {:.4f} | {:.4f}'.format(br_mean, br_deviation))
    sum_prob_mean, sum_prob_deviation = np.mean(sum_prob_stat), np.sqrt(np.var(sum_prob_stat))
    ref_prob_mean, ref_prob_deviation = np.mean(ref_prob_stat), np.sqrt(np.var(ref_prob_stat))
    print('Mean and Deviation of Accumulated probability: {:.4f} | {:.4f}'.format(sum_prob_mean, sum_prob_deviation))
    print('Mean and Deviation of Reference probability: {:.4f} | {:.4f}'.format(ref_prob_mean, ref_prob_deviation))
class WeightedRandomSampler(Sampler):
    """Sample element indices with probability proportional to the given weights.

    Args:
        weights: per-element sampling weights (need not sum to 1).
        num_samples: number of indices drawn per iteration pass.
        replacement: whether indices are drawn with replacement.
    """

    def __init__(self, weights, num_samples, replacement=True):
        self.weights = torch.DoubleTensor(weights)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        drawn = torch.multinomial(self.weights, self.num_samples, self.replacement)
        return iter(drawn)

    def __len__(self):
        return self.num_samples
def get_center_bbox(mesh: 'trimesh.base.Trimesh') -> np.ndarray:
    """Return the center of *mesh*'s axis-aligned bounding box as a (3,) array.

    FIX: the original annotations used ``Type[...]``, which denotes the class
    object rather than an instance; corrected to instance types (the mesh one
    as a string to avoid evaluating the trimesh name at definition time).
    """
    vertices = mesh.vertices
    return 0.5 * (np.min(vertices, axis=0) + np.max(vertices, axis=0))
def test_standard_anchor_generator():
    """Smoke-test mmdet's AnchorGenerator: 3 base anchors on each of 2 levels."""
    from mmdet.models.task_modules import build_anchor_generator

    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8],
    )
    generator = build_anchor_generator(cfg)
    # num_base_priors is the new name for num_base_anchors; both must agree.
    assert generator.num_base_priors == generator.num_base_anchors
    assert generator.num_base_priors == [3, 3]
    assert generator is not None
class OCC_DukeMTMCreID(BaseImageDataset):
    """Occluded-Duke person re-identification dataset.

    Expects (or downloads into) ``<root>/Occluded_Duke`` containing the
    ``bounding_box_train`` / ``query`` / ``bounding_box_test`` folders.
    Each sample is a tuple ``(img_path, pid, camid, trackid)``.
    """

    dataset_dir = 'Occluded_Duke'

    def __init__(self, root='', verbose=True, pid_begin=0, **kwargs):
        super(OCC_DukeMTMCreID, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        # FIX: this string literal was left unterminated in the source (a
        # syntax error); the actual URL text was lost. Restored as '' — set it
        # to the real archive URL before relying on automatic download.
        self.dataset_url = ''
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self.pid_begin = pid_begin
        self._download_data()
        self._check_before_run()
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> DukeMTMC-reID loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams, self.num_train_vids) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams, self.num_query_vids) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams, self.num_gallery_vids) = self.get_imagedata_info(self.gallery)

    def _download_data(self):
        """Download and extract the dataset zip unless the directory already exists."""
        if osp.exists(self.dataset_dir):
            print('This dataset has been downloaded.')
            return
        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
        print('Downloading DukeMTMC-reID dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)
        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def _check_before_run(self):
        """Raise RuntimeError if any of the required directories is missing."""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if not osp.exists(self.train_dir):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if not osp.exists(self.query_dir):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if not osp.exists(self.gallery_dir):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, relabel=False):
        """Parse '<pid>_c<cam>...jpg' names into (path, pid, camid, trackid) tuples.

        With relabel=True, person ids are remapped to a dense 0..N-1 range
        (used for the training split).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        dataset = []
        cam_container = set()
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            assert 1 <= camid <= 8
            camid -= 1  # camera ids become 0-based
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, self.pid_begin + pid, camid, 1))
            cam_container.add(camid)
        print(cam_container, 'cam_container')
        return dataset
class UnetBlock(nn.Module):
    """U-Net decoder stage: deconv-upsample the deep path, 1x1-project the skip
    path, concatenate along channels, then ReLU followed by BatchNorm."""

    def __init__(self, up_in, x_in, n_out):
        super().__init__()
        # Each path contributes half of the output channels.
        half = n_out // 2
        self.x_conv = nn.Conv2d(x_in, half, 1)
        self.tr_conv = nn.ConvTranspose2d(up_in, half, 2, stride=2)
        self.bn = nn.BatchNorm2d(n_out)

    def forward(self, up_p, x_p):
        upsampled = self.tr_conv(up_p)
        skip = self.x_conv(x_p)
        merged = torch.cat([upsampled, skip], dim=1)
        return self.bn(F.relu(merged))
class RobotMultiTCNRegression(AbstractAgentBasedModel):
    """Regress next arm/gripper commands from short observation histories.

    Feeds fixed-length chunks of (image, arm, gripper) history through a 3D
    (time-distributed) encoder and predicts the last arm and gripper command of
    each chunk with an MSE objective.
    """

    def __init__(self, taskdef, *args, **kwargs):
        super(RobotMultiTCNRegression, self).__init__(*args, **kwargs)
        self.taskdef = taskdef
        self.model = None
        # Network hyper-parameters.
        self.dropout_rate = 0.5
        self.num_filters = 128
        self.combined_dense_size = 128
        self.num_frames = 10  # history length fed to the encoder
        self.tcn_filters = 128
        self.num_tcn_levels = 2
        self.tcn_dense_size = 1024
        # Rolling buffers for online prediction.
        self.buffer_img = []
        self.buffer_arm = []
        self.buffer_gripper = []
        self.imgs = deque()
        self.q = deque()
        self.gripper = deque()

    def _makeModel(self, features, arm, gripper, arm_cmd, gripper_cmd, *args, **kwargs):
        """Build and compile the Keras model from example data shapes."""
        img_shape = features.shape[1:]
        if len(img_shape) == 3:
            # Single-frame data: prepend the time axis expected by the 3D encoder.
            img_shape = (self.num_frames,) + img_shape
        arm_size = arm.shape[-1]
        if len(gripper.shape) > 1:
            gripper_size = gripper.shape[-1]
        else:
            gripper_size = 1
        (ins, x) = GetEncoder3D(img_shape, arm_size, gripper_size, self.dropout_rate, filters=self.num_filters, pre_tiling_layers=1, post_tiling_layers=3, kernel_size=[3, 3, 3], time_distributed=self.num_frames, dropout=True, leaky=True, tile=True)
        x = Flatten()(x)
        x = Dense(self.combined_dense_size)(x)
        arm_out = Dense(arm_size)(x)
        gripper_out = Dense(gripper_size)(x)
        model = Model(ins, [arm_out, gripper_out])
        optimizer = self.getOptimizer()
        model.compile(loss='mse', optimizer=optimizer)
        self.model = model

    def train(self, features, arm, gripper, arm_cmd, gripper_cmd, example, label, *args, **kwargs):
        """Chunk the trajectories, build the model, and fit it."""
        [features, arm, gripper, arm_cmd, gripper_cmd] = SplitIntoChunks(datasets=[features, arm, gripper, arm_cmd, gripper_cmd], labels=example, reward=None, chunk_length=self.num_frames, front_padding=False, rear_padding=True)
        # Supervise with the final command of each chunk.
        arm_cmd_target = LastInChunk(arm_cmd)
        gripper_cmd_target = LastInChunk(gripper_cmd)
        self._makeModel(features, arm, gripper, arm_cmd, gripper_cmd, *args, **kwargs)
        self.model.summary()
        self.model.fit(x=[features, arm, gripper], y=[arm_cmd_target, gripper_cmd_target], epochs=self.epochs, initial_epoch=self.initial_epoch, batch_size=self.batch_size)

    def plot(self):
        # No visualization for this model.
        pass

    def predict(self, world):
        """Predict the next (arm, gripper) command from the world's recent history."""
        world.history_length = self.num_frames
        if self.model is None:
            raise RuntimeError('model is missing')
        # Store or create the set of input features we need for the TCN.
        # (FIX: this note was a stray string literal mid-function — a no-op
        # expression statement — converted to a comment.)
        features = world.getHistoryMatrix()
        if isinstance(features, list):
            assert len(features) == len(self.model.inputs)
        if self.model is None:
            raise RuntimeError('model is missing')
        # Add a leading batch dimension to every input.
        features = [f.reshape((1,) + f.shape) for f in features]
        res = self.model.predict(features)
        print(res)
        return res
class L1Dist(nn.Module):
    """Sum of absolute differences between prediction and target."""

    def forward(self, pred, target):
        diff = pred - target
        return diff.abs().sum()
class TestRecurrentIterator(unittest.TestCase, TestCheckpointableIterator):
    """Checkpointing tests for RecurrentIterator using a running-sum recurrence."""

    def setUp(self):
        data = list(range(53))
        # Expected output is the prefix sum of `data`.
        self.expected_result = [0]
        for value in data[1:]:
            self.expected_result.append(self.expected_result[-1] + value)

        def running_sum(prev_state, item):
            total = prev_state + item
            # State and output coincide for a prefix-sum recurrence.
            return (total, total)

        self.iterator = RecurrentIterator(
            NativeCheckpointableIterator(data), running_sum, initial_state=0
        )
class Workspace(object):
    """Owns one RL training run: env, agent, replay buffer, logging and video.

    ``run()`` drives the standard act → store → update loop, ``evaluate()``
    rolls out deterministic episodes, and ``reuse_head()`` warm-starts the
    actor trunk and critic Q-heads from a previously trained agent.

    NOTE(review): the source had its indentation stripped; block nesting below
    was reconstructed to match the standard DrQ training-script layout —
    confirm against upstream history.
    """

    def __init__(self, cfg):
        self.work_dir = os.getcwd()
        print(f'workspace: {self.work_dir}')
        self.cfg = cfg
        self.logger = Logger((self.work_dir + ',env=') + cfg.env, save_tb=cfg.log_save_tb, log_frequency=cfg.log_frequency_step, agent=cfg.agent.name, action_repeat=cfg.action_repeat)
        utils.set_seed_everywhere(cfg.seed)
        self.device = torch.device(cfg.device)
        self.env = make_env(cfg)
        # Fill in env-dependent agent parameters before instantiation.
        cfg.agent.params.obs_shape = self.env.observation_space.shape
        cfg.agent.params.action_shape = self.env.action_space.shape
        cfg.agent.params.action_range = [float(self.env.action_space.low.min()), float(self.env.action_space.high.max())]
        self.agent = hydra.utils.instantiate(cfg.agent)
        self.replay_buffer = ReplayBuffer(self.env.observation_space.shape, self.env.action_space.shape, cfg.replay_buffer_capacity, self.cfg.image_pad, self.device)
        self.video_recorder = VideoRecorder(((self.work_dir + ',env=') + cfg.env) if cfg.save_video else None)
        self.step = 0

    def evaluate(self):
        """Run num_eval_episodes deterministic episodes and log the mean return."""
        average_episode_reward = 0
        for episode in range(self.cfg.num_eval_episodes):
            obs = self.env.reset()
            self.video_recorder.init(enabled=(episode == 0))  # record first episode only
            done = False
            episode_reward = 0
            episode_step = 0
            while not done:
                with utils.eval_mode(self.agent):
                    action = self.agent.act(obs, sample=False)
                (obs, reward, done, info) = self.env.step(action)
                self.video_recorder.record(self.env)
                episode_reward += reward
                episode_step += 1
            average_episode_reward += episode_reward
            self.video_recorder.save(f'{self.step}.mp4')
        average_episode_reward /= self.cfg.num_eval_episodes
        self.logger.log('eval/episode_reward', average_episode_reward, self.step)
        self.logger.dump(self.step)

    def update_optimizer(self, scale=0.01):
        """Rescale the agent's optimizer learning rates by *scale*."""
        self.agent.update_optimizer(scale)

    def reuse_head(self, reuse_model):
        """Copy trunk weights of the actor and Q1/Q2 weights of the critic from
        *reuse_model* into the current agent (critic target included, so the
        online and target networks start in sync)."""
        for (reuse, new) in zip(reuse_model.actor.named_parameters(), self.agent.actor.named_parameters()):
            reuse_name = reuse[0]
            reuse_param = reuse[1]
            new_name = new[0]
            new_param = new[1]
            if 'trunk' in reuse_name:
                print(reuse_name)
                new_param.data.copy_(reuse_param.data)
        for (reuse, new) in zip(reuse_model.critic.named_parameters(), self.agent.critic.named_parameters()):
            reuse_name = reuse[0]
            reuse_param = reuse[1]
            new_name = new[0]
            new_param = new[1]
            if ('Q1' in reuse_name) or ('Q2' in reuse_name):
                print(reuse_name)
                new_param.data.copy_(reuse_param.data)
        for (reuse, new) in zip(reuse_model.critic.named_parameters(), self.agent.critic_target.named_parameters()):
            reuse_name = reuse[0]
            reuse_param = reuse[1]
            new_name = new[0]
            new_param = new[1]
            if ('Q1' in reuse_name) or ('Q2' in reuse_name):
                print(reuse_name)
                new_param.data.copy_(reuse_param.data)

    def run(self, reuse, round, reuse_model):
        """Main training loop for one round.

        With reuse=True the entropy temperature is reset very low, optimizers
        are rescaled, and (in round 1) weights are warm-started from
        *reuse_model*.
        """
        if reuse:
            self.agent.log_alpha = torch.tensor(np.log(0.0001)).to('cuda')
            self.agent.log_alpha.requires_grad = True
            self.update_optimizer(scale=self.cfg.scale)
            self.agent.set_reuse()
            if round == 1:
                self.reuse_head(reuse_model)
        (episode, episode_reward, episode_step, done) = (0, 0, 1, True)
        start_time = time.time()
        while (self.step * self.cfg.action_repeat) <= (self.cfg.num_train_steps * round):
            if done:
                if self.step > 0:
                    self.logger.log('train/duration', time.time() - start_time, self.step)
                    start_time = time.time()
                    self.logger.dump(self.step, save=(self.step > self.cfg.num_seed_steps))
                if (self.step % self.cfg.eval_frequency) == 0:
                    self.logger.log('eval/episode', episode, self.step)
                    self.evaluate()
                self.logger.log('train/episode_reward', episode_reward, self.step)
                obs = self.env.reset()
                done = False
                episode_reward = 0
                episode_step = 0
                episode += 1
                self.logger.log('train/episode', episode, self.step)
            if self.step < self.cfg.num_seed_steps:
                action = self.env.action_space.sample()  # warm-up: uniform exploration
            else:
                with utils.eval_mode(self.agent):
                    action = self.agent.act(obs, sample=True)
            if self.step >= self.cfg.num_seed_steps:
                for _ in range(self.cfg.num_train_iters):
                    self.agent.update(self.replay_buffer, self.logger, self.step)
            (next_obs, reward, done, info) = self.env.step(action)
            done = float(done)
            # Ignore time-limit termination when computing the bootstrap mask.
            done_no_max = 0 if (episode_step + 1) == self.env._max_episode_steps else done
            episode_reward += reward
            self.replay_buffer.add(obs, action, reward, next_obs, done, done_no_max)
            if ((self.step * self.cfg.action_repeat) % 20000) == 0:
                print('save the {}-th model'.format(self.step))
                self.agent.save_model((self.work_dir + ',env=') + self.cfg.env, self.step)
            obs = next_obs
            episode_step += 1
            self.step += 1
def save_model(model: torch.nn.Module, path, compression='fp32'):
    """Save *model* under directory *path* (weights plus nano_model_meta.yml).

    Models exposing a custom ``_save`` hook delegate to it. Otherwise the state
    dict is written to ``saved_weight.pt`` (optionally bf16-compressed) next to
    a metadata YAML describing the checkpoint.
    """
    path = Path(path)
    # FIX: `parents` expects a bool; the original passed `path.parent` (a Path,
    # always truthy), which only worked by accident.
    path.mkdir(parents=True, exist_ok=True)
    if hasattr(model, '_save'):
        model._save(path, compression=compression)
        return
    meta_path = path / 'nano_model_meta.yml'
    metadata = {'ModelType': 'PytorchModel', 'checkpoint': 'saved_weight.pt', 'compression': 'fp32'}
    checkpoint_path = path / metadata['checkpoint']
    if compression == 'bf16':
        bf16_sd = transform_state_dict_to_dtype(model.state_dict(), dtype='bf16')
        torch.save(bf16_sd, checkpoint_path)
        metadata['compression'] = 'bf16'
    else:
        # Any value other than 'bf16' falls back to full-precision weights.
        torch.save(model.state_dict(), checkpoint_path)
    with open(meta_path, 'w+') as f:
        yaml.safe_dump(metadata, f)
def Yogi(model_param, lr=0.01, betas=(0.9, 0.999), eps=0.001, initial_accumulator=1e-06, weight_decay=0):
    """Thin factory returning a Yogi optimizer configured with these hyper-parameters."""
    return optim.Yogi(
        model_param,
        lr=lr,
        betas=betas,
        eps=eps,
        initial_accumulator=initial_accumulator,
        weight_decay=weight_decay,
    )
# NOTE(review): this decorator survived in the source only as a bare argument
# tuple; restored as hydra's entry-point decorator — confirm against upstream.
@hydra.main(version_base=None, config_path='../config', config_name='main')
def main(cfg: DictConfig):
    """Compile-and-execute entry point.

    Loads one or more models, obtains an input oracle for each (raw pickled
    input, a stored oracle.pkl, or freshly generated random inputs), then runs
    `verify_testcase` against the configured backend.
    """
    cmp_cfg = cfg['cmp']
    seed = random.getrandbits(32) if cmp_cfg['seed'] is None else cmp_cfg['seed']
    EXEC_LOG.info(f'Using seed {seed}')
    model_cfg = cfg['model']
    ModelType = Model.init(model_cfg['type'], cfg['backend']['target'])
    ModelType.add_seed_setter()
    if isinstance(model_cfg['path'], ListConfig):
        model_paths = model_cfg['path']
        if cmp_cfg['save'] is not None:
            assert isinstance(cmp_cfg['save'], ListConfig), 'With multiple models compiled-and-executed together, the `save` must also be a list of paths.'
    else:
        model_paths = [model_cfg['path']]
    output_dirs = cmp_cfg['save']
    if isinstance(output_dirs, (int, float, str)):
        output_dirs = [Path(output_dirs)]
    for (i, model_path) in enumerate(model_paths):
        try:
            model = ModelType.load(model_path)
        except Exception:
            EXEC_LOG.error(f'Failed to load {model_path}: {traceback.format_exc()}')
            if model_cfg['skip_err']:
                continue
            # NOTE(review): without skip_err, execution falls through with
            # `model` unbound and crashes below with NameError; consider
            # re-raising here instead.
        model_basename = os.path.basename(os.path.normpath(model_path))
        test_inputs = None
        test_outputs = None
        provider = 'unknown'
        if cmp_cfg['raw_input'] is not None:
            EXEC_LOG.info('Using raw input from {} as oracle'.format(cmp_cfg['raw_input']))
            test_inputs = pickle.load(Path(cmp_cfg['raw_input']).open('rb'))
            assert isinstance(test_inputs, dict), 'Raw input type should be Dict[str, np.ndarray]'
            provider = 'raw input from {}'.format(cmp_cfg['raw_input'])
        if test_inputs is None:
            oracle_path = None
            if 'auto' == cmp_cfg['oracle']:
                # Look for an oracle.pkl stored next to the model file.
                oracle_path = model_path.replace(model_basename, 'oracle.pkl')
                if not os.path.exists(oracle_path):
                    oracle_path = None
            elif cmp_cfg['oracle'] is not None:
                oracle_path = cmp_cfg['oracle']
            if oracle_path is not None:
                EXEC_LOG.info('Using oracle from {}'.format(oracle_path))
                res = pickle.load(Path(oracle_path).open('rb'))
                test_inputs = res['input']
                test_outputs = res['output']
                provider = res['provider']
        if test_inputs is None:
            # Last resort: synthesize random inputs (no expected outputs).
            EXEC_LOG.info('Generating input data from BackendFactory.make_random_input')
            test_inputs = BackendFactory.make_random_input(model.input_like)
            provider = 'random inputs'
        oracle = Oracle(test_inputs, test_outputs, provider)
        testcase = TestCase(model, oracle)
        this_fac = BackendFactory.init(cfg['backend']['type'], target=cfg['backend']['target'], optmax=cfg['backend']['optmax'])
        output_dir = None if output_dirs is None else output_dirs[i]
        verify_testcase(cmp_cfg, this_fac, testcase, output_dir)
class CorNetXMLCNN(nn.Module):
    """XML-CNN backbone followed by a CorNet correlation block over the label logits."""

    def __init__(self, dropout, labels_num, dynamic_pool_length, bottleneck_dim, num_filters, **kwargs):
        super(CorNetXMLCNN, self).__init__()
        self.xmlcnn = XMLCNN(dropout, labels_num, dynamic_pool_length, bottleneck_dim, num_filters, **kwargs)
        self.cornet = CorNet(labels_num, **kwargs)

    def forward(self, input_variables):
        # Refine the raw XML-CNN logits with label-correlation information.
        return self.cornet(self.xmlcnn(input_variables))
def main(args=None):
    """Spin the visualizer node until interrupted, then shut ROS down cleanly."""
    rclpy.init(args=args)
    node = VisualizerNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        print('Visualization is terminated')
    finally:
        # Always release the node and the ROS context, even on other errors.
        node.destroy_node()
        print('Visualization stopped cleanly')
        rclpy.shutdown()
def plot_counties(df, variable_to_distribute, variables_to_display, state=None, logcolor=False):
    """Draw a bokeh choropleth of *variable_to_distribute* over one state's counties.

    *df* must carry a 'countyFIPS' column (5-char state+county code) plus the
    variables to show; counties absent from *df* are drawn gray with 0.0
    placeholder values. Returns the bokeh figure.
    """
    from bokeh.sampledata.us_counties import data as counties

    counties = {code: county for (code, county) in counties.items() if county['state'] == state.lower()}
    county_xs = [county['lons'] for county in counties.values()]
    county_ys = [county['lats'] for county in counties.values()]
    if variable_to_distribute in variables_to_display:
        variables_to_display.remove(variable_to_distribute)
    colors = palettes.RdBu11
    min_value = df[variable_to_distribute].min()
    max_value = df[variable_to_distribute].max()
    gran = (max_value - min_value) / float(len(colors))
    # Lower edges of the color bins, one per palette entry.
    index_range = [min_value + (x * gran) for x in range(len(colors))]
    county_colors = []
    variable_dictionary = {}
    variable_dictionary['county_names'] = [county['name'] for county in counties.values()]
    variable_dictionary['x'] = county_xs
    variable_dictionary['y'] = county_ys
    variable_dictionary[re.sub('[^\\w]', '', variable_to_distribute)] = []
    for vd in variables_to_display:
        variable_dictionary[re.sub('[^\\w]', '', vd)] = []
    for county_id in counties:
        StateCountyID = str(county_id[0]).zfill(2) + str(county_id[1]).zfill(3)
        if StateCountyID in list(df['countyFIPS'].values):
            temp_var = df[df['countyFIPS'] == StateCountyID][variable_to_distribute].values[0]
            variable_dictionary[re.sub('[^\\w]', '', variable_to_distribute)].append(temp_var)
            for vd in variables_to_display:
                variable_dictionary[re.sub('[^\\w]', '', vd)].append(round(float(df[df['countyFIPS'] == StateCountyID][vd].values), 2))
            # Pick the highest bin whose lower edge does not exceed the value.
            color_idx = list(temp_var - np.array(index_range)).index(min((x for x in list(temp_var - np.array(index_range)) if x >= 0)))
            county_colors.append(colors[color_idx])
        else:
            variable_dictionary[re.sub('[^\\w]', '', variable_to_distribute)].append(0.0)
            county_colors.append('#A9A9A9')  # gray for counties absent from df
            for vd in variables_to_display:
                variable_dictionary[re.sub('[^\\w]', '', vd)].append(0.0)
    variable_dictionary['color'] = county_colors
    source = ColumnDataSource(data=variable_dictionary)
    TOOLS = 'pan,wheel_zoom,box_zoom,reset,hover,save'
    if logcolor:
        mapper = LogColorMapper(palette=colors, low=min_value, high=max_value)
    else:
        mapper = LinearColorMapper(palette=colors, low=min_value, high=max_value)
    color_bar = ColorBar(color_mapper=mapper, location=(0, 0), orientation='horizontal', title=variable_to_distribute, ticker=FixedTicker(ticks=index_range))
    p = figure(title=variable_to_distribute, toolbar_location='left', tools=TOOLS, plot_width=1100, plot_height=700, x_axis_location=None, y_axis_location=None)
    p.patches('x', 'y', source=source, fill_alpha=0.7, fill_color='color', line_color='#884444', line_width=2)
    hover = p.select_one(HoverTool)
    hover.point_policy = 'follow_mouse'
    # FIX(review): the tooltip field references appeared in the source without
    # their '@' prefixes ('_names', '' + key), which bokeh requires for column
    # lookups; restored to '@field' syntax — confirm against upstream history.
    tool_tips = [('County ', '@county_names')]
    for key in variable_dictionary.keys():
        if key not in ['x', 'y', 'color', 'county_names']:
            tool_tips.append((key, ('@' + re.sub('[^\\w]', '', key)) + '{1.11}'))
    hover.tooltips = tool_tips
    p.add_layout(color_bar, 'below')
    return p
def own_ce(x, soft_cluster, weight, theta):
    """Cross-entropy between logits *x* and soft cluster assignments.

    With *weight* given, the softmax denominator is re-weighted per sample via
    `adaptive_weight`, using each sample's argmax cluster weight as normalizer;
    with weight=None this reduces to a plain log-softmax cross-entropy.
    """
    if weight is None:
        log_probs = F.log_softmax(x, 1)
    else:
        # Per-sample weight vector, normalized by the argmax cluster's weight.
        per_sample = []
        for i in range(soft_cluster.shape[0]):
            k = torch.argmax(soft_cluster, dim=1)[i].item()
            per_sample.append(weight * 1 / weight[k])
        total_weight = torch.stack(per_sample, dim=0)
        # Numerically stable softmax: shift logits by their row-wise max.
        shifted = x - torch.max(x, dim=-1)[0].unsqueeze(1).repeat(1, x.shape[1])
        e_x = torch.exp(shifted)
        weighted_logits = e_x * adaptive_weight(x, soft_cluster, total_weight, theta)
        log_probs = torch.log(e_x / torch.sum(weighted_logits, dim=1).reshape(-1, 1) + 1e-08)
    result = soft_cluster * log_probs
    loss = -torch.mean(result, dim=1)
    return torch.mean(loss, dim=0)
def color_normal_eqution(latex_contents, latex_file, color_name):
    """Wrap display-math regions (\\[...\\] and $$...$$) in color commands.

    Regions of 5 characters or fewer are skipped, as are $$ blocks that embed
    graphics rather than an equation. Returns the updated LaTeX contents.
    """
    display_blocks = get_all_begin_brace_nodes(
        latex_contents, latex_file, search_str_bg='\\[', search_str_ed='\\]'
    )
    for brace_nodes in display_blocks:
        node = brace_nodes[-1]
        content = node.get_braced_content(latex_contents)
        if len(''.join(content)) <= 5:
            continue  # too short to be a real equation
        latex_contents = add_color_begin_end_command(
            latex_contents, node, color_name, inner_outer='outer'
        )
    for node in get_all_dollar_symbol(latex_contents, latex_file):
        content = node.get_braced_content(latex_contents)
        joined = ''.join(content)
        # Skip $$ blocks that actually contain figures, not equations.
        if any(word in joined for word in ['\\includegraphics']):
            continue
        latex_contents = add_color_begin_end_command(
            latex_contents, node, color_name, inner_outer='outer'
        )
    return latex_contents
def process_queue(instance_id, queue_url, kill_on_fail):
    """Long-poll an SQS queue and execute each message body as a shell command.

    Runs forever. Each command's stdout/stderr go to per-message files in /tmp
    (stderr is kept only when the command fails), commands are appended to a
    line-buffered log, and failures are reported to Slack. A daemon thread
    watches for the instance's own termination.
    """
    global curr_com
    watchdog = threading.Thread(target=watch_for_instance_death, args=(queue_url, instance_id))
    watchdog.daemon = True
    watchdog.start()
    log_file = open('/tmp/queue_log', 'a+', 1)  # buffering=1: line-buffered
    while True:
        try:
            output = subprocess.check_output(['aws', 'sqs', 'receive-message', '--queue-url', queue_url, '--wait-time-seconds', '20'])
        except subprocess.CalledProcessError:
            # NOTE(review): with kill_on_fail the loop continues after
            # kill_instance() and `output` is unbound on the next line
            # (preexisting behavior, kept as-is).
            if kill_on_fail:
                kill_instance(instance_id)
            else:
                return
        if not output:
            continue  # long poll timed out with no message
        messages = json.loads(output)
        message = messages['Messages'][0]
        subprocess.check_call(['aws', 'sqs', 'delete-message', '--queue-url', queue_url, '--receipt-handle', message['ReceiptHandle']])
        curr_com = message['Body']
        com = message['Body']
        mk_tmp_file = lambda suffix: open(os.path.join('/tmp', '{}.{}'.format(message['MessageId'], suffix)), 'w')
        with mk_tmp_file('stdout') as stdout, mk_tmp_file('stderr') as stderr:
            log_file.write(com + '\n')
            log_file.write('--> stdout: {}, stderr: {}\n\n'.format(stdout.name, stderr.name))
            proc = subprocess.Popen(message['Body'], shell=True, cwd=os.environ['ITHEMAL_HOME'], stdout=stdout, stderr=stderr)
            proc.wait()
            if not proc.returncode:
                os.unlink(stderr.name)  # keep stderr only for failed commands
        curr_com = None
        if proc.returncode:
            error_msg = 'Command `{}` failed with exit code {} on instance {}'.format(message['Body'], proc.returncode, instance_id)
            subprocess.call([os.path.join(os.environ['ITHEMAL_HOME'], 'aws', 'ping_slack.py'), error_msg])
def test_benchmark_dataset():
    """Iterate the IC15 LMDB evaluation set (grayscale) and print each sample."""
    dataset = generator_lmdb('/data/ocr/reg/evaluation/IC15_2077', rgb=False)
    for sample in dataset:
        print(sample)
def original_monotonic(vec1, vec2, vec3):
    """Return, elementwise, whether vec3 lies inclusively between vec1 and vec2.

    Taken verbatim from a reference implementation (FIX: the original docstring
    was an unterminated string literal — a syntax error — whose source URL was
    lost in extraction). Works on array-likes that support elementwise
    comparison, e.g. numpy arrays or torch tensors.
    """
    increasing_dims = vec1 > vec2
    decreasing_dims = vec1 < vec2
    equal_dims = vec1 == vec2
    vec3_greater_vec1 = vec3 >= vec1
    vec3_greater_vec2 = vec3 >= vec2
    vec3_lesser_vec1 = vec3 <= vec1
    vec3_lesser_vec2 = vec3 <= vec2
    vec3_equal_vec1 = vec3 == vec1
    vec3_equal_vec2 = vec3 == vec2
    # Per dimension: if vec1 > vec2 require vec2 <= vec3 <= vec1; if vec1 < vec2
    # require vec1 <= vec3 <= vec2; if equal require exact equality.
    # Boolean arithmetic: * acts as AND, + as OR.
    valid = (
        increasing_dims * vec3_lesser_vec1 * vec3_greater_vec2
        + decreasing_dims * vec3_greater_vec1 * vec3_lesser_vec2
        + equal_dims * vec3_equal_vec1 * vec3_equal_vec2
    )
    return valid
class Hourglass(nn.Module):
    """3-D hourglass: two stride-2 encoder stages followed by two deconv decoder
    stages, with optional additive skip injections (presqu/postsqu) coming from
    a sibling hourglass pass."""

    def __init__(self, in_planes, batchNorm=True):
        super(Hourglass, self).__init__()
        self.batchNorm = batchNorm
        expanded = in_planes * 2  # channel width after the first downsampling
        self.conv1 = conv3d_bn_relu(self.batchNorm, in_planes, expanded, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv2 = conv3d_bn(self.batchNorm, expanded, expanded, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv3 = conv3d_bn_relu(self.batchNorm, expanded, expanded, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv4 = conv3d_bn_relu(self.batchNorm, expanded, expanded, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv5 = deconv3d_bn(self.batchNorm, expanded, expanded, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False)
        self.conv6 = deconv3d_bn(self.batchNorm, expanded, in_planes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False)

    def forward(self, x, presqu, postsqu):
        down = self.conv1(x)
        pre = self.conv2(down)
        # conv2 has no ReLU; inject the sibling's skip (if any) before activating.
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        bottom = self.conv4(self.conv3(pre))
        if presqu is not None:
            post = F.relu(self.conv5(bottom) + presqu, inplace=True)
        else:
            post = F.relu(self.conv5(bottom) + pre, inplace=True)
        return (self.conv6(post), pre, post)
def path2str(path: T_path) -> str:
    """Normalize a filesystem path (Path or str) to str; rejects other types."""
    assert isinstance(path, (Path, str)), type(path)
    return str(path)
_arg_scope
# NOTE(review): the bare `_arg_scope` expression above looks like a mangled
# decorator (e.g. `@slim.add_arg_scope`) lost during extraction/refactor —
# confirm against version control; as written it is a no-op name reference.
def apply_activation(x, name=None, activation_fn=None):
    """Apply `activation_fn(x, name=name)` when a function is given;
    otherwise return `x` unchanged."""
    return (activation_fn(x, name=name) if activation_fn else x)
class BertSelfOutput(nn.Module):
    """Post-attention projection block: dense projection, dropout, then a
    residual add with `input_tensor` followed by layer normalization."""

    def __init__(self, config):
        super().__init__()
        # Attribute names match the pretrained state-dict keys; keep them.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project and regularize, then normalize the residual sum.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
def loss_D_fn(P, D, options, images, gen_images):
    """Compute the discriminator loss for one batch.

    Supports the 'nonsat', 'wgan', 'hinge' and 'lsgan' objectives (selected
    via options['loss']). Returns (loss, stats) where stats carries the
    gradient penalty and mean real/fake logits for logging.
    """
    fake = gen_images.detach()  # block gradients into the generator
    n_real = images.size(0)
    batch = torch.cat([images, fake], dim=0)
    logits = D(batch)
    real_logits, fake_logits = logits[:n_real], logits[n_real:]
    loss_name = options['loss']
    if loss_name == 'nonsat':
        d_loss = F.softplus(fake_logits).mean() + F.softplus(-real_logits).mean()
    elif loss_name == 'wgan':
        d_loss = fake_logits.mean() - real_logits.mean()
    elif loss_name == 'hinge':
        d_loss = (F.relu(1.0 + fake_logits, inplace=True).mean()
                  + F.relu(1.0 - real_logits, inplace=True).mean())
    elif loss_name == 'lsgan':
        d_loss = 0.5 * ((((real_logits - 1.0) ** 2)).mean() + ((fake_logits ** 2)).mean())
    else:
        raise NotImplementedError()
    penalty = compute_penalty(P.penalty, P=P, D=D, all_images=batch,
                              images=images, gen_images=fake,
                              d_real=real_logits, d_gen=fake_logits,
                              lbd=options['lbd'], lbd2=options['lbd2'])
    return (d_loss, {'penalty': penalty,
                     'd_real': real_logits.mean(),
                     'd_gen': fake_logits.mean()})
def mkdir_if_missing(dir_path):
    """Create `dir_path` (including parents) if it does not already exist.

    Uses `exist_ok=True` (Python 3.2+) instead of the legacy
    try/except-errno.EEXIST idiom. Unlike the old version, which swallowed
    EEXIST even when the path was an existing *file*, this raises
    FileExistsError in that case — which surfaces a real misconfiguration.
    """
    os.makedirs(dir_path, exist_ok=True)
def glu(x):
    """Gated Linear Unit: split the last axis in half and gate the second
    half by the sigmoid of the first half."""
    gate, value = tf.split(x, 2, axis=-1)
    return tf.sigmoid(gate) * value
def register_nnModule_class():
    """Register every class that (transitively) subclasses nn.Module.

    Pass 1 finds classes whose parents are directly in a fixed set of torch
    base classes; the loop then repeats with each newly found class acting as
    a parent, until no new subclasses appear. Results accumulate in the
    module-global registries `globals.list_class_name` and
    `globals.list_class_def_instance`.
    """
    logger.info('Analyzing nn.Module class definitions in all files ...')
    # Pass 1: direct subclasses of the known torch/transformers base classes.
    for cl in globals.list_code_line_instance:
        parent_class_has_nnModule = (list((set(cl.parent_class_name) & set(['nn.Module', 'torch.nn.Module', 'nn.Sequential', 'torch.Sequential', '_BaseAutoModelClass']))) != [])
        if (cl.is_class_def_line and parent_class_has_nnModule):
            CD = ClassDefinition(class_name=cl.class_name, file_path=cl.file_path, class_def_line_idx=cl.class_def_line_idx, parent_class_name=cl.parent_class_name)
            CD.print_info()
            globals.list_class_name.append(cl.class_name)
            globals.list_class_def_instance.append(CD)
    # Transitive closure: repeatedly scan for subclasses of anything found in
    # the previous round, stopping at a fixed point.
    search_scope = globals.list_class_name
    do_search = True
    while do_search:
        list_child_class_name = []
        for cl in globals.list_code_line_instance:
            parent_class_has_nnModule = (list((set(cl.parent_class_name) & set(search_scope))) != [])
            if (cl.is_class_def_line and parent_class_has_nnModule):
                CD = ClassDefinition(class_name=cl.class_name, file_path=cl.file_path, class_def_line_idx=cl.class_def_line_idx, parent_class_name=cl.parent_class_name)
                CD.print_info()
                globals.list_class_name.append(cl.class_name)
                globals.list_class_def_instance.append(CD)
                list_child_class_name.append(cl.class_name)
        # Only the newly discovered names seed the next round.
        search_scope = list_child_class_name
        if (len(search_scope) == 0):
            do_search = False
    # De-duplicate (classes can be rediscovered across rounds).
    globals.list_class_name = list(set(globals.list_class_name))
    logger.debug(f'class name count: {len(globals.list_class_name)}')
    logger.debug(f'class name list : {globals.list_class_name}')
class PPON(nn.Module):
    """Progressive Perception-Oriented Network for super-resolution.

    Three stacked branches share features: a content branch (CFEM/CRM), a
    second branch built from the `ssim_branch` blocks (SFEM/SRM), and a
    third from the `gan_branch` blocks (PFEM/PRM), each adding a residual
    on top of the previous branch's output. `alpha` scales the last
    branch's contribution.
    """
    def __init__(self, in_nc, nf, nb, out_nc, alpha=1.0, upscale=4, act_type='lrelu'):
        super(PPON, self).__init__()
        self.alpha = alpha
        # Number of 2x upsampling stages; upscale==3 is handled as a single
        # 3x stage instead.
        n_upscale = int(math.log(upscale, 2))
        if (upscale == 3):
            n_upscale = 1
        fea_conv = B.conv_layer(in_nc, nf, kernel_size=3)
        rb_blocks = [B.RRBlock_32() for _ in range(nb)]
        LR_conv = B.conv_layer(nf, nf, kernel_size=3)
        ssim_branch = [B.RRBlock_32() for _ in range(2)]
        gan_branch = [B.RRBlock_32() for _ in range(2)]
        upsample_block = B.upconv_block
        if (upscale == 3):
            upsampler = upsample_block(nf, nf, 3, act_type=act_type)
            upsampler_ssim = upsample_block(nf, nf, 3, act_type=act_type)
            upsampler_gan = upsample_block(nf, nf, 3, act_type=act_type)
        else:
            upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
            upsampler_ssim = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
            upsampler_gan = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
        # Per-branch reconstruction tails: feature conv + output-channel conv.
        HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
        HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
        HR_conv0_S = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
        HR_conv1_S = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
        HR_conv0_P = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
        HR_conv1_P = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
        # Feature-extraction modules (FEM) and reconstruction modules (RM).
        self.CFEM = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)))
        self.SFEM = B.sequential(*ssim_branch)
        self.PFEM = B.sequential(*gan_branch)
        self.CRM = B.sequential(*upsampler, HR_conv0, HR_conv1)
        self.SRM = B.sequential(*upsampler_ssim, HR_conv0_S, HR_conv1_S)
        self.PRM = B.sequential(*upsampler_gan, HR_conv0_P, HR_conv1_P)
    def forward(self, x):
        # Each branch refines the features of the previous one and adds its
        # reconstruction as a residual on the previous output.
        out_CFEM = self.CFEM(x)
        out_c = self.CRM(out_CFEM)
        out_SFEM = self.SFEM(out_CFEM)
        out_s = (self.SRM(out_SFEM) + out_c)
        out_PFEM = self.PFEM(out_SFEM)
        out_p = ((self.alpha * self.PRM(out_PFEM)) + out_s)
        return (out_c, out_s, out_p)
def make_dummy_metropolis_fn():
    """Build a deterministic Metropolis step for tests: proposals shift the
    data by [1, 2, 3, 4] and acceptance alternates True/False per walker."""

    def shift_proposal(params, data, key):
        del params
        return (data + jnp.array([1, 2, 3, 4]), key)

    def alternating_acceptance(params, data, proposed_data):
        del params, proposed_data
        return jnp.array([True, False, True, False], dtype=bool)

    def masked_update(data, proposed_data, move_mask):
        # Broadcast the per-walker mask across all trailing data dims.
        mask_shape = ((-1,) + ((len(data.shape) - 1) * (1,)))
        broadcast_mask = jnp.reshape(move_mask, mask_shape)
        return jnp.where(broadcast_mask, proposed_data, data)

    return mcmc.metropolis.make_metropolis_step(
        shift_proposal, alternating_acceptance, masked_update)
def diaresnet200b(**kwargs):
    """DIA-ResNet-200b: 200 blocks with the 'b' stem variant
    (conv1_stride=False). Extra kwargs are forwarded to get_diaresnet."""
    return get_diaresnet(
        blocks=200,
        conv1_stride=False,
        model_name='diaresnet200b',
        **kwargs,
    )
class DotProduct(nn.Module):
    """Batched dot-product similarity between two vector sequences,
    optionally scaled by 1/sqrt(dim) (enabled via the '<prefix>_scale'
    entry of `opt`)."""

    def __init__(self, x1_dim, x2_dim, prefix='sim', opt={}, dropout=None):
        super(DotProduct, self).__init__()
        assert x1_dim == x2_dim
        self.opt = opt
        self.prefix = prefix
        self.scale_on = opt.get('{}_scale'.format(self.prefix), False)
        self.scalor = (1.0 / numpy.power(x2_dim, 0.5))

    def forward(self, x1, x2):
        # x1: (batch, n1, d); x2: (batch, n2, d) -> scores: (batch, n1, n2)
        assert x1.size(2) == x2.size(2)
        similarity = x1.bmm(x2.transpose(1, 2))
        return (similarity * self.scalor) if self.scale_on else similarity
class Response():
    """Mutable container for a service reply: `data` carries the payload
    (a dict, or a list of dicts) and `command` the issuing command's
    metadata."""

    def __init__(self) -> None:
        # Payload defaults to an empty dict; callers may replace it with a
        # list of dicts.
        self.data: Union[(Dict[(str, Any)], List[Dict[(str, Any)]])] = {}
        self.command: Dict[(str, Any)] = {}
def main():
    """Export a trained FCOS/BlendMask detector to ONNX.

    Builds the model from a detectron2-style config, loads the checkpoint,
    patches non-exportable submodules in place, then traces the model on a
    zero dummy image of the requested size with torch.onnx.export.
    """
    parser = argparse.ArgumentParser(description='Export model to the onnx format')
    parser.add_argument('--config-file', default='configs/FCOS-Detection/R_50_1x.yaml', metavar='FILE', help='path to config file')
    parser.add_argument('--width', default=0, type=int)
    parser.add_argument('--height', default=0, type=int)
    parser.add_argument('--level', default=0, type=int)
    parser.add_argument('--output', default='output/fcos.onnx', metavar='FILE', help='path to the output onnx file')
    parser.add_argument('--opts', help="Modify config options using the command-line 'KEY VALUE' pairs", default=[], nargs=argparse.REMAINDER)
    cfg = get_cfg()
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # NOTE(review): freezing nothing and forcing plain BN is presumably for
    # ONNX-export compatibility of the normalization layers — confirm.
    cfg.MODEL.BACKBONE.FREEZE_AT = 0
    cfg.MODEL.RESNETS.NORM = 'BN'
    cfg.MODEL.BASIS_MODULE.NORM = 'BN'
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    logger = setup_logger(output=output_dir)
    logger.info(cfg)
    # Build the model in eval mode and restore the trained weights.
    model = build_model(cfg)
    model.eval()
    model.to(cfg.MODEL.DEVICE)
    logger.info('Model:\n{}'.format(model))
    checkpointer = DetectionCheckpointer(model)
    _ = checkpointer.load(cfg.MODEL.WEIGHTS)
    logger.info('load Model:\n{}'.format(cfg.MODEL.WEIGHTS))
    # Default trace size; --width/--height override when positive.
    (height, width) = (800, 1088)
    if (args.width > 0):
        width = args.width
    if (args.height > 0):
        height = args.height
    input_names = ['input_image']
    dummy_input = torch.zeros((1, 3, height, width)).to(cfg.MODEL.DEVICE)
    output_names = []
    # Patch architecture-specific pieces for export; the patch_* helpers also
    # fill in the exported graph's output names.
    if isinstance(model, BlendMask):
        patch_blendmask(cfg, model, output_names)
    if isinstance(model, ProposalNetwork):
        patch_ProposalNetwork(cfg, model, output_names)
    if hasattr(model, 'proposal_generator'):
        if isinstance(model.proposal_generator, FCOS):
            patch_fcos(cfg, model.proposal_generator)
            patch_fcos_head(cfg, model.proposal_generator.fcos_head)
    torch.onnx.export(model, dummy_input, args.output, verbose=True, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=True)
    logger.info('Done. The onnx model is saved into {}.'.format(args.output))
def get_hrnet_encoder(cfg, init_weight=True, global_mode=False, **kwargs):
    """Build a PoseHighResolutionNet encoder; when init_weight is set, load
    the ImageNet or COCO checkpoint named by cfg.HR_MODEL.PRETR_SET (any
    other value falls back to the model's default initialization)."""
    model = PoseHighResolutionNet(cfg, global_mode=global_mode)
    if not init_weight:
        return model
    pretrain_set = cfg.HR_MODEL.PRETR_SET
    if pretrain_set in ['imagenet']:
        model.init_weights(cfg.HR_MODEL.PRETRAINED_IM)
        logger.info('loaded HRNet imagenet pretrained model')
    elif pretrain_set in ['coco']:
        model.init_weights(cfg.HR_MODEL.PRETRAINED_COCO)
        logger.info('loaded HRNet coco pretrained model')
    else:
        model.init_weights()
    return model
def ground_caption(captions, n_ground=1, prefix='describe visual inputs:', sort=True):
    """Build a (source, target) text pair asking the model to describe
    `n_ground` randomly chosen visual regions.

    The source lists <vis_extra_id_i> tokens after `prefix`; for a single
    region the target is just its caption, otherwise captions are separated
    by <extra_id_j> sentinels. With sort=True the chosen region indices are
    emitted in ascending order.
    """
    n_boxes = len(captions)
    chosen = torch.randperm(n_boxes)[:n_ground]
    if sort:
        chosen = chosen.sort().values
    chosen = chosen.tolist()
    source_parts = [prefix]
    target_parts = []
    if n_ground == 1:
        region = chosen[0]
        source_parts.append(f'<vis_extra_id_{region}>')
        target_parts.append(f'{captions[region]}')
    else:
        for slot, region in enumerate(chosen):
            source_parts.append(f'<vis_extra_id_{region}>')
            target_parts.append(f'<extra_id_{slot}>')
            target_parts.append(f'{captions[region]}')
    return (' '.join(source_parts), ' '.join(target_parts))
def _sample_generator(G, num_samples):
latent_samples = G.sample_latent(num_samples)
generated_data = G(latent_samples)
return generated_data |
def filename_to_url(filename, cache_dir=None):
    """Recover the (url, etag) recorded in the `<filename>.json` sidecar of a
    cached download.

    Falls back to TRANSFORMERS_CACHE when cache_dir is None; raises
    EnvironmentError if the cached file or its metadata is missing.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Check the data file first, then its metadata sidecar.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError('file {} not found'.format(required))
    with open(meta_path, encoding='utf-8') as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
def insert_first_match(cur_page_cls, box, specific_text):
    """Place a new text box into a page's list of column/line entries.

    The entry is inserted immediately before the first existing entry that
    lies below it (larger bottom y) and whose horizontal span overlaps the
    new box by more than 70% of the narrower of the two widths. If no such
    entry exists, the entry is appended in place and the same list object is
    returned; otherwise a new list is returned.

    `cur_page_cls` must be non-empty: the new entry copies its 'page' from
    the first existing entry.
    """
    assert specific_text is not None

    def overlap_len(min1, len1, min2, len2):
        # Length of the intersection of intervals [min1, min1+len1) and
        # [min2, min2+len2); 0 when they are disjoint.
        lo = min(min1, min2)
        hi = max(min1 + len1, min2 + len2)
        return max(0, (len1 + len2) - (hi - lo))

    entry = {'box': box, 'page': cur_page_cls[0]['page'], 'text': specific_text}
    for cl_id, cl in enumerate(cur_page_cls):
        cl_box = cl['box']
        overlap = overlap_len(box[0], box[2] - box[0], cl_box[0], cl_box[2] - cl_box[0])
        narrower = max(min(box[2] - box[0], cl_box[2] - cl_box[0]), 1)
        if (box[3] < cl_box[3]) and ((overlap / narrower) > 0.7):
            # First entry below the new box with sufficient x-overlap:
            # insert just before it (returns a fresh list).
            return cur_page_cls[:cl_id] + [entry] + cur_page_cls[cl_id:]
    # No match: append in place and return the original list.
    cur_page_cls.append(entry)
    return cur_page_cls
def nasnet_large_arg_scope(weight_decay=5e-05, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    """Return the slim arg_scope used by NASNet-Large.

    The nesting matters: the outer scope sets regularizer/initializer for all
    conv/FC layers, the inner scopes disable activations (they are applied
    separately) and attach fused batch-norm defaults; the innermost scope is
    the one returned.
    """
    batch_norm_params = {'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'scale': True, 'fused': True}
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    weights_initializer = tf.contrib.layers.variance_scaling_initializer(mode='FAN_OUT')
    with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d], weights_regularizer=weights_regularizer, weights_initializer=weights_initializer):
        with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
            with arg_scope([slim.conv2d, slim.separable_conv2d], activation_fn=None, biases_initializer=None):
                with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                    return sc
class MomentumAgent(TradingAgent):
    """Momentum trader: tracks 20- and 50-period moving averages of the mid
    price and buys (at the ask) when the 20-MA is at or above the 50-MA,
    otherwise sells (at the bid)."""
    def __init__(self, id: int, symbol, starting_cash, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None, min_size=20, max_size=50, wake_up_freq: NanosecondTime=str_to_ns('60s'), poisson_arrival=True, order_size_model=None, subscribe=False, log_orders=False) -> None:
        super().__init__(id, name, type, random_state, starting_cash, log_orders)
        self.symbol = symbol
        self.min_size = min_size
        self.max_size = max_size
        # Fixed random order size unless an order-size model is supplied, in
        # which case the size is sampled per order in place_orders().
        self.size = (self.random_state.randint(self.min_size, self.max_size) if (order_size_model is None) else None)
        self.order_size_model = order_size_model
        self.wake_up_freq = wake_up_freq
        self.poisson_arrival = poisson_arrival
        if self.poisson_arrival:
            # Mean of the exponential inter-arrival time equals the fixed
            # wake frequency, so the average wake rate is unchanged.
            self.arrival_rate = self.wake_up_freq
        self.subscribe = subscribe
        self.subscription_requested = False
        self.mid_list: List[float] = []
        self.avg_20_list: List[float] = []
        self.avg_50_list: List[float] = []
        self.log_orders = log_orders
        self.state = 'AWAITING_WAKEUP'
    def kernel_starting(self, start_time: NanosecondTime) -> None:
        super().kernel_starting(start_time)
    def wakeup(self, current_time: NanosecondTime) -> None:
        """On wakeup either set up the L2 subscription (subscribe mode) or
        poll the current spread (polling mode)."""
        can_trade = super().wakeup(current_time)
        if (self.subscribe and (not self.subscription_requested)):
            # NOTE(review): freq=int(.0) evaluates to 0 — this looks like a
            # mangled constant (e.g. 1e9 ns); confirm the intended frequency.
            super().request_data_subscription(L2SubReqMsg(symbol=self.symbol, freq=int(.0), depth=1))
            self.subscription_requested = True
            self.state = 'AWAITING_MARKET_DATA'
        elif (can_trade and (not self.subscribe)):
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'
    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """Route spread responses (polling mode) or L2 snapshots (subscribe
        mode) into place_orders()."""
        super().receive_message(current_time, sender_id, message)
        if ((not self.subscribe) and (self.state == 'AWAITING_SPREAD') and isinstance(message, QuerySpreadResponseMsg)):
            (bid, _, ask, _) = self.get_known_bid_ask(self.symbol)
            self.place_orders(bid, ask)
            self.set_wakeup((current_time + self.get_wake_frequency()))
            self.state = 'AWAITING_WAKEUP'
        elif (self.subscribe and (self.state == 'AWAITING_MARKET_DATA') and isinstance(message, MarketDataMsg)):
            (bids, asks) = (self.known_bids[self.symbol], self.known_asks[self.symbol])
            if (bids and asks):
                self.place_orders(bids[0][0], asks[0][0])
            self.state = 'AWAITING_MARKET_DATA'
    def place_orders(self, bid: int, ask: int) -> None:
        """Update the MA trackers with the new mid price and, once both MAs
        exist, place a momentum-following limit order."""
        if (bid and ask):
            self.mid_list.append(((bid + ask) / 2))
            if (len(self.mid_list) > 20):
                self.avg_20_list.append(MomentumAgent.ma(self.mid_list, n=20)[(- 1)].round(2))
            if (len(self.mid_list) > 50):
                self.avg_50_list.append(MomentumAgent.ma(self.mid_list, n=50)[(- 1)].round(2))
            if ((len(self.avg_20_list) > 0) and (len(self.avg_50_list) > 0)):
                if (self.order_size_model is not None):
                    self.size = self.order_size_model.sample(random_state=self.random_state)
                if (self.size > 0):
                    # 20-MA >= 50-MA: upward momentum -> buy at the ask;
                    # otherwise sell at the bid.
                    if (self.avg_20_list[(- 1)] >= self.avg_50_list[(- 1)]):
                        self.place_limit_order(self.symbol, quantity=self.size, side=Side.BID, limit_price=ask)
                    else:
                        self.place_limit_order(self.symbol, quantity=self.size, side=Side.ASK, limit_price=bid)
    def get_wake_frequency(self) -> NanosecondTime:
        if (not self.poisson_arrival):
            return self.wake_up_freq
        else:
            delta_time = self.random_state.exponential(scale=self.arrival_rate)
            return int(round(delta_time))
    # NOTE(review): `ma` takes no self and has no @staticmethod decorator; it
    # works only because it is always called via the class object
    # (MomentumAgent.ma(...)), never on an instance.
    def ma(a, n=20):
        # Trailing n-period moving average via cumulative sums; returns one
        # value per fully-populated window.
        ret = np.cumsum(a, dtype=float)
        ret[n:] = (ret[n:] - ret[:(- n)])
        return (ret[(n - 1):] / n)
class DPTDepthModel(DPT):
    """DPT backbone with a monocular-depth head: conv -> 2x bilinear
    upsample -> conv -> ReLU -> 1-channel conv, optionally ReLU-clamped to
    non-negative values. forward() drops the singleton channel dim."""
    def __init__(self, path=None, non_negative=True, **kwargs):
        # Head widths come from kwargs with defaults:
        # features=256, head_features_1=features, head_features_2=32.
        features = (kwargs['features'] if ('features' in kwargs) else 256)
        head_features_1 = (kwargs['head_features_1'] if ('head_features_1' in kwargs) else features)
        head_features_2 = (kwargs['head_features_2'] if ('head_features_2' in kwargs) else 32)
        # Remove head-only kwargs before forwarding the rest to DPT.
        kwargs.pop('head_features_1', None)
        kwargs.pop('head_features_2', None)
        head = nn.Sequential(nn.Conv2d(head_features_1, (head_features_1 // 2), kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d((head_features_1 // 2), head_features_2, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), (nn.ReLU(True) if non_negative else nn.Identity()), nn.Identity())
        super().__init__(head, **kwargs)
        # Optionally restore pretrained weights from a checkpoint file.
        if (path is not None):
            self.load(path)
    def forward(self, x):
        # (B, 1, H, W) -> (B, H, W)
        return super().forward(x).squeeze(dim=1)
def hernquist_vcirc(r, a_scale=1.0, m=1.0, G=1.0):
    """Circular velocity v_c(r) = sqrt(G * m * r / (r + a_scale)**2) of a
    Hernquist (1990) profile with scale length `a_scale` and total mass `m`."""
    inv_sq = (r + a_scale) ** (-2)
    return np.sqrt(G * m * r * inv_sq)
class IntermediateRosNode():
    """ROS node that redirects arena-2d simulator responses: broadcasts the
    robot pose as a TF transform and republishes laser scans, either
    synchronously (one publish per response) or asynchronously at a fixed
    rate from a small thread-safe cache."""
    def __init__(self, idx_env=0, laser_scan_publish_rate: int=0):
        # laser_scan_publish_rate == 0 -> synchronous republish in the
        # subscriber callback; > 0 -> async publishing at that rate in run().
        self._robot_frame_id = 'arena_robot_{:02d}'.format(idx_env)
        self._header_seq_id = 0
        rospy.init_node('arena_env{:02d}_redirecter'.format(idx_env), anonymous=True)
        self._idx_env = idx_env
        if (laser_scan_publish_rate == 0):
            self._is_laser_scan_publish_asyn = False
        else:
            self._is_laser_scan_publish_asyn = True
            # Keep only the 3 newest scans; the lock guards cross-thread
            # access from the subscriber callback and run().
            self._laser_scan_cache = deque(maxlen=3)
            self._laser_scan_cache_lock = threading.Lock()
            self._laser_scan_pub_rate = laser_scan_publish_rate
            self._laser_scan_pub_rater = rospy.Rate(hz=laser_scan_publish_rate)
            self._new_laser_scan_received = False
        self.start_time = rospy.Time.now()
        self._setSubPub()
    def _setSubPub(self, laser_scan_publish_rate: int=0):
        # NOTE(review): the laser_scan_publish_rate parameter is never used
        # here — likely a leftover from a refactor; confirm before removing.
        namespace_sub = 'arena2d/env_{:d}/'.format(self._idx_env)
        namespace_pub = 'arena2d_intermediate/'
        self._pub_laser_scan = rospy.Publisher((namespace_pub + 'laserscan'), LaserScan, queue_size=1, tcp_nodelay=True)
        self._tf_rospos = tf.TransformBroadcaster()
        rospy.loginfo('intermediate node is waiting for connecting env[{:02d}]'.format(self._idx_env))
        times = 0
        self._sub = rospy.Subscriber((namespace_sub + 'response'), Arena2dResp, self._arena2dRespCallback, tcp_nodelay=True)
        # Block until the simulator side connects (polled every 100 ms).
        while (self._sub.get_num_connections() == 0):
            time.sleep(0.1)
            times += 1
        rospy.loginfo('Successfully connected with arena-2d simulator, took {:3.1f}s.'.format((0.1 * times)))
    def _arena2dRespCallback(self, resp: Arena2dResp):
        """Handle one simulator response: broadcast the robot TF and publish
        (or cache) the stamped laser scan."""
        curr_time = rospy.Time.now()
        robot_pos = resp.robot_pos
        self._tf_rospos.sendTransform((robot_pos.x, robot_pos.y, 0), tft.quaternion_from_euler(0, 0, robot_pos.theta), curr_time, self._robot_frame_id, 'world')
        # Fill in the scan geometry/metadata the simulator does not provide:
        # full 360° sweep at 1° resolution, 5 m max range.
        laser_scan = resp.observation
        laser_scan.angle_min = 0
        laser_scan.angle_max = (2 * np.pi)
        laser_scan.angle_increment = (np.pi / 180)
        laser_scan.range_min = 0
        laser_scan.range_max = 5
        laser_scan.header.frame_id = self._robot_frame_id
        laser_scan.header.seq = self._header_seq_id
        laser_scan.header.stamp = curr_time
        self._header_seq_id += 1
        if (not self._is_laser_scan_publish_asyn):
            self._pub_laser_scan.publish(laser_scan)
        else:
            with self._laser_scan_cache_lock:
                self._laser_scan_cache.append(laser_scan)
    def run(self):
        """Spin: in async mode republish the newest cached scan at the
        configured rate (warning once if scans arrive faster than they are
        republished); in sync mode just hand control to rospy."""
        while (not rospy.is_shutdown()):
            if self._is_laser_scan_publish_asyn:
                with self._laser_scan_cache_lock:
                    len_cache = len(self._laser_scan_cache)
                    if (len_cache == 0):
                        continue
                    else:
                        latest_laser_scan = self._laser_scan_cache[(- 1)]
                        self._pub_laser_scan.publish(latest_laser_scan)
                        # A full cache after the 10 s warm-up means scans
                        # arrive faster than we republish: warn once and
                        # drop everything but the newest scan.
                        if ((len_cache == 3) and ((rospy.Time.now().to_sec() - self.start_time.to_sec()) > 10)):
                            interact_rate = ((2 * 1) / (self._laser_scan_cache[(- 1)].header.stamp.to_sec() - self._laser_scan_cache[0].header.stamp.to_sec()))
                            rospy.logwarn_once('The rate [{:3.1f} FPS] of republishment of the laser scan is lower compared to the receivings [approximately {:3.1f} FPS], therefore some the them are discareded'.format(self._laser_scan_pub_rate, interact_rate))
                            while (len(self._laser_scan_cache) != 1):
                                self._laser_scan_cache.popleft()
                self._laser_scan_pub_rater.sleep()
            else:
                rospy.spin()
def create_dataset(args):
    """Prepare the NMT working directories and build dataset vocabularies.

    Creates <model_path>/{translations,vocab}, reads the per-language max
    vocab sizes from the JSON config, and instantiates NMTDataSet with
    create_vocab=True so the vocab files are (re)generated.

    Fixes over the previous version: directory creation uses exist_ok=True
    instead of a race-prone exists()/makedirs() pair, and the config file
    handle is closed via a context manager instead of being leaked.
    """
    model_path = args.model_path
    os.makedirs(model_path, exist_ok=True)
    result_path = os.path.join(model_path, 'translations')
    os.makedirs(result_path, exist_ok=True)
    vocab_path = os.path.join(model_path, 'vocab')
    os.makedirs(vocab_path, exist_ok=True)
    data_path = args.data_path
    src_lang = args.src
    tgt_lang = args.tgt
    src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
    tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
    # Close the config file deterministically (the old json.load(open(...))
    # leaked the handle).
    with open(args.config, 'r') as config_file:
        params = json.load(config_file)
    src_max_vocab = params['{}_vocab_size'.format(src_lang)]
    tgt_max_vocab = params['{}_vocab_size'.format(tgt_lang)]
    NMTDataSet(data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab, subword=args.subword, create_vocab=True)
def get_j(input):
    """Return the third quarter of the hidden dimension of `input`.

    The hidden size is read from the last axis for tensors with dim < 4 and
    from axis 1 otherwise; the narrow is applied on axis 1 for 2-D input,
    axis 2 for 3-D, and axis 1 for 4-D and above. Inputs with dim < 2 fall
    through and yield None, matching the original behaviour.
    """
    check_input(input)
    hidden_size = input.size()[-1] if input.dim() < 4 else input.size()[1]
    start, length = hidden_size // 2, hidden_size // 4
    if input.dim() == 2:
        return input.narrow(1, start, length)
    if input.dim() == 3:
        return input.narrow(2, start, length)
    if input.dim() >= 4:
        return input.narrow(1, start, length)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1x1 3-D convolution without bias (channel projection / striding)."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
def sample_qtables(minqual=8, maxqual=25, training=True, default_quality=10):
    """Pick a JPEG quality — uniform in [minqual, maxqual) while training,
    the fixed default otherwise — and return (std quantization table, quality)."""
    if training:
        quality = tf.random_uniform(shape=[1], minval=minqual, maxval=maxqual)
    else:
        quality = default_quality
    return (get_std_jpeg_qtable(quality), quality)
def test_implicit_subscript_symbol_does_not_bump_ts():
    """Reading a subscript (lst[0]) must not mark dependent cells stale.

    Runs three cells that define and read `lst`, then checks the dataflow
    analysis reports no waiting and no ready (stale) cells.
    """
    cells = {0: 'lst = [] + [0, 1]', 1: 'logging.info(lst)', 2: 'logging.info(lst[0])'}
    run_all_cells(cells)
    response = flow().check_and_link_multiple_cells()
    assert (response.waiting_cells == set())
    assert (response.ready_cells == set())
class ATAC_FCNHead(HybridBlock):
    """FCN segmentation head with a configurable activation block.

    conv3x3 -> norm -> activation (selected by `head_act`) -> dropout ->
    conv1x1 to the output class channels. Inner width is in_channels // 4.
    """
    def __init__(self, head_act, useReLU, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(ATAC_FCNHead, self).__init__()
        with self.name_scope():
            self.block = nn.HybridSequential()
            inter_channels = (in_channels // 4)
            with self.block.name_scope():
                self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels, kernel_size=3, padding=1, use_bias=False))
                self.block.add(norm_layer(in_channels=inter_channels, **({} if (norm_kwargs is None) else norm_kwargs)))
                # Dispatch on the requested activation type; the ATAC
                # variants are attention-style activation blocks.
                if (head_act == 'prelu'):
                    self.block.add(nn.PReLU())
                elif (head_act == 'relu'):
                    self.block.add(nn.Activation('relu'))
                elif (head_act == 'xUnit'):
                    self.block.add(xUnit(channels=inter_channels))
                elif (head_act == 'SpaATAC'):
                    self.block.add(SpaATAC(skernel=3, channels=inter_channels, dilation=1, useReLU=useReLU))
                elif (head_act == 'ChaATAC'):
                    self.block.add(ChaATAC(channels=inter_channels, useReLU=useReLU, useGlobal=False))
                elif (head_act == 'SeqATAC'):
                    self.block.add(SeqATAC(skernel=3, channels=inter_channels, dilation=1, useReLU=useReLU))
                else:
                    raise ValueError('Unknown act_type')
                self.block.add(nn.Dropout(0.1))
                self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels, kernel_size=1))
    def hybrid_forward(self, F, x):
        return self.block(x)
def plot_image_from_w(w, G):
    """Generate the image for latent `w` with generator G and display it
    via matplotlib (blocks until the window is closed)."""
    array = get_image_from_w(w, G)
    plt.imshow(Image.fromarray(array))
    plt.show()
def choose_item(items):
    """Prompt until the user enters a valid index into `items`; return items[idx].

    Only bad input is retried: non-integer text (ValueError) and
    out-of-range/missing indices (IndexError/KeyError). The previous broad
    `except Exception` also swallowed EOFError, so a closed stdin spun
    forever printing the error message; now EOFError (and any other
    unexpected error) propagates to the caller.
    """
    while True:
        try:
            idx = int(input('Choose number: '))
            return items[idx]
        except (ValueError, IndexError, KeyError):
            print('Invalid choice. Try again.')
class BoxE(BaseModel):
    """BoxE knowledge-graph embedding: entities have a base point plus a
    translational bump; each relation defines two boxes (center/width for
    the head and tail slots) and the score is the summed L2 distance of the
    bumped entity points to their boxes."""
    def __init__(self, entity_dict_len, relation_dict_len, embedding_dim):
        super(BoxE, self).__init__(model_name='BoxE')
        self.embedding_dim = embedding_dim
        self.entity_dict_len = entity_dict_len
        self.relation_dict_len = relation_dict_len
        # Entity: base position + translational bump applied to the other
        # entity in the triple.
        self.entity_embedding_base = nn.Embedding(entity_dict_len, embedding_dim)
        self.entity_embedding_trans = nn.Embedding(entity_dict_len, embedding_dim)
        # Relation: one (center, width) box per argument slot.
        self.relation_embedding_center1 = nn.Embedding(relation_dict_len, embedding_dim)
        self.relation_embedding_width1 = nn.Embedding(relation_dict_len, embedding_dim)
        self.relation_embedding_center2 = nn.Embedding(relation_dict_len, embedding_dim)
        self.relation_embedding_width2 = nn.Embedding(relation_dict_len, embedding_dim)
        self._reset_param()
    def _reset_param(self):
        # Xavier-uniform init for all embedding tables.
        nn.init.xavier_uniform_(self.entity_embedding_base.weight.data)
        nn.init.xavier_uniform_(self.entity_embedding_trans.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding_center1.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding_width1.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding_center2.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding_width2.weight.data)
    def loss(self, data):
        """Margin-style loss from positive triples and sampled negatives."""
        pos_data = self.data_to_device(data)
        neg_data = self.model_negative_sampler.create_negative(data)
        neg_data = self.data_to_device(neg_data)
        pos_score = self.forward(pos_data)
        neg_score = self.forward(neg_data)
        return self.model_loss(pos_score, neg_score)
    def forward(self, sample):
        """Score a batch of (head, relation, tail) index triples; lower
        score = better fit."""
        (batch_h, batch_r, batch_t) = (sample[0], sample[1], sample[2])
        h_base = self.entity_embedding_base(batch_h)
        t_base = self.entity_embedding_base(batch_t)
        h_trans = self.entity_embedding_trans(batch_h)
        t_trans = self.entity_embedding_trans(batch_t)
        # Each entity's point is its base shifted by the *other* entity's bump.
        h = (h_base + t_trans)
        t = (t_base + h_trans)
        c1 = self.relation_embedding_center1(batch_r)
        c2 = self.relation_embedding_center2(batch_r)
        # Widths are made >= 1 so the inside/outside formulas stay stable.
        w1 = (torch.abs(self.relation_embedding_width1(batch_r)) + 1)
        w2 = (torch.abs(self.relation_embedding_width2(batch_r)) + 1)
        score = (torch.norm(self.distance(h, c1, w1), p=2.0, dim=(- 1)) + torch.norm(self.distance(t, c2, w2), p=2.0, dim=(- 1)))
        return score
    def distance(self, e, c, w):
        # Piecewise distance: shrunk |e-c|/w inside the box, amplified
        # |e-c|*w - K outside, with K chosen so the pieces join up.
        score = None
        K = ((0.5 * (w - 1)) * (w - torch.reciprocal(w)))
        if self._is_in_box(e, c, w):
            score = (torch.abs((e - c)) / w)
        else:
            score = ((torch.abs((e - c)) * w) - K)
        return score
    def _is_in_box(self, e, c, w):
        """True iff EVERY element of the whole batch lies inside its box.

        NOTE(review): torch.any() here reduces over the entire tensor, so a
        single out-of-box element sends the whole batch down the
        outside-box branch in distance(); confirm whether a per-element
        mask was intended.
        """
        u = (c + ((w - 1) / 2))
        l = (c - ((w - 1) / 2))
        if (torch.any((e > u)).item() or torch.any((e < l)).item()):
            return False
        else:
            return True
class BaselineEstimator(nn.Module):
    """Viewpoint estimator: a ResNet-18 image encoder, a shared MLP trunk
    (1024 -> 800 -> 400 -> 200 with BN+ReLU), and six linear heads —
    classification and within-bin regression for azimuth, elevation and
    in-plane rotation."""

    def __init__(self, img_feature_dim=1024, azi_classes=24, ele_classes=12, inp_classes=24):
        super(BaselineEstimator, self).__init__()
        self.img_encoder = resnet.resnet18(num_classes=img_feature_dim)
        self.compress = nn.Sequential(
            nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
            nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
            nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
        self.fc_cls_azi = nn.Linear(200, azi_classes)
        self.fc_cls_ele = nn.Linear(200, ele_classes)
        self.fc_cls_inp = nn.Linear(200, inp_classes)
        self.fc_reg_azi = nn.Linear(200, azi_classes)
        self.fc_reg_ele = nn.Linear(200, ele_classes)
        self.fc_reg_inp = nn.Linear(200, inp_classes)

    def forward(self, im):
        # Shared trunk features feed all six heads; the output order is
        # [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp].
        trunk = self.compress(self.img_encoder(im))
        heads = (self.fc_cls_azi, self.fc_cls_ele, self.fc_cls_inp,
                 self.fc_reg_azi, self.fc_reg_ele, self.fc_reg_inp)
        return [head(trunk) for head in heads]
def innermost_tqdm():
    """Return the live tqdm bar with the greatest screen position (i.e. the
    most deeply nested one), or None when no bars are active."""
    instances = getattr(tqdm, '_instances', None)
    if instances:
        return max(instances, key=lambda bar: bar.pos)
    return None
def find_unique_words_in_dataset(talks_read, talk_names, talk_idx, monolingual, include_idx_set_members=False):
    """Collect the lowercased vocabulary over a subset of talks.

    Talk k contributes when `monolingual` is set, or when membership of
    talk_names[k] in talk_idx matches include_idx_set_members (by default
    talks NOT in talk_idx are used). Each talk's utterances are read from
    element 0 of its tuple.
    """
    words = set()
    for position, talk in enumerate(talks_read):
        if not monolingual:
            in_idx = talk_names[position] in talk_idx
            if in_idx != include_idx_set_members:
                continue
        for utterance in talk[0]:
            words.update(token.lower() for token in utterance)
    return words
def use_opencv2():
    """Return True iff the installed OpenCV binding reports major version '2'."""
    try:
        major = cv2.__version__.split('.')[0]
    except TypeError:
        # Non-string __version__ (e.g. a stubbed module): treat as modern.
        major = 4
    return major == '2'
class MultidatasetEpochBatchIterator(iterators.EpochBatchIterating):
    """Epoch iterator over an OrderedDict of FairseqDatasets: one
    EpochBatchIterator per dataset, kept in lock-step across epochs."""
    def __init__(self, dataset, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1):
        assert isinstance(dataset, OrderedDict)
        assert len(dataset)
        assert isinstance(dataset[next(iter(dataset))], FairseqDataset)
        self.iterators = []
        self.epoch = epoch
        for (key, dt) in dataset.items():
            # NOTE(review): the `num_workers` argument is accepted but 0 is
            # hard-coded for each sub-iterator — confirm whether that is
            # deliberate (e.g. to avoid nested worker processes).
            epoch_iter = iterators.EpochBatchIterator(dataset=dt, collate_fn=dt.collater, batch_sampler=batch_sampler[key], seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=0, epoch=epoch)
            self.iterators.append(epoch_iter)
    def __len__(self):
        # Total batches per epoch across all sub-iterators.
        return sum((len(itr) for itr in self.iterators))
    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        # Advance every sub-iterator to its next epoch and bundle them.
        return MultiItr([itr.next_epoch_itr(shuffle=shuffle, fix_batches_to_gpus=fix_batches_to_gpus) for itr in self.iterators])
    def end_of_epoch(self):
        return all((itr.end_of_epoch() for itr in self.iterators))
    # NOTE(review): in fairseq's EpochBatchIterating, next_epoch_idx and
    # iterations_in_epoch are properties; here they are plain methods —
    # confirm callers invoke them accordingly.
    def next_epoch_idx(self):
        # All sub-iterators must agree on the upcoming epoch index.
        epochs = [itr.next_epoch_idx for itr in self.iterators]
        self.epoch = epochs[0]
        assert all(((epoch == self.epoch) for epoch in epochs))
        return self.epoch
    def iterations_in_epoch(self):
        return sum((itr.iterations_in_epoch for itr in self.iterators))
    def state_dict(self):
        # Persist each sub-iterator's state plus the shared epoch counter.
        return {'iterators': [it.state_dict() for it in self.iterators], 'epoch': self.epoch}
    def load_state_dict(self, state_dict):
        self.epoch = state_dict['epoch']
        for (it, d) in zip(self.iterators, state_dict['iterators']):
            it.load_state_dict(d)
def _config_debug(config):
if config.debug:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.val_num_batches = 2
config.test_num_batches = 2 |
def get_named_client_logger(name: str, host: str='localhost', port: int=logging.handlers.DEFAULT_TCP_LOGGING_PORT) -> 'PicklableClientLogger':
    """Create a picklable logger that ships its records to a TCP log server
    at host:port (defaults to the stdlib TCP logging port on localhost)."""
    return PicklableClientLogger(name=name, host=host, port=port)
def pgtrain(optims_gen, optims_dis, generator, agent, discriminator, bsize, embed_dim, trainSample, validSample, testSample, val_acc_best, val_preck_best, val_loss_best, action_num, max_length, recom_length, gen_ratio=0.1, n_epochs=5, write_item='click_gen.txt', write_target='tar_gen.txt', write_reward='reward_gen.txt', write_action='action_gen.txt', plot_fig=True, pretrain=False):
    """Adversarial policy-gradient training loop (IRecGAN-style).

    Each epoch alternates a G-step (policy-gradient or pretraining update of
    the agent + user model, followed by evaluation and checkpointing) with a
    D-step (generate fake interaction sequences, mix with the real ones, and
    retrain the discriminator). Returns the best (accuracy, precision@k)
    seen on the validation set.
    """
    outputdir = 'model_output'
    outputmodelname = 'simu.model.pth'
    lrshrink = 5
    minlr = 1e-05
    loss_fn_target = nn.CrossEntropyLoss()
    loss_fn_reward = nn.BCEWithLogitsLoss()
    # NOTE(review): `size_average` is a deprecated torch loss attribute;
    # confirm it still has an effect on the torch version in use.
    loss_fn_target.size_average = True
    loss_fn_target.to(device)
    loss_fn_reward.size_average = True
    loss_fn_reward.to(device)
    inner_val_preck_best = val_preck_best
    inner_val_acc_best = val_acc_best
    inner_loss_best = val_loss_best
    epoch = 1
    eval_type = 'valid'
    # One generator step and one discriminator step per epoch.
    g_step = 1
    d_step = 1
    evalacc_all = [val_acc_best]
    evalpreck_all = [val_preck_best]
    # Separate optimizers: one for the discriminator, and one each for the
    # agent (policy) and the user model (generator).
    (optim_fn_gen, optim_params_gen) = get_optimizer(optims_gen)
    (optim_fn_dis, optim_params_dis) = get_optimizer(optims_dis)
    optimizer_dis = optim_fn_dis(filter((lambda p: p.requires_grad), discriminator.parameters()), **optim_params_dis)
    params_agent = list(agent.parameters())
    params_usr = list(generator.parameters())
    optimizer_agent = optim_fn_gen(filter((lambda p: p.requires_grad), params_agent), **optim_params_gen)
    optimizer_usr = optim_fn_gen(filter((lambda p: p.requires_grad), params_usr), **optim_params_gen)
    while (epoch <= n_epochs):
        print('\nAdversarial Policy Gradient Training!')
        # Cap of training samples used per policy-gradient update.
        subnum = 8000
        for i in range(g_step):
            print('G-step')
            if pretrain:
                print('For Pretraining')
                _ = train_gen_pg_each(generator, agent, discriminator, epoch, trainSample, trainSample.length(), optimizer_agent, optimizer_usr, bsize, embed_dim, recom_length, max_length, action_num, device, 0, pretrain)
            else:
                print('For Policy Gradient Update')
                _ = train_gen_pg_each(generator, agent, discriminator, epoch, trainSample, subnum, optimizer_agent, optimizer_usr, bsize, embed_dim, recom_length, max_length, action_num, device, 0.1, pretrain)
            # Evaluate the agent, the user model, and their interaction on
            # the validation split after each generator update.
            print('Agent evaluation!')
            (eval_acc, eval_preck) = evaluate_agent(agent, epoch, bsize, recom_length, validSample, testSample, device, eval_type='valid')
            print('User model evaluation!')
            _ = evaluate_user(generator, epoch, bsize, recom_length, validSample, testSample, loss_fn_target, loss_fn_reward, device, eval_type)
            print('Interaction evaluation!')
            _ = evaluate_interaction((generator, agent), epoch, bsize, recom_length, validSample, testSample, loss_fn_target, loss_fn_reward, device, eval_type)
            evalacc_all.append(eval_acc)
            evalpreck_all.append(eval_preck)
            # Checkpoint agent + generator every epoch and record the latest
            # validation metrics as the running best.
            if ((eval_type == 'valid') and (epoch <= n_epochs)):
                print('saving model at epoch {0}'.format(epoch))
                if (not os.path.exists(outputdir)):
                    os.makedirs(outputdir)
                torch.save(agent.state_dict(), os.path.join(outputdir, ('irecGan_agent3.' + outputmodelname)))
                torch.save(generator.state_dict(), os.path.join(outputdir, ('irecGan_gen3.' + outputmodelname)))
                inner_val_acc_best = eval_acc
                inner_val_preck_best = eval_preck
        if (not pretrain):
            print('\nD-step')
            for i in range(d_step):
                # Start from the cached real sequences, append generated
                # ones, then rebuild the discriminator's train/val/test
                # splits and retrain it on the mixture.
                shutil.copy('click_gen_real.txt', write_item)
                shutil.copy('reward_gen_real.txt', write_reward)
                shutil.copy('tar_gen_real.txt', write_target)
                shutil.copy('action_gen_real.txt', write_action)
                (_, _, _, _) = gen_fake(generator, agent, trainSample, bsize, embed_dim, device, write_item, write_target, write_reward, write_action, action_num, max_length, recom_length)
                (clicklist, _) = ReadSeq(write_item, write_reward, write_action, write_target)
                (trainindex_dis, validindex_dis, testindex_dis) = split_index(0.7, 0.1, len(clicklist), True)
                (trainSample_dis, validSample_dis, testSample_dis) = sampleSplit(trainindex_dis, validindex_dis, testindex_dis, clicklist, 2, recom_length, 'dis')
                (discriminator, _, _) = train_dis(optims_dis, discriminator, bsize, embed_dim, recom_length, trainSample_dis, validSample_dis, testSample_dis)
        epoch += 1
    if (plot_fig == True):
        save_plot(n_epochs, 1, evalacc_all, 'pg_accuracy6.png')
        save_plot(n_epochs, 1, evalpreck_all, 'pg_map6.png')
    return (inner_val_acc_best, inner_val_preck_best)
class TestTrackers(unittest.TestCase):
    """Smoke test: the identity tracker must produce one box per annotation frame."""

    def setUp(self):
        # Root folder holding the GOT-10k benchmark checkout.
        self.data_dir = 'data'
        self.tracker = IdentityTracker()

    def tearDown(self):
        pass

    def test_identity_tracker(self):
        dataset = GOT10k(os.path.join(self.data_dir, 'GOT-10k'), subset='val')
        img_files, anno = random.choice(dataset)
        boxes, times = self.tracker.track(img_files, anno[0], visualize=True)
        self.assertEqual(boxes.shape, anno.shape)
        self.assertEqual(len(times), len(anno))
def test_digits_sqrt_lazy_object():
    """sqrt feature-based selection with a LazyGreedy optimizer object must
    reproduce the reference ranking, gains, and selected subset."""
    selector = FeatureBasedSelection(100, 'sqrt', optimizer=LazyGreedy())
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_sqrt_ranking)
    assert_array_almost_equal(selector.gains, digits_sqrt_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
class MySpatialPyramidPooling(nn.Module):
    """Spatial pyramid pooling head.

    Projects the input with a 1x1 BN+ReLU+Conv, pools it to `level_num` grid
    resolutions, projects each pooled level, upsamples all levels back to the
    input resolution, and fuses the concatenation with a final 1x1 conv.
    """

    def __init__(self, channels_in, channels_out, level_num, spp_channels, level_channels, grid=(8, 4, 2, 1), bn_momentum=0.1):
        super(MySpatialPyramidPooling, self).__init__()
        # NOTE(review): assumes level_num <= len(grid) — confirm at call sites.
        self.grid = grid
        self.level_num = level_num
        self.SPP_BN = _BNReluConv(channels_in, spp_channels, kernel_size=1, bn_momentum=bn_momentum)
        self.spp = nn.Sequential()
        for i in range(level_num):
            self.spp.add_module('SPP' + str(i), _BNReluConv(spp_channels, level_channels, kernel_size=1, bn_momentum=bn_momentum))
        # Fused input = base feature map plus one projected map per pyramid level.
        final_cat_channels = spp_channels + (level_num * level_channels)
        self.SPP_FUSE = _BNReluConv(final_cat_channels, channels_out, kernel_size=1, bn_momentum=bn_momentum)

    def forward(self, x):
        feature_size = x.size()[2:4]
        # Preserve the input aspect ratio when choosing each pooling grid.
        width_divide_height = feature_size[1] / feature_size[0]
        spp_base = self.SPP_BN(x)
        levels = [spp_base]
        for i in range(self.level_num):
            pool_dst_size = (self.grid[i], max(1, round(self.grid[i] * width_divide_height)))
            spp_pool = F.adaptive_avg_pool2d(spp_base, pool_dst_size)
            # Call the module itself (not .forward) so nn.Module hooks still run.
            level = self.spp[i](spp_pool)
            level = upsample(level, feature_size)
            levels.append(level)
        spp_cat = torch.cat(levels, 1)
        return self.SPP_FUSE(spp_cat)
def print_info(s):
    """Print *s* in blue, prefixed with the current time in brackets."""
    message = TerminalColors.OKBLUE + '[' + get_time() + '] ' + str(s) + TerminalColors.ENDC
    print(message)
class BilinearMasked(Bilinear, _BaseRealMixin):
    """Bilinear layer whose forward pass uses the mask-adjusted weight tensor."""

    def forward(self, input1, input2):
        # Substitute the mixin-provided masked weight for the plain weight.
        weight = self.weight_masked
        return F.bilinear(input1, input2, weight, self.bias)
class FangraphsPitchingStatsTable(FangraphsDataTable):
    """Fangraphs pitching-stats table: fetches pitching leaderboards, moves the
    WAR column near the front, and sorts by WAR then wins."""

    STATS_CATEGORY: FangraphsStatsCategory = FangraphsStatsCategory.PITCHING
    DEFAULT_STAT_COLUMNS: List[FangraphsStatColumn] = FangraphsPitchingStats.ALL()
    ROW_ID_FUNC: RowIdFunction = player_row_id_func
    ROW_ID_NAME = 'IDfg'

    # Was a bare `_cache()` statement, which built the caching decorator and
    # threw it away; apply it to fetch as intended.
    @_cache()
    def fetch(self, *args, **kwargs):
        """Cached passthrough to the base-class fetch."""
        return super().fetch(*args, **kwargs)

    def _postprocess(self, data: pd.DataFrame) -> pd.DataFrame:
        """Relocate WAR to (at most) column 7, then sort by WAR and W descending."""
        if 'WAR' in data.columns:
            new_position = min(7, len(data.columns) - 1)
            columns = data.columns.tolist()
            columns.insert(new_position, columns.pop(columns.index('WAR')))
            data = data.reindex(columns=columns)
        return self._sort(data, ['WAR', 'W'], ascending=False)
class TreeIterator():
    """Depth-first iterator over a tree exposing `.subtrees` and `.parent`.

    order='pre' yields a node before its children; order='post' yields it
    after all of its children. `self.pos` is a stack of child indices:
    pos[-1] is the index of the next unvisited child of the current node
    held in `self.tree`.

    NOTE(review): the walk climbs back up via `.parent` links, so parent
    pointers must be consistent with `.subtrees` — confirm with callers.
    """

    def __init__(self, tree, order='pre'):
        self.tree = tree
        self.pos = [0]
        self.order = order

    def __iter__(self):
        return self

    def __next__(self):
        # Advance the traversal until some node is due to be emitted.
        while True:
            if (len(self.pos) == 0):
                # Climbed above the starting node: traversal finished.
                raise StopIteration
            ans = None
            # Pre-order: emit on first arrival (no child visited yet).
            if ((self.order == 'pre') and (self.pos[(- 1)] == 0)):
                ans = self.tree
            if (self.pos[(- 1)] < len(self.tree.subtrees)):
                # Descend into the next unvisited child and start at its first child.
                self.tree = self.tree.subtrees[self.pos[(- 1)]]
                self.pos[(- 1)] += 1
                self.pos.append(0)
            else:
                # All children exhausted: emit for post-order, then climb up.
                if (self.order == 'post'):
                    ans = self.tree
                self.tree = self.tree.parent
                self.pos.pop()
            if (ans is not None):
                return ans
class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable convolution block (depthwise conv -> optional SE ->
    pointwise conv), with an identity residual when shapes allow it and
    `noskip` is not set."""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, pw_act=False, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0):
        super(DepthwiseSeparableConv, self).__init__()
        assert (stride in [1, 2])
        norm_kwargs = norm_kwargs if norm_kwargs else {}
        # Residual is only valid when the block keeps resolution and width.
        self.has_residual = (stride == 1) and (in_chs == out_chs) and (not noskip)
        self.drop_connect_rate = drop_connect_rate
        self.conv_dw = select_conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        if (se_ratio is not None) and (se_ratio > 0.0):
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = nn.Identity()
        self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if pw_act else nn.Identity()

    def forward(self, x):
        shortcut = x
        # Depthwise stage.
        x = self.act1(self.bn1(self.conv_dw(x)))
        x = self.se(x)
        # Pointwise stage (activation only when pw_act was requested).
        x = self.act2(self.bn2(self.conv_pw(x)))
        if self.has_residual:
            if self.drop_connect_rate > 0.0:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += shortcut
        return x
def map_dataset(examples: dict[(str, list[str])], args: 'Args', context: TokenizationContext) -> dict:
    """Tokenize instruction/response pairs into training tensors.

    Builds prompt+response token sequences, masks the prompt positions in the
    labels with IGNORED_INDEX so loss is computed only on the response, and
    truncates everything to args.max_training_seq_length. Also reports which
    sequences exceeded that length before truncation.
    """
    instructions = examples['instruction']
    responses = examples['response']
    prompts = [MAGICODER_PROMPT.format(instruction=instruction, response='') for instruction in instructions]
    completions = responses
    assert len(prompts) == len(completions)
    # BOS belongs to the prompt, EOS to the completion, so the join is seamless.
    prompt_config = EncodingConfig(add_bos=True, add_eos=False)
    completion_config = EncodingConfig(add_bos=False, add_eos=True)
    prompt_id_batches = context.encode(prompt_config, prompts)
    completion_id_batches = context.encode(completion_config, completions)
    assert len(prompt_id_batches) == len(completion_id_batches)
    untruncated_input_ids = [prompt_ids + response_ids for prompt_ids, response_ids in zip(prompt_id_batches, completion_id_batches)]
    exceeding_length = [len(ids) > args.max_training_seq_length for ids in untruncated_input_ids]
    input_ids = [ids[:args.max_training_seq_length] for ids in untruncated_input_ids]
    # Prompt positions are masked out; only response tokens are supervised.
    labels = [([IGNORED_INDEX] * len(prompt_ids) + response_ids)[:args.max_training_seq_length] for prompt_ids, response_ids in zip(prompt_id_batches, completion_id_batches)]
    assert len(input_ids) == len(labels)
    for seq_ids, seq_labels in zip(input_ids, labels):
        assert len(seq_ids) == len(seq_labels)
    # Debug preview of the first example. Was `input_ids[0:]`, which copied and
    # decoded the ENTIRE batch only to print element 0.
    print(context.decode(DecodingConfig.default(), input_ids[:1])[0])
    return {'input_ids': input_ids, 'labels': labels, 'exceeding_length': exceeding_length}
class LossTracker():
    """Accumulate per-iteration loss values and record per-epoch means.

    Each named track is a running-mean accumulator; `register_means` snapshots
    every track at the end of an epoch, resets it, and rewrites `log.csv` in
    `output_folder`. `plot` renders all recorded series to `plot.png`.
    """

    def __init__(self, output_folder='.'):
        self.tracks = OrderedDict()              # name -> running-mean accumulator
        self.epochs = []                         # epochs already registered
        self.means_over_epochs = OrderedDict()   # name -> list of per-epoch means
        self.output_folder = output_folder

    def update(self, d):
        """Fold a dict of name -> value into the accumulators (auto-creates tracks)."""
        for k, v in d.items():
            if k not in self.tracks:
                self.add(k)
            self.tracks[k] += v

    def add(self, name, pytorch=True):
        """Create and return a new track; pytorch=True uses the torch-aware mean."""
        assert name not in self.tracks, 'Name is already used'
        track = RunningMeanTorch() if pytorch else RunningMean()
        self.tracks[name] = track
        self.means_over_epochs[name] = []
        return track

    def register_means(self, epoch):
        """Snapshot every track's mean for *epoch*, reset it, and rewrite log.csv."""
        self.epochs.append(epoch)
        for key in self.means_over_epochs.keys():
            if key in self.tracks:
                value = self.tracks[key]
                self.means_over_epochs[key].append(value.mean())
                value.reset()
            else:
                # Track was never (re)created this run: pad to keep columns aligned.
                self.means_over_epochs[key].append(None)
        # newline='' is required by the csv module; without it every row is
        # followed by a blank line on Windows.
        with open(os.path.join(self.output_folder, 'log.csv'), mode='w', newline='') as csv_file:
            fieldnames = ['epoch'] + list(self.tracks.keys())
            writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(fieldnames)
            for i in range(len(self.epochs)):
                writer.writerow([self.epochs[i]] + [self.means_over_epochs[x][i] for x in self.tracks.keys()])

    def __str__(self):
        """Comma-separated `name: mean` summary of all current tracks."""
        return ', '.join('%s: %.7f' % (key, value.mean()) for (key, value) in self.tracks.items())

    def plot(self):
        """Plot every recorded mean series against epoch and save plot.png."""
        plt.figure(figsize=(12, 8))
        for key in self.tracks.keys():
            plt.plot(self.epochs, self.means_over_epochs[key], label=key)
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc=4)
        plt.grid(True)
        plt.tight_layout()
        plt.savefig(os.path.join(self.output_folder, 'plot.png'))
        plt.close()

    def state_dict(self):
        return {'tracks': self.tracks, 'epochs': self.epochs, 'means_over_epochs': self.means_over_epochs}

    def load_state_dict(self, state_dict):
        """Restore state; trims epochs/means to the shortest recorded series so
        all columns stay the same length."""
        self.tracks = state_dict['tracks']
        self.epochs = state_dict['epochs']
        self.means_over_epochs = state_dict['means_over_epochs']
        counts = list(map(len, self.means_over_epochs.values()))
        if len(counts) == 0:
            counts = [0]
        m = min(counts)
        if m < len(self.epochs):
            self.epochs = self.epochs[:m]
        for key in self.means_over_epochs.keys():
            if len(self.means_over_epochs[key]) > m:
                self.means_over_epochs[key] = self.means_over_epochs[key][:m]
# Was a bare `_criterion('mmloss')` statement whose returned decorator was
# discarded, so the criterion was never registered; apply it to the class.
@_criterion('mmloss')
class MMCriterion(FairseqCriterion):
    """Criterion that defers the actual loss computation to the task's mmtask."""

    def __init__(self, task):
        super().__init__(task)
        self.mmtask = task.mmtask

    def forward(self, model, sample):
        """Run the multimodal task and repackage its outputs for fairseq.

        Returns (loss, sample_size=1, logging_output).
        """
        outputs = self.mmtask(model, sample)
        loss, loss_scalar, max_len, batch_size, sample_size = (outputs['loss'], outputs['loss_scalar'], outputs['max_len'], outputs['batch_size'], outputs['sample_size'])
        logging_output = {'loss': loss_scalar, 'ntokens': max_len * batch_size, 'nsentences': batch_size, 'sample_size': sample_size}
        return (loss, 1, logging_output)

    # These two take no `self`; fairseq invokes them on the class, so they
    # must be static methods (calling them on an instance would otherwise
    # misbind the first argument).
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from all workers into scalar metrics."""
        loss_sum = sum(log.get('loss', 0.0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        metrics.log_scalar('loss', loss_sum / sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        return True
class GreedyOptimizer(PathOptimizer):
    """Contraction-path optimizer that delegates to the accelerated greedy routine."""

    __slots__ = ('costmod', 'temperature', 'simplify', '_optimize_fn')

    def __init__(self, costmod=1.0, temperature=0.0, simplify=True, accel='auto'):
        self.costmod = costmod
        self.temperature = temperature
        self.simplify = simplify
        self._optimize_fn = get_optimize_greedy(accel)

    def maybe_update_defaults(self, **kwargs):
        """Merge stored defaults with call-site overrides (overrides win)."""
        merged = dict(costmod=self.costmod, temperature=self.temperature, simplify=self.simplify)
        merged.update(kwargs)
        return merged

    def ssa_path(self, inputs, output, size_dict, **kwargs):
        """Return the greedy path in SSA (static single assignment) form."""
        opts = self.maybe_update_defaults(**kwargs)
        return self._optimize_fn(inputs, output, size_dict, use_ssa=True, **opts)

    def search(self, inputs, output, size_dict, **kwargs):
        """Build a ContractionTree from the greedy SSA path."""
        from ..core import ContractionTree
        path = self.ssa_path(inputs, output, size_dict, **kwargs)
        return ContractionTree.from_path(inputs, output, size_dict, ssa_path=path)

    def __call__(self, inputs, output, size_dict, **kwargs):
        """Return the greedy path in linear (non-SSA) form."""
        opts = self.maybe_update_defaults(**kwargs)
        return self._optimize_fn(inputs, output, size_dict, use_ssa=False, **opts)
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Assemble a PPO run (sampler, algo, agent, runner) from a stored config
    plus the variant saved in *log_dir*, then train under a logger context."""
    affinity = affinity_from_code(slot_affinity_code)
    # Base config overridden by the per-run variant file.
    config = update_config(configs[config_key], load_variant(log_dir))
    sampler = SerialSampler(EnvCls=gym_make, env_kwargs=config['env'], CollectorCls=CpuResetCollector, **config['sampler'])
    algo = PPO(optim_kwargs=config['optim'], **config['algo'])
    agent = MujocoFfAgent(model_kwargs=config['model'], **config['agent'])
    runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    run_name = 'ppo_' + config['env']['id']
    with logger_context(log_dir, run_ID, run_name, config):
        runner.train()
def main():
    """Aggregate RKI case/death time series by state and district and write CSVs."""
    df_by_lk, df_berlin_cases_sum, df_berlin_deaths_sum = fetch_and_clean_data()
    death_columns = [c for c in df_by_lk if str(c).endswith('_deaths')]
    df_by_lk_deaths = pd.concat([df_by_lk[c] for c in death_columns], axis=1)
    # Strip the '_deaths' suffix so these columns carry the bare integer key.
    df_by_lk_deaths.rename(columns={c: int(c.split('_')[0]) for c in df_by_lk_deaths}, inplace=True)
    case_columns = [c for c in df_by_lk if '_' not in str(c)]
    df_by_lk_cases = pd.concat([df_by_lk[c] for c in case_columns], axis=1)
    df_by_bl_cases = aggregate_by_bland(df_by_lk_cases)
    df_by_bl_deaths = aggregate_by_bland(df_by_lk_deaths)
    log.info('build sum for each sample')
    df_by_bl_cases['sum_cases'] = df_by_bl_cases.sum(axis=1)
    df_by_bl_deaths['sum_deaths'] = df_by_bl_deaths.sum(axis=1)
    # District totals subtract the Berlin sums returned by fetch_and_clean_data.
    df_by_lk_cases['sum_cases'] = df_by_lk_cases.sum(axis=1) - df_berlin_cases_sum
    df_by_lk_deaths['sum_deaths'] = df_by_lk_deaths.sum(axis=1) - df_berlin_deaths_sum
    lib.io.write_csv_timeseries(df_by_bl_cases, 'cases-rki-by-state.csv')
    lib.io.write_csv_timeseries(df_by_bl_deaths, 'deaths-rki-by-state.csv')
    lib.io.write_csv_timeseries(df_by_lk_cases, 'cases-rki-by-ags.csv')
    lib.io.write_csv_timeseries(df_by_lk_deaths, 'deaths-rki-by-ags.csv')
    log.info('done')
class PILRandomGaussianBlur(object):
    """With probability *p*, blur a PIL image with a Gaussian kernel whose
    radius is drawn uniformly from [radius_min, radius_max]."""

    def __init__(self, p=0.5, radius_min=0.1, radius_max=2.0):
        self.prob = p
        self.radius_min = radius_min
        self.radius_max = radius_max

    def __call__(self, img):
        # One uniform draw decides whether to blur at all.
        if np.random.rand() > self.prob:
            return img
        radius = random.uniform(self.radius_min, self.radius_max)
        return img.filter(ImageFilter.GaussianBlur(radius=radius))
def wrd_name(trn):
    """Insert a 'wrd' component before the final dotted extension of *trn*.

    'a.train.ltr' -> 'a.train.wrd.ltr'; a name with no dot gets '.wrd.' prepended.
    """
    head, _, ext = trn.rpartition('.')
    return head + '.wrd.' + ext
class calculate_metrics():
    """Score yes/no predictions stored as tab-separated per-task result files.

    Each task file holds two lines per image (`name\\tquestion\\tgt\\tpred`);
    `process_result` prints per-task acc/acc+ scores, `compute_metric` the
    confusion-matrix summary.
    """

    def divide_chunks(self, l, n=2):
        """Yield successive n-sized chunks of *l*."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def parse_pred_ans(self, pred_ans):
        """Normalize a raw prediction string to 'yes', 'no', or 'other'.

        Only the first four characters are inspected for non-exact matches.
        """
        if pred_ans in ['yes', 'no']:
            return pred_ans
        prefix_pred_ans = pred_ans[:4]
        if 'yes' in prefix_pred_ans:
            return 'yes'
        if 'no' in prefix_pred_ans:
            return 'no'
        return 'other'

    def compute_metric(self, gts, preds):
        """Return TP/FN/TN/FP, precision, recall, accuracy and the count of
        'other' (unparseable) predictions for aligned label lists."""
        assert len(gts) == len(preds)
        label_map = {'yes': 1, 'no': 0, 'other': -1}
        gts = [label_map[x] for x in gts]
        preds = [label_map[x] for x in preds]
        # Accuracy counts 'other' predictions as wrong.
        acc = accuracy_score(gts, preds)
        # Precision/recall are computed only over parseable predictions.
        clean_gts = []
        clean_preds = []
        other_num = 0
        for gt, pred in zip(gts, preds):
            if pred == -1:
                other_num += 1
                continue
            clean_gts.append(gt)
            clean_preds.append(pred)
        conf_mat = confusion_matrix(clean_gts, clean_preds, labels=[1, 0])
        precision = precision_score(clean_gts, clean_preds, average='binary')
        recall = recall_score(clean_gts, clean_preds, average='binary')
        tp, fn = conf_mat[0]
        fp, tn = conf_mat[1]
        return {'TP': tp, 'FN': fn, 'TN': tn, 'FP': fp, 'precision': precision, 'recall': recall, 'other_num': other_num, 'acc': acc}

    def process_result(self, results_dir):
        """Score every task listed in eval_type_dict from '<task>.txt' files in
        *results_dir* and print per-task and total scores."""
        for eval_type, task_name_list in eval_type_dict.items():
            print('', eval_type, '')
            scores = 0
            task_score_dict = dict()
            for task_name in task_name_list:
                print(task_name)
                task_txt = os.path.join(results_dir, task_name + '.txt')
                # Close the file deterministically (was open(...).readlines()).
                with open(task_txt, 'r') as task_file:
                    lines = task_file.readlines()
                chunk_lines = list(self.divide_chunks(lines))
                img_num = len(chunk_lines)
                task_other_ans_num = 0
                task_score = 0
                acc_plus_correct_num = 0
                gts = []
                preds = []
                for img_items in chunk_lines:
                    assert len(img_items) == 2
                    img_correct_num = 0
                    for img_item in img_items:
                        try:
                            img_name, question, gt_ans, pred_ans = img_item.split('\t')
                        except ValueError:
                            # Malformed line: report and skip it. The original
                            # bare except fell through and reused (possibly
                            # unbound or stale) variables from a previous line.
                            print('img_item: {}'.format(img_item))
                            continue
                        gt_ans = gt_ans.lower()
                        pred_ans = pred_ans.lower()
                        assert gt_ans in ['yes', 'no']
                        pred_ans = self.parse_pred_ans(pred_ans)
                        assert pred_ans in ['yes', 'no', 'other']
                        gts.append(gt_ans)
                        preds.append(pred_ans)
                        if gt_ans == pred_ans:
                            img_correct_num += 1
                        if pred_ans not in ['yes', 'no']:
                            task_other_ans_num += 1
                    # acc+ requires BOTH questions of an image to be correct.
                    if img_correct_num == 2:
                        acc_plus_correct_num += 1
                metric_dict = self.compute_metric(gts, preds)
                metric_dict['acc_plus'] = acc_plus_correct_num / img_num
                # Task score = 100 * (acc + acc_plus).
                for k, v in metric_dict.items():
                    if k in ['acc', 'acc_plus']:
                        task_score += v * 100
                task_score_dict[task_name] = task_score
                scores += task_score
            print('total score:', scores, '\n')
            for task_name, score in task_score_dict.items():
                print('\t', task_name, ' score:', score)
            print('\n')
        return
def define2DBoolVarArrayArray(gurobiModel, sizeX, sizeY, name):
    """Add a sizeX-by-sizeY grid of binary (0/1) decision variables to the
    Gurobi model and return the variable collection."""
    binary_vars = gurobiModel.addVars(sizeX, sizeY, vtype=GRB.BINARY, name=name)
    return binary_vars
def find_classes(folder: Path) -> FilePathList:
    """Return the class subdirectories of *folder*, sorted by name.

    Hidden directories (names starting with '.') and plain files are skipped.

    Raises:
        ValueError: if *folder* contains no class subdirectories.
    """
    classes = [d for d in folder.iterdir() if (d.is_dir() and (not d.name.startswith('.')))]
    if not classes:
        # Was a bare `assert`, which is stripped under `python -O`; raise
        # explicitly so the check always runs.
        raise ValueError(f'no class directories found in {folder}')
    return sorted(classes, key=(lambda d: d.name))
# Was a bare `_registry(...)` statement whose returned decorator was thrown
# away, so the operator was never registered; apply it to the class.
@_registry(operator_type='BatchMatMulV2')
class BatchMatMulV2(Operator):
    """Operator adapter for TensorFlow's BatchMatMulV2 node."""

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Populate transpose attributes from a TF node's adjoint flags."""
        if framework == 'tensorflow':
            # adj_x / adj_y map directly onto transpose_a / transpose_b.
            self._attr['transpose_a'] = node.attr['adj_x'].b
            self._attr['transpose_b'] = node.attr['adj_y'].b
def KMeans(feat, n_clusters=2):
    """Cluster *feat* with k-means (fixed seed) and return per-sample labels."""
    # NOTE(review): n_jobs was removed from sklearn's KMeans in 1.0 — confirm
    # the pinned scikit-learn version supports it.
    n_jobs = multiprocessing.cpu_count()
    estimator = cluster.KMeans(n_clusters=n_clusters, n_jobs=n_jobs, random_state=0)
    estimator.fit(feat)
    return estimator.labels_
class LossylessDataModule(LightningDataModule):
    """Base LightningDataModule for lossyless datasets.

    Subclasses provide the dataset constructors; this class wires them into
    train/val/test dataloaders and exposes dataset metadata via `set_info_`.
    """

    def __init__(self, data_dir=DIR, val_size=0.1, test_size=None, num_workers=16, batch_size=128, val_batch_size=None, seed=123, reload_dataloaders_every_epoch=False, dataset_kwargs=None):
        super().__init__()
        self.data_dir = data_dir
        self.val_size = val_size
        self.test_size = test_size
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.val_batch_size = batch_size if val_batch_size is None else val_batch_size
        self.seed = seed
        # The original default `dataset_kwargs={}` was a mutable default shared
        # across every instance; normalize None to a fresh dict instead.
        self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs
        self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch

    def Dataset(self):
        raise NotImplementedError()

    def get_train_dataset(self, **dataset_kwargs):
        raise NotImplementedError()

    def get_val_dataset(self, **dataset_kwargs):
        raise NotImplementedError()

    def get_test_dataset(self, **dataset_kwargs):
        raise NotImplementedError()

    def prepare_data(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()

    @property
    def dataset(self):
        """Underlying train dataset, unwrapping a torch Subset if present.

        Restored as a property: `set_info_` reads `self.dataset` without
        calling it and then invokes methods on the result.
        """
        dataset = self.train_dataset
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        return dataset

    def set_info_(self):
        """Cache dataset metadata (shapes, task types) on the datamodule."""
        dataset = self.dataset
        self.target_is_clf, self.aux_is_clf = dataset.get_is_clf()
        self.target_shape, self.aux_shape = dataset.get_shapes()
        self.shape = dataset.shapes['input']
        self.additional_target = dataset.additional_target

    def balancing_weights(self):
        return dict()

    def setup(self, stage=None):
        """Instantiate train/val datasets for 'fit' and test dataset for 'test'."""
        if (stage == 'fit') or (stage is None):
            self.train_dataset = self.get_train_dataset(**self.dataset_kwargs)
            self.set_info_()
            self.val_dataset = self.get_val_dataset(**self.dataset_kwargs)
        if (stage == 'test') or (stage is None):
            self.test_dataset = self.get_test_dataset(**self.dataset_kwargs)

    def train_dataloader(self, batch_size=None, train_dataset=None, **kwargs):
        """Training dataloader; rebuilds the dataset when reloading per epoch
        or when extra dataset_kwargs are passed."""
        dkwargs = kwargs.pop('dataset_kwargs', {})
        if self.reload_dataloaders_every_epoch or (len(dkwargs) > 0):
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            train_dataset = self.get_train_dataset(**curr_kwargs)
        if train_dataset is None:
            train_dataset = self.train_dataset
        if batch_size is None:
            batch_size = self.batch_size
        return DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True, **kwargs)

    def val_dataloader(self, batch_size=None, **kwargs):
        """Validation dataloader (no shuffling)."""
        dkwargs = kwargs.pop('dataset_kwargs', {})
        if self.reload_dataloaders_every_epoch or (len(dkwargs) > 0):
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.val_dataset = self.get_val_dataset(**curr_kwargs)
        if batch_size is None:
            batch_size = self.val_batch_size
        return DataLoader(self.val_dataset, batch_size=batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, **kwargs)

    def test_dataloader(self, batch_size=None, **kwargs):
        """Test dataloader (no shuffling)."""
        dkwargs = kwargs.pop('dataset_kwargs', {})
        if self.reload_dataloaders_every_epoch or (len(dkwargs) > 0):
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.test_dataset = self.get_test_dataset(**curr_kwargs)
        if batch_size is None:
            batch_size = self.val_batch_size
        return DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, **kwargs)

    def eval_dataloader(self, is_eval_on_test, **kwargs):
        """Return the test loader when evaluating on test, else the val loader."""
        if is_eval_on_test:
            return self.test_dataloader(**kwargs)
        else:
            return self.val_dataloader(**kwargs)
def test_amuse_LogarithmicHaloPotential():
    """Compare AMUSE orbit integration against galpy for a logarithmic halo."""
    halo = potential.LogarithmicHaloPotential(normalize=1.0)
    vo, ro = 210.0, 8.5
    orbit = Orbit([1.0, 0.1, 1.1, 0.3, 0.1, 0.4], ro=ro, vo=vo)
    run_orbitIntegration_comparison(orbit, halo, 2.0, vo, ro, tol=0.03)
    return None
class Constant(AbsOpBase):
    """Leaf operator producing a constant tensor: takes no inputs, emits one
    tensor described by `abs_tensor` (set by the generator after construction)."""

    in_dtypes = [()]
    out_dtypes = [(i,) for i in DTYPE_GEN_ALL]

    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim
        self.inp_ranks = []
        self.out_ranks = [(dim,)]
        # Populated externally; describes the tensor this op produces.
        self.abs_tensor: AbsTensor = None

    def __str__(self):
        # The original defined __str__ twice; the first (attribute-dump)
        # version was dead code, silently shadowed by this one. Only this
        # behavior was ever live, so only it is kept.
        return 'Constant'

    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        SanityCheck.eq(len(input_shapes), 0)
        return [self.abs_tensor]

    def requires(self, input_shapes: List[AbsTensor]) -> List[Union[(z3.BoolRef, bool)]]:
        SanityCheck.eq(len(input_shapes), 0)
        return []

    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        # Constant has no inputs, so there is nothing to deduce.
        pass

    def input_like(self):
        return []

    def output_like(self):
        return [self.abs_tensor]
class CityScapes(MyDataset):
    """Cityscapes instance-segmentation dataset.

    Reads leftImg8bit images with matching gtFine instanceIds/labelIds maps and
    remaps the labels onto the 8 "thing" classes in CLASSES.
    """

    def __init__(self, args, transform=None, target_transform=None, augment=False, split='train', resize=False, imsize=256):
        CLASSES = ['<eos>', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
        self.classes = CLASSES
        self.num_classes = len(self.classes)
        self.max_seq_len = args.gt_maxseqlen
        self.image_files = glob.glob(os.path.join(args.cityscapes_dir, 'leftImg8bit', split, '*', '*.png'))
        # Derive ground-truth paths from the image paths by string substitution.
        self.ins_files = [w.replace('/leftImg8bit/', '/gtFine/') for w in self.image_files]
        self.ins_files = [w.replace('_leftImg8bit.png', '_gtFine_instanceIds.png') for w in self.ins_files]
        self.seg_files = [w.replace('/leftImg8bit/', '/gtFine/') for w in self.image_files]
        self.seg_files = [w.replace('_leftImg8bit.png', '_gtFine_labelIds.png') for w in self.seg_files]
        self.transform = transform
        self.target_transform = target_transform
        self.batch_size = args.batch_size
        self.no_run_coco_eval = True
        self.crop = args.crop
        self.flip = augment
        # Resizing variants additionally randomize zoom.
        if augment and (not resize):
            self.augmentation_transform = RandomAffine(rotation_range=args.rotation, translation_range=args.translation, shear_range=args.shear, interp='nearest')
        elif augment and resize:
            self.augmentation_transform = RandomAffine(rotation_range=args.rotation, translation_range=args.translation, shear_range=args.shear, zoom_range=(args.zoom, 1), interp='nearest')
        else:
            self.augmentation_transform = None
        self.zoom = args.zoom
        self.augment = augment
        self.imsize = imsize
        self.resize = resize

    def get_raw_sample(self, index):
        """Load one (image, instance map, semantic map) triple for *index*."""
        image_file = os.path.join(self.image_files[index])
        ins_file = os.path.join(self.ins_files[index])
        img = Image.open(image_file).convert('RGB')
        ins = np.array(Image.open(ins_file))
        # gtFine instanceIds encode "thing" pixels as labelId*1000 + instance.
        # Integer division recovers the semantic label id. (Was `/ 1000`: true
        # division yields floats like 26.001, so every equality test below
        # could never match for instance-annotated pixels.)
        seg = ins.copy() // 1000
        seg[seg == 29] = 0  # drop label 29 (caravan — not in CLASSES); TODO confirm mapping
        seg[seg == 30] = 0  # drop label 30 (trailer — not in CLASSES)
        seg[seg > 0] -= 23  # shift thing label ids (24..33) down to 1..10
        seg[seg == 8] = 6   # close the gap left by the two dropped labels:
        seg[seg == 9] = 7   # train->6, motorcycle->7, bicycle->8
        seg[seg == 10] = 8
        # Keep instance ids only where a retained thing class remains.
        seg_aux = seg.copy()
        seg_aux[seg_aux > 0] = 1
        ins = ins * seg_aux
        # Ids below 24000 (person*1000) carry no per-instance annotation.
        ins[ins < 24000] = 0
        # Relabel surviving instance ids onto a compact 0..K range.
        unique_ids = np.unique(ins)
        for i in range(len(unique_ids)):
            ins[ins == unique_ids[i]] = i
        return (img, ins, seg)
class ConvTranspose3d(_ConvTransposeMixin, _ConvNd):
    """3D transposed convolution routed through 'same'-padding arithmetic.

    Thin wrapper over torch's private _ConvNd machinery; the actual compute is
    delegated to conv_transpose3d_same_padding (defined elsewhere in this
    project).

    NOTE(review): relies on private torch APIs (_ConvTransposeMixin, _ConvNd,
    _output_padding) whose signatures have changed across torch releases —
    confirm against the pinned torch version.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        # Expand every scalar spatial argument to a 3-tuple (D, H, W).
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        # The positional True marks this layer as transposed for _ConvNd.
        super(ConvTranspose3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, True, output_padding, groups, bias)

    def forward(self, input, output_size=None):
        # Extra output padding needed to reach the requested output_size (mixin helper).
        output_padding = self._output_padding(input, output_size)
        return conv_transpose3d_same_padding(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)
def collate_fn(examples):
    """Batch a list of example dicts into model inputs.

    Stacks each example's 'pixel_values' and 'mask' tensors along a new batch
    dimension; the mask is exposed under the key 'bool_masked_pos'.
    """
    batch = {}
    for out_key, in_key in (('pixel_values', 'pixel_values'), ('bool_masked_pos', 'mask')):
        batch[out_key] = torch.stack([example[in_key] for example in examples])
    return batch
class ResUNet(ME.MinkowskiNetwork):
    """Sparse U-Net feature extractor built on MinkowskiEngine.

    Encoder: stem conv + two stride-2 downsampling stages, each followed by a
    block. Decoder: two transposed convs with skip concatenations back to the
    matching encoder outputs, then a 1x1 head emitting `out_channels` features
    per point. Subclasses customize via the class-level NORM_TYPE / CHANNELS /
    TR_CHANNELS / REGION_TYPE fields.
    """

    # Normalization for stem/stage convs (None = subclass decides / no norm here).
    NORM_TYPE = None
    # Normalization used inside the blocks.
    BLOCK_NORM_TYPE = 'BN'
    # Encoder channel widths per level; index 0 is an unused placeholder.
    CHANNELS = [None, 32, 64, 128]
    # Decoder (transpose) channel widths per level; index 0 unused.
    TR_CHANNELS = [None, 32, 64, 64]
    REGION_TYPE = ME.RegionType.HYPER_CUBE

    def __init__(self, in_channels=3, out_channels=32, bn_momentum=0.1, conv1_kernel_size=3, normalize_feature=False, D=3):
        ME.MinkowskiNetwork.__init__(self, D)
        # Bind the class-level configuration to locals for concise layer setup.
        NORM_TYPE = self.NORM_TYPE
        BLOCK_NORM_TYPE = self.BLOCK_NORM_TYPE
        CHANNELS = self.CHANNELS
        TR_CHANNELS = self.TR_CHANNELS
        REGION_TYPE = self.REGION_TYPE
        self.normalize_feature = normalize_feature
        # --- encoder ---
        self.conv1 = ME.MinkowskiConvolution(in_channels=in_channels, out_channels=CHANNELS[1], kernel_size=conv1_kernel_size, stride=1, dilation=1, bias=False, dimension=D)
        self.norm1 = get_norm(NORM_TYPE, CHANNELS[1], bn_momentum=bn_momentum, dimension=D)
        self.block1 = get_block(BLOCK_NORM_TYPE, CHANNELS[1], CHANNELS[1], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv2 = ME.MinkowskiConvolution(in_channels=CHANNELS[1], out_channels=CHANNELS[2], kernel_size=3, stride=2, dilation=1, bias=False, dimension=D)
        self.norm2 = get_norm(NORM_TYPE, CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2 = get_block(BLOCK_NORM_TYPE, CHANNELS[2], CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv3 = ME.MinkowskiConvolution(in_channels=CHANNELS[2], out_channels=CHANNELS[3], kernel_size=3, stride=2, dilation=1, bias=False, dimension=D)
        self.norm3 = get_norm(NORM_TYPE, CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3 = get_block(BLOCK_NORM_TYPE, CHANNELS[3], CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        # --- decoder (input widths include the concatenated skip channels) ---
        self.conv3_tr = ME.MinkowskiConvolutionTranspose(in_channels=CHANNELS[3], out_channels=TR_CHANNELS[3], kernel_size=3, stride=2, dilation=1, bias=False, dimension=D)
        self.norm3_tr = get_norm(NORM_TYPE, TR_CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3_tr = get_block(BLOCK_NORM_TYPE, TR_CHANNELS[3], TR_CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv2_tr = ME.MinkowskiConvolutionTranspose(in_channels=(CHANNELS[2] + TR_CHANNELS[3]), out_channels=TR_CHANNELS[2], kernel_size=3, stride=2, dilation=1, bias=False, dimension=D)
        self.norm2_tr = get_norm(NORM_TYPE, TR_CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2_tr = get_block(BLOCK_NORM_TYPE, TR_CHANNELS[2], TR_CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv1_tr = ME.MinkowskiConvolution(in_channels=(CHANNELS[1] + TR_CHANNELS[2]), out_channels=TR_CHANNELS[1], kernel_size=1, stride=1, dilation=1, bias=False, dimension=D)
        # Output head (the only biased conv).
        self.final = ME.MinkowskiConvolution(in_channels=TR_CHANNELS[1], out_channels=out_channels, kernel_size=1, stride=1, dilation=1, bias=True, dimension=D)

    def forward(self, x):
        # Encoder: keep each stage's pre-downsample output for the skip paths.
        out_s1 = self.conv1(x)
        out_s1 = self.norm1(out_s1)
        out_s1 = self.block1(out_s1)
        out = MEF.relu(out_s1)
        out_s2 = self.conv2(out)
        out_s2 = self.norm2(out_s2)
        out_s2 = self.block2(out_s2)
        out = MEF.relu(out_s2)
        out_s4 = self.conv3(out)
        out_s4 = self.norm3(out_s4)
        out_s4 = self.block3(out_s4)
        out = MEF.relu(out_s4)
        # Decoder stage 1: upsample and concatenate the stride-2 skip.
        out = self.conv3_tr(out)
        out = self.norm3_tr(out)
        out = self.block3_tr(out)
        out_s2_tr = MEF.relu(out)
        out = ME.cat(out_s2_tr, out_s2)
        # Decoder stage 2: upsample and concatenate the stride-1 skip.
        out = self.conv2_tr(out)
        out = self.norm2_tr(out)
        out = self.block2_tr(out)
        out_s1_tr = MEF.relu(out)
        out = ME.cat(out_s1_tr, out_s1)
        out = self.conv1_tr(out)
        out = MEF.relu(out)
        out = self.final(out)
        if self.normalize_feature:
            # L2-normalize each point's feature vector (epsilon avoids div by 0).
            return ME.SparseTensor((out.F / (torch.norm(out.F, p=2, dim=1, keepdim=True) + 1e-08)), coordinate_map_key=out.coordinate_map_key, coordinate_manager=out.coordinate_manager)
        else:
            return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.