code stringlengths 101 5.91M |
|---|
def folder2lmdb(dpath, fn_list, write_frequency=5000):
    """Serialize a folder of raw .npz/.npy files into an LMDB database.

    Streams files from `dpath` through a DataLoader, stores the raw bytes of
    every successfully-decoded file under its name in `<dpath>.lmdb`, and
    appends one TSV status row per file to `args.output_file`.

    Args:
        dpath: directory containing the raw files.
        fn_list: file-name list forwarded to the `Folder` dataset.
        write_frequency: number of samples between intermediate flushes.

    NOTE(review): relies on module-level `args`, `Folder`, `raw_npz_reader`,
    `raw_npy_reader`, `lmdbdict` and `FIELDNAMES` — confirm against the
    enclosing script.
    """
    directory = osp.expanduser(osp.join(dpath))
    print(('Loading dataset from %s' % directory))
    # Pick the raw reader matching the CLI-selected file extension.
    if (args.extension == '.npz'):
        dataset = Folder(directory, loader=raw_npz_reader, extension='.npz', fn_list=fn_list)
    else:
        dataset = Folder(directory, loader=raw_npy_reader, extension='.npy', fn_list=fn_list)
    # Identity collate: each "batch" is the raw list of (name, bytes, array).
    data_loader = DataLoader(dataset, num_workers=16, collate_fn=(lambda x: x))
    lmdb_path = osp.join(('%s.lmdb' % directory))
    isdir = os.path.isdir(lmdb_path)  # unused; presumably kept for debugging
    print(('Generate LMDB to %s' % lmdb_path))
    db = lmdbdict(lmdb_path, mode='w', key_method='ascii', value_method='identity')
    # TSV opened in append mode so repeated runs accumulate status rows.
    tsvfile = open(args.output_file, 'a')
    writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
    names = []
    all_keys = []
    for (idx, data) in enumerate(tqdm.tqdm(data_loader)):
        (name, byte, npz) = data[0]
        # Only store files whose array payload decoded successfully.
        if (npz is not None):
            db[name] = byte
            all_keys.append(name)
        names.append({'image_id': name, 'status': str((npz is not None))})
        # Periodically flush the LMDB and the accumulated TSV rows.
        if ((idx % write_frequency) == 0):
            print(('[%d/%d]' % (idx, len(data_loader))))
            print('writing')
            db.flush()
            for name in names:
                writer.writerow(name)
            names = []
            tsvfile.flush()
    # Final flush of rows left over from the last partial chunk.
    print('writing finished')
    for name in names:
        writer.writerow(name)
    tsvfile.flush()
    tsvfile.close()
    print('Flushing database ...')
    db.flush()
    del db
def vectorize(data, w2i, story_len, s_sent_len, q_sent_len):
    """Turn (story, question, answer) samples into zero-padded index lists.

    Every story sentence is mapped to word indices and right-padded to
    `s_sent_len`; the story itself is padded with all-zero sentences and
    truncated to exactly `story_len` sentences. The question is right-padded
    to `q_sent_len`; the answer is mapped as-is.

    Returns:
        List of (story, question, answer) tuples of index lists.
    """
    vectorized = []
    for sample in data:
        story = []
        for sentence in sample[0]:
            indices = word_to_index(sentence, w2i)
            story.append(indices + [0] * (s_sent_len - len(indices)))
        # Pad with fresh all-zero sentences, then clip to `story_len`.
        story.extend([0] * s_sent_len for _ in range(story_len - len(story)))
        story = story[:story_len]
        question = word_to_index(sample[1], w2i)
        question = question + [0] * (q_sent_len - len(question))
        answer = word_to_index(sample[2], w2i)
        vectorized.append((story, question, answer))
    return vectorized
class SpecialTokensMixin():
    """Mixin handling a tokenizer's named special tokens (BOS/EOS/UNK/SEP/
    PAD/CLS/MASK plus `additional_special_tokens`).

    Each token lives in a private `_<name>` attribute, exposed through
    property getters/setters; getters log an error and return None when the
    token was never set (unless `verbose` is False).

    BUG FIX: the `@property` and `@<name>.setter` decorators had been
    garbled away (leaving bare `_token.setter` expressions, a NameError at
    class-creation time, and plain methods that callers access as
    attributes elsewhere in the class, e.g. `self.all_special_tokens_extended`
    in `sanitize_special_tokens`). They are restored below.
    """

    SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', 'additional_special_tokens']

    def __init__(self, verbose=True, **kwargs):
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._pad_token_type_id = 0
        self._additional_special_tokens = []
        self.verbose = verbose
        # Accept special tokens directly as keyword arguments.
        for (key, value) in kwargs.items():
            if (value is None):
                continue
            if (key in self.SPECIAL_TOKENS_ATTRIBUTES):
                if (key == 'additional_special_tokens'):
                    assert isinstance(value, (list, tuple)), f'Value {value} is not a list or tuple'
                    assert all((isinstance(t, (str, AddedToken)) for t in value)), 'One of the tokens is not a string or an AddedToken'
                    setattr(self, key, value)
                elif isinstance(value, (str, AddedToken)):
                    setattr(self, key, value)
                else:
                    raise TypeError(f'special token {key} has to be either str or AddedToken but got: {type(value)}')

    def sanitize_special_tokens(self) -> int:
        """Make sure every special token is registered in the vocabulary."""
        return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)

    def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
        """Assign and register the given special tokens; return how many
        tokens were newly added to the vocabulary."""
        if (not special_tokens_dict):
            return 0
        added_tokens = 0
        for (key, value) in special_tokens_dict.items():
            assert (key in self.SPECIAL_TOKENS_ATTRIBUTES), f'Key {key} is not a special token'
            if self.verbose:
                logger.info(f'Assigning {value} to the {key} key of the tokenizer')
            setattr(self, key, value)
            if (key == 'additional_special_tokens'):
                assert (isinstance(value, (list, tuple)) and all((isinstance(t, (str, AddedToken)) for t in value))), f'Tokens {value} for key {key} should all be str or AddedToken instances'
                added_tokens += self.add_tokens(value, special_tokens=True)
            else:
                assert isinstance(value, (str, AddedToken)), f'Token {value} for key {key} should be a str or an AddedToken instance'
                added_tokens += self.add_tokens([value], special_tokens=True)
        return added_tokens

    def add_tokens(self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False) -> int:
        """Normalize `new_tokens` to a list and delegate to `_add_tokens`."""
        if (not new_tokens):
            return 0
        if (not isinstance(new_tokens, (list, tuple))):
            new_tokens = [new_tokens]
        return self._add_tokens(new_tokens, special_tokens=special_tokens)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Subclass hook: actually extend the vocabulary.
        raise NotImplementedError

    @property
    def bos_token(self) -> str:
        """Beginning-of-sequence token (str), or None if unset."""
        if ((self._bos_token is None) and self.verbose):
            logger.error('Using bos_token, but it is not set yet.')
            return None
        return str(self._bos_token)

    @property
    def eos_token(self) -> str:
        """End-of-sequence token (str), or None if unset."""
        if ((self._eos_token is None) and self.verbose):
            logger.error('Using eos_token, but it is not set yet.')
            return None
        return str(self._eos_token)

    @property
    def unk_token(self) -> str:
        """Unknown-word token (str), or None if unset."""
        if ((self._unk_token is None) and self.verbose):
            logger.error('Using unk_token, but it is not set yet.')
            return None
        return str(self._unk_token)

    @property
    def sep_token(self) -> str:
        """Separator token (str), or None if unset."""
        if ((self._sep_token is None) and self.verbose):
            logger.error('Using sep_token, but it is not set yet.')
            return None
        return str(self._sep_token)

    @property
    def pad_token(self) -> str:
        """Padding token (str), or None if unset."""
        if ((self._pad_token is None) and self.verbose):
            logger.error('Using pad_token, but it is not set yet.')
            return None
        return str(self._pad_token)

    @property
    def cls_token(self) -> str:
        """Classifier token (str), or None if unset."""
        if ((self._cls_token is None) and self.verbose):
            logger.error('Using cls_token, but it is not set yet.')
            return None
        return str(self._cls_token)

    @property
    def mask_token(self) -> str:
        """Mask token (str), or None if unset."""
        if ((self._mask_token is None) and self.verbose):
            logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @property
    def additional_special_tokens(self) -> List[str]:
        """All extra special tokens as strings, or None if unset."""
        if ((self._additional_special_tokens is None) and self.verbose):
            logger.error('Using additional_special_tokens, but it is not set yet.')
            return None
        return [str(tok) for tok in self._additional_special_tokens]

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    @property
    def bos_token_id(self) -> Optional[int]:
        """Vocabulary id of the BOS token, or None if unset."""
        if (self._bos_token is None):
            return None
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self) -> Optional[int]:
        """Vocabulary id of the EOS token, or None if unset."""
        if (self._eos_token is None):
            return None
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self) -> Optional[int]:
        """Vocabulary id of the UNK token, or None if unset."""
        if (self._unk_token is None):
            return None
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self) -> Optional[int]:
        """Vocabulary id of the SEP token, or None if unset."""
        if (self._sep_token is None):
            return None
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self) -> Optional[int]:
        """Vocabulary id of the PAD token, or None if unset."""
        if (self._pad_token is None):
            return None
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def pad_token_type_id(self) -> int:
        """Token-type id used for padding segments (defaults to 0)."""
        return self._pad_token_type_id

    @property
    def cls_token_id(self) -> Optional[int]:
        """Vocabulary id of the CLS token, or None if unset."""
        if (self._cls_token is None):
            return None
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self) -> Optional[int]:
        """Vocabulary id of the MASK token, or None if unset."""
        if (self._mask_token is None):
            return None
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self) -> List[int]:
        """Vocabulary ids of all additional special tokens."""
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    @bos_token_id.setter
    def bos_token_id(self, value):
        self._bos_token = self.convert_tokens_to_ids(value)

    @eos_token_id.setter
    def eos_token_id(self, value):
        self._eos_token = self.convert_tokens_to_ids(value)

    @unk_token_id.setter
    def unk_token_id(self, value):
        self._unk_token = self.convert_tokens_to_ids(value)

    @sep_token_id.setter
    def sep_token_id(self, value):
        self._sep_token = self.convert_tokens_to_ids(value)

    @pad_token_id.setter
    def pad_token_id(self, value):
        self._pad_token = self.convert_tokens_to_ids(value)

    @cls_token_id.setter
    def cls_token_id(self, value):
        self._cls_token = self.convert_tokens_to_ids(value)

    @mask_token_id.setter
    def mask_token_id(self, value):
        self._mask_token = self.convert_tokens_to_ids(value)

    @additional_special_tokens_ids.setter
    def additional_special_tokens_ids(self, values):
        self._additional_special_tokens = [self.convert_tokens_to_ids(value) for value in values]

    @property
    def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
        """Map attribute name -> set token(s), with tokens converted to str."""
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, ('_' + attr))
            if attr_value:
                set_attr[attr] = (type(attr_value)((str(attr_value_sub) for attr_value_sub in attr_value)) if isinstance(attr_value, (list, tuple)) else str(attr_value))
        return set_attr

    @property
    def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
        """Like `special_tokens_map` but keeps AddedToken instances intact."""
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, ('_' + attr))
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self) -> List[str]:
        """All set special tokens as strings (deduplicated, order-preserving)."""
        all_toks = [str(s) for s in self.all_special_tokens_extended]
        return all_toks

    @property
    def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
        """All set special tokens, keeping AddedToken instances, deduplicated."""
        all_toks = []
        set_attr = self.special_tokens_map_extended
        for attr_value in set_attr.values():
            all_toks = (all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]))
        # OrderedDict.fromkeys deduplicates while preserving first-seen order.
        all_toks = list(OrderedDict.fromkeys(all_toks))
        return all_toks

    @property
    def all_special_ids(self) -> List[int]:
        """Vocabulary ids of all set special tokens."""
        all_toks = self.all_special_tokens
        all_ids = self.convert_tokens_to_ids(all_toks)
        return all_ids
def get_files(config: Namespace, train: bool = True) -> Union[List, Tuple[List, ...]]:
    """Collect DDR fundus-image paths for training or evaluation.

    Args:
        config: namespace providing `datasets_dir`.
        train: if True, return only the training split of healthy images;
            otherwise return (healthy, unhealthy, healthy labels,
            unhealthy labels, segmentation maps) for evaluation.
    """
    healthy = sorted(glob(os.path.join(config.datasets_dir, 'DDR-dataset', 'healthy', '*.jpg')))
    unhealthy = sorted(glob(os.path.join(config.datasets_dir, 'DDR', 'unhealthy', 'images', '*.png')))
    seg_maps = sorted(glob(os.path.join(config.datasets_dir, 'DDR-dataset', 'unhealthy', 'segmentations', '*.png')))
    if not train:
        # Evaluation split: first 733 healthy images plus every anomaly.
        return (healthy[:733], unhealthy, [0] * 733, [1] * 757, seg_maps)
    # Training split: the healthy images past index 757.
    return healthy[757:]
class NoamOpt(RateOpt):
    """Optimizer wrapper implementing the Noam learning-rate schedule
    (lr = factor * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5))."""

    def __init__(self, optimizer, model_size, factor, warmup):
        self.optimizer = optimizer
        self._step = 0
        self._rate = 0
        self.model_size = model_size
        self.factor = factor
        self.warmup = warmup

    def rate(self):
        """Return the learning rate for the current internal step count."""
        step = self._step
        scale = self.model_size ** (-0.5)
        schedule = min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.factor * scale * schedule
# BUG FIX: the `@given` decorator head had been garbled away, leaving a bare
# parenthesized tuple; also `np.float` was removed in NumPy 1.24 (it aliased
# the builtin float, i.e. float64), so `np.float64` is used instead.
@given(inp=arrays(shape=(3, 2, 10), dtype=np.float64, elements=hypothesis.strategies.floats((- 100), 100)), log_scale=booleans())
def test_hard_volume(inp, log_scale) -> None:
    """Property test: TFVolume must agree with TFHardVolume on random boxes."""
    box = TFBoxTensor(tf.Variable(inp))
    expected = TFHardVolume(log_scale)(box)
    res = TFVolume(log_scale)(box)
    assert np.allclose(res, expected)
def get_incoming_shape(incoming):
    """Return the static shape of `incoming` as a list/tuple of dimensions.

    Accepts a tf.Tensor (static shape via `get_shape`) or an
    ndarray/list/tuple (shape via `np.shape`).

    Raises:
        Exception: if `incoming` is none of the supported types.
    """
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    elif isinstance(incoming, (np.ndarray, list, tuple)):
        # BUG FIX: the original tested `type(incoming) in [np.array, list,
        # tuple]` — `np.array` is a function, not a type, so real ndarrays
        # never matched and fell through to the error branch.
        return np.shape(incoming)
    else:
        raise Exception('Invalid incoming layer.')
def cascade_resnet_3d_avg(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, frame_dist=range(5), **kwargs):
    """Append one data-sharing (k-space averaging) 3D-ResNet cascade stage.

    Args:
        pr: string prefix for the new layers' keys and names.
        net: dict of layers; must already contain net['mask'].
        input_layer: layer this stage is appended to.
        n: depth — convs conv1..conv(n-1) followed by an aggregation conv.
        nf: number of feature maps per conv.
        b: bias initializer class (instantiated per conv).
        frame_dist: temporal neighbourhood used by the k-space average.
        **kwargs: must contain 'cascade_i', the index of this stage.

    Returns:
        (net, output_layer) where output_layer is the stage's residual output.
    """
    shape = lasagne.layers.get_output_shape(input_layer)
    n_channel = shape[1]
    # The first cascade keeps raw averages; later cascades normalize by count.
    divide_by_n = (kwargs['cascade_i'] != 0)
    k = (3, 3, 3)
    net[(pr + 'kavg')] = l.AverageInKspaceLayer([input_layer, net['mask']], shape, frame_dist=frame_dist, divide_by_n=divide_by_n, clipped=False)
    net[(pr + 'conv1')] = l.Conv3D(net[(pr + 'kavg')], nf, k, b=b(), name=(pr + 'conv1'))
    # BUG FIX: `xrange` is Python 2 only; `range` is behaviorally identical here.
    for i in range(2, n):
        net[(pr + ('conv%d' % i))] = l.Conv3D(net[(pr + ('conv%d' % (i - 1)))], nf, k, b=b(), name=(pr + ('conv%d' % i)))
    net[(pr + 'conv_aggr')] = l.Conv3DAggr(net[(pr + ('conv%d' % (n - 1)))], n_channel, k, b=b(), name=(pr + 'conv_aggr'))
    net[(pr + 'res')] = l.ResidualLayer([net[(pr + 'conv_aggr')], input_layer], name=(pr + 'res'))
    output_layer = net[(pr + 'res')]
    return (net, output_layer)
def prepare_sentence(tokenizer, text):
    """Tokenize `text` with a BERT-style tokenizer and split the wordpiece ids
    into overlapping chunks that fit the model's 512-token window.

    Consecutive chunks share half the usable window so every word keeps
    left context.

    Returns:
        tokenized_text: basic-tokenizer tokens of `text`.
        tokenized_to_id_indicies: per token, a (chunk_index, start, end)
            triple locating its wordpiece ids inside that chunk.
        tokenids_chunks: list of id chunks, each wrapped in [SOS]...[EOS].
    """
    model_max_tokens = 512
    has_sos_eos = True
    max_tokens = model_max_tokens
    # Reserve room for the [SOS]/[EOS] ids added around every chunk.
    if has_sos_eos:
        max_tokens -= 2
    # Number of trailing ids carried over into the next chunk as context.
    sliding_window_size = (max_tokens // 2)
    # Cache the special-token ids on the function object (computed once).
    if (not hasattr(prepare_sentence, 'sos_id')):
        (prepare_sentence.sos_id, prepare_sentence.eos_id) = tokenizer.encode('', add_special_tokens=True)
        print(prepare_sentence.sos_id, prepare_sentence.eos_id)
    tokenized_text = tokenizer.basic_tokenizer.tokenize(text, never_split=tokenizer.all_special_tokens)
    tokenized_to_id_indicies = []
    tokenids_chunks = []
    tokenids_chunk = []
    # Trailing None sentinel forces the final partial chunk to be emitted.
    for (index, token) in enumerate((tokenized_text + [None])):
        if (token is not None):
            tokens = tokenizer.wordpiece_tokenizer.tokenize(token)
        # Close the current chunk at end-of-text or when it would overflow.
        if ((token is None) or ((len(tokenids_chunk) + len(tokens)) > max_tokens)):
            tokenids_chunks.append((([prepare_sentence.sos_id] + tokenids_chunk) + [prepare_sentence.eos_id]))
            if (sliding_window_size > 0):
                # Keep the tail of the chunk as overlap for the next one.
                tokenids_chunk = tokenids_chunk[(- sliding_window_size):]
            else:
                tokenids_chunk = []
        if (token is not None):
            tokenized_to_id_indicies.append((len(tokenids_chunks), len(tokenids_chunk), (len(tokenids_chunk) + len(tokens))))
            tokenids_chunk.extend(tokenizer.convert_tokens_to_ids(tokens))
    return (tokenized_text, tokenized_to_id_indicies, tokenids_chunks)
class PdeEvaluate():
    """Callable that evaluates every sub-node of a bound PDE on a set of
    input variables and merges the results."""

    def __init__(self, binding_pde):
        # PDE whose sub-nodes are evaluated on each call.
        self.binding_pde = binding_pde

    def __call__(self, inputs: Variables) -> Variables:
        merged = Variables()
        for node in self.binding_pde.sub_nodes:
            # Pass each node only the variables it declares as inputs
            # or derivatives.
            node_inputs = {
                key: val
                for key, val in Variables(inputs).items()
                if key in node.inputs or key in node.derivatives
            }
            merged.update(node.evaluate(node_inputs))
        return merged
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Launch one serial Atari DQN training run for a launcher-assigned slot.

    Args:
        slot_affinity_code: encoded CPU/GPU affinity for this run slot.
        log_dir: directory holding the run's saved variant and logs.
        run_ID: identifier distinguishing repeats of the same config.
        config_key: key into the module-level `configs` dict.
    """
    affinity = affinity_from_code(slot_affinity_code)
    config = configs[config_key]
    # Overlay the sweep variant saved by the launcher onto the base config.
    variant = load_variant(log_dir)
    config = update_config(config, variant)
    # Evaluation env always plays the same game as the training env.
    config['eval_env']['game'] = config['env']['game']
    sampler = SerialSampler(EnvCls=AtariEnv, env_kwargs=config['env'], CollectorCls=ResetCollector, TrajInfoCls=AtariTrajInfo, eval_env_kwargs=config['eval_env'], **config['sampler'])
    algo = DQN(optim_kwargs=config['optim'], **config['algo'])
    agent = AtariDqnAgent(model_kwargs=config['model'], **config['agent'])
    runner = MinibatchRlEval(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    name = config['env']['game']
    with logger_context(log_dir, run_ID, name, config):
        runner.train()
class StraightThroughEstimator(torch.autograd.Function):
    """Binary threshold with a straight-through gradient.

    Forward: 1.0 where input > 0, else 0.0. Backward: passes the incoming
    gradient through unchanged, ignoring the non-differentiable step.

    BUG FIX: `forward`/`backward` were missing `@staticmethod`; modern
    PyTorch requires static methods on autograd Functions invoked via
    `StraightThroughEstimator.apply(...)`.
    """

    @staticmethod
    def forward(ctx, input_):
        # Hard threshold; its true gradient is zero a.e., so we override it.
        out = (input_ > 0).float()
        return out

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: identity gradient.
        grad_input = grad_output.clone()
        return grad_input
def MARE_np(pred, true, mask_value=None):
    """Mean absolute relative error: sum(|true - pred|) / sum(true).

    Args:
        pred: predicted values (ndarray).
        true: ground-truth values (ndarray), same shape as `pred`.
        mask_value: if given, only positions with true > mask_value are scored.

    Returns:
        Scalar MARE over the (optionally masked) elements.
    """
    # BUG FIX: was `mask_value != None`; identity comparison is correct and
    # avoids surprises with objects overriding __eq__.
    if mask_value is not None:
        # Boolean mask directly; equivalent to np.where(cond, True, False).
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    return np.divide(np.sum(np.absolute((true - pred))), np.sum(true))
def test_bisenetv1_feature_fusion_module():
    """Check BiSeNetV1 FeatureFusionModule wiring (1x1 conv, GAP, attention
    convs) and the fused output shape."""
    # Fusion from 16 concatenated channels down to 32 output channels.
    ffm = FeatureFusionModule(16, 32)
    assert (ffm.conv1.in_channels == 16)
    assert (ffm.conv1.out_channels == 32)
    assert (ffm.conv1.kernel_size == (1, 1))
    # Global average pooling squeezes spatial dims to 1x1 for attention.
    assert (ffm.gap.output_size == (1, 1))
    assert (ffm.conv_atten[0].in_channels == 32)
    assert (ffm.conv_atten[0].out_channels == 32)
    assert (ffm.conv_atten[0].kernel_size == (1, 1))
    # Forward pass: two (2, 8, 8, 16) inputs fused to a (2, 16, 8, 16) output.
    # NOTE(review): presumably the module concatenates along dim=1 (8+8=16
    # channels) — confirm against FeatureFusionModule's implementation.
    ffm = FeatureFusionModule(16, 16)
    x1 = torch.randn(2, 8, 8, 16)
    x2 = torch.randn(2, 8, 8, 16)
    x_out = ffm(x1, x2)
    assert (x_out.shape == torch.Size([2, 16, 8, 16]))
def load_data_from_dir(instance_dir, image_size=256, pad_size=0.1, skip_indices=()):
    """Load an object instance's images and masks, square-cropping each
    around the mask bounding box.

    For every `images/*.jpg` with a matching `masks/*.png`, crops a padded
    square around the mask's bounding box, resizes to `image_size`, and
    records crop metadata. If `metadata.json` exists, initial camera poses
    are derived from its azimuth/elevation lists.

    Args:
        instance_dir: directory containing `images/` and `masks/` subdirs.
        image_size: output side length in pixels.
        pad_size: fractional padding added around the mask bounding box.
        skip_indices: image indices to ignore.

    Returns:
        dict of stacked arrays for images, masks, distance transforms,
        bboxes and crop parameters (`images_og` stays a list of PIL images).
    """
    image_dir = osp.join(instance_dir, 'images')
    mask_dir = osp.join(instance_dir, 'masks')
    data_dict = {'images_og': [], 'images': [], 'masks': [], 'masks_dt': [], 'bbox': [], 'image_centers': [], 'crop_scales': []}
    for (i, image_path) in enumerate(sorted(glob(osp.join(image_dir, '*.jpg')))):
        if (i in skip_indices):
            continue
        image_name = osp.basename(image_path)
        mask_path = osp.join(mask_dir, image_name.replace('jpg', 'png'))
        image_og = Image.open(image_path).convert('RGB')
        mask = Image.open(mask_path).convert('L')
        # Bounding box of the binarized mask (presumably (x0, y0, x1, y1) —
        # confirm against get_bbox).
        bbox = get_bbox(((np.array(mask) / 255.0) > 0.5))
        center = ((bbox[:2] + bbox[2:]) / 2.0)
        # Half side length of the padded square crop.
        s = ((max((bbox[2:] - bbox[:2])) / 2.0) * (1 + pad_size))
        square_bbox = np.concatenate([(center - s), (center + s)]).astype(int)
        image = image_util.crop_image(image_og, square_bbox)
        image = (np.array(image.resize((image_size, image_size), Image.LANCZOS)) / 255.0)
        mask = image_util.crop_image(mask, square_bbox)
        mask = np.array(mask.resize((image_size, image_size), Image.BILINEAR))
        mask = ((mask / 255.0) > 0.5)
        (image_center, crop_scale) = compute_crop_parameters(image_og.size, square_bbox)
        data_dict['bbox'].append(square_bbox)
        data_dict['crop_scales'].append(crop_scale)
        data_dict['image_centers'].append(image_center)
        data_dict['images'].append(image)
        data_dict['images_og'].append(image_og)
        data_dict['masks'].append(mask)
        data_dict['masks_dt'].append(compute_distance_transform(mask))
    # Stack per-image lists into arrays (originals stay as PIL images).
    for (k, v) in data_dict.items():
        if (k != 'images_og'):
            data_dict[k] = np.stack(v)
    if osp.exists(osp.join(instance_dir, 'metadata.json')):
        metadata = json.load(open(osp.join(instance_dir, 'metadata.json')))
        data_dict['extents'] = metadata['extents']
        azimuths = metadata['azimuths']
        elevations = metadata['elevations']
        # Camera initializations looking at the object from distance 2.
        (R, T) = pytorch3d.renderer.look_at_view_transform(dist=2, elev=elevations, azim=azimuths)
        data_dict['initial_poses'] = R.tolist()
    return data_dict
class SlidingWindowSpotClipSampler(SpotClipSampler):
    """Samples fixed-length clip windows that slide over every annotated video.

    Windows are `window_num_frames` long and consecutive windows share
    `overlap_window` frames. If a video's tail does not fill a whole
    window, one extra window anchored at the video's end is added.
    """

    def __init__(self, data_source: Spot, window_num_frames: int=32, overlap_window: int=1, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.window_num_frames = window_num_frames
        self.overlap_window = overlap_window
        self._shuffle = shuffle
        # Full windows per video: (frames - overlap) // stride.
        self.correct_window_per_video = ((self.data_source._annotated_videos.num_frames_per_video - overlap_window) // (window_num_frames - overlap_window)).to(dtype=torch.int)
        # 1 where a partial tail remains and needs one end-anchored window.
        self.one_more_window_per_video = (((self.data_source._annotated_videos.num_frames_per_video - overlap_window) % (window_num_frames - overlap_window)) > 0).to(dtype=torch.int)
        self.total_windows_per_video = (self.correct_window_per_video + self.one_more_window_per_video)
        self.total_windows = self.total_windows_per_video.sum(0)
        self.window_to_sample = self.total_windows
        self._precompute_indices()

    def _precompute_indices(self) -> List[Any]:
        """Materialize (video_idx, start_frame, end_frame) for every window."""
        indices = [None for _ in range(self.total_windows)]
        video_idx = 0
        global_idx = 0
        for i in range(len(self.total_windows_per_video)):
            # Regular windows, stepped by (window - overlap).
            for j in range(self.correct_window_per_video[i]):
                clip_start_frame = (j * (self.window_num_frames - self.overlap_window))
                clip_end_frame = ((clip_start_frame + self.window_num_frames) - 1)
                indices[global_idx] = (video_idx, clip_start_frame, clip_end_frame)
                global_idx += 1
            # Optional final window flush with the end of the video.
            if self.one_more_window_per_video[i]:
                duration = self.data_source._annotated_videos.num_frames_per_video[i]
                clip_start_frame = (floor(duration) - self.window_num_frames)
                clip_end_frame = ((clip_start_frame + self.window_num_frames) - 1)
                indices[global_idx] = (video_idx, clip_start_frame, clip_end_frame)
                global_idx += 1
            video_idx += 1
        self._raw_indices = indices

    def __iter__(self) -> List[Any]:
        indices = self._raw_indices
        if self._shuffle:
            # Seeded per epoch so every worker shuffles identically.
            g = torch.Generator()
            g.manual_seed((self.seed + self.epoch))
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return self.window_to_sample

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_num_frames={self.window_num_frames}, overlap_window={self.overlap_window}, shuffle={self._shuffle}, seed={self.seed})'
@torch.no_grad()  # BUG FIX: the decorator was garbled to a bare `_grad()` call
def test(loader, model, evaluator, device):
    """Compute the mean absolute error of `model` over `loader`.

    Args:
        loader: iterable of graph batches exposing `.to(device)`, `.y`
            and `.num_graphs`.
        model: callable mapping a batch to per-graph predictions.
        evaluator: unused; kept for call-site compatibility.
        device: target device for the batches.

    Returns:
        (test_perf, test_loss) where test_perf = -test_loss
        (so that higher is better).
    """
    total_loss = 0
    N = 0
    for data in loader:
        # Note: `.y` and `.num_graphs` are read from the batch object
        # *before* the `.to(device)` reassignment takes effect.
        (data, y, num_graphs) = (data.to(device), data.y, data.num_graphs)
        loss = (model(data).squeeze() - y).abs().mean()
        total_loss += (loss.item() * num_graphs)
        N += num_graphs
    test_loss = (total_loss / N)
    test_perf = (- test_loss)
    return (test_perf, test_loss)
class FairseqOptimizer(object):
    """Thin base wrapper around a `torch.optim.Optimizer`.

    Subclasses must create `self._optimizer`; this base class then provides
    learning-rate access, state (de)serialization, gradient manipulation
    and stepping.

    BUG FIX: the `@property` decorators on `optimizer`/`optimizer_config`
    and the `@staticmethod` on `add_args` had been stripped — without them,
    accesses like `self.optimizer.param_groups` in `get_lr` would operate
    on a bound method and fail.
    """

    def __init__(self, args, params):
        super().__init__()
        self.args = args
        # Materialize the (possibly generator) parameter iterable once.
        self.params = list(params)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific CLI arguments (no-op in the base class)."""
        pass

    @property
    def optimizer(self):
        """The wrapped torch optimizer; validates the subclass set it up."""
        if (not hasattr(self, '_optimizer')):
            raise NotImplementedError
        if (not isinstance(self._optimizer, torch.optim.Optimizer)):
            # BUG FIX: message previously named the nonexistent module
            # `torch.optimizers`.
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """Subclass hook: kwargs needed to (re)build the optimizer."""
        raise NotImplementedError

    def get_lr(self):
        """Return the learning rate of the first parameter group."""
        return self.optimizer.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the same learning rate on every parameter group."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the wrapped optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load optimizer state; optionally override hyperparameters
        (e.g. lr) in every param group afterwards."""
        self.optimizer.load_state_dict(state_dict)
        if ((optimizer_overrides is not None) and (len(optimizer_overrides) > 0)):
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Backpropagate `loss`."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiply all existing gradients by the scalar `c` in place."""
        for p in self.params:
            if (p.grad is not None):
                p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clip the total gradient norm to `max_norm`; with max_norm <= 0,
        just return the current total norm."""
        if (max_norm > 0):
            return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
        else:
            return math.sqrt(sum(((p.grad.data.norm() ** 2) for p in self.params if (p.grad is not None))))

    def step(self, closure=None):
        """Perform a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clear gradients: set every .grad to None, then delegate."""
        for group in self.optimizer.param_groups:
            for p in group['params']:
                p.grad = None
        self.optimizer.zero_grad()
class StableVideoDiffusion():
    """Guidance wrapper around `StableVideoDiffusionPipeline` for optimizing
    rendered video frames against the SVD prior.

    Supports three guidance modes ('sds', 'pixel reconstruction',
    'latent reconstruction'); the mode is hard-coded to
    'pixel reconstruction' via the `[1]` index below.

    BUG FIX: the `@torch.no_grad()` decorator on `get_img_embeds` had been
    garbled to a bare `_grad()` call (a NameError); it is restored below.
    """

    def __init__(self, device, fp16=True, t_range=[0.02, 0.98]):
        super().__init__()
        # NOTE(review): mode hard-coded to 'pixel reconstruction'.
        self.guidance_type = ['sds', 'pixel reconstruction', 'latent reconstruction'][1]
        self.device = device
        self.dtype = (torch.float16 if fp16 else torch.float32)
        pipe = StableVideoDiffusionPipeline.from_pretrained('stabilityai/stable-video-diffusion-img2vid', torch_dtype=torch.float16, variant='fp16')
        pipe.to(device)
        self.pipe = pipe
        # SDS uses the full training schedule; reconstruction modes run
        # 25 inference steps.
        self.num_train_timesteps = (self.pipe.scheduler.config.num_train_timesteps if (self.guidance_type == 'sds') else 25)
        self.pipe.scheduler.set_timesteps(self.num_train_timesteps, device=device)
        # Sampling window for the noise level t, as fractions of the schedule.
        self.min_step = int((self.num_train_timesteps * t_range[0]))
        self.max_step = int((self.num_train_timesteps * t_range[1]))
        self.alphas = self.pipe.scheduler.alphas_cumprod.to(self.device)
        self.embeddings = None
        self.image = None
        # Cached full generation used by the reconstruction modes.
        self.target_cache = None

    @torch.no_grad()
    def get_img_embeds(self, image):
        """Cache the conditioning image (float array in [0, 1]) as PIL."""
        self.image = Image.fromarray(np.uint8((image * 255)))

    def encode_image(self, image):
        """Encode [0, 1] images to scaled VAE latents."""
        image = ((image * 2) - 1)
        latents = self.pipe._encode_vae_image(image, self.device, num_videos_per_prompt=1, do_classifier_free_guidance=False)
        latents = (self.pipe.vae.config.scaling_factor * latents)
        return latents

    def refine(self, pred_rgb, steps=25, strength=0.8, min_guidance_scale: float=1.0, max_guidance_scale: float=3.0):
        """SDEdit-style refinement: noise the rendered frames' latents to
        `strength` and denoise them with the pipeline.

        Returns frames in [0, 1], permuted to (frames, channels, H, W).
        """
        batch_size = pred_rgb.shape[0]
        pred_rgb = pred_rgb.to(self.dtype)
        pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False)
        latents = self.encode_image(pred_rgb_512)
        latents = latents.unsqueeze(0)
        if (strength == 0):
            # Full generation from pure noise.
            init_step = 0
            latents = torch.randn_like(latents)
        else:
            # Partial denoising starting from a noised version of the input.
            init_step = int((steps * strength))
            latents = self.pipe.scheduler.add_noise(latents, torch.randn_like(latents), self.pipe.scheduler.timesteps[init_step:(init_step + 1)])
        target = self.pipe(image=self.image, height=512, width=512, latents=latents, denoise_beg=init_step, denoise_end=steps, output_type='frame', num_frames=batch_size, min_guidance_scale=min_guidance_scale, max_guidance_scale=max_guidance_scale, num_inference_steps=steps, decode_chunk_size=1).frames[0]
        # Map [-1, 1] frames to [0, 1] and reorder to (frames, C, H, W).
        target = ((target + 1) * 0.5)
        target = target.permute(1, 0, 2, 3)
        return target

    def train_step(self, pred_rgb, step_ratio=None, min_guidance_scale: float=1.0, max_guidance_scale: float=3.0):
        """Compute a guidance loss pulling rendered frames toward the prior.

        Mode 'sds' distills the score at a (possibly annealed) noise level;
        the reconstruction modes compare against a cached full generation
        in pixel or latent space respectively.
        """
        batch_size = pred_rgb.shape[0]
        pred_rgb = pred_rgb.to(self.dtype)
        pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False)
        latents = self.encode_image(pred_rgb_512)
        latents = latents.unsqueeze(0)
        if (step_ratio is not None):
            # Anneal: later training steps use smaller (less noisy) t.
            t = np.round(((1 - step_ratio) * self.num_train_timesteps)).clip(self.min_step, self.max_step)
            t = torch.full((1,), t, dtype=torch.long, device=self.device)
        else:
            t = torch.randint(self.min_step, (self.max_step + 1), (1,), dtype=torch.long, device=self.device)
        # SDS weighting w(t) = 1 - alpha_bar_t.
        w = (1 - self.alphas[t]).view(1, 1, 1, 1)
        if (self.guidance_type == 'sds'):
            with torch.no_grad():
                t = (self.num_train_timesteps - t.item())
                noise = torch.randn_like(latents)
                latents_noisy = self.pipe.scheduler.add_noise(latents, noise, self.pipe.scheduler.timesteps[t:(t + 1)])
                noise_pred = self.pipe(image=self.image, height=512, width=512, latents=latents_noisy, output_type='noise', denoise_beg=t, denoise_end=(t + 1), min_guidance_scale=min_guidance_scale, max_guidance_scale=max_guidance_scale, num_frames=batch_size, num_inference_steps=self.num_train_timesteps).frames[0]
            grad = (w * (noise_pred - noise))
            grad = torch.nan_to_num(grad)
            # Reparameterized SDS: MSE toward (latents - grad), detached.
            target = (latents - grad).detach()
            loss = ((0.5 * F.mse_loss(latents.float(), target, reduction='sum')) / latents.shape[1])
            print(loss.item())
            return loss
        elif (self.guidance_type == 'pixel reconstruction'):
            # Generate the pixel-space target once and reuse it.
            if (self.target_cache is None):
                with torch.no_grad():
                    self.target_cache = self.pipe(image=self.image, height=512, width=512, output_type='frame', num_frames=batch_size, num_inference_steps=self.num_train_timesteps, decode_chunk_size=1).frames[0]
                self.target_cache = ((self.target_cache + 1) * 0.5)
                self.target_cache = self.target_cache.permute(1, 0, 2, 3)
            loss = ((0.5 * F.mse_loss(pred_rgb_512.float(), self.target_cache.detach().float(), reduction='sum')) / latents.shape[1])
            print(loss.item())
            return loss
        elif (self.guidance_type == 'latent reconstruction'):
            # Generate the latent-space target once and reuse it.
            if (self.target_cache is None):
                with torch.no_grad():
                    self.target_cache = self.pipe(image=self.image, height=512, width=512, output_type='latent', num_frames=batch_size, num_inference_steps=self.num_train_timesteps).frames[0]
            loss = ((0.5 * F.mse_loss(latents.float(), self.target_cache.detach().float(), reduction='sum')) / latents.shape[1])
            print(loss.item())
            return loss
def makeFoldersNeededForTestingSession(absMainOutputFolder, sessionName):
    """Create the output folder hierarchy for one testing session.

    Builds logs, per-session prediction (segmentation/probability-map) and
    feature folders underneath `absMainOutputFolder`.

    Returns:
        [logs folder, segm/prob-maps folder, features folder] paths.
    """
    print('Creating necessary folders for testing session...')
    createMainOutputFolder(absMainOutputFolder)
    logs_path = absMainOutputFolder + '/logs/'
    createLogsFolder(logs_path)
    predictions_root = absMainOutputFolder + '/predictions'
    createFolderForPredictions(predictions_root)
    session_results_path = predictions_root + '/' + sessionName
    createFolderForSessionResults(session_results_path)
    maps_path = session_results_path + '/predictions/'
    createFolderForSegmAndProbMaps(maps_path)
    features_path = session_results_path + '/features/'
    createFolderForFeatures(features_path)
    return [logs_path, maps_path, features_path]
def main():
    """Entry point: build or restore a torchvision classifier, then run one
    of the evaluate / quantization-aware tuning / benchmark / accuracy
    modes selected by the parsed CLI flags.

    NOTE(review): relies on module-level `parser` and `validate` — confirm
    against the full script.
    """
    args = parser.parse_args()
    accelerator = Accelerator(cpu=args.no_cuda)
    # MobileNet variants come from the quantization-ready model zoo.
    if ('mobilenet' in args.arch):
        import torchvision.models.quantization as models
    else:
        import torchvision.models as models
    if (args.seed is not None):
        random.seed(args.seed)
        torch.manual_seed(args.seed)
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state from a checkpoint file.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Standard ImageNet-style data pipelines.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, sampler=None)
    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args, accelerator)
        return
    # Quantization-aware training via Intel Neural Compressor.
    if args.tune:
        (model, train_loader, val_loader, optimizer) = accelerator.prepare(model, train_loader, val_loader, optimizer)
        def train_func(model):
            # Short fine-tuning: at most `iters` batches for `epochs` epochs.
            epochs = 8
            iters = 30
            for nepoch in range(epochs):
                model.train()
                cnt = 0
                for (image, target) in train_loader:
                    print('.', end='')
                    cnt += 1
                    output = model(image)
                    loss = criterion(output, target)
                    optimizer.zero_grad()
                    accelerator.backward(loss)
                    optimizer.step()
                    if (cnt >= iters):
                        break
                # Freeze quantization observers / BN stats late in training.
                if (nepoch > 3):
                    model.apply(torch.quantization.disable_observer)
                if (nepoch > 2):
                    model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
        import copy
        from neural_compressor import QuantizationAwareTrainingConfig
        from neural_compressor.training import prepare_compression
        model = copy.deepcopy(model)
        conf = QuantizationAwareTrainingConfig()
        compression_manager = prepare_compression(model, conf)
        compression_manager.callbacks.on_train_begin()
        model = compression_manager.model
        train_func(model)
        compression_manager.callbacks.on_train_end()
        model._model = accelerator.unwrap_model(model._model)
        compression_manager.save(args.tuned_checkpoint)
        return
    # Benchmark and/or accuracy evaluation, optionally on the int8 model.
    if (args.performance or args.accuracy):
        model.eval()
        if args.int8:
            from neural_compressor.utils.pytorch import load
            new_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model, dataloader=val_loader)
        else:
            new_model = model
        if args.performance:
            from neural_compressor.config import BenchmarkConfig
            from neural_compressor import benchmark
            b_conf = BenchmarkConfig(warmup=5, iteration=args.iter, cores_per_instance=4, num_of_instance=1)
            benchmark.fit(new_model, b_conf, b_dataloader=val_loader)
        if args.accuracy:
            validate(val_loader, new_model, criterion, args, accelerator)
        return
class AntBulletEnv(WalkerBaseBulletEnv):
    """Bullet locomotion environment using the Ant robot on the walker base."""

    def __init__(self, reward_include_progress):
        # Build the Ant robot and delegate all env setup to the walker base.
        ant_robot = Ant()
        self.robot = ant_robot
        WalkerBaseBulletEnv.__init__(self, ant_robot, reward_include_progress)
def possible_weight_names(name, n=10):
    """Yield candidate checkpoint variable names for a weight.

    First yields the decoded name itself, then '<scope>_<i>/<rest>' for
    i = 1..n, where <scope> and <rest> are the first two '/'-separated
    segments of the name.

    Args:
        name: weight name as bytes.
        n: number of numbered scope variants to yield.
    """
    decoded = name.decode()
    yield decoded
    segments = decoded.split('/')
    for suffix in range(1, n + 1):
        yield '{}_{}/{}'.format(segments[0], suffix, segments[1])
def named_sparsity(module, prefix='', **kwargs):
    """Yield (name, (zero_count, numel)) for every parameter and buffer of
    `module`, skipping tensors that sparsity-aware submodules declare as
    internal/service tensors.

    Zero counts come from submodules implementing `SparsityStats`: their
    `.sparsity(**kwargs)` maps tensor id() -> number of zeroed elements;
    tensors without an entry report 0.0.
    """
    warnings.warn("Since v2020.06 module's buffers are also accounted by `named_sparsity`.", FutureWarning)
    # n_dropout: id(tensor) -> zeroed-element count; p_service: names to skip.
    (n_dropout, p_service) = ({}, set())
    for (name, mod) in module.named_modules(prefix=prefix):
        if isinstance(mod, SparsityStats):
            # Qualify ignored tensor names with the submodule's dotted path.
            name = (name + ('.' if name else ''))
            p_service.update(((name + k) for k in mod.__sparsity_ignore__))
            n_dropout.update(mod.sparsity(**kwargs))
    for (name, par) in module.named_parameters(prefix=prefix):
        if (name not in p_service):
            (yield (name, (n_dropout.get(id(par), 0.0), par.numel())))
    for (name, buf) in module.named_buffers(prefix=prefix):
        if (name not in p_service):
            (yield (name, (n_dropout.get(id(buf), 0.0), buf.numel())))
# BUG FIX: the decorator had been garbled to a bare `_torch` expression
# (a NameError); restored as `@register_torch`, the registration decorator
# this feature-extractor factory pattern uses. TODO(review): confirm the
# exact decorator name against the original module.
@register_torch
def retccl(tile_px, **kwargs):
    """Build a RetCCL feature extractor; center-crop unless tiles are 256px."""
    from .retccl import RetCCLFeatures
    return RetCCLFeatures(center_crop=(tile_px != 256), **kwargs)
def Reduced_ResNet18(nclasses, nf=20, bias=True):
    """Build a ResNet-18 with a reduced base width (nf=20 vs the usual 64).

    Args:
        nclasses: number of output classes.
        nf: base number of feature maps.
        bias: whether linear/conv layers use bias terms.
    """
    blocks_per_stage = [2, 2, 2, 2]
    return ResNet(BasicBlock, blocks_per_stage, nclasses, nf, bias)
def fill_missing_embarked(data):
    """Impute and integer-encode the 'Embarked' column in place.

    Missing ports are replaced by the most frequent port, then mapped
    S->0, Q->1, C->2 and cast to int.

    Returns:
        The same DataFrame, mutated.
    """
    most_common_port = data['Embarked'].mode()[0]
    data['Embarked'] = data['Embarked'].fillna(most_common_port)
    port_codes = {'S': 0, 'Q': 1, 'C': 2}
    data['Embarked'] = data['Embarked'].map(port_codes).astype(int)
    return data
class MixtureOfGaussians(nn.Module):
    """Learnable mixture-of-Gaussians prior over latents of shape `z_shape`.

    Means and (softplus-parameterized) variances of `num_mixtures`
    components are stored stacked in `z_pre`; mixture weights `pi` are
    uniform and frozen.

    NOTE(review): relies on a module-level `args.device` — confirm against
    the enclosing script.
    """

    def __init__(self, z_shape, num_mixtures=10):
        super().__init__()
        self.z_shape = z_shape
        self.z_dim = np.prod(z_shape)
        self.k = num_mixtures
        # First k rows along dim 1 hold means, the next k hold pre-softplus
        # variance parameters (split in gaussian_parameters).
        self.z_pre = torch.nn.Parameter((torch.randn(1, (2 * self.k), self.z_dim).to(args.device) / np.sqrt((self.k * self.z_dim))))
        # Uniform, non-trainable mixture weights.
        self.pi = torch.nn.Parameter((torch.ones(self.k).to(args.device) / self.k), requires_grad=False)

    def sample_gaussian(self, m, v):
        """Reparameterized draw z = m + sqrt(v) * eps, eps ~ N(0, I)."""
        sample = torch.randn(m.shape).to(args.device)
        z = (m + ((v ** 0.5) * sample))
        return z

    def log_sum_exp(self, x, dim=0):
        """Numerically stable log-sum-exp of `x` along `dim`."""
        max_x = torch.max(x, dim)[0]
        new_x = (x - max_x.unsqueeze(dim).expand_as(x))
        return (max_x + new_x.exp().sum(dim).log())

    def log_mean_exp(self, x, dim):
        """log of the mean of exp(x) along `dim`."""
        return (self.log_sum_exp(x, dim) - np.log(x.size(dim)))

    def log_normal(self, x, m, v):
        """Log density of a diagonal Gaussian N(x; m, v), summed over the
        last dimension."""
        const = (((- 0.5) * x.size((- 1))) * torch.log((2 * torch.tensor(np.pi))))
        log_det = ((- 0.5) * torch.sum(torch.log(v), dim=(- 1)))
        log_exp = ((- 0.5) * torch.sum((((x - m) ** 2) / v), dim=(- 1)))
        log_prob = ((const + log_det) + log_exp)
        return log_prob

    def log_normal_mixture(self, z, m, v):
        """Log density of z under the equally-weighted Gaussian mixture."""
        # Broadcast each flattened sample against all k components.
        z = z.view(z.shape[0], 1, (- 1))
        log_probs = self.log_normal(z, m, v)
        log_prob = self.log_mean_exp(log_probs, 1)
        return log_prob

    def gaussian_parameters(self, h, dim=(- 1)):
        """Split stacked parameters into means and positive variances."""
        (m, h) = torch.split(h, (h.size(dim) // 2), dim=dim)
        # Softplus keeps variances strictly positive.
        v = (F.softplus(h) + 1e-08)
        return (m, v)

    def sample(self, n_samples=1, **kwargs):
        """Draw samples: pick components by `pi`, then sample their Gaussians."""
        idx = torch.distributions.categorical.Categorical(self.pi).sample((n_samples,))
        (m, v) = self.gaussian_parameters(self.z_pre.squeeze(0), dim=0)
        (m, v) = (m[idx], v[idx])
        z_samples = self.sample_gaussian(m, v)
        return z_samples.view(z_samples.shape[0], *self.z_shape)

    def log_p(self, z, **kwargs):
        """Alias for forward(): log-density of z under the mixture prior."""
        return self.forward(z)

    def forward(self, z, dim=None, **kwargs):
        """Return per-sample log p(z) under the mixture prior."""
        (m, v) = self.gaussian_parameters(self.z_pre, dim=1)
        log_p_z = self.log_normal_mixture(z, m, v)
        return log_p_z

    def __str__(self):
        return 'MixtureOfGaussians'
class SequenceNormFunc(torch.autograd.Function):
    """Autograd wrapper around the mega2_ops (group) sequence-norm kernels.

    NOTE(review): decorators appear stripped throughout this file; forward/
    backward of a torch.autograd.Function are conventionally @staticmethod --
    confirm against the original source.
    """
    def forward(ctx: FunctionCtx, x: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, num_groups: Optional[int]=None, eps: float=1e-05, length_last: bool=False) -> torch.Tensor:
        # Dispatch to the grouped kernel only when num_groups is given.
        if (num_groups is None):
            (y, count, mean, rstd) = mega2_ops.sequence_norm_fwd(x, gamma, beta, padding_mask, eps, length_last)
        else:
            (y, count, mean, rstd) = mega2_ops.group_sequence_norm_fwd(x, gamma, beta, padding_mask, num_groups, eps, length_last)
        # Stash everything the backward kernels need.
        ctx.save_for_backward(x, count, mean, rstd, gamma, padding_mask)
        ctx.num_groups = num_groups
        ctx.length_last = length_last
        return y
    def backward(ctx: FunctionCtx, y_grad: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor])]:
        (x, count, mean, rstd, gamma, padding_mask) = ctx.saved_tensors
        num_groups = ctx.num_groups
        length_last = ctx.length_last
        if (num_groups is None):
            (x_grad, gamma_grad, beta_grad) = mega2_ops.sequence_norm_bwd(y_grad, x, count, mean, rstd, gamma, padding_mask, length_last)
        else:
            (x_grad, gamma_grad, beta_grad) = mega2_ops.group_sequence_norm_bwd(y_grad, x, count, mean, rstd, gamma, padding_mask, num_groups, length_last)
        # Non-tensor forward inputs (mask, num_groups, eps, length_last) get no grads.
        return (x_grad, gamma_grad, beta_grad, None, None, None, None)
def get_config_headers():
    """Return per-config-file comment headers keyed by config name.

    Keys: 'init' (input-only network used to accumulate LDA stats), 'ref'
    (full network with randomized fixed components, used to work out the
    left/right context) and 'final' (the complete network). Unknown keys
    yield '' because the result is a defaultdict(str).
    """
    command_line = '# This file was created by the command:\n# ' + ' '.join(sys.argv)
    headers = defaultdict(str)
    headers['init'] = command_line + '\n# It contains the input of the network and is used in\n# accumulating stats for an LDA-like transform of the\n# input features.\n'
    headers['ref'] = command_line + '\n# It contains the entire neural network, but with those\n# components that would normally require fixed vectors/matrices\n# read from disk, replaced with random initialization\n# (this applies to the LDA-like transform and the\n# presoftmax-prior-scale, if applicable). This file\n# is used only to work out the left-context and right-context\n# of the network.\n'
    headers['final'] = command_line + '\n# It contains the entire neural network.\n'
    return headers
def test_invalid_blackbox_explainer():
    """Non-explainers and incomplete blackbox explainers must not validate."""
    class InvalidBlackboxExplainer(ExplainerMixin):
        # Declares blackbox metadata only; presumably invalid because it
        # implements none of the required explainer methods -- confirm
        # against _is_valid_blackbox_explainer's checks.
        explainer_type = 'blackbox'
        available_explanations = ['local']
    assert (not _is_valid_blackbox_explainer(LinearRegression))
    assert (not _is_valid_blackbox_explainer(InvalidBlackboxExplainer))
    assert (not _is_valid_blackbox_explainer(NotEvenAnExplainer))
class SimpleCNNMNIST_header(nn.Module):
    """LeNet-style convolutional header for MNIST (no classifier layer).

    Maps a (N, 1, 28, 28) batch to a (N, hidden_dims[1]) embedding.
    ``output_dim`` is accepted for interface compatibility but unused here.
    """

    def __init__(self, input_dim, hidden_dims, output_dim=10):
        super(SimpleCNNMNIST_header, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])

    def forward(self, x):
        """Return the ReLU-activated embedding for input batch ``x``."""
        out = self.pool(self.relu(self.conv1(x)))
        out = self.pool(self.relu(self.conv2(out)))
        out = out.view(-1, 16 * 4 * 4)  # flatten the 16x4x4 feature maps
        out = self.relu(self.fc1(out))
        return self.relu(self.fc2(out))
# NOTE(review): the line below looks like a truncated registration decorator
# (e.g. @register_model) -- confirm against the original source.
_model
def tf_efficientnet_b4_ns(pretrained=False, **kwargs):
    """EfficientNet-B4 (NoisyStudent, TF port).

    Uses the TensorFlow batch-norm epsilon and 'same' padding to match the
    original TF weights; B4 scaling is width x1.4, depth x1.8.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
    return model
class TransR(Model):
    """TransR knowledge-graph embedding model (TF1 graph style).

    Entities are projected into each relation's space by a per-relation
    transfer matrix before the TransE-style score |h + r - t| is computed.
    Training uses a margin-based ranking loss over positive/negative triples.
    """
    def _transfer(self, transfer_matrix, embeddings):
        # Project entity embeddings into the relation-specific space.
        return matmul_func(embeddings, transfer_matrix)
    def _calc(self, h, t, r):
        """Element-wise |h + r - t| on L2-normalized embeddings."""
        h = tf.nn.l2_normalize(h, (- 1))
        t = tf.nn.l2_normalize(t, (- 1))
        r = tf.nn.l2_normalize(r, (- 1))
        return abs(((h + r) - t))
    def embedding_def(self):
        """Create entity/relation embeddings and flattened transfer matrices."""
        config = self.get_config()
        self.ent_embeddings = tf.get_variable(name='ent_embeddings', shape=[config.entTotal, config.ent_size], initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings = tf.get_variable(name='rel_embeddings', shape=[config.relTotal, config.rel_size], initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        # One (ent_size x rel_size) matrix per relation, stored flattened.
        self.transfer_matrix = tf.get_variable(name='transfer_matrix', shape=[config.relTotal, (config.ent_size * config.rel_size)], initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.parameter_lists = {'ent_embeddings': self.ent_embeddings, 'rel_embeddings': self.rel_embeddings, 'transfer_matrix': self.transfer_matrix}
    def loss_def(self):
        """Build the margin ranking loss over positive vs. negative triples."""
        config = self.get_config()
        (pos_h, pos_t, pos_r) = self.get_positive_instance(in_batch=True)
        (neg_h, neg_t, neg_r) = self.get_negative_instance(in_batch=True)
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, neg_r)
        pos_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, pos_r), [(- 1), config.ent_size, config.rel_size])
        p_h = self._transfer(pos_matrix, pos_h_e)
        p_t = self._transfer(pos_matrix, pos_t_e)
        p_r = pos_r_e
        if (config.negative_rel == 0):
            # Negatives corrupt only entities, so the positive relation's
            # transfer matrix is reused for the negative triple.
            n_h = self._transfer(pos_matrix, neg_h_e)
            n_t = self._transfer(pos_matrix, neg_t_e)
            n_r = neg_r_e
        else:
            neg_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, neg_r), [(- 1), config.ent_size, config.rel_size])
            n_h = self._transfer(neg_matrix, neg_h_e)
            n_t = self._transfer(neg_matrix, neg_t_e)
            n_r = neg_r_e
        _p_score = self._calc(p_h, p_t, p_r)
        _n_score = self._calc(n_h, n_t, n_r)
        p_score = tf.reduce_sum(_p_score, (- 1), keep_dims=True)
        n_score = tf.reduce_sum(_n_score, (- 1), keep_dims=True)
        # Hinge: positive triples should score lower than negatives by margin.
        self.loss = tf.reduce_mean(tf.maximum(((p_score - n_score) + config.margin), 0))
    def predict_def(self):
        """Score candidate triples at test time (lower = more plausible)."""
        config = self.get_config()
        (predict_h, predict_t, predict_r) = self.get_predict_instance()
        predict_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, predict_h), [1, (- 1), config.ent_size])
        predict_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, predict_t), [1, (- 1), config.ent_size])
        predict_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, predict_r), [1, (- 1), config.rel_size])
        # All candidates share one relation, hence predict_r[0].
        predict_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, predict_r[0]), [1, config.ent_size, config.rel_size])
        h_e = tf.reshape(self._transfer(predict_matrix, predict_h_e), [(- 1), config.rel_size])
        t_e = tf.reshape(self._transfer(predict_matrix, predict_t_e), [(- 1), config.rel_size])
        r_e = predict_r_e
        self.predict = tf.reduce_sum(self._calc(h_e, t_e, r_e), (- 1), keep_dims=True)
class ActorArgs():
    """Bundle of arguments describing how to launch an actor.

    Attributes:
        actor_name: identifier of the actor.
        executor: executor object/callable used to run the actor.
        args: positional arguments for the executor (list).
        kargs: keyword arguments for the executor (dict).
    """

    def __init__(self, actor_name, executor, args=None, kargs=None):
        self.actor_name = actor_name
        self.executor = executor
        # Fix the shared-mutable-default pitfall: the original `args=[]` /
        # `kargs={}` defaults were single objects reused (and mutated)
        # across every instance. None sentinels give each instance a fresh
        # list/dict while keeping the call interface backward-compatible.
        self.args = [] if args is None else args
        self.kargs = {} if kargs is None else kargs

    def get(self, key, default_value=None):
        """Attribute lookup with a default, mirroring dict.get()."""
        return getattr(self, key, default_value)
def extract_flow(args):
    """Run FlowNet2 inference and seed a zero reverse-flow for the first frame.

    Side effect: rewrites ``args.DATA_ROOT`` to point at the flow directory.
    """
    from tools.infer_flownet2 import infer
    output_file = infer(args)
    # Flow files are named with a 5-digit frame prefix, e.g. 00012.flo.
    flow_list = [x for x in os.listdir(output_file) if ('.flo' in x)]
    flow_start_no = min([int(x[:5]) for x in flow_list])
    # The first frame has no backward flow; write an all-zero .rflo
    # placeholder shaped like an existing flow file.
    # NOTE(review): the template is flow_list[0] (os.listdir order), not the
    # file matching flow_start_no -- fine if all flows share a shape; confirm.
    zero_flow = cvb.read_flow(os.path.join(output_file, flow_list[0]))
    cvb.write_flow((zero_flow * 0), os.path.join(output_file, ('%05d.rflo' % flow_start_no)))
    args.DATA_ROOT = output_file
def segnet(image_shape: tuple, num_classes: int, class_weights=None, lcn: bool=True, dropout_rate: float=None, optimizer=SGD(lr=0.1, momentum=0.9), pretrain_encoder: bool=True, bn_train: bool=True) -> Model:
    """Build and compile a SegNet model for semantic segmentation.

    Args:
        image_shape: (H, W, C); H and W must be divisible by 32 (5 poolings).
        num_classes: number of output classes.
        class_weights: optional per-class weights for loss/accuracy.
        lcn: apply local contrast normalization after pixel scaling.
        dropout_rate: rate for the Dropout layers between deep stages.
            NOTE(review): the default None would reach Dropout(None) --
            confirm callers always pass a float.
        optimizer: Keras optimizer used at compile time.
            NOTE(review): mutable default instance is shared across calls.
        pretrain_encoder: copy VGG16 weights into the encoder.
        bn_train: whether batch-norm layers are trainable.

    Returns:
        A compiled Keras Model named 'SegNet'.

    Raises:
        ValueError: if a spatial dimension is not divisible by 32.
    """
    div = int((2 ** 5))
    for dim in image_shape[:(- 1)]:
        if (dim % div):
            msg = 'dimension ({}) must be divisible by {}'.format(dim, div)
            raise ValueError(msg)
    inputs = Input(image_shape, name='SegNet_input')
    # Scale raw pixels to [0, 1] before any learned layers.
    x = Lambda((lambda x: (x / 255.0)), name='pixel_norm')(inputs)
    if lcn:
        x = LocalContrastNormalization()(x)
    # VGG16-shaped encoder; pooling outputs are kept for decoder unpooling.
    (x, pool_1) = _encode(x, (2 * [64]), bn_train=bn_train)
    (x, pool_2) = _encode(x, (2 * [128]), bn_train=bn_train)
    (x, pool_3) = _encode(x, (3 * [256]), bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    (x, pool_4) = _encode(x, (3 * [512]), bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    (x, pool_5) = _encode(x, (3 * [512]), bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    # Mirror-image decoder driven by the stored pooling results.
    x = _decode(x, pool_5, (3 * [512]), bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    x = _decode(x, pool_4, [512, 512, 256], bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    x = _decode(x, pool_3, [256, 256, 128], bn_train=bn_train)
    x = Dropout(dropout_rate)(x)
    x = _decode(x, pool_2, [128, 64], bn_train=bn_train)
    x = _decode(x, pool_1, [64], bn_train=bn_train)
    x = _classify(x, num_classes)
    model = Model(inputs=[inputs], outputs=[x], name='SegNet')
    model.compile(optimizer=optimizer, loss=build_categorical_crossentropy(class_weights), metrics=[build_categorical_accuracy(weights=class_weights)])
    if pretrain_encoder:
        _transfer_vgg16_encoder(model)
    return model
# NOTE(review): decorator lines in this file appear truncated; the line below
# is likely @register_task('dummy_mt'), and add_args / setup_task /
# source_dictionary / target_dictionary presumably carried
# @staticmethod / @classmethod / @property -- confirm against the original.
_task('dummy_mt')
class DummyMTTask(FairseqTask):
    """Synthetic translation task producing fixed dummy batches (benchmarking)."""
    def add_args(parser):
        """Register task-specific CLI arguments."""
        parser.add_argument('--dict-size', default=49996, type=int)
        parser.add_argument('--dataset-size', default=100000, type=int)
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        dictionary.pad_to_multiple_(8)
        # A ramp of token ids starting just above the pad index; source is
        # the first tokens_per_sample entries, target is shifted by one.
        seq = ((torch.arange((args.tokens_per_sample + 1)) + dictionary.pad()) + 1)
        self.dummy_src = seq[:(- 1)]
        self.dummy_tgt = seq[1:]
    def setup_task(cls, args, **kwargs):
        """Build a synthetic dictionary of args.dict_size words and make the task."""
        dictionary = Dictionary()
        for i in range(args.dict_size):
            dictionary.add_symbol('word{}'.format(i))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Install a DummyDataset of identical batches under ``split``."""
        if (self.args.max_sentences is not None):
            bsz = self.args.max_sentences
        else:
            # Derive batch size from the token budget.
            bsz = max(1, (self.args.max_tokens // self.args.tokens_per_sample))
        tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])
        self.datasets[split] = DummyDataset({'id': 1, 'net_input': {'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full((bsz,), self.args.tokens_per_sample, dtype=torch.long), 'prev_output_tokens': tgt.clone()}, 'target': tgt, 'nsentences': bsz, 'ntokens': (bsz * self.args.tokens_per_sample)}, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample)
    def source_dictionary(self):
        return self.dictionary
    def target_dictionary(self):
        return self.dictionary
def get_loaders(dataset, device, data_root, make_valid_loader, train_batch_size, valid_batch_size, test_batch_size):
    """Construct train/valid/test DataLoaders for a named dataset.

    The valid loader is None unless ``make_valid_loader`` is truthy. Status
    messages go to stderr so stdout stays clean for results.
    """
    print('Loading data...', end='', flush=True, file=sys.stderr)
    image_datasets = ['cifar10', 'svhn', 'mnist', 'fashion-mnist']
    tabular_datasets = ['miniboone', 'hepmass', 'power', 'gas', 'bsds300']
    if dataset in image_datasets:
        train_dset, valid_dset, test_dset = get_image_datasets(dataset, data_root, make_valid_loader)
    elif dataset in tabular_datasets:
        train_dset, valid_dset, test_dset = get_tabular_datasets(dataset, data_root)
    elif dataset == 'linear-gaussian':
        train_dset, valid_dset, test_dset = get_linear_gaussian_datasets()
    else:
        # Anything else is treated as a synthetic 2-D toy dataset.
        train_dset, valid_dset, test_dset = get_2d_datasets(dataset)
    print('Done.', file=sys.stderr)
    train_loader = get_loader(train_dset, device, train_batch_size, drop_last=True)
    valid_loader = get_loader(valid_dset, device, valid_batch_size, drop_last=False) if make_valid_loader else None
    test_loader = get_loader(test_dset, device, test_batch_size, drop_last=False)
    return train_loader, valid_loader, test_loader
def main(dataset_name, dataset_path, model_name, epoch, learning_rate, batch_size, weight_decay, device, save_dir):
    """Train and evaluate a click-through-rate model.

    Splits the dataset 80/10/10, trains with Adam + BCE loss, early-stops
    after 2 non-improving validation AUCs (checkpointing the best model to
    ``save_dir``), then reports AUC on the held-out test split.
    """
    device = torch.device(device)
    dataset = get_dataset(dataset_name, dataset_path)
    # 80/10/10 random train/valid/test split.
    train_length = int((len(dataset) * 0.8))
    valid_length = int((len(dataset) * 0.1))
    test_length = ((len(dataset) - train_length) - valid_length)
    (train_dataset, valid_dataset, test_dataset) = torch.utils.data.random_split(dataset, (train_length, valid_length, test_length))
    train_data_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8)
    valid_data_loader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=8)
    test_data_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8)
    model = get_model(model_name, dataset).to(device)
    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    early_stopper = EarlyStopper(num_trials=2, save_path=f'{save_dir}/{model_name}.pt')
    for epoch_i in range(epoch):
        train(model, optimizer, train_data_loader, criterion, device)
        # Validation AUC drives early stopping.
        auc = test(model, valid_data_loader, device)
        print('epoch:', epoch_i, 'validation: auc:', auc)
        if (not early_stopper.is_continuable(model, auc)):
            print(f'validation: best auc: {early_stopper.best_accuracy}')
            break
    # Final evaluation on the held-out test split.
    auc = test(model, test_data_loader, device)
    print(f'test auc: {auc}')
class BaseLoader(ImageCollection):
    """Image-sequence loader resolving its file list from one of three
    sources, in priority order: an explicit ``files`` list, an LMDB index
    keyed by the sequence (basename of ``path``), or a glob pattern.
    """

    def __init__(self, split, path, regex, load_func=None, lmdb_env=None, files=None):
        # Idiom fix: identity comparisons with None (`is not None`) instead
        # of the original `not (x == None)` equality checks.
        if files is not None:
            # Explicit file list wins over everything else.
            super(BaseLoader, self).__init__(files, load_func=load_func)
        elif lmdb_env is not None:
            # The LMDB value for the sequence name is a '|'-separated list
            # of file names relative to `path`.
            key_db = osp.basename(path)
            with lmdb_env.begin() as txn:
                _files_vec = txn.get(key_db.encode()).decode().split('|')
            _files = [bytes(osp.join(path, f).encode()) for f in _files_vec]
            super(BaseLoader, self).__init__(_files, load_func=load_func)
        else:
            # Fall back to a glob pattern, e.g. <path>/<regex>.
            super(BaseLoader, self).__init__(osp.join(path + '/' + regex), load_func=load_func)
        self.name = osp.basename(path)
        self.split = split

    def __str__(self):
        """Human-readable summary: class, sequence name and frame count."""
        return "< class: '{}' name: '{}', frames: {} >".format(type(self).__name__, self.name, len(self))
class BaseRerankFinetuner():
    """Trainer mixin for reranker fine-tuning with listwise cross-entropy.

    Assumes each query's candidates are laid out as one positive followed by
    ``args.neg_per_query`` negatives, so the correct class index is always 0.
    """
    def compute_loss(self, model, inputs, return_outputs=False):
        # Regroup flat logits into (num_queries, 1 + neg_per_query) rows.
        scores = model(**inputs).logits.reshape((- 1), (1 + self.args.neg_per_query))
        # The positive candidate is always first in each group.
        labels = torch.zeros((len(scores),), dtype=torch.long, device=scores.device)
        loss = F.cross_entropy(scores, labels)
        return ((loss, scores) if return_outputs else loss)
    def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> Dict[(str, float)]:
        """Run custom retrieval validation, then fire the on_evaluate callback."""
        metrics = validate_during_training(self, eval_dataset, ignore_keys, metric_key_prefix)
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def floating_point_ops(self, inputs: Dict[(str, Union[(torch.Tensor, Any)])]):
        """Disable HF Trainer FLOPs accounting (always reports 0)."""
        return 0
def pad_collate(batch):
    """Collate (sentence, length, label) triples into padded batch tensors.

    Sentences are padded with token id 1 in time-major layout
    (max_len, batch); lengths and labels become 1-D tensors.
    """
    sentences, lengths, labels = zip(*batch)
    padded = pad_sequence(sentences, padding_value=1)
    return padded, torch.tensor(lengths), torch.tensor(labels)
def chamfer_distance(src, dst, src_weight=1.0, dst_weight=1.0, criterion_mode='l2', reduction='mean'):
    """Bidirectional chamfer distance between two point sets.

    Args:
        src: (B, N, C) source points.
        dst: (B, M, C) target points.
        src_weight / dst_weight: loss weights for each direction.
        criterion_mode: 'l2', 'l1' or 'smooth_l1' pairwise distance.
        reduction: 'mean', 'sum' or 'none'.

    Returns:
        (loss_src, loss_dst, indices1, indices2): per-direction losses and
        nearest-neighbour indices of src in dst and of dst in src.
    """
    criteria = {'smooth_l1': smooth_l1_loss, 'l1': l1_loss, 'l2': mse_loss}
    if criterion_mode not in criteria:
        raise NotImplementedError
    criterion = criteria[criterion_mode]
    # Broadcast both sets to (B, N, M, C) and reduce over the channel dim.
    expanded_src = src.unsqueeze(2).repeat(1, 1, dst.shape[1], 1)
    expanded_dst = dst.unsqueeze(1).repeat(1, src.shape[1], 1, 1)
    pairwise = criterion(expanded_src, expanded_dst, reduction='none').sum(-1)
    src2dst_distance, indices1 = torch.min(pairwise, dim=2)
    dst2src_distance, indices2 = torch.min(pairwise, dim=1)
    loss_src = src2dst_distance * src_weight
    loss_dst = dst2src_distance * dst_weight
    if reduction == 'sum':
        loss_src = torch.sum(loss_src)
        loss_dst = torch.sum(loss_dst)
    elif reduction == 'mean':
        loss_src = torch.mean(loss_src)
        loss_dst = torch.mean(loss_dst)
    elif reduction != 'none':
        raise NotImplementedError
    return loss_src, loss_dst, indices1, indices2
# NOTE(review): the line below looks like a truncated registration decorator
# (algorithm-registry entry for STATIC_QUANT) -- confirm against the original.
_algo(name=STATIC_QUANT)
def static_quantize_entry(model: tf.keras.Model, quant_config: StaticQuantConfig, calib_dataloader: Callable=None, calib_iteration: int=100) -> tf.keras.Model:
    """Post-training static quantization entry point for Keras models.

    NOTE(review): ``framework_specific_info`` is read from module scope and
    must be defined elsewhere in this module.
    """
    keras_adaptor = KerasAdaptor(framework_specific_info)
    keras_adaptor.query_fw_capability(model)
    # Translate the user config into a framework tuning config, then quantize
    # with the supplied calibration data.
    tune_cfg = parse_to_keras_tune_cfg(model, quant_config, calib_iteration)
    q_model = keras_adaptor.quantize(tune_cfg, model, calib_dataloader)
    return q_model
def sepreresnet200b(**kwargs):
    """SE-PreResNet-200b (200 blocks, first-conv stride disabled)."""
    return get_sepreresnet(model_name='sepreresnet200b', blocks=200, conv1_stride=False, **kwargs)
def load_fmnist(path, kind='train'):
    """Load Fashion-MNIST from gzipped IDX files under *path*.

    ``kind`` selects the file prefix (e.g. 'train' or 't10k'). Returns
    (images, labels): images is (N, 784) uint8, labels is (N,) uint8.
    """
    import os
    import gzip
    import numpy as np
    labels_file = os.path.join(path, ('%s-labels-idx1-ubyte.gz' % kind))
    images_file = os.path.join(path, ('%s-images-idx3-ubyte.gz' % kind))
    # IDX headers: 8 bytes for the label file, 16 bytes for the image file.
    with gzip.open(labels_file, 'rb') as fh:
        labels = np.frombuffer(fh.read(), dtype=np.uint8, offset=8)
    with gzip.open(images_file, 'rb') as fh:
        images = np.frombuffer(fh.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)
    return (images, labels)
def correlation(filename, name, fold, ax):
    """Scatter-plot BOW (L1) distance against WMD and annotate the correlation.

    fold='five': pairwise distances within one dataset; fold='one':
    test-vs-train distances. Precomputed WMD values come from
    distance/<filename>.npy; negative entries are treated as missing.
    """
    print(filename)
    if (fold == 'five'):
        (data, X, y) = load('data/{}'.format(filename))
        X = normalize(X, axis=1, norm='l1')
        D_bow = pairwise_distances(X, metric='manhattan')
    elif (fold == 'one'):
        (X_train, y_train, X_test, y_test) = load_one('data/{}'.format(filename))
        X_train = normalize(X_train, axis=1, norm='l1')
        X_test = normalize(X_test, axis=1, norm='l1')
        D_bow = pairwise_distances(X_test, X_train, metric='manhattan')
    D_wmd = np.load('distance/{}.npy'.format(filename))
    # Mask out invalid (negative) WMD entries before correlating.
    D_wmd_ind = (D_wmd.reshape((- 1)) >= 0)
    corr = np.corrcoef(D_bow.reshape((- 1))[D_wmd_ind], D_wmd.reshape((- 1))[D_wmd_ind])[(0, 1)]
    ax.scatter(D_bow.reshape((- 1))[D_wmd_ind], D_wmd.reshape((- 1))[D_wmd_ind], c='#005aff', s=1, rasterized=True)
    ax.tick_params(labelsize=16)
    ax.set_xlabel('BOW (L1/L1)', size=20)
    ax.set_ylabel('WMD', size=20)
    # Annotate dataset name and Pearson correlation in axes coordinates.
    ax.text(0.05, 0.95, '{}\n$\\rho = {:.3f}$'.format(name, corr), size=24, transform=ax.transAxes, verticalalignment='top', horizontalalignment='left')
def gen_global_selector(n_features, term_names, term_types, unique_val_counts, importance_scores, round=3):
    """Build the summary DataFrame backing the global explanation selector.

    One row per term with Name / Type / '# Unique' / '% Non-zero' columns.
    Nominal and ordinal terms are reported as 'categorical'. Unique-value
    counts are only filled for raw features (index < n_features); all
    '% Non-zero' cells are NaN. Values are rounded to ``round`` decimals
    unless round is None (param name shadows the builtin, kept for
    interface compatibility). ``importance_scores`` is accepted but unused.
    """
    rows = []
    for idx, term_name in enumerate(term_names):
        kind = term_types[idx]
        if kind in ('nominal', 'ordinal'):
            kind = 'categorical'
        if idx < n_features and unique_val_counts is not None:
            n_unique = unique_val_counts[idx]
        else:
            n_unique = np.nan
        rows.append({'Name': term_name, 'Type': kind, '# Unique': n_unique, '% Non-zero': np.nan})
    frame = pd.DataFrame.from_records(rows, columns=['Name', 'Type', '# Unique', '% Non-zero'])
    return frame if round is None else frame.round(round)
# NOTE(review): the line below looks like a truncated registration decorator
# (e.g. @HEADS.register_module()) -- confirm against the original source.
_module()
class ConvFCBBoxHead(BBoxHead):
    """Bbox head with configurable shared / classification / regression
    branches, each built as (convs -> fcs):

        shared convs -> shared fcs -> [cls convs -> cls fcs -> fc_cls]
                                   -> [reg convs -> reg fcs -> fc_reg]
    """
    def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, init_cfg=None, *args, **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
        # At least one conv or fc layer must exist somewhere in the head.
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            # Branch convs require un-flattened (spatial) inputs.
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Shared trunk, then the two task-specific branches.
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            # No fc/pool collapsed the spatial dims, so the final predictors
            # see flattened (C * roi_feat_area) features.
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        if self.with_cls:
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
            else:
                # +1 for the background class.
                cls_channels = (self.num_classes + 1)
            self.fc_cls = build_linear_layer(self.cls_predictor_cfg, in_features=self.cls_last_dim, out_features=cls_channels)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
            self.fc_reg = build_linear_layer(self.reg_predictor_cfg, in_features=self.reg_last_dim, out_features=out_dim_reg)
        if (init_cfg is None):
            # Default Xavier init for all fc branches when no init_cfg given.
            self.init_cfg += [dict(type='Xavier', distribution='uniform', override=[dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs')])]
    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Build one (convs, fcs) branch; returns (convs, fcs, output dim)."""
        last_layer_dim = in_channels
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # The first fc sees flattened spatial features unless a shared fc
            # (or average pooling) already collapsed them.
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)
    def forward(self, x):
        """Return (cls_score, bbox_pred); either is None when disabled."""
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # Fork into the classification and regression branches.
        x_cls = x
        x_reg = x
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
def round_channels(channels, divisor=8):
    """Round a channel count to a multiple of *divisor*.

    Follows the MobileNet "make divisible" rule: round half up, never go
    below *divisor*, and bump up one step if rounding lost more than 10%.
    """
    candidate = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    if candidate < 0.9 * channels:
        candidate += divisor
    return candidate
def test_pisa_ssd_head_loss():
    """Check PISA-SSD head losses for empty and single-box ground truth."""
    s = 256
    img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3)}]
    cfg = mmcv.Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.0, ignore_iof_thr=(- 1), gt_max_assign_all=False), isr=dict(k=2.0, bias=0.0), carl=dict(k=1.0, bias=0.2), smoothl1_beta=1.0, allowed_border=(- 1), pos_weight=(- 1), neg_pos_ratio=3, debug=False))
    ssd_anchor_generator = dict(type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[1], ratios=([2],), basesize_ratio_range=(0.15, 0.9))
    self = PISASSDHead(num_classes=4, in_channels=(1,), train_cfg=cfg, anchor_generator=ssd_anchor_generator)
    # One random feature map per anchor-generator stride.
    feat = [torch.rand(1, 1, (s // (2 ** (i + 2))), (s // (2 ** (i + 2)))) for i in range(len(self.anchor_generator.strides))]
    (cls_scores, bbox_preds) = self.forward(feat)
    # Case 1: no ground truth -- both summed losses are asserted to be zero.
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    # NOTE(review): this failure message contradicts the `== 0` assertion;
    # it presumably should say the cls loss is expected to be zero -- confirm.
    assert (empty_cls_loss.item() == 0), 'cls loss should be non-zero'
    assert (empty_box_loss.item() == 0), 'there should be no box loss when there are no true boxes'
    # Case 2: a single ground-truth box -- both losses must be positive.
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert (onegt_cls_loss.item() > 0), 'cls loss should be non-zero'
    assert (onegt_box_loss.item() > 0), 'box loss should be non-zero'
class MaskRCNNBoxPredictor(BoxPredictor):
    """Second-stage Mask R-CNN box predictor (TF slim).

    Predicts class scores and box encodings from spatially averaged ROI
    features, and optionally per-class instance masks from the full feature
    maps. Keypoint prediction is declared but unimplemented.
    """
    def __init__(self, is_training, num_classes, fc_hyperparams, use_dropout, dropout_keep_prob, box_code_size, conv_hyperparams=None, predict_instance_masks=False, mask_height=14, mask_width=14, mask_prediction_conv_depth=256, predict_keypoints=False):
        super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)
        self._fc_hyperparams = fc_hyperparams
        self._use_dropout = use_dropout
        self._box_code_size = box_code_size
        self._dropout_keep_prob = dropout_keep_prob
        self._conv_hyperparams = conv_hyperparams
        self._predict_instance_masks = predict_instance_masks
        self._mask_height = mask_height
        self._mask_width = mask_width
        self._mask_prediction_conv_depth = mask_prediction_conv_depth
        self._predict_keypoints = predict_keypoints
        if self._predict_keypoints:
            raise ValueError('Keypoint prediction is unimplemented.')
        if ((self._predict_instance_masks or self._predict_keypoints) and (self._conv_hyperparams is None)):
            raise ValueError('`conv_hyperparams` must be provided when predicting masks.')
    def num_classes(self):
        # NOTE(review): accessors like this are usually @property; decorators
        # in this file appear stripped -- confirm against the original source.
        return self._num_classes
    def _predict(self, image_features, num_predictions_per_location):
        """Predict boxes/classes (and optionally masks) from ROI features.

        Returns a dict with BOX_ENCODINGS shaped
        (batch, 1, num_classes, box_code_size),
        CLASS_PREDICTIONS_WITH_BACKGROUND shaped (batch, 1, num_classes + 1),
        and MASK_PREDICTIONS when instance masks are enabled.
        """
        if (num_predictions_per_location != 1):
            raise ValueError('Currently FullyConnectedBoxPredictor only supports predicting a single box per class per location.')
        # Collapse the spatial dims before the fully connected predictors.
        spatial_averaged_image_features = tf.reduce_mean(image_features, [1, 2], keep_dims=True, name='AvgPool')
        flattened_image_features = slim.flatten(spatial_averaged_image_features)
        if self._use_dropout:
            flattened_image_features = slim.dropout(flattened_image_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training)
        with slim.arg_scope(self._fc_hyperparams):
            box_encodings = slim.fully_connected(flattened_image_features, (self._num_classes * self._box_code_size), activation_fn=None, scope='BoxEncodingPredictor')
            class_predictions_with_background = slim.fully_connected(flattened_image_features, (self._num_classes + 1), activation_fn=None, scope='ClassPredictor')
        box_encodings = tf.reshape(box_encodings, [(- 1), 1, self._num_classes, self._box_code_size])
        class_predictions_with_background = tf.reshape(class_predictions_with_background, [(- 1), 1, (self._num_classes + 1)])
        predictions_dict = {BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background}
        if self._predict_instance_masks:
            with slim.arg_scope(self._conv_hyperparams):
                # Upsample to the mask resolution, refine, then predict one
                # mask channel per class (transposed to class-major layout).
                upsampled_features = tf.image.resize_bilinear(image_features, [self._mask_height, self._mask_width], align_corners=True)
                upsampled_features = slim.conv2d(upsampled_features, num_outputs=self._mask_prediction_conv_depth, kernel_size=[2, 2])
                mask_predictions = slim.conv2d(upsampled_features, num_outputs=self.num_classes, activation_fn=None, kernel_size=[3, 3])
                instance_masks = tf.expand_dims(tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor')
            predictions_dict[MASK_PREDICTIONS] = instance_masks
        return predictions_dict
def visualize_detection_results(camera_id, dataset_path, gt_annotations=None, annotations=None):
    """Play back an ISSIA camera sequence with detection overlays.

    Ground-truth boxes are drawn in red and predicted boxes in blue (BGR
    color tuples). 'q' quits; space pauses until the next key press.

    Args:
        camera_id: which camera sequence to open.
        dataset_path: root of the ISSIA dataset.
        gt_annotations: optional ground-truth annotations to draw.
        annotations: optional predicted annotations to draw.
    """
    sequence = open_issia_sequence(camera_id, dataset_path)
    # Idiom fix: `x is not None` instead of the original `not (x is None)`.
    frame_idx = -1
    while sequence.isOpened():
        ret, frame = sequence.read()
        frame_idx += 1
        if not ret:
            break
        if gt_annotations is not None:
            frame = _annotate_frame(frame, frame_idx, gt_annotations, color=(0, 0, 255))
        if annotations is not None:
            frame = _annotate_frame(frame, frame_idx, annotations, color=(255, 0, 0))
        cv2.imshow('frame', frame)
        key = cv2.waitKey(1) & 255
        if key == ord('q'):
            break
        elif key == ord(' '):
            # Pause playback until any key is pressed.
            cv2.waitKey()
    sequence.release()
    cv2.destroyAllWindows()
()
_context
('--outdir', help='Where to save the results', required=True, metavar='DIR')
('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
('--metrics', help='Comma-separated list or "none" [default: fid50k_full]', type=CommaSeparatedList())
('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
('-n', '--dry-run', help='Print training options and exit', is_flag=True)
('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')
('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')
('--square', help='True for square, False for rectangle', type=bool, metavar='BOOL', default=False)
('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar', 'shhq']))
('--gamma', help='Override R1 gamma', type=float)
('--kimg', help='Override training duration', type=int, metavar='INT')
('--batch', help='Override batch size', type=int, metavar='INT')
('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
('--p', help='Augmentation probability for --aug=fixed', type=float)
('--target', help='ADA target value for --aug=ada', type=float)
('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc', 'body']))
('--resume', help='Resume training [default: noresume]', metavar='PKL')
('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')
('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')
def main(ctx, outdir, dry_run, **config_kwargs):
    """CLI entry point: resolve training options, allocate a fresh run directory,
    and launch the training processes (one per GPU)."""
    dnnlib.util.Logger(should_flush=True)
    # Turn CLI flags into full training-loop kwargs; report bad combinations via click.
    try:
        (run_desc, args) = setup_training_loop_kwargs(**config_kwargs)
    except UserError as err:
        ctx.fail(err)
    # Next run id = 1 + highest numeric prefix among existing run directories.
    prev_run_ids = []
    if os.path.isdir(outdir):
        for entry in os.listdir(outdir):
            if not os.path.isdir(os.path.join(outdir, entry)):
                continue
            match = re.match(r'^\d+', entry)
            if match is not None:
                prev_run_ids.append(int(match.group()))
    cur_run_id = max(prev_run_ids, default=(- 1)) + 1
    args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
    assert not os.path.exists(args.run_dir)
    # Echo the resolved configuration before doing anything irreversible.
    print()
    print('Training options:')
    print(json.dumps(args, indent=2))
    print()
    print(f'Output directory: {args.run_dir}')
    print(f'Training data: {args.training_set_kwargs.path}')
    print(f'Training duration: {args.total_kimg} kimg')
    print(f'Number of GPUs: {args.num_gpus}')
    print(f'Number of images: {args.training_set_kwargs.max_size}')
    print(f'Image resolution: {args.training_set_kwargs.resolution}')
    print(f'Conditional model: {args.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')
    print()
    if dry_run:
        print('Dry run; exiting.')
        return
    print('Creating output directory...')
    os.makedirs(args.run_dir, exist_ok=True)
    with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(args, f, indent=2)
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        # Single-GPU runs stay in-process; multi-GPU runs spawn one worker per GPU.
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
class Generic_WSI_Classification_Dataset(Dataset):
    """Whole-slide-image classification dataset driven by an annotation table.

    The annotation CSV/DataFrame is expected to provide (at least) a 'slide'
    column, a 'patient' column, and an outcome column (`label_col`, default
    'label') whose raw values are keys of `label_dict`.  Supports row
    filtering, patient-level label aggregation, and generation or loading of
    train/val/test splits (optionally stratified by patient).
    """

    def __init__(self, annotations: Union[(str, pd.DataFrame)]=None, shuffle: bool=False, seed: int=7, print_info: bool=True, label_dict={}, filter_dict={}, ignore=[], patient_strat=False, label_col=None, patient_voting='max', lasthalf=False, csv_path='dataset_csv/ccrcc_clean.csv'):
        """Load, filter and label-encode the annotation table.

        Args:
            annotations: path to a CSV file or a preloaded DataFrame.
            shuffle: shuffle rows after preparation (seeded by `seed`).
            seed: RNG seed for shuffling and split generation.
            print_info: log a dataset summary after construction.
            label_dict: maps raw label values to integer class ids.
            filter_dict: column -> allowed-values filter applied before labeling.
            ignore: raw label values to drop (currently unused by df_prep).
            patient_strat: operate at patient granularity for splits/length.
            label_col: outcome column name; defaults to 'label'.
            patient_voting: 'max' or 'maj' patient-level label aggregation.
            lasthalf: forwarded to every Generic_Split this dataset creates.
            csv_path: deprecated alias for `annotations`.

        Raises:
            ValueError: if `annotations` is neither a path nor a DataFrame.
        """
        if ((annotations is None) and (csv_path is not None)):
            annotations = csv_path
            log.warning("Deprecation warning: 'csv_path' is deprecated for Generic_WSI_Classification_Dataset and will be removed in a future version. Please use 'annotations' instead.")
        self.lasthalf = lasthalf
        self.label_dict = label_dict
        self.num_classes = len(set(self.label_dict.values()))
        self.seed = seed
        self.print_info = print_info
        self.patient_strat = patient_strat
        (self.train_ids, self.val_ids, self.test_ids) = (None, None, None)
        self.data_dir = None
        if (not label_col):
            label_col = 'label'
        self.label_col = label_col
        if isinstance(annotations, str):
            slide_data = pd.read_csv(annotations, dtype=str)
        elif isinstance(annotations, pd.DataFrame):
            slide_data = annotations
        else:
            raise ValueError(f'Unrecognized type for annotations: {type(annotations)}')
        slide_data = self.filter_df(slide_data, filter_dict)
        slide_data = self.df_prep(slide_data, self.label_dict, ignore, self.label_col)
        if shuffle:
            np.random.seed(seed)
            # NOTE(review): np.random.shuffle on a DataFrame relies on item
            # assignment semantics; confirm it shuffles rows as intended with
            # the pandas version in use.
            np.random.shuffle(slide_data)
        self.slide_data = slide_data
        self.patient_data_prep(patient_voting)
        self.cls_ids_prep()
        if print_info:
            self.summarize()

    def cls_ids_prep(self):
        """Cache per-class row indices at both patient and slide level."""
        self.patient_cls_ids = [[] for i in range(self.num_classes)]
        for i in range(self.num_classes):
            self.patient_cls_ids[i] = np.where((self.patient_data['label'] == i))[0]
        self.slide_cls_ids = [[] for i in range(self.num_classes)]
        for i in range(self.num_classes):
            self.slide_cls_ids[i] = np.where((self.slide_data['label'] == i))[0]

    def patient_data_prep(self, patient_voting='max'):
        """Aggregate slide labels into one label per patient.

        'max' takes the highest class id among a patient's slides; 'maj'
        takes the majority (mode) label.
        """
        patients = self.slide_data['patient'].unique()
        patient_labels = []
        for p in patients:
            locations = self.slide_data[(self.slide_data['patient'] == p)].index.tolist()
            assert (len(locations) > 0), f'No data found for patient {p}'
            label = self.slide_data['label'][locations].values
            if (patient_voting == 'max'):
                label = label.max()
            elif (patient_voting == 'maj'):
                label = stats.mode(label)[0]
            else:
                raise NotImplementedError
            patient_labels.append(label)
        self.patient_data = {'patient': patients, 'label': np.array(patient_labels)}

    @staticmethod
    def df_prep(data, label_dict, ignore, label_col):
        """Copy `label_col` into 'label', keep rows whose raw label is a key of
        `label_dict`, and map raw labels to integer class ids.

        Fix: declared as a staticmethod — the original bare function received
        `self` as `data` when invoked via `self.df_prep(...)` in __init__ and
        raised a TypeError for the extra positional argument.

        NOTE(review): `ignore` is accepted but never used here; confirm
        whether ignore-list filtering was intended.
        """
        if (label_col != 'label'):
            data['label'] = data[label_col].copy()
        mask = data['label'].isin(label_dict)
        data = data[mask]
        data.reset_index(drop=True, inplace=True)
        for i in data.index:
            key = data.loc[(i, 'label')]
            data.at[(i, 'label')] = label_dict[key]
        return data

    def filter_df(self, df, filter_dict={}):
        """Keep only rows whose value in every `filter_dict` column is allowed."""
        if (len(filter_dict) > 0):
            filter_mask = np.full(len(df), True, bool)
            for (key, val) in filter_dict.items():
                mask = df[key].isin(val)
                filter_mask = np.logical_and(filter_mask, mask)
            df = df[filter_mask]
        return df

    def __len__(self):
        """Number of patients (patient_strat) or slides otherwise."""
        if self.patient_strat:
            # Fix: patient_data is keyed 'patient' (see patient_data_prep);
            # the original indexed the non-existent key 'case_id'.
            return len(self.patient_data['patient'])
        else:
            return len(self.slide_data)

    def summarize(self):
        """Log the label mapping and per-class patient/slide counts."""
        log.info('Dataset summary')
        log.info('Outcome: {}'.format(self.label_col))
        log.info('Labels: {}'.format(self.label_dict))
        for i in range(self.num_classes):
            log.info(('Patient in class %d: %d' % (i, self.patient_cls_ids[i].shape[0])))
            log.info(('Slides in class %d: %d' % (i, self.slide_cls_ids[i].shape[0])))

    def create_splits(self, k=3, val_num=(25, 25), test_num=(40, 40), label_frac=1.0, custom_test_ids=None):
        """Create a k-fold split generator over patients or slides."""
        settings = {'n_splits': k, 'val_num': val_num, 'test_num': test_num, 'label_frac': label_frac, 'seed': self.seed, 'custom_test_ids': custom_test_ids}
        if self.patient_strat:
            # Fix: use the 'patient' key actually present in patient_data.
            settings.update({'cls_ids': self.patient_cls_ids, 'samples': len(self.patient_data['patient'])})
        else:
            settings.update({'cls_ids': self.slide_cls_ids, 'samples': len(self.slide_data)})
        self.split_gen = generate_split(**settings)

    def set_splits(self, start_from=None):
        """Materialise the next (or the `start_from`-th) split from the generator.

        Under patient stratification, patient indices are expanded to all of
        the patient's slide rows.
        """
        if start_from:
            ids = nth(self.split_gen, start_from)
        else:
            ids = next(self.split_gen)
        if self.patient_strat:
            slide_ids = [[] for i in range(len(ids))]
            for split in range(len(ids)):
                for idx in ids[split]:
                    # Fix: patient_data and slide_data use 'patient', not 'case_id'
                    # (see patient_data_prep / __init__).
                    patient = self.patient_data['patient'][idx]
                    slide_indices = self.slide_data[(self.slide_data['patient'] == patient)].index.tolist()
                    slide_ids[split].extend(slide_indices)
            (self.train_ids, self.val_ids, self.test_ids) = (slide_ids[0], slide_ids[1], slide_ids[2])
        else:
            (self.train_ids, self.val_ids, self.test_ids) = ids

    def get_split_by_slides(self, slides):
        """Build a Generic_Split containing exactly the given slide ids."""
        mask = self.slide_data['slide'].isin(slides)
        df_slice = self.slide_data[mask].reset_index(drop=True)
        return Generic_Split(df_slice, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)

    def get_split_from_df(self, all_splits, split_key='train'):
        """Build a Generic_Split from one column of a split table; None if empty."""
        split = all_splits[split_key]
        split = split.dropna().reset_index(drop=True)
        if (len(split) > 0):
            mask = self.slide_data['slide'].isin(split.tolist())
            df_slice = self.slide_data[mask].reset_index(drop=True)
            split = Generic_Split(df_slice, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)
        else:
            split = None
        return split

    def get_merged_split_from_df(self, all_splits, split_keys=['train']):
        """Union the slide lists under `split_keys` into a single Generic_Split."""
        merged_split = []
        for split_key in split_keys:
            split = all_splits[split_key]
            split = split.dropna().reset_index(drop=True).tolist()
            merged_split.extend(split)
        # Fix: decide on the merged list — the original tested only the last
        # key's split, returning None whenever that one happened to be empty.
        if (len(merged_split) > 0):
            mask = self.slide_data['slide'].isin(merged_split)
            df_slice = self.slide_data[mask].reset_index(drop=True)
            return Generic_Split(df_slice, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)
        return None

    def return_splits(self, from_id=True, csv_path=None):
        """Return (train, val, test) Generic_Splits, from cached ids or a CSV."""
        if from_id:
            if (len(self.train_ids) > 0):
                train_data = self.slide_data.loc[self.train_ids].reset_index(drop=True)
                train_split = Generic_Split(train_data, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)
            else:
                train_split = None
            if (len(self.val_ids) > 0):
                val_data = self.slide_data.loc[self.val_ids].reset_index(drop=True)
                val_split = Generic_Split(val_data, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)
            else:
                val_split = None
            if (len(self.test_ids) > 0):
                test_data = self.slide_data.loc[self.test_ids].reset_index(drop=True)
                test_split = Generic_Split(test_data, data_dir=self.data_dir, num_classes=self.num_classes, lasthalf=self.lasthalf)
            else:
                test_split = None
        else:
            assert csv_path
            all_splits = pd.read_csv(csv_path, dtype=str)
            train_split = self.get_split_from_df(all_splits, 'train')
            val_split = self.get_split_from_df(all_splits, 'val')
            test_split = self.get_split_from_df(all_splits, 'test')
        return (train_split, val_split, test_split)

    def get_list(self, ids):
        """Slide names for the given row ids."""
        return self.slide_data['slide'][ids]

    def getlabel(self, ids):
        """Integer class labels for the given row ids."""
        return self.slide_data['label'][ids]

    def __getitem__(self, idx):
        # Concrete item access is provided by subclasses; the base class only
        # manages metadata and splits.
        return None

    def test_split_gen(self, return_descriptor=False):
        """Log per-class counts of the current splits and assert they are disjoint.

        Returns a (class x split) count DataFrame when `return_descriptor`.
        """
        if return_descriptor:
            index = [list(self.label_dict.keys())[list(self.label_dict.values()).index(i)] for i in range(self.num_classes)]
            columns = ['train', 'val', 'test']
            df = pd.DataFrame(np.full((len(index), len(columns)), 0, dtype=np.int32), index=index, columns=columns)
        log.info('Number of training samples: {}'.format(len(self.train_ids)))
        labels = self.getlabel(self.train_ids)
        (unique, counts) = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            log.info('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[(index[u], 'train')] = counts[u]
        log.info('Number of val samples: {}'.format(len(self.val_ids)))
        labels = self.getlabel(self.val_ids)
        (unique, counts) = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            log.info('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[(index[u], 'val')] = counts[u]
        log.info('\nNumber of test samples: {}'.format(len(self.test_ids)))
        labels = self.getlabel(self.test_ids)
        (unique, counts) = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            log.info('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[(index[u], 'test')] = counts[u]
        # The three splits must never share a slide.
        assert (len(np.intersect1d(self.train_ids, self.test_ids)) == 0)
        assert (len(np.intersect1d(self.train_ids, self.val_ids)) == 0)
        assert (len(np.intersect1d(self.val_ids, self.test_ids)) == 0)
        if return_descriptor:
            return df

    def save_split(self, filename):
        """Write the current train/val/test slide lists to a CSV side by side."""
        train_split = self.get_list(self.train_ids)
        val_split = self.get_list(self.val_ids)
        test_split = self.get_list(self.test_ids)
        df_tr = pd.DataFrame({'train': train_split})
        df_v = pd.DataFrame({'val': val_split})
        df_t = pd.DataFrame({'test': test_split})
        df = pd.concat([df_tr, df_v, df_t], axis=1)
        df.to_csv(filename, index=False)
class RandomCrop():
    """Joint pad-then-random-crop transform for an image and its target mask."""

    def __init__(self, size):
        self.size = size

    def __call__(self, image, target):
        # Guarantee both inputs are at least `size` on each side; mask padding
        # uses fill=255, the conventional ignore index for segmentation losses.
        image = pad_if_smaller(image, self.size)
        target = pad_if_smaller(target, self.size, fill=255)
        # Sample one crop window and apply it identically to image and mask.
        top, left, height, width = transforms.RandomCrop.get_params(image, (self.size, self.size))
        image = functional.crop(image, top, left, height, width)
        target = functional.crop(target, top, left, height, width)
        return (image, target)
def log_test_results(logger, writer, epoch, acc_groups, get_ys_func, tag):
    """Aggregate group accuracies and mirror them to the logger, TensorBoard,
    and (when available) wandb."""
    results = utils.get_results(acc_groups, get_ys_func)
    utils.write_dict_to_tb(writer, results, tag, epoch)
    logger.write(f'Test results {tag}\n')
    logger.write(str(results))
    if has_wandb:
        # Prefix every metric so different tags do not collide in wandb.
        wandb.log({f'test_{key}': val for key, val in results.items()}, step=epoch)
class Up(nn.Module):
    """Decoder block: bilinear 2x upsample, optional attention-gated skip
    connection, channel concat, then DoubleConv."""

    def __init__(self, in_ch1, out_ch, in_ch2=0, attn=False):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv = DoubleConv(in_ch1 + in_ch2, out_ch)
        # Attention gate filters the skip tensor before concatenation.
        self.attn_block = Attention_block(in_ch1, in_ch2, out_ch) if attn else None

    def forward(self, x1, x2=None):
        x1 = self.up(x1)
        if x2 is not None:
            # Pad the upsampled tensor so its spatial size matches the skip tensor.
            dy = torch.tensor([x2.size()[2] - x1.size()[2]])
            dx = torch.tensor([x2.size()[3] - x1.size()[3]])
            x1 = F.pad(x1, [dx // 2, dx - (dx // 2), dy // 2, dy - (dy // 2)])
            if self.attn_block is not None:
                x2 = self.attn_block(x1, x2)
            # Skip features come first in the channel dimension.
            x1 = torch.cat([x2, x1], dim=1)
        return self.conv(x1)
class AcousticModel(nn.Module):
    """CNN + residual-CNN + bidirectional-LSTM acoustic model.

    Produces per-time-step class scores.  `n_class` may be an int (flat
    output head) or a pair, in which case the flat head of size
    n_class[0] * n_class[1] is reshaped to a (batch, time, n_class[0],
    n_class[1]) grid at the end of forward().
    """
    def __init__(self, n_cnn_layers, rnn_dim, n_class, n_feats, stride=1, dropout=0.1):
        super(AcousticModel, self).__init__()
        self.n_class = n_class
        # A tuple n_class means a factored output; flatten it for the linear head.
        if isinstance(n_class, int):
            target_dim = n_class
        else:
            target_dim = (n_class[0] * n_class[1])
        self.cnn_layers = nn.Sequential(nn.Conv2d(1, n_feats, 3, stride=stride, padding=(3 // 2)), nn.ReLU())
        self.rescnn_layers = nn.Sequential(*[ResidualCNN(n_feats, n_feats, kernel=3, stride=1, dropout=dropout, n_feats=128) for _ in range(n_cnn_layers)])
        self.maxpooling = nn.MaxPool2d(kernel_size=(2, 3))
        # NOTE(review): this linear layer assumes the pooled frequency axis has
        # size 64 (n_feats * 64 input features) — confirm against the expected
        # input spectrogram height (ResidualCNN is built with n_feats=128 above).
        self.fully_connected = nn.Linear((n_feats * 64), rnn_dim)
        self.bilstm = nn.Sequential(BidirectionalLSTM(rnn_dim=rnn_dim, hidden_size=rnn_dim, dropout=dropout, batch_first=True), BidirectionalLSTM(rnn_dim=(rnn_dim * 2), hidden_size=rnn_dim, dropout=dropout, batch_first=False), BidirectionalLSTM(rnn_dim=(rnn_dim * 2), hidden_size=rnn_dim, dropout=dropout, batch_first=False))
        self.classifier = nn.Sequential(nn.Linear((rnn_dim * 2), target_dim))
    def forward(self, x):
        """Map a (batch, 1, freq, time) input to per-time-step class scores."""
        x = self.cnn_layers(x)
        x = self.rescnn_layers(x)
        x = self.maxpooling(x)
        sizes = x.size()
        # Collapse (channels, freq) into one feature axis, then make time the
        # sequence dimension: (batch, time, channels * freq).
        x = x.view(sizes[0], (sizes[1] * sizes[2]), sizes[3])
        x = x.transpose(1, 2)
        x = self.fully_connected(x)
        x = self.bilstm(x)
        x = self.classifier(x)
        # Factored head: unflatten the last axis back to the class grid.
        if isinstance(self.n_class, tuple):
            x = x.view(sizes[0], sizes[3], self.n_class[0], self.n_class[1])
        return x
def _maybe_add_keypoints(obj: Dict[(str, Any)], ann_dict: Dict[(str, Any)]) -> None:
if ('keypoints' not in ann_dict):
return
keypts = ann_dict['keypoints']
for (idx, v) in enumerate(keypts):
if ((idx % 3) != 2):
keypts[idx] = (v + 0.5)
obj['keypoints'] = keypts |
class Template(list):
    """A preparsed wiki template body: an alternating list of TemplateText
    literals and TemplateArg parameter references."""

    @classmethod
    def parse(cls, body):
        """Split `body` into literal text and triple-brace parameter pieces.

        Fix: decorated as a classmethod — the original bare method received
        the template body string as `cls` when invoked as
        ``Template.parse(body)``.
        """
        tpl = cls()
        start = 0
        for (s, e) in findMatchingBraces(body, 3):
            # Text before the match, then the argument inside {{{...}}}.
            tpl.append(TemplateText(body[start:s]))
            tpl.append(TemplateArg(body[(s + 3):(e - 3)]))
            start = e
        # Trailing text after the last parameter reference.
        tpl.append(TemplateText(body[start:]))
        return tpl

    def subst(self, params, extractor, depth=0):
        """Substitute `params` into every piece and join the result.

        Returns '' (and bumps the extractor's error counter) once the
        parameter recursion depth limit is exceeded.
        """
        if (depth > extractor.maxParameterRecursionLevels):
            extractor.recursion_exceeded_3_errs += 1
            return ''
        return ''.join([tpl.subst(params, extractor, depth) for tpl in self])

    def __str__(self):
        return ''.join([text_type(x) for x in self])
class AFNBasicBlock(nn.Module):
    """3x3 convolution (optionally dilated) followed by batch norm and an
    in-place ReLU."""

    def __init__(self, in_planes, out_planes, dilation=(1, 1)):
        super(AFNBasicBlock, self).__init__()
        # Bias is omitted because BatchNorm supplies the affine shift.
        self.cnn = nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3),
                             padding=(1, 1), dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.re = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.cnn(x)
        out = self.bn(out)
        return self.re(out)
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named `module.name` from `sources` (paths given
    relative to the module's package directory).

    Raises:
        EnvironmentError: when CUDA is unavailable and FORCE_CUDA is not '1'.
    """
    define_macros = []
    # FORCE_CUDA=1 allows building on machines without a visible GPU
    # (e.g. docker build hosts).
    if (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
        define_macros += [('WITH_CUDA', None)]
    else:
        raise EnvironmentError('CUDA is required to compile MMDetection!')
    # The -D__CUDA_NO_HALF* flags disable CUDA's half-precision operator
    # overloads during nvcc compilation.
    return CUDAExtension(name='{}.{}'.format(module, name), sources=[os.path.join(*module.split('.'), p) for p in sources], define_macros=define_macros, extra_compile_args={'cxx': [], 'nvcc': ['-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']})
def require_faiss(test_case):
    """Decorator: skip `test_case` unless the faiss library is installed."""
    skip_unless_faiss = unittest.skipUnless(is_faiss_available(), 'test requires `faiss`')
    return skip_unless_faiss(test_case)
def parse_fetches(fetches, prog=None, extra_keys=None):
    """Split a fetch dict into fetchable variables and plain objects.

    Entries whose value has a `.name` attribute contribute to the parallel
    `keys`/`values` lists (key and variable name); everything else is
    collected into `cls`.  When both `prog` and `extra_keys` are given, each
    extra key is looked up in the program on a best-effort basis.

    Returns:
        (keys, values, cls) tuple of three lists.
    """
    keys, values, cls = [], [], []
    for fetch_key, fetch_val in fetches.items():
        if hasattr(fetch_val, 'name'):
            keys.append(fetch_key)
            values.append(fetch_val.name)
        else:
            cls.append(fetch_val)
    if (prog is not None) and (extra_keys is not None):
        for fetch_key in extra_keys:
            try:
                var = fluid.framework._get_var(fetch_key, prog)
                keys.append(fetch_key)
                values.append(var.name)
            except Exception:
                # Best effort: silently skip names missing from the program.
                pass
    return (keys, values, cls)
class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):
    """Output container for the phoneme CTC tokenizer's decoding.

    Batched calls carry lists; single-sequence calls carry a single value.
    """
    # Decoded transcription(s): one string, or a list of strings for a batch.
    text: Union[(List[str], str)]
    # Optional per-character offset dicts, mirroring the batching of `text`.
    char_offsets: Union[(List[ListOfDict], ListOfDict)] = None
_materialize('torch')
class TorchReduceSum(ReduceBase):
    """Sum-reduction operator specialisation for the torch backend."""
    # Any non-bool dtype is accepted as input.
    in_dtypes = [(i,) for i in DTYPE_GEN_NON_BOOL]
    # Small integer outputs are excluded: integer sums are widened to int64
    # (see type_transfer below).
    out_dtypes = [(i,) for i in DTYPE_GEN_NON_BOOL if (i not in [DType.int8, DType.uint8, DType.int16, DType.int32])]
    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """Propagate shapes via the base class, then widen integer results to
        int64 to mirror torch's sum dtype promotion."""
        output = super().type_transfer(input_shapes)
        if (input_shapes[0].dtype in DTYPE_GEN_INTS):
            output[0].dtype = DType.int64
        return output
def _cast(value, dtype):
if isinstance(value, torch.Tensor):
is_eligible = (value.is_floating_point() and value.is_xpu and (value.dtype is not torch.float64))
return (value.to(dtype) if is_eligible else value)
elif isinstance(value, (str, bytes)):
return value
elif (HAS_NUMPY and isinstance(value, np.ndarray)):
return value
elif isinstance(value, collections.abc.Mapping):
return {_cast(k, dtype): _cast(v, dtype) for (k, v) in value.items()}
elif isinstance(value, collections.abc.Iterable):
iterable = (_cast(v, dtype) for v in value)
if isinstance(value, (list, tuple)):
return type(value)(iterable)
else:
return iterable
else:
return value |
def create_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for stereo image compression evaluation.

    Returns an ArgumentParser whose defaults are shown in --help output.
    """
    parser = argparse.ArgumentParser(description='Stereo image compression network evaluation.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Input data selection.
    parser.add_argument('-d', '--dataset', type=str, required=True, help='sequences directory')
    # NOTE(review): help text duplicates --dataset's; presumably this is the
    # dataset's display name — confirm and reword.
    parser.add_argument('--data-name', type=str, required=True, help='sequences directory')
    parser.add_argument('--output', type=str, help='output directory')
    # I-frame model configuration.
    parser.add_argument('-im', '--IFrameModel', default='LDMIC', help='Model architecture (default: %(default)s)')
    parser.add_argument('-iq', '--IFrame_quality', type=int, default=4, help='Model quality')
    parser.add_argument('--i_model_path', type=str, help='Path to a checkpoint')
    # Runtime / evaluation switches.
    parser.add_argument('--crop', action='store_true', help='use crop')
    parser.add_argument('--cuda', action='store_true', help='use cuda')
    parser.add_argument('--half', action='store_true', help='use AMP')
    parser.add_argument('--entropy-estimation', action='store_true', help='use evaluated entropy estimation (no entropy coding)')
    parser.add_argument('-c', '--entropy-coder', choices=compressai.available_entropy_coders(), default=compressai.available_entropy_coders()[0], help='entropy coder (default: %(default)s)')
    parser.add_argument('--keep_binaries', action='store_true', help='keep bitstream files in output directory')
    parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
    parser.add_argument('--metric', type=str, default='mse', help='metric: mse, ms-ssim')
    parser.add_argument('--cpu_num', type=int, default=4)
    return parser
def analyze_slackprop_dist():
    """Aggregate grid-search results across models and print, as three CSV
    rows, the slackprop values with the mean and std of their epoch
    differences.

    Reads one 'hp_grid3_<model>' file per entry of the module-level
    `model_names` list; column 6 of each parsed line (after dropping the
    leading field) is treated as the epoch difference, column 4 as the
    slackprop value.
    """
    possib_slackprop_vals = np.linspace(0.05, 0.95, num=12, endpoint=True).tolist()
    # One bucket of epoch differences per (rounded) slackprop setting.
    slackprop_lists = {np.around(sp, decimals=3): [] for sp in possib_slackprop_vals}
    for model in tqdm.tqdm(model_names):
        file_name = 'hp_grid3_' + model
        grid_data = []
        # `with` guarantees the handle is closed even if a line fails to
        # parse (the original leaked the file on exceptions).
        with open(file_name, 'r') as fh:
            for line in fh:
                parsed = line.split(',')
                for col in (1, 5, 6, 7, 8, 9):
                    parsed[col] = float(parsed[col])
                grid_data.append(parsed[1:])
        slackprop_index = 4
        for grid_line in grid_data:
            epoch_diff = grid_line[5]
            slackprop_lists[np.around(grid_line[slackprop_index], decimals=3)].append(epoch_diff)
    # Build the three CSV rows with join instead of repeated concatenation.
    means = []
    stds = []
    for sp in possib_slackprop_vals:
        np_list = np.array(slackprop_lists[np.around(sp, decimals=3)])
        means.append(str(np.mean(np_list)))
        stds.append(str(np.std(np_list)))
    print('slackprop,' + ','.join(str(sp) for sp in possib_slackprop_vals))
    print('MeanEpochDifference,' + ','.join(means))
    print('STDofEpochDifference,' + ','.join(stds))
def register_model(model_name):
    """Return a decorator that registers a model factory under `model_name`
    in the module-level MODEL_REGISTRY, leaving the factory unchanged."""
    def _register(factory):
        MODEL_REGISTRY[model_name] = factory
        return factory
    return _register
def load_demo(url_params, request: gr.Request):
    """Initialise all three demo tabs and select one based on URL parameters."""
    logger.info(f'load_demo. ip: {request.client.host}. params: {url_params}')
    # Tab 0 = single model; 'arena' and 'compare' URL params pick the others.
    if 'arena' in url_params:
        selected = 1
    elif 'compare' in url_params:
        selected = 2
    else:
        selected = 0
    single_updates = load_demo_single(models, url_params)
    anony_updates = load_demo_side_by_side_anony(models, url_params)
    named_updates = load_demo_side_by_side_named(models, url_params)
    return (gr.Tabs.update(selected=selected),) + single_updates + anony_updates + named_updates
def my_magphase(spec: torch.Tensor, dim: int=(- 2)) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Decompose a spectrogram into its (magnitude, phase) pair along `dim`."""
    return (mag(spec, dim=dim), angle(spec, dim=dim))
_module()
class CrowdDet(TwoStageDetector):
    """Config-driven two-stage detector; a thin wrapper that forwards all
    components (backbone, neck, RPN head, RoI head, cfgs) to TwoStageDetector
    unchanged."""
    def __init__(self, backbone: ConfigType, rpn_head: ConfigType, roi_head: ConfigType, train_cfg: ConfigType, test_cfg: ConfigType, neck: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, data_preprocessor=data_preprocessor)
class TestTFQuantization(unittest.TestCase):
    """Integration tests for TFOptimization post-training static quantization
    on a tiny DistilBERT classifier."""

    @classmethod
    def setUpClass(cls):
        """Load the tiny model once and build a 10-sample tf.data pipeline.

        Fix: setUpClass/tearDownClass are invoked by unittest on the class
        itself and must be classmethods; the original bare instance methods
        raised a TypeError when the runner called them with no arguments.
        """
        cls.model = TFAutoModelForSequenceClassification.from_pretrained('hf-internal-testing/tiny-random-DistilBertForSequenceClassification')
        raw_datasets = load_dataset('glue', 'sst2')['validation']
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-DistilBertForSequenceClassification')
        non_label_column_names = [name for name in raw_datasets.column_names if (name != 'label')]
        def preprocess_function(examples):
            args = (examples['sentence'],)
            result = tokenizer(*args, padding=True, max_length=64, truncation=True)
            return result
        raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=False)
        data_collator = DefaultDataCollator(return_tensors='tf')
        # A tiny 10-example subset keeps calibration/evaluation fast.
        dataset = raw_datasets.select(range(10))
        cls.dummy_dataset = dataset.to_tf_dataset(columns=[col for col in dataset.column_names if (col not in set((non_label_column_names + ['label'])))], shuffle=False, batch_size=2, collate_fn=data_collator, drop_remainder=False, label_cols=(['labels'] if ('label' in dataset.column_names) else None))

    @classmethod
    def tearDownClass(cls):
        """Remove artifacts written by the quantization runs."""
        shutil.rmtree('./tmp', ignore_errors=True)
        shutil.rmtree('./quantized_model', ignore_errors=True)

    def test_tf_model_quant(self):
        """Quantize via dataset calibration, then via custom train/eval funcs,
        with and without extra conv/matmul recipes."""
        parser = HfArgumentParser(TFTrainingArguments)
        args = parser.parse_args_into_dataclasses(args=['--output_dir', './quantized_model', '--per_device_eval_batch_size', '2'])
        metric = load_metric('glue', 'sst2')
        def compute_metrics(preds, label_ids):
            preds = preds['logits']
            preds = np.argmax(preds, axis=1)
            result = metric.compute(predictions=preds, references=label_ids)
            if (len(result) > 1):
                result['combined_score'] = np.mean(list(result.values())).item()
            return result
        self.optimizer = TFOptimization(model=self.model, args=args[0], compute_metrics=compute_metrics)
        tune_metric = metrics.Metric(name='accuracy', greater_is_better=True, is_relative=False, criterion=0.5)
        quantization_config = QuantizationConfig(framework='tensorflow', approach='POSTTRAININGSTATIC', metrics=[tune_metric], objectives=[objectives.performance])
        quantized_model = self.optimizer.quantize(quant_config=quantization_config, train_dataset=self.dummy_dataset, eval_dataset=self.dummy_dataset)
        # Round-trip the saved model to confirm it was written to output_dir.
        loaded_model = tf.saved_model.load(args[0].output_dir)
        def eval_func(model):
            return 1
        def train_func(model):
            return model
        # Quantize again using user-provided train/eval callables.
        self.optimizer.quantize(quant_config=quantization_config, train_func=train_func, eval_func=eval_func)
        quantization_config = QuantizationConfig(framework='tensorflow', approach='POSTTRAININGSTATIC', metrics=[tune_metric], objectives=[objectives.performance], recipes={'first_conv_or_matmul_quantization': True, 'last_conv_or_matmul_quantization': True})
        self.optimizer.quantize(quant_config=quantization_config, train_func=train_func, eval_func=eval_func)
def main(_):
    """Train and evaluate an RNN (LSTM or EUNN) on pixel-by-pixel permuted MNIST.

    Builds the TF1 graph, trains for FLAGS.iter minibatches with periodic
    validation, then reports test loss/accuracy.
    """
    n_input = 1
    n_output = 10
    n_train = (FLAGS.iter * FLAGS.batch_size)
    n_val = 5000
    n_test = 10000
    n_steps = (28 * 28)
    n_classes = 10
    # One pixel per time step; labels are sparse class indices.
    x = tf.placeholder('float', [None, n_steps, n_input])
    y = tf.placeholder('int64', [None])
    if (FLAGS.model == 'lstm'):
        cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.hidden_size, state_is_tuple=True, forget_bias=1)
        (hidden_out, _) = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
    elif (FLAGS.model == 'eunn'):
        cell = EUNNCell(FLAGS.hidden_size, FLAGS.capacity, FLAGS.fft, FLAGS.complex)
        # Fix: the original tested `if complex:` — the always-truthy builtin
        # `complex` type — so the real-valued EUNN branch was unreachable.
        if FLAGS.complex:
            initial_state_re = tf.get_variable('init_state_re', shape=[FLAGS.hidden_size])
            initial_state_im = tf.get_variable('init_state_im', shape=[FLAGS.hidden_size])
            # NOTE(review): initial_state is built but not passed to dynamic_rnn
            # in this branch — confirm whether the learned state was intended.
            initial_state = tf.complex(initial_state_re, initial_state_im)
            (hidden_out_comp, _) = tf.nn.dynamic_rnn(cell, x, dtype=tf.complex64)
            hidden_out = tf.real(hidden_out_comp)
        else:
            initial_state = tf.get_variable('init_state', shape=[FLAGS.hidden_size])
            (hidden_out, _) = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32, initial_state=initial_state)
    # Output projection from the last hidden state to class logits.
    V_init_val = (np.sqrt(6.0) / np.sqrt((n_output + n_input)))
    V_weights = tf.get_variable('V_weights', shape=[FLAGS.hidden_size, n_classes], dtype=tf.float32, initializer=tf.random_uniform_initializer((- V_init_val), V_init_val))
    V_bias = tf.get_variable('V_bias', shape=[n_classes], dtype=tf.float32, initializer=tf.constant_initializer(0.01))
    hidden_out_list = tf.unstack(hidden_out, axis=1)
    temp_out = tf.matmul(hidden_out_list[(- 1)], V_weights)
    output_data = tf.nn.bias_add(temp_out, V_bias)
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_data, labels=y))
    correct_pred = tf.equal(tf.argmax(output_data, 1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001, decay=0.9).minimize(cost)
    init = tf.global_variables_initializer()
    config = tf.ConfigProto()
    config.log_device_placement = False
    config.allow_soft_placement = False
    with tf.Session(config=config) as sess:
        # A fixed random pixel permutation shared by train/val/test batches.
        ind = list(range(784))
        shuffle(ind)
        mnist = input_data.read_data_sets('/tmp/data/', one_hot=False)
        sess.run(init)
        step = 0
        while (step < FLAGS.iter):
            (batch_x, batch_y) = mnist_data(mnist, FLAGS.batch_size, ind, 'train')
            (loss, acc) = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            print((((((' Iter: ' + str(step)) + ', Minibatch Loss= ') + '{:.6f}'.format(loss)) + ', Training Accuracy= ') + '{:.5f}'.format(acc)))
            # Validate every 500 steps on 50 mini-batches of 100 examples.
            if ((step % 500) == 499):
                (val_x, val_y) = mnist_data(mnist, n_val, ind, 'validation')
                val_index = 0
                val_acc_list = []
                val_loss_list = []
                for i in range(50):
                    batch_x = val_x[val_index:(val_index + 100)]
                    batch_y = val_y[val_index:(val_index + 100)]
                    val_index += 100
                    val_acc_list.append(sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}))
                    val_loss_list.append(sess.run(cost, feed_dict={x: batch_x, y: batch_y}))
                val_acc = np.mean(val_acc_list)
                val_loss = np.mean(val_loss_list)
                print(((((('Iter ' + str(step)) + ', Validation Loss= ') + '{:.6f}'.format(val_loss)) + ', Validation Accuracy= ') + '{:.5f}'.format(val_acc)))
            step += 1
        print('Optimization Finished!')
        # Final evaluation: 100 mini-batches of 100 test examples.
        (test_x, test_y) = mnist_data(mnist, n_test, ind, 'test')
        test_index = 0
        test_acc_list = []
        test_loss_list = []
        for i in range(100):
            batch_x = test_x[test_index:(test_index + 100)]
            batch_y = test_y[test_index:(test_index + 100)]
            test_index += 100
            test_acc_list.append(sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}))
            test_loss_list.append(sess.run(cost, feed_dict={x: batch_x, y: batch_y}))
        test_acc = np.mean(test_acc_list)
        test_loss = np.mean(test_loss_list)
        print(((('Test result: Loss= ' + '{:.6f}'.format(test_loss)) + ', Accuracy= ') + '{:.5f}'.format(test_acc)))
def convert_stack(stack, include_func_start_lineno=False):
    """Convert a stack of frame records into traceback-style tuples.

    Each element becomes (filename, lineno, name, line) — or, with
    `include_func_start_lineno`, (filename, lineno, name, line,
    func_start_lineno).  `line` is the stripped source line, or None when the
    source is unavailable.
    """
    def _rows():
        for frame in stack:
            fname = frame.filename
            lno = frame.lineno
            # Refresh linecache in case the file changed on disk.
            linecache.checkcache(fname)
            src = linecache.getline(fname, lno, frame.globals)
            src = src.strip() if src else None
            if include_func_start_lineno:
                yield (fname, lno, frame.name, src, frame.func_start_lineno)
            else:
                yield (fname, lno, frame.name, src)
    return tuple(_rows())
(frozen=True)
class PLNaive(SystemConfig):
    """Naive baseline system config: a NeuS model whose renderer has shadow
    and specular hints disabled."""
    # NOTE(review): `field(default=...)` shares one NeuSModelConfig instance
    # across all PLNaive objects; if configs are ever mutated, switching to
    # default_factory would be safer — confirm the config objects are frozen.
    model: NeuSModelConfig = field(default=NeuSModelConfig(renderer=NeuSRendererConfig(shadow_hint=False, specular_hint=False)))
def scheduler(task_list, gpu_status, queue):
    """Dispatch one worker process per task, waiting until a GPU is free
    before each launch.

    Mutates `gpu_status` (marking claimed devices 'busy'), appends every
    started worker to the module-level `processes` list, and reads the
    module-level `command_args` / `arg_List` for worker configuration.
    Workers are expected to report results via `queue` and to mark their GPU
    'free' again themselves.
    """
    for i in range(len(task_list)):
        # Poll until some GPU is free, then claim it and launch task i.
        while True:
            for (gpu, status) in gpu_status.items():
                if (status == 'free'):
                    device = gpu
                    gpu_status[device] = 'busy'
                    if command_args.ablation:
                        p = torch.multiprocessing.Process(target=run_and_plot, args=(device, i, queue, command_args.lr, command_args.logevery, command_args.plotevery, gpu_status, arg_List[i]))
                    else:
                        p = torch.multiprocessing.Process(target=run_and_plot, args=(device, i, queue, command_args.lr, command_args.logevery, command_args.plotevery, gpu_status))
                    # Daemonized so workers die with the scheduler process.
                    p.daemon = True
                    p.start()
                    processes.append(p)
                    break
            else:
                # for/else: no free GPU found this sweep — wait and rescan.
                time.sleep(1)
                continue
            # A worker was launched (inner break taken) — move to the next task.
            break
        time.sleep(0.2)
    # NOTE(review): exit(0) terminates the scheduler immediately and kills the
    # daemon workers with it — confirm results are consumed from `queue`
    # (or processes joined) elsewhere before this runs.
    exit(0)
def diaresnet110_svhn(num_classes=10, **kwargs):
    """DIA-ResNet-110 model for SVHN (non-bottleneck residual blocks)."""
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name='diaresnet110_svhn',
        **kwargs)
def read_info_file(data_dir, info_file):
    """Read the first whitespace-separated field of every line of
    `data_dir/info_file` as an integer label and return them as a LongTensor."""
    path = os.path.join(data_dir, info_file)
    with open(path, 'r') as handle:
        labels = [int(line.split()[0]) for line in handle]
    return torch.LongTensor(labels)
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs tokenized text into fixed-length id tensors.

    Examples are buffered until roughly `input_characters` characters are
    collected, tokenized in one batch, joined with `concat_token_id`
    separators, and emitted as complete `seq_length` windows (the tail
    remainder of each buffer is dropped).
    """

    def __init__(self, tokenizer, dataset, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # The BOS id separates concatenated documents in the token stream.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Character budget per buffer refill: ~chars_per_token characters per
        # token keeps each refill near num_of_sequences full sequences.
        self.input_characters = ((seq_length * chars_per_token) * num_of_sequences)
        self.epoch = 0
        self.infinite = infinite

    def __iter__(self):
        source = iter(self.dataset)
        exhausted = False
        while not exhausted:
            texts, char_count = [], 0
            # Refill the character buffer from the underlying dataset.
            while char_count < self.input_characters:
                try:
                    piece = next(source)['content']
                except StopIteration:
                    if self.infinite:
                        # Restart the stream and keep filling the same buffer.
                        source = iter(self.dataset)
                        self.epoch += 1
                        logger.info(f'Dataset epoch: {self.epoch}')
                        continue
                    exhausted = True
                    break
                texts.append(piece)
                char_count += len(piece)
            # Tokenize the whole buffer at once and flatten with separators.
            token_stream = []
            for ids in self.tokenizer(texts, truncation=False)['input_ids']:
                token_stream.extend(ids)
                token_stream.append(self.concat_token_id)
            # Emit only complete windows; the final partial one is discarded.
            for start in range(0, len(token_stream), self.seq_length):
                window = token_stream[start:(start + self.seq_length)]
                if (len(window) == self.seq_length):
                    yield torch.tensor(window)
def create_model(args):
    """Build the network named by args.arch on the GPU and optionally restore
    a checkpoint from args.resume.

    Returns:
        (model, optim) — optim is the checkpointed optimizer state, or None
        when not resuming.
    """
    model = networks.create(args.arch)
    model.cuda()
    optim = None
    if args.resume:
        # Restoring also republishes best metrics and epoch via module globals.
        global best_mae, best_mse, start_epoch, optim_dict
        state = torch.load(args.resume, map_location=torch.device('cpu'))
        model.load_state_dict(state['state_dict'])
        best_mae = state['mae']
        best_mse = state['mse']
        start_epoch = state['epoch']
        optim = state['optim']
    return (model, optim)
def train_func(model, agent, args, dllogger, global_step, train_examples, num_train_optimization_steps, n_gpu, device, optimizer):
    """Run the SQuAD-style fine-tuning loop for a BERT QA model.

    Converts `train_examples` to features (with on-disk caching), builds the
    DataLoader, and trains for `args.num_train_epochs` epochs while notifying
    `agent` of epoch/batch lifecycle events and logging through `dllogger`.
    Writes results back onto `args` (train_features, time_to_train, final_loss).

    NOTE(review): this function reads several names not passed as parameters —
    `tokenizer`, `restricted_loads`, `scheduler` (fp16 path) and `amp` — which
    are presumably module-level globals; confirm they exist at file scope.
    """
    # The `model` parameter is immediately shadowed by the agent's wrapped model.
    model = agent.model.model
    # Cache file name encodes tokenizer name and sequence/stride/query limits so
    # stale caches are not reused across preprocessing configurations.
    if (args.cache_dir is None):
        cached_train_features_file = (args.train_file + '_{0}_{1}_{2}_{3}'.format(list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length)))
    else:
        cached_train_features_file = (((args.cache_dir.strip('/') + '/') + args.train_file.split('/')[(- 1)]) + '_{0}_{1}_{2}_{3}'.format(list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length)))
    train_features = None
    try:
        # Fast path: load previously cached features.
        with open(cached_train_features_file, 'rb') as reader:
            train_features = restricted_loads(reader)
    # NOTE(review): bare `except:` treats *any* failure (missing file, corrupt
    # cache, even KeyboardInterrupt) as a cache miss — consider narrowing.
    except:
        train_features = convert_examples_to_features(examples=train_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True)
        # Only the main process writes the cache, and only if caching is enabled.
        if ((not args.skip_cache) and is_main_process()):
            dllogger.log(step='PARAMETER', data={'Cached_train features_file': cached_train_features_file})
            with open(cached_train_features_file, 'wb') as writer:
                pickle.dump(train_features, writer)
    dllogger.log(step='PARAMETER', data={'train_start': True})
    dllogger.log(step='PARAMETER', data={'training_samples': len(train_examples)})
    dllogger.log(step='PARAMETER', data={'training_features': len(train_features)})
    dllogger.log(step='PARAMETER', data={'train_batch_size': args.train_batch_size})
    dllogger.log(step='PARAMETER', data={'steps': num_train_optimization_steps})
    # Materialize all features as tensors for a TensorDataset.
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
    all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
    # Distributed runs shard the data; single-process runs shuffle it.
    if (args.local_rank == (- 1)):
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=(args.train_batch_size * n_gpu))
    # Expose features to later stages (e.g. evaluation) via args.
    args.train_features = train_features
    model.train()
    gradClipper = GradientClipper(max_grad_norm=1.0)
    final_loss = None
    train_start = time.time()
    agent.pre_epoch_begin()
    for epoch in range(int(args.num_train_epochs)):
        # Progress bar only on the main process.
        train_iter = (tqdm(train_dataloader, desc='Iteration', disable=args.disable_progress_bar) if is_main_process() else train_dataloader)
        agent.on_epoch_begin(epoch)
        for (step, batch) in enumerate(train_iter):
            agent.on_batch_begin(step)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                break
            # With DataParallel (n_gpu > 1) the batch is scattered by the model
            # itself; only move it to the device in the single-GPU case.
            if (n_gpu == 1):
                batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, start_positions, end_positions) = batch
            (start_logits, end_logits) = model(input_ids, segment_ids, input_mask)
            # Squeeze a possible trailing singleton dim from the position labels.
            if (len(start_positions.size()) > 1):
                start_positions = start_positions.squeeze((- 1))
            if (len(end_positions.size()) > 1):
                end_positions = end_positions.squeeze((- 1))
            # Positions outside the sequence are clamped to `ignored_index`,
            # which CrossEntropyLoss is told to ignore.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # QA convention: average of the start- and end-position losses.
            loss = ((start_loss + end_loss) / 2)
            if (n_gpu > 1):
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                # amp scales the loss to avoid fp16 gradient underflow.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            gradClipper.step(amp.master_params(optimizer))
            # Optimizer step only every `gradient_accumulation_steps` batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    scheduler.step()
                optimizer.step()
                agent.on_post_grad()
                optimizer.zero_grad()
                global_step += 1
            final_loss = loss.item()
            if ((step % args.log_freq) == 0):
                dllogger.log(step=(epoch, global_step), data={'step_loss': final_loss, 'learning_rate': optimizer.param_groups[0]['lr']})
            agent.on_batch_end()
        agent.on_epoch_end()
    # Report wall-clock training time and last step loss via args.
    args.time_to_train = (time.time() - train_start)
    args.final_loss = final_loss
def clean_index(index, n_items, names, param_name, attribute_name):
    """Normalize a user-supplied index into a plain non-negative int.

    Accepts an int, an integer-valued float, or (when `names` is available)
    a string that is looked up in `names`. Validates that the resolved index
    is in `[0, n_items)`.

    Fix vs. original: the name lookup caught *all* exceptions with a bare
    `except:` (which would also swallow KeyboardInterrupt/SystemExit); it now
    catches only the ValueError that `list.index` raises for a missing name.

    Args:
        index: int, integer-valued float, or string name.
        n_items: exclusive upper bound for the index.
        names: list of names to resolve strings against, or None.
        param_name: parameter name used in error messages.
        attribute_name: attribute name used in error messages.

    Returns:
        The validated integer index.

    Raises:
        ValueError: on unknown name, non-integer float, unsupported type,
            or out-of-bounds index (logged via the module-level `_log`).
    """
    if isinstance(index, str):
        if names is None:
            msg = f'{param_name} cannot be used to index by name since {attribute_name} has been removed.'
            _log.error(msg)
            raise ValueError(msg)
        try:
            index = names.index(index)
        except ValueError:
            msg = f'{attribute_name} does not contain "{index}".'
            _log.error(msg)
            raise ValueError(msg)
    elif isinstance(index, int):
        pass
    elif isinstance(index, float):
        if index.is_integer():
            index = int(index)
        else:
            msg = f'{param_name} is {index}, which is not an integer.'
            _log.error(msg)
            raise ValueError(msg)
    else:
        msg = f'{param_name} must be an integer index or string name.'
        _log.error(msg)
        raise ValueError(msg)
    if index < 0 or n_items <= index:
        msg = f'{param_name} index {index} out of bounds.'
        _log.error(msg)
        raise ValueError(msg)
    return index
class AdapterController(nn.Module):
    """Holds one adapter module per task and routes inputs through the adapter
    of the currently active task, optionally wrapped in pre/post layer norms.

    On each forward pass the active task's adapter parameters are unfrozen and
    every other task's adapter parameters are frozen.
    """

    def __init__(self, config):
        super().__init__()
        self.low_rank_adapters = config.low_rank_adapters
        self.intrinsic_projections_path = os.path.join(config.output_dir, 'intrinsic_projections')
        self.config = config
        # Placeholder ModuleDict; replaced below by construct_adapters().
        self.adapters = nn.ModuleDict(dict())
        # config.tasks may arrive as a nested list; flatten one level.
        if (type(config.tasks[0]) is list):
            self.tasks = config.tasks[0]
        else:
            self.tasks = config.tasks
        self.device = config.device
        self.shared_phm_rule = config.shared_phm_rule
        self.hypercomplex_adapters = config.hypercomplex_adapters
        self.adapters = self.construct_adapters(self.tasks)
        self.add_layer_norm_before_adapter = config.add_layer_norm_before_adapter
        self.add_layer_norm_after_adapter = config.add_layer_norm_after_adapter
        if self.add_layer_norm_before_adapter:
            self.pre_layer_norm = nn.LayerNorm(config.input_dim)
        if self.add_layer_norm_after_adapter:
            self.post_layer_norm = nn.LayerNorm(config.input_dim)

    def get_task(self, task):
        # Identity hook; subclasses/configs could remap task names here.
        return task

    def construct_adapters(self, tasks):
        """Build one adapter per task; flavor chosen by config flags
        (hypercomplex > low-rank > plain, in that precedence order)."""
        for task in tasks:
            if self.hypercomplex_adapters:
                self.adapters[task] = HyperComplexAdapter(self.config)
            elif self.low_rank_adapters:
                self.adapters[task] = LowRankAdapter(self.config)
            else:
                self.adapters[task] = Adapter(self.config)
        return self.adapters

    def disable_adapters(self, tasks):
        """Freeze (requires_grad=False) all parameters of the given task(s)."""
        tasks = self.convert_to_list(tasks)
        for task in tasks:
            adapter = self.get_adapter(task)
            for param in adapter.parameters():
                param.requires_grad = False

    def convert_to_list(self, tasks):
        # Accept either a single task name or a list of them.
        if isinstance(tasks, list):
            return tasks
        return [tasks]

    def enable_adapters(self, tasks):
        """Unfreeze the given task(s)' adapter parameters.

        When hypercomplex adapters are used with learn_phm disabled, the
        'phm_rule' parameters are deliberately left untouched (kept frozen).
        """
        tasks = self.convert_to_list(tasks)
        for task in tasks:
            adapter = self.get_adapter(task)
            for (name, param) in adapter.named_parameters():
                if (self.config.hypercomplex_adapters and (not self.config.learn_phm)):
                    if (not ('phm_rule' in name)):
                        param.requires_grad = True
                else:
                    param.requires_grad = True

    def get_adapter(self, task):
        # NOTE(review): the `task` argument is ignored — the adapter is always
        # selected via config.adapters_cur_training_task. Confirm this is
        # intentional; it makes enable/disable in forward() operate on the
        # same (current) adapter regardless of the task passed in.
        task = self.config.adapters_cur_training_task
        return self.adapters[task]

    def forward(self, inputs, task):
        """Apply the active task's adapter residually: out = adapter(norm?(x)) + x."""
        task = self.get_task(task)
        self.enable_adapters(task)
        # Freeze every other task's adapter so only the active one trains.
        other_tasks = [x for x in self.tasks if (x != task)]
        self.disable_adapters(other_tasks)
        adapter = self.get_adapter(task)
        z = (self.pre_layer_norm(inputs) if self.add_layer_norm_before_adapter else inputs)
        outputs = adapter(z)
        if self.add_layer_norm_after_adapter:
            outputs = self.post_layer_norm(outputs)
        # Residual connection around the adapter.
        outputs = (outputs + inputs)
        return outputs
class MNIST_IND(datasets.MNIST):
    """MNIST variant whose samples also carry their dataset index.

    Useful when downstream code must map a sample back to its position
    in the dataset (e.g. per-example bookkeeping).
    """

    def __getitem__(self, index):
        sample, label = super(MNIST_IND, self).__getitem__(index)
        return sample, label, index
def test(st, end):
    """Evaluate the global `model` on the test slice [st:end].

    Uses the module-level `model`, `test_data` and `test_labels`; returns
    (nll_loss, accuracy) as Python floats.
    """
    model.eval()
    torch.cuda.empty_cache()
    with torch.no_grad():
        logits = model(test_data, st, end)
        batch_loss = F.nll_loss(logits, test_labels[st:end])
        batch_acc = accuracy(logits, test_labels[st:end], batch=True)
    return (batch_loss.item(), batch_acc.item())
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
    """Binarize the [offset, end) span of `filename` with `vocab` into an
    indexed dataset (.bin/.idx pair) and return the Binarizer statistics.
    """
    builder = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, lang, 'bin'),
        impl=args.dataset_impl,
        vocab_size=len(vocab),
    )
    # Pass the builder's add_item directly as the consumer callback.
    stats = Binarizer.binarize(filename, vocab, builder.add_item, append_eos=append_eos, offset=offset, end=end)
    builder.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
    return stats
def conv3x3(in_planes: int, out_planes: int, stride: int=1, groups: int=1, dilation: int=1) -> nn.Conv2d:
    """3x3 convolution, bias-free, with padding equal to the dilation so the
    spatial size is preserved at stride 1. Uses the module-level PADDING_MODE.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        padding_mode=PADDING_MODE,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
class SequenceDataset(Dataset):
    """Map-style dataset over a dict of utterance-id -> feature matrix.

    Items are returned transposed (feature matrix .transpose()); the index
    order follows the dict's key order.
    """

    def __init__(self, data):
        self.data = data
        self.utt_ids = list(self.data.keys())

    def __getitem__(self, ind):
        return self.data[self.utt_ids[ind]].transpose()

    def __len__(self):
        return len(self.utt_ids)
# NOTE(review): the original line read just `_model`, an extraction-garbled
# remnant of the timm model-registry decorator; restored as @register_model.
@register_model
def tresnet_xl(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a TResNet-XL model (layers [4, 5, 24, 3], width 1.3).

    Args:
        pretrained: if True, load weights via `load_pretrained` using the
            registered default config.
        num_classes: classifier output size.
        in_chans: number of input channels.
    """
    default_cfg = default_cfgs['tresnet_xl']
    model = TResNet(layers=[4, 5, 24, 3], num_classes=num_classes, in_chans=in_chans, width_factor=1.3, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def antialias_construct_topology_hash(tri):
    """Build the topology hash for the antialiasing op from a triangle tensor.

    Delegates to the compiled plugin; `tri` must be a torch.Tensor.
    """
    assert isinstance(tri, torch.Tensor)
    plugin = _get_plugin()
    return plugin.antialias_construct_topology_hash(tri)
# NOTE(review): this test was extraction-garbled — the decorator prefix and the
# two weight-variable identifiers (likely non-ASCII names) were stripped.
# Reconstructed with ASCII names `alpha`/`beta`; confirm against the upstream
# source.
@pytest.mark.parametrize('penalty', list(all_penalties.values()))
def test_grad(penalty) -> None:
    """gradcheck every registered penalty w.r.t. its three double-precision inputs."""
    y = torch.randn(512, dtype=torch.double, requires_grad=True)
    # Positive weights, bounded away from zero for numerical stability.
    alpha = torch.randn(512, dtype=torch.double).abs_().clamp_min_(0.001)
    beta = torch.randn(512, dtype=torch.double).abs_().clamp_min_(1e-06)
    alpha.requires_grad_(True)
    beta.requires_grad_(True)
    assert gradcheck(penalty, inputs=(y, alpha, beta))
def helper_halut(N: int=128, D: int=64, M: int=16, C: int=16, a: float=1.0, b: float=0.0, K: int=16, quantize_lut: bool=False, run_optimized: bool=True) -> None:
    """End-to-end smoke test for HalutMatmul.

    Trains Halut offline on a random (N, D) x (D, M) matmul, runs online
    inference on fresh input, compares against numpy, checks the error
    distribution, and prints timing and error statistics.

    Args:
        N, D, M: matmul dimensions (A is N x D, B is D x M).
        C, K: Halut codebook count and prototypes per codebook.
        a, b: scale and offset applied to the uniform random inputs.
        quantize_lut, run_optimized: forwarded to offline learning.
    """
    print('=====TEST=====')
    print(f'params: ({N}, {D}, {M}), C: {C}, a: {a}, b: {b}, quantize_lut: {quantize_lut}, run_optimized: {run_optimized}, K: {K}')
    # Random operands; shifted by b then scaled by a.
    A = ((np.random.random((N, D)) + b) * a)
    B = ((np.random.random((D, M)) + b) * a)
    # Offline phase: learn codebooks/LUTs for multiplying by B.
    store_array = hm.learn_halut_offline(A, B, C=C, K=K, quantize_lut=quantize_lut, run_optimized=run_optimized)
    # Round-trip the learned state through a fresh instance (serialization check).
    new_halut = hm.HalutMatmul()
    new_halut.from_numpy(store_array)
    print(new_halut.stats())
    # Online phase on unseen input (quarter-size batch).
    A_2 = ((np.random.random(((N // 4), D)) + b) * a)
    res_halut = new_halut.matmul_online(A_2)
    res_numpy = np.matmul(A_2, B)
    error_hist_numpy(res_halut, res_numpy)
    check_if_error_normal_dist_around_zero(res_halut, res_numpy)
    # Average of 5 timed runs, reported in milliseconds.
    time_halut = ((timeit.Timer(functools.partial(new_halut.matmul_online, *[A_2])).timeit(5) * 1000) / 5)
    time_numpy = ((timeit.Timer(functools.partial(np.matmul, *[A_2, B])).timeit(5) * 1000) / 5)
    print(('time calculation numpy/halutmatmul fp: %.2f / %.2f ms' % (time_numpy, time_halut)))
    mse = np.square((res_halut - res_numpy)).mean()
    mae = np.abs((res_halut - res_numpy)).mean()
    print(('mse: %.4f / mae: %.4f' % (mse, mae)))
def build_emission_nodes(node_tree: bpy.types.NodeTree, color: Tuple[(float, float, float)]=(0.0, 0.0, 0.0), strength: float=1.0) -> None:
    """Populate `node_tree` with an Emission shader wired to a Material Output.

    Args:
        node_tree: target shader node tree (modified in place).
        color: RGB emission color; alpha 1.0 is appended for the node input.
        strength: emission strength.
    """
    # Node creation order preserved (output first), matching the original.
    out_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
    emit_node = node_tree.nodes.new(type='ShaderNodeEmission')
    emit_node.inputs['Color'].default_value = color + (1.0,)
    emit_node.inputs['Strength'].default_value = strength
    node_tree.links.new(emit_node.outputs['Emission'], out_node.inputs['Surface'])
    arrange_nodes(node_tree)
def rotation_matrix(angle, axis):
    """Return the 3x3 rotation matrix for a rotation about a coordinate axis.

    Uses the Rodrigues form R = cos(a)*I + (1-cos(a))*dd^T + sin(a)*[d]_x
    with d the unit vector of the chosen axis.

    Fixes vs. original: the if/elif axis chain is replaced by a table lookup,
    and the redundant `np.asarray` on an array just built is removed.

    Args:
        angle: rotation angle in radians.
        axis: one of 'x', 'y', 'z'.

    Returns:
        (3, 3) float ndarray.

    Raises:
        ValueError: if `axis` is not 'x', 'y' or 'z'.
    """
    axis_index = {'x': 0, 'y': 1, 'z': 2}
    if axis not in axis_index:
        raise ValueError('Invalid axis.')
    direction = np.zeros(3, dtype=float)
    direction[axis_index[axis]] = 1.0
    sin_angle = math.sin(angle)
    cos_angle = math.cos(angle)
    rot = np.diag([cos_angle, cos_angle, cos_angle])
    rot += np.outer(direction, direction) * (1.0 - cos_angle)
    direction *= sin_angle
    # Cross-product (skew-symmetric) matrix of the scaled axis vector.
    rot += np.array([[0.0, -direction[2], direction[1]],
                     [direction[2], 0.0, -direction[0]],
                     [-direction[1], direction[0], 0.0]])
    return rot
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """3x3 weight-standardized convolution (StdConv2d) with fixed padding 1."""
    return StdConv2d(
        cin,
        cout,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=bias,
        groups=groups,
    )
def test_2(**init_kwargs):
    """Smoke-test the zpy preview flow for the 'can_v7' dataset.

    Initializes the client, sets a config override, requests previews, and
    prints the config, the full preview payload, and just the preview URLs.
    """
    zpy.init(**init_kwargs)
    cfg = zpy.DatasetConfig('can_v7')
    cfg.set('run\\.padding_style', 'messy')
    print(cfg.config)
    previews = zpy.preview(cfg)
    urls = [entry['url'] for entry in previews]
    print(json.dumps(previews, indent=4, sort_keys=True))
    print(json.dumps(urls, indent=4, sort_keys=True))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.