code stringlengths 101 5.91M |
|---|
# NOTE(review): "_metaclass(abc.ABCMeta)" looks like a mangled decorator,
# presumably "@six.add_metaclass(abc.ABCMeta)" -- confirm against history.
_metaclass(abc.ABCMeta)
class InferenceTask(tf.train.SessionRunHook, Configurable):
    """Abstract base for inference-time session hooks.

    Configured for INFER mode; prediction tensors are resolved lazily in
    ``begin()`` from the graph's 'predictions' collection.
    """

    def __init__(self, params):
        Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.INFER)
        # Dict of prediction tensors; populated in begin().
        self._predictions = None

    def begin(self):
        """SessionRunHook callback: fetch prediction tensors from the graph."""
        self._predictions = graph_utils.get_dict_from_collection('predictions')

    def default_params():
        # NOTE(review): no `self` and no @staticmethod/@abstractmethod -- a
        # decorator appears to have been stripped here; confirm.
        raise NotImplementedError()
def resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-05, batch_norm_scale=True):
    """Build the default slim arg_scope for ResNet-style models.

    Conv layers get L2 regularization, variance-scaling init, ReLU and batch
    norm; max-pooling uses SAME padding.  Returns the composed arg_scope.
    """
    bn_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        # Ensure BN moving averages are updated with the train op.
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    conv_scope = slim.arg_scope(
        [slim.conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        normalizer_params=bn_params,
    )
    with conv_scope:
        with slim.arg_scope([slim.batch_norm], **bn_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as composed_scope:
                return composed_scope
def pll_maximum(yHat_2d, y_2d):
    """Evaluate ``pll`` at the precision value that maximizes it.

    The maximizing precision for a Gaussian likelihood is the inverse squared
    residual between prediction and target.
    """
    residual = yHat_2d - y_2d
    optimal_precision = residual ** (-2.0)
    return pll(np.array([yHat_2d]), np.array([y_2d]), 1, optimal_precision)
# NOTE(review): these three ".parametrize" lines appear to be
# "@pytest.mark.parametrize(...)" decorators whose "@pytest.mark" prefix was
# lost in processing -- as written this is not valid Python; confirm & restore.
.parametrize('custom_prior', [True, False])
.parametrize('vectorized', [True, False])
.parametrize('pass_dict', [True, False])
def test_sampler_prior(custom_prior, vectorized, pass_dict):
    """End-to-end smoke test of Sampler with different prior specifications."""
    if custom_prior:
        if pass_dict:
            def prior(x):
                # Unit-cube point -> dict of named parameters.
                return dict(a=x[(..., 0)], b=x[(..., 1)])
        else:
            def prior(x):
                # Identity prior transform.
                return x
    else:
        prior = Prior()
        prior.add_parameter('a')
        prior.add_parameter('b')

    def likelihood(x):
        # Accept both dict and array parameter representations.
        if isinstance(x, dict):
            x = np.squeeze(np.column_stack([x['a'], x['b']]))
        # Shallow peak centered at 0.5 in each dimension.
        return ((- np.linalg.norm((x - 0.5), axis=(- 1))) * 0.001)
    sampler = Sampler(prior, likelihood, n_dim=2, n_networks=1, vectorized=vectorized, pass_dict=pass_dict, n_live=500)
    sampler.run(f_live=0.45, n_eff=0, verbose=False)
    (points, log_w, log_l) = sampler.posterior(return_as_dict=pass_dict)
    # Asking for a non-dict posterior from a dict-returning custom prior must fail.
    if (custom_prior and pass_dict):
        with pytest.raises(ValueError):
            (points, log_w, log_l) = sampler.posterior(return_as_dict=False)
def _split_a3ms(output_dir):
for fname in os.listdir(output_dir):
if (not (os.path.splitext(fname)[(- 1)] == '.a3m')):
continue
fpath = os.path.join(output_dir, fname)
with open(fpath, 'r') as fp:
a3ms = fp.read()
a3ms = a3ms.split('\x00')[:(- 1)]
for a3m in a3ms:
name = a3m.split('\n', 1)[0][1:]
prot_dir = os.path.join(output_dir, name)
Path(prot_dir).mkdir(parents=True, exist_ok=True)
with open(os.path.join(prot_dir, fname), 'w') as fp:
fp.write(a3m)
os.remove(fpath)
os.remove((fpath + '.dbtype'))
os.remove((fpath + '.index')) |
def create_demo(model):
    """Build the Gradio UI for single-image 3D mesh reconstruction."""
    gr.Markdown('### Image to 3D mesh')
    gr.Markdown('Convert a single 2D image to a 3D mesh')
    with gr.Row():
        input_image = gr.Image(label='Input Image', type='pil')
        mesh_output = gr.Model3D(label='3d mesh reconstruction', clear_color=[1.0, 1.0, 1.0, 1.0])
    keep_edges = gr.Checkbox(label='Keep occlusion edges', value=False)
    submit_btn = gr.Button('Submit')
    # Bind the model into the handler; inputs map positionally to get_mesh args.
    submit_btn.click(partial(get_mesh, model), inputs=[input_image, keep_edges], outputs=[mesh_output])
class XLNetForQuestionAnsweringSimple(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable."""
    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Delegates to requires_backends, which errors out when 'torch'
        # is not installed.
        requires_backends(self, ['torch'])
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token='[CLS]', cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=(- 100), sequence_a_segment_id=0, mask_padding_with_zero=True):
    """Convert labeled word sequences into fixed-length model features.

    Each word is wordpiece-tokenized; only the first sub-token of a word keeps
    the word's three labels (each entry of example.labels is assumed to be a
    triple of main/ctc/seg labels -- TODO confirm), while continuation
    sub-tokens get ``pad_token_label_id`` so the loss ignores them.  A
    per-word frequency id is looked up from 'word_to_id.json'.  Sequences are
    truncated to leave room for special tokens, wrapped with [CLS]/[SEP], and
    padded (left or right) to ``max_seq_length``.

    Returns:
        list of InputFeatures, one per example.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    # FIX: use a context manager so the vocabulary file handle is closed
    # (previously leaked via json.load(open(...))).
    with open('word_to_id.json', 'r') as vocab_file:
        word_to_id = json.load(vocab_file)
    word_id_pad = word_to_id['***PADDING***']
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens = []
        label_ids = []
        label_ids_ctc = []
        label_ids_seg = []
        word_freq_ids = []  # one id per word (NOT per sub-token)
        for (word, label) in zip(example.words, example.labels):
            word_tokens = tokenizer.tokenize(word)
            if (word not in word_to_id):
                word_id = word_to_id['UNK']
            else:
                word_id = word_to_id[word]
            if (len(word_tokens) > 0):
                tokens.extend(word_tokens)
                # First sub-token carries the label; the rest are masked out.
                label_ids.extend(([label_map[label[0]]] + ([pad_token_label_id] * (len(word_tokens) - 1))))
                label_ids_ctc.extend(([label_map[label[1]]] + ([pad_token_label_id] * (len(word_tokens) - 1))))
                label_ids_seg.extend(([label_map[label[2]]] + ([pad_token_label_id] * (len(word_tokens) - 1))))
                word_freq_ids.append(word_id)
        # Truncate to leave room for [CLS]/[SEP].
        special_tokens_count = tokenizer.num_special_tokens_to_add()
        if (len(tokens) > (max_seq_length - special_tokens_count)):
            tokens = tokens[:(max_seq_length - special_tokens_count)]
            label_ids = label_ids[:(max_seq_length - special_tokens_count)]
            label_ids_ctc = label_ids_ctc[:(max_seq_length - special_tokens_count)]
            label_ids_seg = label_ids_seg[:(max_seq_length - special_tokens_count)]
            word_freq_ids = word_freq_ids[:(max_seq_length - special_tokens_count)]
        # Append [SEP] (twice for RoBERTa-style models via sep_token_extra).
        tokens += [sep_token]
        word_freq_ids += [word_id_pad]
        label_ids += [pad_token_label_id]
        label_ids_ctc += [pad_token_label_id]
        label_ids_seg += [pad_token_label_id]
        if sep_token_extra:
            tokens += [sep_token]
            word_freq_ids += [word_id_pad]
            label_ids += [pad_token_label_id]
            label_ids_ctc += [pad_token_label_id]
            label_ids_seg += [pad_token_label_id]
        segment_ids = ([sequence_a_segment_id] * len(tokens))
        # [CLS] goes last for XLNet-style models, first otherwise.
        if cls_token_at_end:
            tokens += [cls_token]
            word_freq_ids += [word_id_pad]
            label_ids += [pad_token_label_id]
            label_ids_ctc += [pad_token_label_id]
            label_ids_seg += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = ([cls_token] + tokens)
            word_freq_ids = ([word_id_pad] + word_freq_ids)
            label_ids = ([pad_token_label_id] + label_ids)
            label_ids_ctc = ([pad_token_label_id] + label_ids_ctc)
            label_ids_seg = ([pad_token_label_id] + label_ids_seg)
            segment_ids = ([cls_token_segment_id] + segment_ids)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Real tokens get 1 (or 0 when mask_padding_with_zero is False).
        input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_seq_length - len(input_ids))
        if pad_on_left:
            input_ids = (([pad_token] * padding_length) + input_ids)
            input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
            word_freq_ids = (([word_id_pad] * padding_length) + word_freq_ids)
            segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
            label_ids = (([pad_token_label_id] * padding_length) + label_ids)
            label_ids_ctc = (([pad_token_label_id] * padding_length) + label_ids_ctc)
            label_ids_seg = (([pad_token_label_id] * padding_length) + label_ids_seg)
        else:
            input_ids += ([pad_token] * padding_length)
            input_mask += ([(0 if mask_padding_with_zero else 1)] * padding_length)
            word_freq_ids += ([word_id_pad] * padding_length)
            segment_ids += ([pad_token_segment_id] * padding_length)
            label_ids += ([pad_token_label_id] * padding_length)
            label_ids_ctc += ([pad_token_label_id] * padding_length)
            label_ids_seg += ([pad_token_label_id] * padding_length)
        # word_freq_ids is per-word, so it can still be shorter than the
        # (sub-token based) padding target; top it up here.
        while (len(word_freq_ids) != max_seq_length):
            word_freq_ids.append(word_id_pad)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(word_freq_ids) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(label_ids) == max_seq_length)
        assert (len(label_ids_ctc) == max_seq_length)
        assert (len(label_ids_seg) == max_seq_length)
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, freq_ids=word_freq_ids, segment_ids=segment_ids, label_ids=label_ids, label_ids_ctc=label_ids_ctc, label_ids_seg=label_ids_seg))
    return features
class Casiab_sub(BaseVideoDataset):
    """CASIA-B-derived ('CASIA_pro') video re-id dataset, 'nm' walks at the 090 view.

    Expected layout (inferred from the globbing below -- confirm):
        <root>/CASIA_pro/{train_candi,gallery_candi,query_candi}/<pid>/nm*/090/sub*/<frames>
    """
    dataset_dir = 'CASIA_pro'

    def __init__(self, root='data', min_seq_len=8, verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'train_candi')
        self.gallery_dir = osp.join(self.dataset_dir, 'gallery_candi')
        self.query_dir = osp.join(self.dataset_dir, 'query_candi')
        self._check_before_run()
        # Only the training split requests relabeling (see note in _process_dir).
        train = self._process_dir(self.train_dir, relabel=True, min_seq_len=min_seq_len)
        query = self._process_dir(self.query_dir, relabel=False, min_seq_len=min_seq_len)
        gallery = self._process_dir(self.gallery_dir, relabel=False, min_seq_len=min_seq_len)
        if verbose:
            print('=> Casiab loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, _, self.num_train_cams) = self.get_videodata_info(self.train)
        (self.num_query_pids, _, self.num_query_cams) = self.get_videodata_info(self.query)
        (self.num_gallery_pids, _, self.num_gallery_cams) = self.get_videodata_info(self.gallery)

    def _check_before_run(self):
        """Raise RuntimeError if the dataset root directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))

    def _get_names(self, fpath):
        """Read one name per line from `fpath` (trailing whitespace stripped)."""
        names = []
        with open(fpath, 'r') as f:
            for line in f:
                new_line = line.rstrip()
                names.append(new_line)
        return names

    def _process_dir(self, dir_path, relabel=False, min_seq_len=8):
        """Collect (image_paths, pid, camid) tracklets with >= min_seq_len frames.

        NOTE(review): the `relabel` flag is accepted but never used in this
        body -- confirm whether pid relabeling was intended here.
        """
        pid_paths = glob.glob(osp.join(dir_path, '*'))
        tracklets = []
        for pid_path in pid_paths:
            pid = int(osp.basename(pid_path))
            camid_paths = glob.glob(osp.join(pid_path, 'nm*'))
            for camid_path in camid_paths:
                # camid is the 5th character of the 'nm*' directory name
                # (e.g. 'nm-01' -> 1) -- presumably the sequence index; confirm.
                camid = int(osp.basename(camid_path)[4])
                target_view_dir = osp.join(camid_path, '090')
                sub_tracklet_dirs = glob.glob(osp.join(target_view_dir, 'sub*'))
                for sub_sub_tracklet_dir in sub_tracklet_dirs:
                    image_paths = glob.glob(osp.join(sub_sub_tracklet_dir, '*'))
                    if (len(image_paths) >= min_seq_len):
                        img_paths = tuple(image_paths)
                        tracklets.append((img_paths, pid, camid))
        return tracklets
def tensorflow_lite_inference(path, model_name, inputs, inputs_astype):
    """Run one timed TFLite inference.

    Loads the model at ``path/model_name``, feeds ``inputs`` cast to
    ``inputs_astype`` into the first input tensor, and returns
    ``(outputs, inference_seconds)``.
    """
    model_path = os.path.join(path, model_name)
    interpreter = tf.compat.v1.lite.Interpreter(model_path)
    interpreter.allocate_tensors()
    in_details = interpreter.get_input_details()
    out_details = interpreter.get_output_details()
    interpreter.set_tensor(in_details[0]['index'], inputs.astype(inputs_astype))
    print('\nStart tensorflowLite inference')
    start = time.time()
    interpreter.invoke()
    elapsed = round(time.time() - start, 3)
    print('\nTensorflowLite inference time:{0} seconds.'.format(elapsed))
    outputs = interpreter.get_tensor(out_details[0]['index'])
    return (outputs, elapsed)
def test_A():
    """Check Graph.add_edges against a dense adjacency reference.

    First pass: default merge (binary adjacency).  Second pass: 'sum' merge,
    where a self-loop contributes once and an off-diagonal edge twice
    (symmetrically).
    """
    num_v = 20
    num_e = 50
    import random
    # Binary adjacency: repeated edges overwrite with 1.
    for _ in range(3):
        graph = Graph(num_v)
        ref = torch.zeros((num_v, num_v))
        for _ in range(num_e):
            src = random.randrange(num_v)
            dst = random.randrange(num_v)
            graph.add_edges((src, dst))
            ref[src, dst] = 1
            ref[dst, src] = 1
        assert torch.all(graph.A.to_dense() == ref)
    # Summed adjacency: accumulate multiplicities symmetrically.
    for _ in range(3):
        graph = Graph(num_v)
        ref = torch.zeros((num_v, num_v))
        for _ in range(num_e):
            src = random.randrange(num_v)
            dst = random.randrange(num_v)
            graph.add_edges((src, dst), merge_op='sum')
            ref[src, dst] += 1
            if src != dst:
                ref[dst, src] += 1
        assert torch.all(graph.A.to_dense() == ref)
# NOTE(review): the bare "_model" above looks like a mangled "@register_model"
# decorator -- confirm and restore.
_model
def dm_nfnet_f2(pretrained=False, **kwargs):
    """NFNet-F2 (DeepMind weight-standardized variant); thin factory wrapper."""
    return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs)
class LeastSquaresNormalize(intnormb.LocationScaleCLIMixin, intnormb.DirectoryNormalizeCLI):
    """Least-squares tissue-mean (CSF/GM/WM) normalization for MR images.

    Fits 'standard' tissue means on one image, then scales each image so its
    tissue means align with the standard in a least-squares sense.
    """

    def __init__(self, *, norm_value: float = 1.0, **kwargs: typing.Any):
        super().__init__(norm_value=norm_value, **kwargs)
        # Memberships accumulated in calculate_scale (T1 path), kept so
        # save_additional_info can persist them.
        self.tissue_memberships: list[mioi.Image] = []
        # Standard tissue means; set by _fit or load_standard_tissue_means.
        self.standard_tissue_means: (npt.NDArray | None) = None

    def calculate_location(self, image: intnormt.ImageLike, /, mask: (intnormt.ImageLike | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1) -> float:
        """Scale-only method: the location term is always zero."""
        return 0.0

    def calculate_scale(self, image: intnormt.ImageLike, /, mask: (intnormt.ImageLike | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1) -> float:
        """Return the least-squares scaling factor for ``image``.

        For T1, tissue memberships are estimated from the image; otherwise
        ``mask`` must itself be a tissue-membership array.
        """
        tissue_membership: intnormt.ImageLike
        if (modality == intnormt.Modality.T1):
            tissue_membership = intnormtm.find_tissue_memberships(image, mask)
            self.tissue_memberships.append(tissue_membership)
        elif (mask is not None):
            tissue_membership = self._fix_tissue_membership(image, mask)
        else:
            msg = "If 'modality' != 't1', you must provide a "
            msg += 'tissue membership array in the mask argument.'
            raise ValueError(msg)
        tissue_means = self.tissue_means(image, tissue_membership)
        sf = self.scaling_factor(tissue_means)
        return sf

    def _fit(self, images: collections.abc.Sequence[intnormt.ImageLike], /, masks: (collections.abc.Sequence[intnormt.ImageLike] | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1, **kwargs: typing.Any) -> None:
        """Fit standard tissue means from the first image (CSF-mean normalized)."""
        image = images[0]
        mask = (masks[0] if (masks is not None) else None)
        tissue_membership: intnormt.ImageLike
        if ((not isinstance(mask, np.ndarray)) and (mask is not None)):
            raise ValueError('Mask must be either none or be like a numpy array.')
        if (modality == intnormt.Modality.T1):
            tissue_membership = intnormtm.find_tissue_memberships(image, mask)
        elif (mask is not None):
            logger.debug("Assuming 'masks' contains tissue memberships.")
            tissue_membership = self._fix_tissue_membership(image, mask)
        else:
            msg = "If 'modality' != 't1', you must provide a "
            msg += 'tissue membership array in the mask argument.'
            raise ValueError(msg)
        # Normalize by the CSF mean (membership channel 0 -- confirm ordering)
        # before recording the standard tissue means.
        csf_mean = np.average(image, weights=tissue_membership[(..., 0)])
        norm_image: intnormt.ImageLike = ((image / csf_mean) * self.norm_value)
        self.standard_tissue_means = self.tissue_means(norm_image, tissue_membership)

    def _fix_tissue_membership(self, image: intnormt.ImageLike, tissue_membership: S) -> S:
        """Permute a 4D membership array so its spatial shape matches ``image``.

        NOTE(review): transpose(3, 0, 1, 2) moves the last axis first -- confirm
        this is the intended channel-axis convention for incoming masks.
        """
        image_ndim = int(image.ndim)
        tm_ndim = int(tissue_membership.ndim)
        if ((tissue_membership.shape[:image_ndim] != image.shape) and (tm_ndim == 4)):
            tissue_membership = tissue_membership.transpose(3, 0, 1, 2)
        if (tissue_membership.shape[:image_ndim] != image.shape):
            msg = 'If masks provided, need to have same spatial shape as image.'
            raise intnorme.NormalizationError(msg)
        return tissue_membership

    @staticmethod
    def tissue_means(image: intnormt.ImageLike, /, tissue_membership: intnormt.ImageLike) -> npt.NDArray:
        """Membership-weighted mean intensity per tissue, as a column vector.

        FIX: restored @staticmethod -- without it, ``self.tissue_means(image,
        tm)`` (as called above) would pass `self` as `image` and fail.
        """
        n_tissues = tissue_membership.shape[(- 1)]
        weighted_avgs = [np.average(image, weights=tissue_membership[(..., i)]) for i in range(n_tissues)]
        return np.asarray([weighted_avgs]).T

    def scaling_factor(self, tissue_means: npt.NDArray) -> float:
        """Least-squares scale aligning ``tissue_means`` with the standard means."""
        # FIX: the matrix-multiplication operators were missing here
        # (syntax error: "tissue_means.T tissue_means").
        numerator = (tissue_means.T @ tissue_means)
        denominator = (tissue_means.T @ self.standard_tissue_means)
        sf: float = (numerator / denominator).item()
        return sf

    @staticmethod
    def name() -> str:
        """Short CLI name of this method."""
        # FIX: restored @staticmethod so instance calls (inst.name()) work.
        return 'lsq'

    @staticmethod
    def fullname() -> str:
        """Human-readable name of this method."""
        return 'Least Squares'

    @staticmethod
    def description() -> str:
        """One-line description for CLI help."""
        desc = 'Minimize distance between tissue means (CSF/GM/WM) in a '
        desc += 'least squares-sense within a set of MR images.'
        return desc

    def save_additional_info(self, args: argparse.Namespace, **kwargs: typing.Any) -> None:
        """Persist per-image tissue memberships and (optionally) standard means."""
        normed = kwargs['normalized']
        image_fns = kwargs['image_filenames']
        if (not self.tissue_memberships):
            logger.debug("'tissue_memberships' empty. Skipping saving.")
            return
        if (len(self.tissue_memberships) != len(image_fns)):
            msg = f"'tissue_memberships' ({len(self.tissue_memberships)}) "
            msg += f"and 'image_filenames' ({len(image_fns)}) "
            msg += 'must be in correspondence.'
            raise RuntimeError(msg)
        if (len(self.tissue_memberships) != len(normed)):
            msg = f"'tissue_memberships' ({len(self.tissue_memberships)}) "
            msg += f"and 'normalized' ({len(normed)}) "
            msg += 'must be in correspondence.'
            raise RuntimeError(msg)
        for (memberships, norm, fn) in zip(self.tissue_memberships, normed, image_fns):
            # Prefer the normalized image's affine; fall back to the membership's.
            if hasattr(norm, 'affine'):
                tissue_memberships: mioi.Image = mioi.Image(memberships, norm.affine)
            elif hasattr(memberships, 'affine'):
                tissue_memberships = mioi.Image(memberships, memberships.affine)
            else:
                tissue_memberships = mioi.Image(memberships, None)
            (base, name, ext) = intnormio.split_filename(fn)
            new_name = ((name + '_tissue_memberships') + ext)
            if (args.output_dir is None):
                output = (base / new_name)
            else:
                output = (pathlib.Path(args.output_dir) / new_name)
            tissue_memberships.to_filename(output)
        # FIX: reset instead of `del` -- deleting the attribute made a second
        # call crash with AttributeError on the emptiness check above.
        self.tissue_memberships = []
        if (args.save_standard_tissue_means is not None):
            self.save_standard_tissue_means(args.save_standard_tissue_means)

    def save_standard_tissue_means(self, filename: intnormt.PathLike, /) -> None:
        """Save the fitted standard tissue means with np.save; requires a prior fit."""
        if (self.standard_tissue_means is None):
            msg = 'Fit required before saving standard tissue means.'
            raise intnorme.NormalizationError(msg)
        np.save(filename, self.standard_tissue_means)

    def load_standard_tissue_means(self, filename: intnormt.PathLike, /) -> None:
        """Load previously saved standard tissue means from an .npy file."""
        data = np.load(filename)
        self.standard_tissue_means = data

    @classmethod
    def from_argparse_args(cls, args: argparse.Namespace, /) -> LeastSquaresNormalize:
        """Construct the normalizer from parsed CLI arguments."""
        # FIX: restored @classmethod (first parameter is `cls`).
        out = cls(norm_value=args.norm_value)
        return out

    def call_from_argparse_args(self, args: argparse.Namespace, /, **kwargs: typing.Any) -> None:
        """CLI entry: optionally load precomputed means, then run normalization."""
        if (args.load_standard_tissue_means is not None):
            self.load_standard_tissue_means(args.load_standard_tissue_means)
            # Means are already fit; make fit a no-op for this run.
            self.fit = (lambda *args, **kwargs: None)
        args.modality = intnormt.Modality.from_string(args.modality)
        use_masks = True
        if (args.mask_dir is not None):
            if (args.modality != intnormt.Modality.T1):
                msg = f"If brain masks provided, 'modality' must be 't1'. Got '{args.modality}'."
                raise ValueError(msg)
        elif (args.tissue_membership_dir is not None):
            # Tissue memberships are routed through the mask_dir plumbing.
            use_masks = False
            args.mask_dir = args.tissue_membership_dir
        super().call_from_argparse_args(args, use_masks_in_plot=use_masks)

    @classmethod
    def get_parent_parser(cls, desc: str, valid_modalities: frozenset[str] = intnorm.VALID_MODALITIES, **kwargs: typing.Any) -> argparse.ArgumentParser:
        """Build the common CLI argument parser for this normalizer."""
        # FIX: restored @classmethod (first parameter is `cls`).
        parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument('image_dir', type=intnormt.dir_path(), help='Path of directory containing images to normalize.')
        parser.add_argument('-o', '--output-dir', type=intnormt.dir_path(), default=None, help='Path of directory in which to save normalized images.')
        parser.add_argument('-mo', '--modality', type=str, default='t1', choices=intnorm.VALID_MODALITIES, help='Modality of the images.')
        parser.add_argument('-n', '--norm-value', type=intnormt.positive_float(), default=1.0, help='Reference value for normalization.')
        parser.add_argument('-e', '--extension', type=str, default='nii*', help='Extension of images (must be nibabel readable).')
        parser.add_argument('-p', '--plot-histogram', action='store_true', help='Plot the histogram of the normalized image.')
        parser.add_argument('-v', '--verbosity', action='count', default=0, help='Increase output verbosity (e.g., -vv is more than -v).')
        parser.add_argument('--version', action='store_true', help='Print the version of intensity-normalization.')
        return parser

    @staticmethod
    def add_method_specific_arguments(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Add lsq-specific and mutually exclusive mask/membership arguments."""
        parser = parent_parser.add_argument_group('method-specific arguments')
        parser.add_argument('-sstm', '--save-standard-tissue-means', default=None, type=intnormt.save_file_path(), help='Save the standard tissue means fit by the method.')
        parser.add_argument('-lstm', '--load-standard-tissue-means', default=None, type=intnormt.file_path(), help='Load a standard tissue means previously fit by the method.')
        exclusive = parent_parser.add_argument_group('mutually exclusive optional arguments')
        group = exclusive.add_mutually_exclusive_group(required=False)
        group.add_argument('-m', '--mask-dir', type=intnormt.dir_path(), default=None, help='Path to a foreground mask for the image. Provide this if not providing a tissue mask (if image is not skull-stripped).')
        group.add_argument('-tm', '--tissue-membership-dir', type=intnormt.dir_path(), help='Path to a mask of a tissue memberships. Provide this if not providing the foreground mask.')
        return parent_parser
def main():
    """Entry point: parse config, prepare logging directories, run train/test."""
    (cfg, config_file) = parse_args()
    cfg.freeze()
    logger.info('{}'.format(cfg))
    logger.info('check mode - {}'.format(cfg.mode.mode))
    model_dir = os.path.join(cfg.train.log_directory, cfg.train.model_name)
    if not os.path.exists(cfg.train.log_directory):
        os.mkdir(cfg.train.log_directory)
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
        os.mkdir(os.path.join(model_dir, 'summaries'))
    else:
        # Existing run directory: files will be overwritten in place.
        logger.warning('This logging directory already exists: {}. Over-writing current files'.format(model_dir))
    train_engine = engine.Engine(args=cfg, logger=logger)
    if cfg.mode.mode == 'train':
        train_engine.train()
    if cfg.mode.mode == 'test':
        train_engine.online_test()
# NOTE(review): the bare "()" above this function looks like a mangled
# decorator (e.g. prettytensor's "@prettytensor.Register()") -- confirm.
()
def nearest_upsampling(input_layer, kernel, stride, edges=PAD_SAME, name=PROVIDED):
    """Upsample a rank-4 (NHWC) layer via a ones-filter conv3d_transpose.

    The input is expanded to 5-D, transposed-convolved with an all-ones
    filter, and squeezed back.  When kernel and stride differ, a mask zeroes
    positions where overlapping filter taps would otherwise accumulate.
    """
    assert (len(input_layer.shape) == 4), 'input rank must be 4'
    kernel = _kernel(kernel)
    stride = _stride(stride)
    input_height = input_layer.shape[1]
    input_width = input_layer.shape[2]
    depth = input_layer.shape[3]
    filter_height = kernel[0]
    filter_width = kernel[1]
    row_stride = stride[1]
    col_stride = stride[2]
    (out_rows, out_cols) = get2d_deconv_output_size(input_height, input_width, filter_height, filter_width, row_stride, col_stride, edges)
    # Treat channels as a 3rd spatial dim (size `depth`) with a singleton channel.
    output_shape_3d = [input_layer.shape[0], out_rows, out_cols, depth, 1]
    kernel_3d = (kernel + [1])
    stride_3d = (stride + [1])
    # All-ones filter replicates each input value over the kernel footprint.
    filter_mask = tf.ones(shape=(kernel_3d + [1, 1]), dtype=input_layer.dtype)
    output_tensor = tf.nn.conv3d_transpose(value=tf.expand_dims(input_layer, axis=(- 1)), filter=filter_mask, output_shape=output_shape_3d, strides=stride_3d, padding=edges, name=name)
    output_tensor = tf.squeeze(output_tensor, axis=4)
    if ((filter_height != row_stride) or (filter_width != col_stride)):
        # Overlapping taps would sum; mask them back to single contributions.
        output_mask = _deconv_mask(tmf.get_shape(input_layer), output_shape_3d[:(- 1)], kernel, stride, edges, input_layer.dtype)
        output_tensor = tf.multiply(output_tensor, output_mask)
    return input_layer.with_tensor(output_tensor)
class TanhPolar(nn.LayerBase):
    """Tanh-polar warp/restore layer with precomputed bilinear sampling grids.

    ``warp`` maps a Cartesian image into tanh-polar space and ``restore``
    maps it back; both use ``nn.bilinear_sampler`` and handle NCHW input by
    transposing around the sampler.
    """

    def __init__(self, width, height, angular_offset_deg=270, **kwargs):
        self.width = width
        self.height = height
        # Grids are computed once at construction and stored as constants
        # with a leading broadcastable batch dimension.
        (warp_gridx, warp_gridy) = TanhPolar._get_tanh_polar_warp_grids(width, height, angular_offset_deg=angular_offset_deg)
        (restore_gridx, restore_gridy) = TanhPolar._get_tanh_polar_restore_grids(width, height, angular_offset_deg=angular_offset_deg)
        self.warp_gridx_t = tf.constant(warp_gridx[(None, ...)])
        self.warp_gridy_t = tf.constant(warp_gridy[(None, ...)])
        self.restore_gridx_t = tf.constant(restore_gridx[(None, ...)])
        self.restore_gridy_t = tf.constant(restore_gridy[(None, ...)])
        super().__init__(**kwargs)

    def warp(self, inp_t):
        """Sample `inp_t` onto the tanh-polar grid (NCHW handled by transpose)."""
        batch_t = tf.shape(inp_t)[0]
        warp_gridx_t = tf.tile(self.warp_gridx_t, (batch_t, 1, 1))
        warp_gridy_t = tf.tile(self.warp_gridy_t, (batch_t, 1, 1))
        if (nn.data_format == 'NCHW'):
            inp_t = tf.transpose(inp_t, (0, 2, 3, 1))
        out_t = nn.bilinear_sampler(inp_t, warp_gridx_t, warp_gridy_t)
        if (nn.data_format == 'NCHW'):
            out_t = tf.transpose(out_t, (0, 3, 1, 2))
        return out_t

    def restore(self, inp_t):
        """Inverse of ``warp``; pads symmetrically since restore grids are 1-based."""
        batch_t = tf.shape(inp_t)[0]
        restore_gridx_t = tf.tile(self.restore_gridx_t, (batch_t, 1, 1))
        restore_gridy_t = tf.tile(self.restore_gridy_t, (batch_t, 1, 1))
        if (nn.data_format == 'NCHW'):
            inp_t = tf.transpose(inp_t, (0, 2, 3, 1))
        # +1 in the restore grids assumes this 1-pixel symmetric pad.
        inp_t = tf.pad(inp_t, [(0, 0), (1, 1), (1, 0), (0, 0)], 'SYMMETRIC')
        out_t = nn.bilinear_sampler(inp_t, restore_gridx_t, restore_gridy_t)
        if (nn.data_format == 'NCHW'):
            out_t = tf.transpose(out_t, (0, 3, 1, 2))
        return out_t

    # NOTE(review): the two grid builders take (W, H) instead of self and are
    # only called via the class, so they work, but they were presumably
    # @staticmethod originally -- confirm and restore the decorators.
    def _get_tanh_polar_warp_grids(W, H, angular_offset_deg):
        """Source-pixel (x, y) grids mapping tanh-polar coords back to Cartesian."""
        angular_offset_pi = ((angular_offset_deg * np.pi) / 180.0)
        roi_center = np.array([(W // 2), (H // 2)], np.float32)
        # Radii chosen so the elliptical ROI has area W*H.
        roi_radii = (np.array([W, H], np.float32) / (np.pi ** 0.5))
        (cos_offset, sin_offset) = (np.cos(angular_offset_pi), np.sin(angular_offset_pi))
        normalised_dest_indices = np.stack(np.meshgrid(np.arange(0.0, 1.0, (1.0 / W)), np.arange(0.0, (2.0 * np.pi), ((2.0 * np.pi) / H))), axis=(- 1))
        radii = normalised_dest_indices[(..., 0)]
        orientation_x = np.cos(normalised_dest_indices[(..., 1)])
        orientation_y = np.sin(normalised_dest_indices[(..., 1)])
        # arctanh inverts the tanh radial compression; the fraction rescales
        # by the ellipse radius along this orientation.
        src_radii = (np.arctanh(radii) * ((roi_radii[0] * roi_radii[1]) / np.sqrt((((roi_radii[1] ** 2) * (orientation_x ** 2)) + ((roi_radii[0] ** 2) * (orientation_y ** 2))))))
        src_x_indices = (src_radii * orientation_x)
        src_y_indices = (src_radii * orientation_y)
        # Rotate by the angular offset and translate to the ROI center.
        (src_x_indices, src_y_indices) = (((roi_center[0] + (cos_offset * src_x_indices)) - (sin_offset * src_y_indices)), ((roi_center[1] + (cos_offset * src_y_indices)) + (sin_offset * src_x_indices)))
        return (src_x_indices.astype(np.float32), src_y_indices.astype(np.float32))

    def _get_tanh_polar_restore_grids(W, H, angular_offset_deg):
        """Source-pixel (x, y) grids mapping Cartesian coords into tanh-polar space."""
        angular_offset_pi = ((angular_offset_deg * np.pi) / 180.0)
        roi_center = np.array([(W // 2), (H // 2)], np.float32)
        roi_radii = (np.array([W, H], np.float32) / (np.pi ** 0.5))
        (cos_offset, sin_offset) = (np.cos(angular_offset_pi), np.sin(angular_offset_pi))
        dest_indices = np.stack(np.meshgrid(np.arange(W), np.arange(H)), axis=(- 1)).astype(float)
        # Center and rotate by the (inverse) angular offset.
        normalised_dest_indices = np.matmul((dest_indices - roi_center), np.array([[cos_offset, (- sin_offset)], [sin_offset, cos_offset]]))
        radii = np.linalg.norm(normalised_dest_indices, axis=(- 1))
        # Normalize direction vectors (clip avoids division by zero at center).
        normalised_dest_indices[(..., 0)] /= np.clip(radii, 1e-09, None)
        normalised_dest_indices[(..., 1)] /= np.clip(radii, 1e-09, None)
        radii *= ((np.sqrt((((roi_radii[1] ** 2) * (normalised_dest_indices[(..., 0)] ** 2)) + ((roi_radii[0] ** 2) * (normalised_dest_indices[(..., 1)] ** 2)))) / roi_radii[0]) / roi_radii[1])
        src_radii = np.tanh(radii)
        # +1.0 accounts for the symmetric padding applied in restore().
        src_x_indices = ((src_radii * W) + 1.0)
        src_y_indices = (np.mod((((np.arctan2(normalised_dest_indices[(..., 1)], normalised_dest_indices[(..., 0)]) / 2.0) / np.pi) * H), H) + 1.0)
        return (src_x_indices.astype(np.float32), src_y_indices.astype(np.float32))
class BaseDetector(metaclass=ABCMeta):
    """Abstract base for feature detectors with built-in MNN descriptor matching.

    NOTE(review): `default_cfg`, `default_cfg_matching`, `run` and `_init`
    are consumed as if they were properties/abstract members (e.g.
    ``{**self.default_cfg, **cfg}`` in __init__ would fail on a bound
    method), so @property/@abstractmethod decorators appear to have been
    stripped -- confirm and restore.
    """
    # Defaults for the autocorrelation/derivative configuration.
    default_cfg_acorr: dict[(str, Union[(str, float)])] = {'method_derivative': 'sobel', 'sigma_d': 1.0, 'truncation_d': 3.0, 'method_weighting': 'gaussian', 'sigma_w': 1.0, 'truncation_w': 3.0}

    def __init__(self, cfg, cfg_acorr, cfg_matching, disable_grads=True):
        # Validate user overrides against the declared defaults.
        check_cfg(self.default_cfg, cfg)
        check_cfg(self.default_cfg_acorr, cfg_acorr)
        check_cfg(self.default_cfg_matching, cfg_matching)
        # User values override defaults; acorr config is nested under 'cfg_acorr'.
        cfg = {**self.default_cfg, **cfg}
        cfg.update({'cfg_acorr': {**self.default_cfg_acorr, **cfg_acorr}})
        self._init(cfg)
        self.match_fun = MNN({**self.default_cfg_matching, **cfg_matching})
        if disable_grads:
            # NOTE: this disables autograd globally, not just for this object.
            torch.set_grad_enabled(False)

    def __call__(self, im):
        """Alias for run()."""
        return self.run(im)

    def match_descriptors(self, desc1, desc2, return_sim=False):
        """Mutual-nearest-neighbor matching between two descriptor sets."""
        return self.match_fun(desc1, desc2, return_sim)

    def run(self, im):
        # To be implemented by subclasses: detect features in `im`.
        pass

    def _init(self, cfg):
        # To be implemented by subclasses: consume the merged config.
        pass

    def default_cfg(self):
        # To be implemented by subclasses: default detector config.
        pass

    def default_cfg_matching(self):
        # To be implemented by subclasses: default matcher config.
        pass
def _unflatten(dico):
new_dico = OrderedDict()
for (full_k, v) in dico.items():
full_k = full_k.split('.')
node = new_dico
for k in full_k[:(- 1)]:
if (k.startswith('[') and k.endswith(']')):
k = int(k[1:(- 1)])
if (k not in node):
node[k] = OrderedDict()
node = node[k]
node[full_k[(- 1)]] = v
return new_dico |
class QuantizableInceptionD(inception_module.InceptionD):
    """InceptionD variant whose branch concat is quantization-friendly."""

    def __init__(self, *args, **kwargs):
        # Swap in the quantizable conv block; everything else is inherited.
        super(QuantizableInceptionD, self).__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        # FloatFunctional makes the cat op observable for quantization.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x):
        branch_outputs = self._forward(x)
        # Concatenate branches along the channel dimension.
        return self.myop.cat(branch_outputs, 1)
def ppo_loss(A, rho, eps=0.2):
    """Clipped PPO surrogate loss (negated, elementwise).

    A:   advantage estimates.
    rho: probability ratio pi_new / pi_old.
    eps: clip range for the ratio.
    """
    unclipped = rho * A
    clipped = rho.clamp(1 - eps, 1 + eps) * A
    # Pessimistic (min) objective, negated so it can be minimized.
    return -torch.min(unclipped, clipped)
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
    """Write a training checkpoint.

    With a loss scaler, saves a torch-style state dict on the master rank;
    otherwise delegates to the model's (deepspeed-style) save_checkpoint.
    """
    out_dir = Path(args.output_dir)
    tag = 'checkpoint-%s' % str(epoch)
    if loss_scaler is not None:
        state = {
            'model': model_without_ddp.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
            'scaler': loss_scaler.state_dict(),
            'args': args,
        }
        for ckpt_path in [out_dir / (tag + '.pth')]:
            save_on_master(state, ckpt_path)
    else:
        model.save_checkpoint(save_dir=args.output_dir, tag=tag, client_state={'epoch': epoch})
def plot_arrow_2D(generalized_pose, length=1.0, width=0.5, fc='r', ec='k'):
    """Draw a 2D pose (x, y, theta) as an arrow on the current axes."""
    x = generalized_pose.x
    y = generalized_pose.y
    heading = generalized_pose.theta
    # Arrow direction follows the heading angle.
    dx = length * np.cos(heading)
    dy = length * np.sin(heading)
    plt.arrow(x, y, dx, dy, fc=fc, ec=ec, head_width=width, head_length=width)
def _get_word_ngrams(n, sentences):
    """Return word n-grams over all sentences concatenated in order.

    Args:
        n: n-gram size (> 0).
        sentences: non-empty list of word lists.
    """
    assert (len(sentences) > 0)
    assert (n > 0)
    # FIX: flatten in O(total words); sum(sentences, []) is quadratic.
    words = [word for sentence in sentences for word in sentence]
    return _get_ngrams(n, words)
def main():
    """Aggregate multi-speaker ASR scoring over all ref/hyp pairings.

    Reads num_spkrs**2 result files (one per (reference, hypothesis) speaker
    pair), merges them, picks the best speaker permutation per utterance, and
    collects four integer scores per utterance into a matrix.
    """
    parser = get_parser()
    args = parser.parse_args()
    # Exactly one result file per (ref, hyp) speaker pair is required.
    if (len(args.results) != (args.num_spkrs ** 2)):
        parser.print_help()
        sys.exit(1)
    results = {}
    for r in six.moves.range(1, (args.num_spkrs + 1)):
        for h in six.moves.range(1, (args.num_spkrs + 1)):
            # Row-major index of the (r, h) pairing in args.results.
            idx = ((((r - 1) * args.num_spkrs) + h) - 1)
            key = 'r{}h{}'.format(r, h)
            result = get_results(args.results[idx], key)
            results[key] = result
    results = merge_results(results)
    # Choose the best speaker permutation for each utterance.
    new_results = get_utt_permutation(results, args.num_spkrs)
    pat = re.compile('\\d+')
    # Four numbers per utterance parsed from the 'Scores' string --
    # presumably #corr/#sub/#del/#ins; confirm with the scoring format.
    score = np.zeros((len(new_results.keys()), 4))
    for (idx, key) in enumerate(new_results.keys()):
        tmp_score = list(map(int, pat.findall(new_results[key]['Scores'])))
        score[idx] = tmp_score
    return (score, new_results)
def create_reverse_dependency_map():
    """Map each source/test file to every file that transitively depends on it.

    Builds the direct dependency graph of src/transformers modules and test
    files, closes it transitively via fixpoint iteration, then inverts it.
    """
    modules = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / 'src/transformers').glob('**/*.py')]
    direct_deps = {m: get_module_dependencies(m) for m in modules}
    tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / 'tests').glob('**/*.py')]
    direct_deps.update({t: get_test_dependencies(t) for t in tests})
    all_files = (modules + tests)
    # Fixpoint: fold dependencies-of-dependencies in until nothing changes,
    # so direct_deps becomes the transitive closure.
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_files:
            for d in direct_deps[m]:
                for dep in direct_deps[d]:
                    if (dep not in direct_deps[m]):
                        direct_deps[m].append(dep)
                        something_changed = True
    # Invert: a file maps to everything that depends on it.
    reverse_map = collections.defaultdict(list)
    for m in all_files:
        # __init__ files also map to their own deps, so touching a dependency
        # re-triggers anything keyed on the init.
        if m.endswith('__init__.py'):
            reverse_map[m].extend(direct_deps[m])
        for d in direct_deps[m]:
            reverse_map[d].append(m)
    return reverse_map
class ScheduledOptim():
    """Thin optimizer wrapper with a multiplicative (x0.8) LR decay."""

    def __init__(self, optimizer):
        self._optimizer = optimizer

    def step_lr(self):
        """Run one optimizer step."""
        self._optimizer.step()

    def update_lr(self):
        """Decay the learning rate of every parameter group."""
        self._update_learning_rate()

    def _update_learning_rate(self):
        # Multiply each group's lr by the fixed decay factor.
        for group in self._optimizer.param_groups:
            group['lr'] = group['lr'] * 0.8
def load_data(train, test, session_key, item_key, time_key, pad_idx=0):
    """Load train and test sessions with a shared item->index vocabulary.

    The vocabulary starts with a '<pad>' entry at `pad_idx` and is grown by
    _load_data as new items appear (train first, then test).
    """
    items2idx = {'<pad>': pad_idx}
    idx_cnt = 0
    (train_data, idx_cnt) = _load_data(train, items2idx, idx_cnt, pad_idx, session_key, item_key, time_key)
    print(len(items2idx.keys()))
    (test_data, idx_cnt) = _load_data(test, items2idx, idx_cnt, pad_idx, session_key, item_key, time_key)
    print(len(items2idx.keys()))
    return (train_data, test_data, items2idx, len(items2idx.keys()))
def Split_On_last_letter_Quote_Mark(input_word):
    """Split a single trailing quote off a token.

    Returns [word] unchanged for 1-char tokens or tokens matching the
    class/function-name patterns; otherwise, if the token contains exactly
    one quote character and it is the last character, returns
    [word_without_quote, ' <quote>'].
    """
    result = [input_word]
    if len(input_word) <= 1:
        return result
    # Tokens that look like class or function names are left intact.
    if re.compile(Class_Func_Name).findall(input_word):
        return result
    if re.compile(Func_Name).findall(input_word):
        return result
    # Check single then double quote; at most one can match the last char.
    for quote, suffix in (("'", " '"), ('"', ' "')):
        if input_word.count(quote) == 1 and input_word[-1] == quote:
            result = [input_word[:-1], suffix]
    return result
# NOTE(review): "_end_docstrings(...)" looks like a mangled
# "@add_end_docstrings(PIPELINE_INIT_ARGS)" decorator -- confirm.
_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object-detection pipeline: image in, list of {score, label, box} out.

    PyTorch-only; requires the vision backend and an object-detection model.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if (self.framework == 'tf'):
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_OBJECT_DETECTION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """Route the optional 'threshold' kwarg to the postprocess step."""
        postprocess_kwargs = {}
        if ('threshold' in kwargs):
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return ({}, {}, postprocess_kwargs)

    def __call__(self, *args, **kwargs) -> Union[(Predictions, List[Prediction])]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        """Load the image and build model inputs; keep the original size around."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.feature_extractor(images=[image], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs

    def _forward(self, model_inputs):
        # target_size is not a model input; pop it and re-attach to outputs
        # so postprocess can rescale boxes.
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        """Convert raw model outputs into score/label/box dicts above `threshold`."""
        target_size = model_outputs['target_size']
        raw_annotations = self.feature_extractor.post_process(model_outputs, target_size)
        # Single-image pipeline: only the first annotation is used.
        raw_annotation = raw_annotations[0]
        keep = (raw_annotation['scores'] > threshold)
        scores = raw_annotation['scores'][keep]
        labels = raw_annotation['labels'][keep]
        boxes = raw_annotation['boxes'][keep]
        raw_annotation['scores'] = scores.tolist()
        raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
        raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]
        keys = ['score', 'label', 'box']
        annotation = [dict(zip(keys, vals)) for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])]
        return annotation

    def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[(str, int)]:
        """Convert an (xmin, ymin, xmax, ymax) tensor into an int dict."""
        if (self.framework != 'pt'):
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        (xmin, ymin, xmax, ymax) = box.int().tolist()
        bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
        return bbox
def _cuda_s2_mm(x, y):
    """Complex matrix multiply of two spectral S^2 tensors on the GPU.

    x: (nl**2, nbatch, nfeature_in, 2) float32 CUDA tensor (last dim = re/im).
    y: (nl**2, nfeature_in, nfeature_out, 2) float32 CUDA tensor.
    Returns an (nspec, nbatch, nfeature_out, 2) tensor where
    nspec = (4*nl**2 - 1)*nl // 3 is the size of the SO(3) spectrum.
    """
    import s2cnn.utils.cuda as cuda_utils
    # The raw kernel only supports float32 CUDA tensors with an explicit
    # real/imaginary trailing dimension of size 2.
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    # Leading dimension enumerates the nl**2 spherical-harmonic coefficients.
    nl = round((x.size(0) ** 0.5))
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    # Launch on the current PyTorch stream so ordering with other ops holds.
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
def sgd_optimizer_fromparams(params, lr, momentum, weight_decay):
    """Build a plain SGD optimizer over *params* with the given hyper-parameters."""
    return torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
class SpaceAdaptiveReceiver(sc.Receiver):
    """Residual-based adaptive sampling: every 1000 global steps, re-sample
    the training domain at the 200 points with the largest Allen-Cahn
    residual magnitude."""

    def receive_notify(self, solver, message):
        # Only act at the end of a training pipe, and only every 1000 steps.
        if ((sc.Signal.TRAIN_PIPE_END in message.keys()) and ((solver.global_step % 1000) == 0)):
            sc.logger.info('space adaptive sampling...')
            results = solver.infer_step({'data_evaluate': ['x', 't', 'sdf', 'AllenCahn_u']})
            residual_data = results['data_evaluate']['AllenCahn_u'].detach().cpu().numpy().ravel()
            # Indices of the 200 largest |residual| values (descending order).
            index = np.argsort(((- 1.0) * np.abs(residual_data)))[:200]
            _points = {key: values[index].detach().cpu().numpy() for (key, values) in results['data_evaluate'].items()}
            _points.pop('AllenCahn_u')
            # Equal quadrature weight for each of the 200 resampled points.
            _points['area'] = (np.zeros_like(_points['sdf']) + (1.0 / 200))
            solver.set_domain_parameter('re_sampling_domain', {'points': _points})
def _pad_1x1_to_3x3_tensor(kernel1x1, padding_11=1):
if (kernel1x1 is None):
return 0
else:
return torch.nn.functional.pad(kernel1x1, ([padding_11] * 4)) |
class AnomalyRotation():
    """Sample an augmentation (rotation/scale/brightness) whose magnitude is
    forced to stay outside a small "normal" band, for anomaly generation.

    aug_type: 'r' rotation (degrees), 's' affine scale, 'b' brightness.
    """

    def __init__(self, max_aug, aug_type):
        self.max_aug = max_aug
        self.aug_type = aug_type
        if aug_type == 'r':
            # Two distinct integer angles in [-5, 5] that must be avoided.
            self.target_augs = np.random.choice(range(-5, 5 + 1), 2, replace=False)
        elif aug_type == 's':
            self.target_augs = (0.95, 1.05)
        elif aug_type == 'b':
            self.target_augs = (0.95, 1.05)

    def _draw(self):
        # Uniform sample in [-max_aug, max_aug).
        return (np.random.random() - 0.5) * 2 * self.max_aug

    def __call__(self, x):
        if self.aug_type == 'r':
            angle = self._draw()
            # Rejection-sample until the rounded angle avoids the targets.
            while np.round(angle) in self.target_augs:
                angle = self._draw()
            return TF.rotate(x, angle)
        elif self.aug_type == 's':
            factor = self._draw() + 1
            # Rejection-sample until the factor leaves the (0.95, 1.05) band.
            while self.target_augs[0] < factor < self.target_augs[1]:
                factor = self._draw() + 1
            return TF.affine(x, scale=factor, angle=0, translate=[0, 0], shear=0)
        elif self.aug_type == 'b':
            factor = self._draw() + 1
            while self.target_augs[0] < factor < self.target_augs[1]:
                factor = self._draw() + 1
            return TF.adjust_brightness(x, factor)
class ModulatedDeformConvPack(ModulatedDeformConv):
    """Modulated deformable convolution (DCNv2) that predicts its own offsets
    and modulation mask from the input through an extra conv layer."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1):
        super(ModulatedDeformConvPack, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias)
        # 3 maps per kernel location: x-offset, y-offset and modulation mask.
        out_channels = (((self.deformable_groups * 3) * self.kernel_size[0]) * self.kernel_size[1])
        self.conv_offset_mask = nn.Conv2d(self.in_channels, out_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=True)
        # Custom attribute, presumably read by optimizer setup to scale this
        # layer's learning rate -- confirm against the training code.
        self.conv_offset_mask.lr_mult = lr_mult
        self.init_offset()

    def init_offset(self):
        # Zero init => zero offsets and sigmoid(0) = 0.5 mask at the start.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, input):
        out = self.conv_offset_mask(input)
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        # Modulation mask constrained to (0, 1).
        mask = torch.sigmoid(mask)
        return ModulatedDeformConvFunction.apply(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups, self.im2col_step)
class MPNetForMultipleChoice():
    """Import-time placeholder used when PyTorch is not installed.

    Every entry point simply calls requires_pytorch, which raises an
    informative error telling the user to install torch.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): upstream transformers dummies declare from_pretrained as a
    # classmethod; confirm whether this instance-method variant is intentional.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def world_size():
    """Number of distributed workers; 1 when torch.distributed is not initialized."""
    return dist.get_world_size() if dist.is_initialized() else 1
@_REGISTRY.register()
def build_p37_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """Build a DLA-34 + BiFPN (P3-P7) backbone from a detectron2-style config.

    The decorator's '@' was lost during extraction, leaving a bare
    `_REGISTRY.register()` call whose returned decorator was discarded, so the
    builder was never registered; restored.

    Args:
        cfg: config node providing MODEL.FPN and MODEL.BIFPN options.
        input_shape: input ShapeSpec (unused here; kept for registry signature).
    Returns:
        A BiFPN backbone wrapping a dla34 bottom-up network.
    """
    bottom_up = dla34(cfg)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    # P3-P7 means exactly five pyramid levels.
    assert (cfg.MODEL.BIFPN.NUM_LEVELS == 5)
    backbone = BiFPN(cfg=cfg, bottom_up=bottom_up, in_features=in_features, out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, norm=cfg.MODEL.BIFPN.NORM, num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV)
    return backbone
def gen_lemma_rule(form, lemma, allow_copy=False):
    """Generate a lemma rule string encoding how to derive *lemma* from *form*.

    The rule is ``<casing>;<script>``: <casing> records the upper/lower-case
    runs of the lemma as "↑"/"↓" markers with positions, joined by "¬";
    <script> is either ``a`` + lemma (no common substring between the
    lower-cased form and lemma) or ``d`` + two min-edit scripts for the
    prefix and suffix around their longest common substring.

    NOTE: the non-ASCII markers were stripped during extraction (both branches
    of the case test were ''); restored per the UDPipe reference encoding.
    """
    form = form.lower()
    previous_case = -1
    lemma_casing = ''
    for i, c in enumerate(lemma):
        # '↑' marks an upper-case run, '↓' a lower-case run.
        case = ('↑' if (c.lower() != c) else '↓')
        if (case != previous_case):
            # Record position from the start for the first half of the lemma,
            # from the end (negative) for the second half.
            lemma_casing += '{}{}{}'.format(('¬' if lemma_casing else ''), case, (i if (i <= (len(lemma) // 2)) else (i - len(lemma))))
        previous_case = case
    lemma = lemma.lower()
    # Longest common substring of the lower-cased form and lemma.
    (best, best_form, best_lemma) = (0, 0, 0)
    for l in range(len(lemma)):
        for f in range(len(form)):
            cpl = 0
            while (((f + cpl) < len(form)) and ((l + cpl) < len(lemma)) and (form[(f + cpl)] == lemma[(l + cpl)])):
                cpl += 1
            if (cpl > best):
                best = cpl
                best_form = f
                best_lemma = l
    rule = (lemma_casing + ';')
    if (not best):
        # No overlap: rule is an absolute replacement.
        rule += ('a' + lemma)
    else:
        # Overlap: edit the prefix and suffix around the common substring.
        rule += 'd{}{}'.format(min_edit_script(form[:best_form], lemma[:best_lemma], allow_copy), min_edit_script(form[(best_form + best):], lemma[(best_lemma + best):], allow_copy))
    return rule
class AudioAddSilenceTransformer():
    """Pad an Audio object with leading and trailing silence."""

    def __init__(self, startDurationSeconds: float, endDurationSeconds: float):
        # Silence durations (seconds) prepended/appended by transform().
        self.startDurationSeconds = startDurationSeconds
        self.endDurationSeconds = endDurationSeconds

    def transform(self, audio: Audio):
        """Return a new Audio with silence concatenated on both ends."""
        front = self.generateSilence(self.startDurationSeconds, audio.samplingRate)
        back = self.generateSilence(self.endDurationSeconds, audio.samplingRate)
        return front + audio + back

    def generateSilence(self, duration: float, samplingRate: int):
        """Build an all-zero Audio of *duration* seconds at *samplingRate*."""
        n_samples = int(duration * samplingRate)
        return Audio(np.zeros(n_samples), samplingRate, 's', 's')
def get_psp_resnet50_ade(pretrained=False, root='~/.encoding/models', **kwargs):
    """PSPNet with a ResNet-50 backbone pre-configured for the ADE20K dataset."""
    model = get_psp('ade20k', 'resnet50', pretrained, root=root, **kwargs)
    return model
def subprocess_fn(rank, args, temp_dir):
    """Per-process entry point for (multi-)GPU training.

    Sets up file logging, initializes torch.distributed through a file://
    rendezvous when more than one GPU is used, wires up cross-process stat
    collection, and runs the training loop.
    """
    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
    if (args.num_gpus > 1):
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if (os.name == 'nt'):
            # Windows has no NCCL; use gloo with a file:/// URL.
            init_method = ('file:///' + init_file.replace('\\', '/'))
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
    # Stat reduction across processes is only needed in the multi-GPU case.
    sync_device = (torch.device('cuda', rank) if (args.num_gpus > 1) else None)
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if (rank != 0):
        # Silence custom-op build output on non-master ranks.
        custom_ops.verbosity = 'none'
    training_loop.training_loop(rank=rank, **args)
class TwoCropsTransform():
    """Apply two (possibly different) transforms to one input and return both
    views, e.g. for contrastive learning."""

    def __init__(self, base_transform1, base_transform2):
        self.base_transform1 = base_transform1
        self.base_transform2 = base_transform2

    def __call__(self, x):
        """Return [view1, view2] for input *x*."""
        return [self.base_transform1(x), self.base_transform2(x)]
def predicative(adjective):
    """Return the predicative (uninflected) form of a German adjective.

    Strips the declension endings -em/-en/-er/-es/-e and restores the elided
    "e" of -bel/-per stems, e.g. "neugierige" -> "neugierig",
    "miserabler" -> "miserabel".
    """
    w = adjective.lower()
    if len(w) <= 3:
        return w
    for suffix in ('em', 'en', 'er', 'es', 'e'):
        if not w.endswith(suffix):
            continue
        # Never strip the stem below three characters.
        stem = w[:max(-len(suffix), -(len(w) - 3))]
        # Restore the elided "e": "miserabl" -> "miserabel".
        if stem.endswith('bl'):
            stem = stem[:-1] + 'el'
        if stem.endswith('pr'):
            stem = stem[:-1] + 'er'
        return stem
    return w
def BasicConv3d(in_channels, out_channels, kernel_size, stride, pad, dilation=1):
    """Conv3d (no bias) -> BatchNorm3d -> LeakyReLU(0.2) building block."""
    conv = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size,
                     stride=stride, padding=pad, dilation=dilation, bias=False)
    # Bias is redundant before batch norm, hence bias=False above.
    return nn.Sequential(conv,
                         nn.BatchNorm3d(out_channels),
                         nn.LeakyReLU(inplace=True, negative_slope=0.2))
def cal_loss(pred, gold, smoothing):
    """Summed cross-entropy of *pred* logits against *gold* labels.

    With smoothing=True applies label smoothing (eps=0.1) and masks out
    padding positions manually; otherwise defers to F.cross_entropy with
    ignore_index=Constants_PAD.
    """
    gold = gold.contiguous().view(-1)
    pred = pred.contiguous().view(-1, pred.size(-1))
    if not smoothing:
        return F.cross_entropy(pred, gold, ignore_index=Constants_PAD, reduction='sum')
    eps = 0.1
    n_class = pred.size(1)
    # Smoothed targets: 1-eps on the gold class, eps spread over the rest.
    one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
    one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    token_loss = -(one_hot * log_prb).sum(dim=1)
    # Drop the contribution of padding tokens before summing.
    pad_free = gold.ne(Constants_PAD)
    return token_loss.masked_select(pad_free).sum()
class ASPP_Bottleneck(nn.Module):
    """Atrous Spatial Pyramid Pooling head over a ResNet bottleneck feature map.

    Combines a 1x1 conv, three dilated 3x3 convs (rates 6/12/18) and a
    global-average-pooled image-level branch, then projects the fused
    features to per-pixel class logits.
    """

    def __init__(self, num_classes):
        super(ASPP_Bottleneck, self).__init__()
        # Input channels: 4 * 512 = 2048 (ResNet bottleneck output).
        self.conv_1x1_1 = nn.Conv2d((4 * 512), 256, kernel_size=1)
        self.bn_conv_1x1_1 = nn.BatchNorm2d(256)
        # Dilated branches keep the spatial size via matching padding.
        self.conv_3x3_1 = nn.Conv2d((4 * 512), 256, kernel_size=3, stride=1, padding=6, dilation=6)
        self.bn_conv_3x3_1 = nn.BatchNorm2d(256)
        self.conv_3x3_2 = nn.Conv2d((4 * 512), 256, kernel_size=3, stride=1, padding=12, dilation=12)
        self.bn_conv_3x3_2 = nn.BatchNorm2d(256)
        self.conv_3x3_3 = nn.Conv2d((4 * 512), 256, kernel_size=3, stride=1, padding=18, dilation=18)
        self.bn_conv_3x3_3 = nn.BatchNorm2d(256)
        # Image-level (global context) branch.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_1x1_2 = nn.Conv2d((4 * 512), 256, kernel_size=1)
        self.bn_conv_1x1_2 = nn.BatchNorm2d(256)
        # 5 branches * 256 channels = 1280 fused channels.
        self.conv_1x1_3 = nn.Conv2d(1280, 256, kernel_size=1)
        self.bn_conv_1x1_3 = nn.BatchNorm2d(256)
        self.conv_1x1_4 = nn.Conv2d(256, num_classes, kernel_size=1)

    def forward(self, feature_map):
        # feature_map: (batch, 2048, h, w) -> (batch, num_classes, h, w).
        feature_map_h = feature_map.size()[2]
        feature_map_w = feature_map.size()[3]
        out_1x1 = F.relu(self.bn_conv_1x1_1(self.conv_1x1_1(feature_map)))
        out_3x3_1 = F.relu(self.bn_conv_3x3_1(self.conv_3x3_1(feature_map)))
        out_3x3_2 = F.relu(self.bn_conv_3x3_2(self.conv_3x3_2(feature_map)))
        out_3x3_3 = F.relu(self.bn_conv_3x3_3(self.conv_3x3_3(feature_map)))
        out_img = self.avg_pool(feature_map)
        out_img = F.relu(self.bn_conv_1x1_2(self.conv_1x1_2(out_img)))
        # Broadcast the 1x1 global feature back to the spatial size.
        out_img = F.interpolate(out_img, size=(feature_map_h, feature_map_w), mode='bilinear')
        out = torch.cat([out_1x1, out_3x3_1, out_3x3_2, out_3x3_3, out_img], 1)
        out = F.relu(self.bn_conv_1x1_3(self.conv_1x1_3(out)))
        out = self.conv_1x1_4(out)
        return out
def evaluate_hull(x, hull):
    """Evaluate the upper and lower hull of a log-density at point *x*.

    Used in adaptive-rejection-sampling style algorithms.  As read from the
    usage below, hull[1] holds abscissae, hull[2] ordinates, hull[3] segment
    slopes, hull[4] segment break points, hull[5] an auxiliary list.
    # NOTE(review): field meanings inferred from usage -- confirm against
    # the hull construction code.

    Returns:
        (hux, hlx): upper-hull and lower-hull values at x.
    """
    # Upper hull: locate the linear segment containing x, then evaluate it.
    if (x < hull[4][0]):
        hux = ((hull[3][0] * (x - hull[1][0])) + hull[2][0])
        indx = 0
    else:
        # NOTE(review): both branches set indx = 1; the len(hull[5]) == 1
        # special case looks vestigial.
        if (len(hull[5]) == 1):
            indx = 1
        else:
            indx = 1
            # Linear scan for the first break point to the right of x.
            while ((indx < len(hull[4])) and (hull[4][indx] < x)):
                indx = (indx + 1)
            indx = (indx - 1)
        hux = ((hull[3][indx] * (x - hull[1][indx])) + hull[2][indx])
    neginf = numpy.finfo(numpy.dtype(numpy.float64)).min
    # Lower hull: chords between adjacent sample points; effectively -inf
    # outside the sampled support.
    if ((x < hull[1][0]) or (x > hull[1][(- 1)])):
        hlx = neginf
    elif (indx == 0):
        hlx = ((((hull[1][1] - x) * hull[2][0]) + ((x - hull[1][0]) * hull[2][1])) / (hull[1][1] - hull[1][0]))
    elif (indx == len(hull[4])):
        hlx = ((((hull[1][(- 1)] - x) * hull[2][(- 2)]) + ((x - hull[1][(- 2)]) * hull[2][(- 1)])) / (hull[1][(- 1)] - hull[1][(- 2)]))
    elif (x < hull[1][(indx + 1)]):
        hlx = ((((hull[1][(indx + 1)] - x) * hull[2][indx]) + ((x - hull[1][indx]) * hull[2][(indx + 1)])) / (hull[1][(indx + 1)] - hull[1][indx]))
    else:
        hlx = ((((hull[1][(indx + 2)] - x) * hull[2][(indx + 1)]) + ((x - hull[1][(indx + 1)]) * hull[2][(indx + 2)])) / (hull[1][(indx + 2)] - hull[1][(indx + 1)]))
    return (hux, hlx)
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs, stochastic_policy, save=False, reuse=False):
    """Roll out a trained policy in *env* and report average episode stats.

    Builds the policy via policy_func, restores weights from
    load_model_path, samples number_trajs trajectories (each capped at
    timesteps_per_batch steps), optionally saves them to an .npz file, and
    returns (avg_len, avg_ret).
    """
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func('pi', ob_space, ac_space, reuse=reuse)
    U.initialize()
    U.load_state(load_model_path)
    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        (obs, acs, ep_len, ep_ret) = (traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret'])
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        # File name: "<checkpoint name>.<env id>" with stacked trajectories.
        filename = ((load_model_path.split('/')[(- 1)] + '.') + env.spec.id)
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list), lens=np.array(len_list), rets=np.array(ret_list))
    avg_len = (sum(len_list) / len(len_list))
    avg_ret = (sum(ret_list) / len(ret_list))
    print('Average length:', avg_len)
    print('Average return:', avg_ret)
    return (avg_len, avg_ret)
def convert_models(onnx_path: str, num_controlnet: int, output_path: str, fp16: bool=False, sd_xl: bool=False):
    """Build and serialize a TensorRT engine from an ONNX SD(+ControlNet) UNet.

    Args:
        onnx_path: path of the ONNX model to parse.
        num_controlnet: number of ControlNet conditioning image inputs.
        output_path: file the serialized TensorRT engine is written to.
        fp16: enable FP16 kernels in the TensorRT builder.
        sd_xl: use SDXL shapes (text_hidden_size=2048 plus the extra
            text_embeds/time_ids optimization profiles).
    """
    if sd_xl:
        # Static shapes; batch is doubled for classifier-free guidance.
        batch_size = 1
        unet_in_channels = 4
        unet_sample_size = 64
        num_tokens = 77
        text_hidden_size = 2048
        img_size = 512
        text_embeds_shape = ((2 * batch_size), 1280)
        time_ids_shape = ((2 * batch_size), 6)
    else:
        batch_size = 1
        unet_in_channels = 4
        unet_sample_size = 64
        num_tokens = 77
        text_hidden_size = 768
        img_size = 512
    # NOTE(review): redundant reassignment (both branches already set it).
    batch_size = 1
    latents_shape = ((2 * batch_size), unet_in_channels, unet_sample_size, unet_sample_size)
    embed_shape = ((2 * batch_size), num_tokens, text_hidden_size)
    controlnet_conds_shape = (num_controlnet, (2 * batch_size), 3, img_size, img_size)
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
    TRT_BUILDER = trt.Builder(TRT_LOGGER)
    TRT_RUNTIME = trt.Runtime(TRT_LOGGER)
    # Explicit-batch network definition as required by modern TensorRT.
    network = TRT_BUILDER.create_network((1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)))
    onnx_parser = trt.OnnxParser(network, TRT_LOGGER)
    parse_success = onnx_parser.parse_from_file(onnx_path)
    for idx in range(onnx_parser.num_errors):
        print(onnx_parser.get_error(idx))
    if (not parse_success):
        sys.exit('ONNX model parsing failed')
    print('Load Onnx model done')
    # Static optimization profile: min = opt = max for every input.
    profile = TRT_BUILDER.create_optimization_profile()
    profile.set_shape('sample', latents_shape, latents_shape, latents_shape)
    profile.set_shape('encoder_hidden_states', embed_shape, embed_shape, embed_shape)
    profile.set_shape('controlnet_conds', controlnet_conds_shape, controlnet_conds_shape, controlnet_conds_shape)
    if sd_xl:
        profile.set_shape('text_embeds', text_embeds_shape, text_embeds_shape, text_embeds_shape)
        profile.set_shape('time_ids', time_ids_shape, time_ids_shape, time_ids_shape)
    config = TRT_BUILDER.create_builder_config()
    config.add_optimization_profile(profile)
    config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True)
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    plan = TRT_BUILDER.build_serialized_network(network, config)
    if (plan is None):
        sys.exit('Failed building engine')
    print('Succeeded building engine')
    # Round-trip through the runtime to validate, then persist the plan.
    engine = TRT_RUNTIME.deserialize_cuda_engine(plan)
    with open(output_path, 'wb') as f:
        f.write(engine.serialize())
.register('SGD')
def build_sgd(cfg, groups):
    """Create an SGD optimizer over parameter *groups* from config values.

    NOTE(review): the registration decorator on the preceding line was
    garbled during extraction (bare ".register('SGD')"); it presumably read
    "@<REGISTRY>.register('SGD')" -- confirm against the original source.
    """
    lr = cfg.OPTIMIZER.LR
    weight_decay = cfg.OPTIMIZER.WEIGHT_DECAY.DECAY
    momentum = cfg.OPTIMIZER.MOMENTUM
    return optim.SGD(groups, lr=lr, momentum=momentum, weight_decay=weight_decay)
class MultipleNegativesRankingLoss(nn.Module):
    """In-batch negatives ranking loss for sentence-embedding training.

    Each (anchor, positive) pair in the batch treats every other positive as
    a negative; the loss is -mean(diag(S)) + mean(logsumexp(S, dim=1)) for
    the score matrix S = A @ B^T.
    """

    def __init__(self, sentence_embedder):
        super(MultipleNegativesRankingLoss, self).__init__()
        self.sentence_embedder = sentence_embedder

    def forward(self, sentence_features: Iterable[Dict[(str, Tensor)]], labels: Tensor):
        # Embed both sides; `labels` is unused (negatives come from the batch).
        embeddings = [self.sentence_embedder(features)['sentence_embedding']
                      for features in sentence_features]
        anchors, positives = embeddings
        return self.multiple_negatives_ranking_loss(anchors, positives)

    def multiple_negatives_ranking_loss(self, embeddings_a: Tensor, embeddings_b: Tensor):
        """Compute the ranking loss from the pairwise dot-product scores."""
        scores = torch.matmul(embeddings_a, embeddings_b.t())
        positive_term = torch.mean(torch.diag(scores))
        normalizer = torch.mean(torch.logsumexp(scores, dim=1))
        return normalizer - positive_term
class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless torch is installed.

    Auto-generated transformers dummy object; instantiating it calls
    requires_backends, which fails with an installation hint.
    """
    # Backends the real class needs; checked by requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
@pytest.fixture(name='save_mock')
def _save_mock(monkeypatch: MonkeyPatch) -> MagicMock:
    """Pytest fixture replacing cache.dataframe_utils.save_df with a MagicMock.

    The decorator was lost during extraction (the bare "(name='save_mock')"
    line is a syntax error and the fixture was never registered); restored as
    @pytest.fixture(name='save_mock') so tests can request 'save_mock'.

    Returns:
        The MagicMock installed in place of save_df, for call assertions.
    """
    save_mock = MagicMock()
    monkeypatch.setattr(cache.dataframe_utils, 'save_df', save_mock)
    return save_mock
class DataSource():
    """Dialogue dataset wrapper producing padded (X, X_floor, Y) batches for
    per-token dialog-act segmentation.

    Each utterance is tokenized and paired with a label sequence of 'I'
    (inside) tags ending in 'E_<dialog_act>' per segment.  Fragments are
    sliding windows of up to `history_len` utterances; next() yields tensors
    shaped (batch, history_len, seq_len) for X and (batch, seq_len) for Y.
    """

    def __init__(self, data, config, tokenizer, label_tokenizer):
        self.dataset_path = config.dataset_path
        self.max_uttr_len = config.max_uttr_len
        self.history_len = config.history_len
        self.label_tokenizer = label_tokenizer
        self.tokenizer = tokenizer
        self.pad_label_id = label_tokenizer.pad_token_id
        self.bos_label_id = label_tokenizer.bos_token_id
        self.eos_label_id = label_tokenizer.eos_token_id
        self.statistics = {'n_sessions': 0, 'n_uttrs': 0, 'n_tokens': 0, 'n_segments': 0, 'n_fragments': 0}
        sessions = data
        # Tokenize every utterance and build aligned label-id sequences.
        for sess in sessions:
            sess['processed_utterances'] = []
            for uttr in sess['utterances']:
                uttr_tokens = []
                uttr_labels = []
                for segment in uttr:
                    text = segment['text']
                    floor = segment['floor']
                    dialog_act = segment['segment_meta']['dialog_act']
                    tokens = self.tokenizer.convert_string_to_tokens(text)[:self.max_uttr_len]
                    uttr_tokens += tokens
                    # 'I' for every token except the segment-final one, which
                    # carries the segment's dialog act ('E_<act>').
                    uttr_labels += ((['I'] * (len(tokens) - 1)) + [('E_' + dialog_act)])
                uttr_token_ids = self.tokenizer.convert_tokens_to_ids(uttr_tokens, bos_and_eos=True)
                uttr_label_ids = (([self.bos_label_id] + [self.label_tokenizer.word2id[label] for label in uttr_labels]) + [self.eos_label_id])
                # NOTE(review): uses the floor of the *last* segment for the
                # whole utterance -- confirm this is intended.
                uttr_floor_id = ['A', 'B'].index(floor)
                sess['processed_utterances'].append({'token_ids': uttr_token_ids, 'label_ids': uttr_label_ids, 'floor_id': uttr_floor_id})
        # Build sliding-window fragments of up to history_len utterances,
        # one fragment per target (final) utterance.
        self._fragments = []
        for sess in sessions:
            uttrs = sess['processed_utterances']
            for uttr_end_idx in range(0, len(uttrs)):
                uttr_start_idx = max(0, ((uttr_end_idx - self.history_len) + 1))
                fragment = {'utterances': uttrs[uttr_start_idx:(uttr_end_idx + 1)]}
                self._fragments.append(fragment)
        # Corpus statistics (raw token counts use whitespace splitting).
        self.statistics['n_sessions'] = len(sessions)
        self.statistics['n_fragments'] = len(self._fragments)
        for sess in sessions:
            self.statistics['n_uttrs'] += len(sess['utterances'])
            for uttr in sess['utterances']:
                self.statistics['n_segments'] += len(uttr)
                for segment in uttr:
                    tokens = segment['text'].split(' ')
                    self.statistics['n_tokens'] += len(tokens)

    def epoch_init(self, shuffle=False):
        """Reset the batch cursor; optionally shuffle a copy of the fragments."""
        self.cur_fragment_idx = 0
        if shuffle:
            # Deep-copy so the canonical fragment order is preserved.
            self.fragments = copy.deepcopy(self._fragments)
            random.shuffle(self.fragments)
        else:
            self.fragments = self._fragments

    def __len__(self):
        return len(self._fragments)

    def next(self, batch_size):
        """Return the next batch dict {'X', 'X_floor', 'Y'}, or None when the
        epoch is exhausted.

        X holds token ids for every utterance in each fragment's history
        (short histories padded with an empty utterance); Y holds label ids
        for the final (target) utterance of each fragment only.
        """
        if (self.cur_fragment_idx == len(self.fragments)):
            return None
        (X, Y) = ([], [])
        X_floor = []
        # Padding utterance used to fill histories shorter than history_len.
        empty_sent = ''
        empty_tokens = self.tokenizer.convert_string_to_tokens(empty_sent)
        empty_ids = self.tokenizer.convert_tokens_to_ids(empty_tokens, bos_and_eos=False)
        padding_segment = {'tokens': empty_tokens, 'token_ids': empty_ids, 'floor_id': 0, 'label_ids': ([self.pad_label_id] * len(empty_ids))}
        while (self.cur_fragment_idx < len(self.fragments)):
            if (len(Y) == batch_size):
                break
            fragment = self.fragments[self.cur_fragment_idx]
            segments = fragment['utterances']
            self.cur_fragment_idx += 1
            for segment in segments:
                X.append(segment['token_ids'])
                X_floor.append(segment['floor_id'])
            # Labels are only predicted for the fragment's final utterance.
            segment = segments[(- 1)]
            Y.append(segment['label_ids'])
            for _ in range((self.history_len - len(segments))):
                segment = padding_segment
                X.append(segment['token_ids'])
                X_floor.append(segment['floor_id'])
        X = self.tokenizer.convert_batch_ids_to_tensor(X)
        max_segment_len = X.size(1)
        # Right-pad each label sequence to the padded token length.
        Y = [(y + ([self.pad_label_id] * (max_segment_len - len(y)))) for y in Y]
        batch_size = len(Y)
        history_len = (X.size(0) // batch_size)
        X = torch.LongTensor(X).to(DEVICE).view(batch_size, history_len, (- 1))
        X_floor = torch.LongTensor(X_floor).to(DEVICE).view(batch_size, history_len)
        Y = torch.LongTensor(Y).to(DEVICE).view(batch_size, (- 1))
        batch_data_dict = {'X': X, 'X_floor': X_floor, 'Y': Y}
        return batch_data_dict
class FeatureFused(nn.Module):
    """Fuse multi-scale backbone features: project c2/c3 to a small channel
    count, upsample them to c4's spatial size, and concatenate with c4."""

    def __init__(self, inter_channels=48, norm_layer=nn.BatchNorm2d):
        super(FeatureFused, self).__init__()
        # 1x1 projections followed by normalization and ReLU.
        self.conv2 = nn.Sequential(
            nn.Conv2d(512, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(1024, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True),
        )

    def forward(self, c2, c3, c4):
        """Return cat([c4, proj(c3), proj(c2)]) at c4's spatial resolution."""
        target_size = c4.size()[2:]
        proj2 = self.conv2(F.interpolate(c2, target_size, mode='bilinear', align_corners=True))
        proj3 = self.conv3(F.interpolate(c3, target_size, mode='bilinear', align_corners=True))
        return torch.cat([c4, proj3, proj2], dim=1)
def test_python_vs_c_linacc_changingacc_xyz_accellsrframe_scalaromegaz_2d():
    """Regression test: integrating a 2D orbit in a non-inertial frame with a
    time-dependent linear acceleration and a scalar rotation frequency with
    constant Omegadot must agree between the Python and C integrators."""
    lp = potential.MiyamotoNagaiPotential(normalize=1.0, a=1.0, b=0.2)
    dp = potential.DehnenBarPotential(omegab=1.8, rb=0.5, Af=0.03)
    diskpot = (lp + dp)
    # Frame-origin trajectory: position, velocity and acceleration given as
    # mutually consistent functions of time (a0 is the derivative of v0,
    # v0 of x0); the z component stays zero for this 2D test.
    x0 = [(lambda t: ((((- 0.03) * (t ** 2.0)) / 2.0) - (((0.03 * (t ** 3.0)) / 6.0) / 20.0))), (lambda t: (((0.04 * (t ** 2.0)) / 2.0) + (((0.08 * (t ** 3.0)) / 6.0) / 20.0))), (lambda t: 0.0)]
    v0 = [(lambda t: (((- 0.03) * t) - (((0.03 * (t ** 2.0)) / 2.0) / 20.0))), (lambda t: ((0.04 * t) + (((0.08 * (t ** 2.0)) / 2.0) / 20.0))), (lambda t: 0.0)]
    a0 = [(lambda t: ((- 0.03) - ((0.03 * t) / 20.0))), (lambda t: (0.04 + ((0.08 * t) / 20.0))), (lambda t: 0.0)]
    # Frame rotation: circular frequency at R=1 plus a constant rate of change.
    omega = lp.omegac(1.0)
    omegadot = 0.02
    framepot = potential.NonInertialFrameForce(x0=x0, v0=v0, a0=a0, Omega=omega, Omegadot=omegadot)

    def check_orbit(py_method='dop853', c_method='dop853_c', tol=1e-08):
        # Integrate the same planar orbit with both integrators and compare
        # positions and velocities pointwise.
        o = Orbit().toPlanar()
        o.turn_physical_off()
        ts = numpy.linspace(0.0, 20.0, 1001)
        o.integrate(ts, (diskpot + framepot), method=py_method)
        op = o()
        op.integrate(ts, (diskpot + framepot), method=c_method)
        assert (numpy.amax(numpy.fabs((o.x(ts) - op.x(ts)))) < tol), f'Integrating an orbit in a rotating frame in Python does not agree with integrating the same orbit in C; using methods {py_method} and {c_method}'
        assert (numpy.amax(numpy.fabs((o.y(ts) - op.y(ts)))) < tol), f'Integrating an orbit in a rotating frame in Python does not agree with integrating the same orbit in C; using methods {py_method} and {c_method}'
        assert (numpy.amax(numpy.fabs((o.vx(ts) - op.vx(ts)))) < tol), f'Integrating an orbit in a rotating frame in Python does not agree with integrating the same orbit in C; using methods {py_method} and {c_method}'
        assert (numpy.amax(numpy.fabs((o.vy(ts) - op.vy(ts)))) < tol), f'Integrating an orbit in a rotating frame in Python does not agree with integrating the same orbit in C; using methods {py_method} and {c_method}'
        return None

    check_orbit()
    return None
class Attention(nn.Module):
    """Standard multi-head self-attention (ViT style).

    The two matrix products were garbled during extraction ("q k" and
    "attn v" are syntax errors); restored to ``q @ k.transpose(-2, -1)``
    and ``attn @ v``.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim).
        self.scale = qk_scale or head_dim ** (-0.5)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """x: (B, N, C) -> (B, N, C)."""
        B, N, C = x.shape
        # (3, B, num_heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        # Scaled dot-product attention (restored '@' operators).
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
    """Build a contingency matrix describing the relationship between two
    label assignments.

    Args:
        labels_true: ground-truth class labels, shape (n_samples,).
        labels_pred: predicted cluster labels, shape (n_samples,).
        eps: optional value added to every cell of the dense result, useful
            to avoid log(0) in downstream metrics; incompatible with sparse.
        sparse: return a scipy CSR matrix instead of a dense array.
    Returns:
        (n_classes, n_clusters) matrix C with C[i, j] = number of samples in
        true class i that were assigned to predicted cluster j.
    Raises:
        ValueError: if both eps and sparse are requested.
    """
    if (eps is not None) and sparse:
        raise ValueError("Cannot set 'eps' when sparse=True")
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # np.int was removed in NumPy 1.24; use an explicit fixed-width dtype.
    contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=np.int64)
    if sparse:
        # COO duplicates (repeated label pairs) are merged here.
        contingency = contingency.tocsr()
        contingency.sum_duplicates()
    else:
        contingency = contingency.toarray()
        if eps is not None:
            # Result becomes float when eps is a float.
            contingency = contingency + eps
    return contingency
def visualize_stn():
    """Plot one batch of test images next to their STN-transformed versions.

    Relies on module-level `test_loader`, `model`, `device`,
    `convert_image_np`, `torchvision` and matplotlib's `plt`.
    """
    with torch.no_grad():
        # One batch of inputs, before and after the spatial transformer.
        data = next(iter(test_loader))[0].to(device)
        input_tensor = data.cpu()
        transformed_input_tensor = model.stn(data).cpu()
        in_grid = convert_image_np(torchvision.utils.make_grid(input_tensor))
        out_grid = convert_image_np(torchvision.utils.make_grid(transformed_input_tensor))
        # Side-by-side comparison figure.
        (f, axarr) = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')
        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')
def _decode_record(record, name_to_features):
    """Parse a serialized tf.Example and downcast its int64 features to int32.

    TPU-oriented code paths prefer int32, so every int64 feature produced by
    tf.parse_single_example is cast before the example dict is returned.
    """
    example = tf.parse_single_example(record, name_to_features)
    for name in list(example.keys()):
        value = example[name]
        if value.dtype == tf.int64:
            example[name] = tf.cast(value, tf.int32)
    return example
class SwinForMaskedImageModeling(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless torch is installed.

    Auto-generated transformers dummy object; instantiating it calls
    requires_backends, which fails with an installation hint.
    """
    # Backends the real class needs; checked by requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TerminalTablePrinter(object):
    """Accumulate tabular rows and re-render them to the terminal on each
    update, keeping only as many rows as fit the current window height."""

    def __init__(self):
        self.headers = None   # column names, captured from the first row
        self.tabulars = []    # value rows recorded so far

    def print_tabular(self, new_tabular):
        """Record one row of (header, value) pairs and redraw the table."""
        if self.headers is None:
            self.headers = [entry[0] for entry in new_tabular]
        else:
            # Every subsequent row must match the established column count.
            assert len(self.headers) == len(new_tabular)
        self.tabulars.append([entry[1] for entry in new_tabular])
        self.refresh()

    def refresh(self):
        """Clear the screen and print the most recent rows that fit it."""
        import os
        # Query the terminal size via stty (POSIX only).
        rows, columns = os.popen('stty size', 'r').read().split()
        visible = self.tabulars[-(int(rows) - 3):]
        sys.stdout.write('\x1b[2J\x1b[H')
        sys.stdout.write(tabulate(visible, self.headers))
        sys.stdout.write('\n')
class RelationGenerator(nn.Module):
    """Biaffine-style relation (edge label) scorer for graph generation.

    Scores every (dependent, head) state pair against all relation labels
    and either returns log-probabilities (work=True) or a label-smoothed
    NLL loss against the target relations.
    """

    def __init__(self, vocabs, embed_dim, rel_size, dropout):
        super(RelationGenerator, self).__init__()
        self.vocabs = vocabs
        self.transfer_head = nn.Linear(embed_dim, rel_size)
        self.transfer_dep = nn.Linear(embed_dim, rel_size)
        # Maps a (rel_size+1)-dim dep vector to one vector per relation label;
        # the +1 slots act as bias terms in the bilinear product.
        self.proj = nn.Linear((rel_size + 1), (vocabs['rel'].size * (rel_size + 1)))
        self.dropout = dropout
        self.reset_parameters()

    def reset_parameters(self):
        # Small-std normal init for weights, zero biases.
        nn.init.normal_(self.transfer_head.weight, std=0.02)
        nn.init.normal_(self.transfer_dep.weight, std=0.02)
        nn.init.normal_(self.proj.weight, std=0.02)
        nn.init.constant_(self.proj.bias, 0.0)
        nn.init.constant_(self.transfer_head.bias, 0.0)
        nn.init.constant_(self.transfer_dep.bias, 0.0)

    def forward(self, outs, graph_state, target_rel=None, work=False):
        """outs: (dep_num, bsz, embed_dim) dependent states; graph_state:
        (head_num, bsz, embed_dim) head states.  Returns log-probs when
        work=True, otherwise a per-batch relation loss (target_rel required).
        """

        def get_scores(dep, head):
            # Nonlinear projections followed by a biaffine product, with an
            # explicit all-ones bias dimension appended to both sides.
            head = torch.tanh(self.transfer_head(head))
            dep = torch.tanh(self.transfer_dep(dep))
            head = F.dropout(head, p=self.dropout, training=self.training)
            dep = F.dropout(dep, p=self.dropout, training=self.training)
            (dep_num, bsz, _) = dep.size()
            head_num = head.size(0)
            bias_dep = dep.new_ones((dep_num, bsz, 1))
            bias_head = head.new_ones((head_num, bsz, 1))
            dep = torch.cat([dep, bias_dep], 2)
            head = torch.cat([head, bias_head], 2)
            # One projected dep vector per relation label.
            dep = self.proj(dep).view(dep_num, bsz, self.vocabs['rel'].size, (- 1)).transpose(0, 1).contiguous()
            head = head.permute(1, 2, 0)
            # Batched matmul -> (bsz, dep_num, rel_size, head_num) scores.
            scores = torch.bmm(dep.view(bsz, (dep_num * self.vocabs['rel'].size), (- 1)), head).view(bsz, dep_num, self.vocabs['rel'].size, head_num)
            return scores

        scores = get_scores(outs, graph_state).permute(1, 0, 3, 2).contiguous()
        (dep_num, bsz, _) = outs.size()
        head_num = graph_state.size(0)
        log_probs = F.log_softmax(scores, dim=(- 1))
        (_, rel) = torch.max(log_probs, (- 1))
        if work:
            # Inference mode: hand back the full label distribution.
            return log_probs
        # Positions whose gold label is NIL or PAD do not contribute.
        rel_mask = (torch.eq(target_rel, self.vocabs['rel'].token2idx(NIL)) + torch.eq(target_rel, self.vocabs['rel'].token2idx(PAD)))
        rel_acc = torch.eq(rel, target_rel).float().masked_fill_(rel_mask, 0.0).sum().item()
        rel_tot = (rel_mask.numel() - rel_mask.float().sum().item())
        if (not self.training):
            print(('rel acc %.3f' % (rel_acc / rel_tot)))
        rel_loss = label_smoothed_nll_loss(log_probs.view((- 1), self.vocabs['rel'].size), target_rel.view((- 1)), 0.0).view(dep_num, bsz, head_num)
        rel_loss = rel_loss.masked_fill_(rel_mask, 0.0).sum((0, 2))
        return rel_loss
def main():
    """Entry point: seeded, resumable ModelNet40 training/evaluation loop."""
    args = parse_args()
    if (args.seed is None):
        args.seed = np.random.randint(1, 10000)
    os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
    assert torch.cuda.is_available(), 'Please ensure codes are executed in cuda.'
    device = 'cuda'
    if (args.seed is not None):
        # Seed every RNG and force deterministic cudnn for reproducibility.
        torch.manual_seed(args.seed)
        np.random.seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.set_printoptions(10)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        os.environ['PYTHONHASHSEED'] = str(args.seed)
    # Checkpoint directory: checkpoints/<model><msg-or-time>-<seed>.
    time_str = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
    if (args.msg is None):
        message = time_str
    else:
        message = ('-' + args.msg)
    args.checkpoint = (((('checkpoints/' + args.model) + message) + '-') + str(args.seed))
    if (not os.path.isdir(args.checkpoint)):
        mkdir_p(args.checkpoint)
    # Mirror console output into <checkpoint>/out.txt.
    screen_logger = logging.getLogger('Model')
    screen_logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    file_handler = logging.FileHandler(os.path.join(args.checkpoint, 'out.txt'))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    screen_logger.addHandler(file_handler)

    def printf(str):
        # Log to file and echo to stdout.
        screen_logger.info(str)
        print(str)

    printf(f'args: {args}')
    printf('==> Building model..')
    net = models.__dict__[args.model]()
    criterion = cal_loss
    net = net.to(device)
    if (device == 'cuda'):
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    best_test_acc = 0.0
    best_train_acc = 0.0
    best_test_acc_avg = 0.0
    best_train_acc_avg = 0.0
    best_test_loss = float('inf')
    best_train_loss = float('inf')
    start_epoch = 0
    optimizer_dict = None
    if (not os.path.isfile(os.path.join(args.checkpoint, 'last_checkpoint.pth'))):
        # Fresh run: persist args and start a new metrics logger.
        save_args(args)
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=('ModelNet' + args.model))
        logger.set_names(['Epoch-Num', 'Learning-Rate', 'Train-Loss', 'Train-acc-B', 'Train-acc', 'Valid-Loss', 'Valid-acc-B', 'Valid-acc'])
    else:
        # Resume: restore weights, bookkeeping and optimizer state.
        printf(f'Resuming last checkpoint from {args.checkpoint}')
        checkpoint_path = os.path.join(args.checkpoint, 'last_checkpoint.pth')
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['net'])
        start_epoch = checkpoint['epoch']
        best_test_acc = checkpoint['best_test_acc']
        best_train_acc = checkpoint['best_train_acc']
        best_test_acc_avg = checkpoint['best_test_acc_avg']
        best_train_acc_avg = checkpoint['best_train_acc_avg']
        best_test_loss = checkpoint['best_test_loss']
        best_train_loss = checkpoint['best_train_loss']
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=('ModelNet' + args.model), resume=True)
        optimizer_dict = checkpoint['optimizer']
    printf('==> Preparing data..')
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=args.workers, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=args.workers, batch_size=(args.batch_size // 2), shuffle=False, drop_last=False)
    optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
    if (optimizer_dict is not None):
        optimizer.load_state_dict(optimizer_dict)
    # last_epoch keeps the cosine LR schedule aligned when resuming.
    scheduler = CosineAnnealingLR(optimizer, args.epoch, eta_min=args.min_lr, last_epoch=(start_epoch - 1))
    for epoch in range(start_epoch, args.epoch):
        printf(('Epoch(%d/%s) Learning Rate %s:' % ((epoch + 1), args.epoch, optimizer.param_groups[0]['lr'])))
        train_out = train(net, train_loader, optimizer, criterion, device)
        test_out = validate(net, test_loader, criterion, device)
        scheduler.step()
        if (test_out['acc'] > best_test_acc):
            best_test_acc = test_out['acc']
            is_best = True
        else:
            is_best = False
        # Track the best value of every metric seen so far.
        best_test_acc = (test_out['acc'] if (test_out['acc'] > best_test_acc) else best_test_acc)
        best_train_acc = (train_out['acc'] if (train_out['acc'] > best_train_acc) else best_train_acc)
        best_test_acc_avg = (test_out['acc_avg'] if (test_out['acc_avg'] > best_test_acc_avg) else best_test_acc_avg)
        best_train_acc_avg = (train_out['acc_avg'] if (train_out['acc_avg'] > best_train_acc_avg) else best_train_acc_avg)
        best_test_loss = (test_out['loss'] if (test_out['loss'] < best_test_loss) else best_test_loss)
        best_train_loss = (train_out['loss'] if (train_out['loss'] < best_train_loss) else best_train_loss)
        save_model(net, epoch, path=args.checkpoint, acc=test_out['acc'], is_best=is_best, best_test_acc=best_test_acc, best_train_acc=best_train_acc, best_test_acc_avg=best_test_acc_avg, best_train_acc_avg=best_train_acc_avg, best_test_loss=best_test_loss, best_train_loss=best_train_loss, optimizer=optimizer.state_dict())
        logger.append([epoch, optimizer.param_groups[0]['lr'], train_out['loss'], train_out['acc_avg'], train_out['acc'], test_out['loss'], test_out['acc_avg'], test_out['acc']])
        printf(f"Training loss:{train_out['loss']} acc_avg:{train_out['acc_avg']}% acc:{train_out['acc']}% time:{train_out['time']}s")
        printf(f'''Testing loss:{test_out['loss']} acc_avg:{test_out['acc_avg']}% acc:{test_out['acc']}% time:{test_out['time']}s [best test acc: {best_test_acc}%]
''')
    logger.close()
    # NOTE(review): the f'' * n expressions below look like decorative
    # characters lost during extraction -- they currently contribute nothing.
    printf((((f'' * 2) + 'Final results') + ('' * 2)))
    printf(f"++ Last Train time: {train_out['time']} | Last Test time: {test_out['time']} ++")
    printf(f'++ Best Train loss: {best_train_loss} | Best Test loss: {best_test_loss} ++')
    printf(f'++ Best Train acc_B: {best_train_acc_avg} | Best Test acc_B: {best_test_acc_avg} ++')
    printf(f'++ Best Train acc: {best_train_acc} | Best Test acc: {best_test_acc} ++')
    printf((f'' * 5))
_model
def dla60_res2next(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """Construct a DLA-60 model with Res2NeXt bottleneck blocks.

    Args:
        pretrained: truthy to load pretrained weights from the default cfg.
        num_classes: classifier output size.
        in_chans: number of input image channels.
        **kwargs: forwarded to the DLA constructor.
    """
    cfg = default_cfgs['dla60_res2next']
    net = DLA(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
              block=DlaBottle2neck, cardinality=8, base_width=4,
              num_classes=num_classes, in_chans=in_chans, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
class TensorFlowEmitter(object):
    """Generates Python source for a kaffe ``Network`` subclass from layer chains."""

    def __init__(self, tab=None):
        # Indentation unit; defaults to four spaces.
        self.tab = tab or (' ' * 4)
        self.prefix = ''

    def indent(self):
        """Increase the current indentation level by one tab."""
        self.prefix += self.tab

    def outdent(self):
        """Decrease the current indentation level by one tab."""
        self.prefix = self.prefix[:-len(self.tab)]

    def statement(self, s):
        """Return ``s`` indented at the current level, newline-terminated."""
        return self.prefix + s + '\n'

    def emit_imports(self):
        return self.statement('from kaffe.tensorflow import Network\n')

    def emit_class_def(self, name):
        return self.statement('class %s(Network):' % name)

    def emit_setup_def(self):
        return self.statement('def setup(self):')

    def emit_parents(self, chain):
        """Emit the ``(self.feed(...)`` call naming the chain's parent layers."""
        assert len(chain)
        s = '(self.feed('
        # Align continuation lines under the opening parenthesis.
        sep = ', \n' + self.prefix + ' ' * len(s)
        s += sep.join("'%s'" % parent.name for parent in chain[0].node.parents)
        return self.statement(s + ')')

    def emit_node(self, node):
        return self.statement(' ' * 5 + '.' + node.emit())

    def emit(self, name, chains):
        """Assemble the full class source: imports, headers, one block per chain."""
        out = self.emit_imports()
        out += self.emit_class_def(name)
        self.indent()
        out += self.emit_setup_def()
        self.indent()
        blocks = []
        for chain in chains:
            block = self.emit_parents(chain)
            for node in chain:
                block += self.emit_node(node)
            # Drop the trailing newline and close the feed(...) expression.
            blocks.append(block[:-1] + ')')
        return out + '\n\n'.join(blocks)
def eval_corrupt_wrapper(model, fn_test_corrupt, args_test_corrupt):
    """Evaluate ``model`` on the clean split plus 7 corruption types x 5 severities.

    For each corruption, overall accuracy (OA) is averaged across severity
    levels; corruption error (CE) and relative CE (RCE) are computed against
    the DGCNN reference accuracies.

    Args:
        model: model passed through to ``fn_test_corrupt``.
        fn_test_corrupt: callable(split=..., model=..., **kwargs) returning a
            dict with at least an 'acc' entry.
        args_test_corrupt: extra keyword arguments for ``fn_test_corrupt``.
    """
    corruptions = ['clean', 'scale', 'jitter', 'rotate', 'dropout_global', 'dropout_local', 'add_global', 'add_local']
    # Reference DGCNN overall accuracies used as CE/RCE denominators.
    DGCNN_OA = {'clean': 0.448, 'scale': 0.415, 'jitter': 0.284, 'rotate': 0.341, 'dropout_global': 0.326, 'dropout_local': 0.319, 'add_global': 0.328, 'add_local': 0.279}
    OA_clean = None
    # NOTE(review): per-corruption metrics accumulate in ``perf_all`` but are
    # never printed or returned within this function — confirm downstream use.
    perf_all = {'OA': [], 'CE': [], 'RCE': []}
    for corruption_type in corruptions:
        perf_corrupt = {'OA': []}
        for level in range(5):
            if (corruption_type == 'clean'):
                split = 'clean'
            else:
                split = ((corruption_type + '_') + str(level))
            test_perf = fn_test_corrupt(split=split, model=model, **args_test_corrupt)
            perf_corrupt['OA'].append(test_perf['acc'])
            test_perf['corruption'] = corruption_type
            if (corruption_type != 'clean'):
                test_perf['level'] = level
            pprint.pprint(test_perf, width=200)
            if (corruption_type == 'clean'):
                # Clean accuracy is evaluated once (no severity levels).
                OA_clean = round(test_perf['acc'], 3)
                break
        # Average OA over the severity levels just evaluated.
        for k in perf_corrupt:
            perf_corrupt[k] = (sum(perf_corrupt[k]) / len(perf_corrupt[k]))
            perf_corrupt[k] = round(perf_corrupt[k], 3)
        if (corruption_type != 'clean'):
            perf_corrupt['CE'] = ((1 - perf_corrupt['OA']) / (1 - DGCNN_OA[corruption_type]))
            perf_corrupt['RCE'] = ((OA_clean - perf_corrupt['OA']) / (DGCNN_OA['clean'] - DGCNN_OA[corruption_type]))
            for k in perf_all:
                perf_corrupt[k] = round(perf_corrupt[k], 3)
                perf_all[k].append(perf_corrupt[k])
        perf_corrupt['corruption'] = corruption_type
        perf_corrupt['level'] = 'Overall'
_grad()
def evaluate(args, model, criterion, postprocessors, dataloader, support_data_loader, base_ds, device, type='all'):
    """Run few-shot detection evaluation.

    First extracts per-class "category codes" from ``number_of_supports``
    support samplings (averaged when ``args.num_feature_levels == 1``), then
    runs inference over ``dataloader`` and reports COCO-style detection stats,
    optionally restricted to base or novel category ids.

    Args:
        args: namespace with at least ``episode_size``, ``num_feature_levels``
            and ``dataset_file``.
        model: detector, optionally wrapped in DistributedDataParallel.
        criterion: loss module (put in eval mode); supplies ``weight_dict``.
        postprocessors: dict of post-processors; must contain 'bbox'.
        dataloader: query/test data loader.
        support_data_loader: yields (images, class_ids, targets) support batches.
        base_ds: ground-truth dataset for the detection evaluator.
        device: torch device for all inputs.
        type: 'all', 'base' or 'novel' — which category ids to evaluate.

    Returns:
        (stats, evaluator): averaged metric dict and the detection evaluator.
    """
    model.eval()
    criterion.eval()
    support_iter = iter(support_data_loader)
    all_category_codes_final = []
    print('Extracting support category codes...')
    # Number of independent support samplings whose codes are averaged below.
    number_of_supports = 100
    for i in range(number_of_supports):
        try:
            (support_images, support_class_ids, support_targets) = next(support_iter)
        # NOTE(review): bare except restarts the iterator on *any* error, not
        # just StopIteration — consider narrowing.
        except:
            support_iter = iter(support_data_loader)
            (support_images, support_class_ids, support_targets) = next(support_iter)
        # Drop the leading batch dimension (support loader batch size is 1).
        support_images = [support_image.squeeze(0) for support_image in support_images]
        support_class_ids = support_class_ids.squeeze(0).to(device)
        support_targets = [{k: v.squeeze(0) for (k, v) in t.items()} for t in support_targets]
        num_classes = support_class_ids.shape[0]
        # Process the support classes in episodes of args.episode_size.
        num_episode = math.ceil((num_classes / args.episode_size))
        category_codes_final = []
        support_class_ids_final = []
        for i in range(num_episode):
            if ((args.episode_size * (i + 1)) <= num_classes):
                support_images_ = utils.nested_tensor_from_tensor_list(support_images[(args.episode_size * i):(args.episode_size * (i + 1))]).to(device)
                support_targets_ = [{k: v.to(device) for (k, v) in t.items()} for t in support_targets[(args.episode_size * i):(args.episode_size * (i + 1))]]
                support_class_ids_ = support_class_ids[(args.episode_size * i):(args.episode_size * (i + 1))]
            else:
                # Final (partial) episode: take the last episode_size classes,
                # which overlaps with the previous episode.
                support_images_ = utils.nested_tensor_from_tensor_list(support_images[(- args.episode_size):]).to(device)
                support_targets_ = [{k: v.to(device) for (k, v) in t.items()} for t in support_targets[(- args.episode_size):]]
                support_class_ids_ = support_class_ids[(- args.episode_size):]
            if isinstance(model, torch.nn.parallel.DistributedDataParallel):
                category_code = model.module.compute_category_codes(support_images_, support_targets_)
            else:
                category_code = model.compute_category_codes(support_images_, support_targets_)
            category_code = torch.stack(category_code, dim=0)
            category_codes_final.append(category_code)
            support_class_ids_final.append(support_class_ids_)
        support_class_ids_final = torch.cat(support_class_ids_final, dim=0)
        category_codes_final = torch.cat(category_codes_final, dim=1)
        all_category_codes_final.append(category_codes_final)
    if (args.num_feature_levels == 1):
        # Average codes over all support samplings, then split per level.
        all_category_codes_final = torch.stack(all_category_codes_final, dim=0)
        all_category_codes_final = torch.mean(all_category_codes_final, 0, keepdims=False)
        all_category_codes_final = list(torch.unbind(all_category_codes_final, dim=0))
    elif (args.num_feature_levels == 4):
        raise NotImplementedError
    else:
        raise NotImplementedError
    print('Completed extracting category codes. Start Inference...')
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    iou_types = tuple((k for k in ('bbox',) if (k in postprocessors.keys())))
    evaluator = DetectionEvaluator(base_ds, iou_types)
    # Restrict evaluated category ids according to the requested split.
    if (type == 'all'):
        pass
    elif (type == 'base'):
        if (args.dataset_file == 'coco_base'):
            evaluator.coco_eval['bbox'].params.catIds = coco_base_class_ids
        elif (args.dataset_file == 'voc_base1'):
            evaluator.coco_eval['bbox'].params.catIds = voc_base1_class_ids
        elif (args.dataset_file == 'voc_base2'):
            evaluator.coco_eval['bbox'].params.catIds = voc_base2_class_ids
        elif (args.dataset_file == 'voc_base3'):
            evaluator.coco_eval['bbox'].params.catIds = voc_base3_class_ids
        else:
            raise ValueError
    elif (type == 'novel'):
        if ((args.dataset_file == 'coco_base') or (args.dataset_file == 'coco')):
            evaluator.coco_eval['bbox'].params.catIds = coco_novel_class_ids
        elif (args.dataset_file == 'voc_base1'):
            evaluator.coco_eval['bbox'].params.catIds = voc_novel1_class_ids
        elif (args.dataset_file == 'voc_base2'):
            evaluator.coco_eval['bbox'].params.catIds = voc_novel2_class_ids
        elif (args.dataset_file == 'voc_base3'):
            evaluator.coco_eval['bbox'].params.catIds = voc_novel3_class_ids
        else:
            raise ValueError
    else:
        raise ValueError("Type must be 'all', 'base' or 'novel'!")
    print_freq = 50
    for (samples, targets) in metric_logger.log_every(dataloader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for (k, v) in t.items()} for t in targets]
        outputs = model(samples, targets=targets, supp_class_ids=support_class_ids_final, category_codes=all_category_codes_final)
        loss_dict = criterion(outputs)
        weight_dict = criterion.weight_dict
        # Reduce losses over all processes for consistent logging.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        # Post-process to the original image resolution and feed the evaluator.
        orig_target_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        res = {target['image_id'].item(): output for (target, output) in zip(targets, results)}
        if (evaluator is not None):
            evaluator.update(res)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    if (evaluator is not None):
        evaluator.synchronize_between_processes()
    if (evaluator is not None):
        if (type == 'all'):
            print('\n\n\n\n * ALL Categories:')
        elif (type == 'base'):
            print('\n\n\n\n * Base Categories:')
        elif (type == 'novel'):
            print('\n\n\n\n * Novel Categories:')
        else:
            raise ValueError("Type must be 'all', 'base' or 'novel'!")
        evaluator.accumulate()
        evaluator.summarize()
    stats = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    if (evaluator is not None):
        if ('bbox' in postprocessors.keys()):
            stats['coco_eval_bbox'] = evaluator.coco_eval['bbox'].stats.tolist()
    # Free large intermediates before returning (helps GPU memory pressure).
    # NOTE(review): these names are loop-scoped; an empty dataloader would
    # raise NameError here.
    del support_images
    del support_class_ids
    del support_targets
    del samples
    del targets
    del outputs
    del weight_dict
    del loss_dict
    del loss_dict_reduced
    del loss_dict_reduced_scaled
    del loss_dict_reduced_unscaled
    del category_code
    del category_codes_final
    del all_category_codes_final
    del orig_target_sizes
    del res
    del results
    torch.cuda.empty_cache()
    return (stats, evaluator)
_BOX_FEATURE_EXTRACTORS.register('ResNet18Conv5ROIFeatureExtractor')
class ResNet18Conv5ROIFeatureExtractor(nn.Module):
    """ROI feature extractor: RoI pooling followed by a VGG-style FC head.

    ``out_channels`` reflects the 4096-d output of the FC classifier head.
    DropBlock layers are only created when ``config.DB.METHOD == 'dropblock'``.
    """

    def __init__(self, config, in_channels):
        super(ResNet18Conv5ROIFeatureExtractor, self).__init__()
        head_cfg = config.MODEL.ROI_BOX_HEAD
        resolution = head_cfg.POOLER_RESOLUTION
        scales = head_cfg.POOLER_SCALES
        sampling_ratio = head_cfg.POOLER_SAMPLING_RATIO
        self.pooler = Pooler(output_size=(resolution, resolution), scales=scales, sampling_ratio=sampling_ratio)
        self.out_channels = 4096
        if config.DB.METHOD == 'dropblock':
            self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
            self.spatial_dropblock = DropBlock2D(block_size=1, drop_prob=0.3)
        self.classifier = nn.Sequential(
            nn.Linear((7 * 7) * 2048, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize every Linear layer: N(0, 0.01) weights and zero bias."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x, proposals):
        """Pool ROI features and run the FC head; returns (fc_feats, pooled_feats)."""
        pooled = self.pooler(x, proposals)
        flat = torch.flatten(pooled, 1)
        return (self.classifier(flat), pooled)

    def forward_pooler(self, x, proposals):
        """ROI pooling only."""
        return self.pooler(x, proposals)

    def forward_neck(self, x):
        """Run the FC head on already-pooled features."""
        flat = x.view(x.shape[0], -1)
        return self.classifier(flat)

    def forward_dropblock(self, pooled_feat):
        """Apply DropBlock (block_size=3) to pooled features."""
        return self.dropblock(pooled_feat)

    def forward_dropblock_pool(self, pooled_feats):
        """Apply DropBlock (block_size=1) then the FC head."""
        dropped = self.spatial_dropblock(pooled_feats)
        return self.classifier(dropped.view(dropped.shape[0], -1))

    def forward_noise_pool(self, pooled_feats):
        """Perturb pooled features with multiplicative Gaussian noise, then FC head."""
        noise = torch.normal(0, (1 ** 2), size=pooled_feats.shape, device=pooled_feats[0].device)
        noisy = (noise * pooled_feats) + pooled_feats
        return self.classifier(noisy.view(noisy.shape[0], -1))
def visualize_prediction(src_kps, prd_kps, src_img, trg_img, vispath, relaxation=2000):
    """Warp the source image with a keypoint-driven TPS and save it beside the target.

    Args:
        src_kps: source keypoints.
        prd_kps: predicted (target-frame) keypoints.
        src_img: source image tensor (C, H, W).
        trg_img: target image tensor (C, H, W).
        vispath: output image path.
        relaxation: TPS relaxation parameter.
    """
    # Image sizes as (width, height) — tensor dims are (C, H, W).
    src_wh = src_img.size()[1:][::(- 1)]
    trg_wh = trg_img.size()[1:][::(- 1)]
    tps = geometry.ImageTPS(src_kps, prd_kps, src_wh, trg_wh, relaxation)
    warped = ff.to_pil_image(tps(unnorm(src_img.cpu())))
    target = ff.to_pil_image(unnorm(trg_img.cpu()))
    # Side-by-side canvas: warped source on the left, target on the right.
    canvas = Image.new('RGB', (trg_wh[0] * 2, trg_wh[1]))
    canvas.paste(warped, (0, 0))
    canvas.paste(target, (trg_wh[0], 0))
    canvas.save(vispath)
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y block: 1x1 conv, grouped conv, squeeze-excite, 1x1 conv, plus a residual shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1, **kwargs):
        super().__init__(**kwargs)
        # A projection shortcut is required whenever the residual branch
        # changes the channel count or the spatial resolution.
        needs_projection = (in_channels != out_channels) or (stride != 1)
        if needs_projection:
            self.shortcut = TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
        else:
            self.shortcut = tf.keras.layers.Activation('linear', name='shortcut')
        groups = max(1, out_channels // config.groups_width)
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        # Add the (possibly projected) shortcut, then apply the activation.
        hidden_state += self.shortcut(residual)
        return self.activation(hidden_state)
def metric_name_to_print_format(metric_name) -> str:
    """Map a tracking-metric name to its printf-style format string.

    Score-like metrics get 3 decimals, time/distance metrics 2 decimals,
    'faf' gets 1 decimal, and everything else prints as an integer.
    """
    three_decimals = {'amota', 'amotp', 'motar', 'recall', 'mota', 'motp'}
    two_decimals = {'tid', 'lgd'}
    if metric_name in three_decimals:
        return '%.3f'
    if metric_name in two_decimals:
        return '%.2f'
    if metric_name == 'faf':
        return '%.1f'
    return '%d'
def get_approximate_min_distance(x: np.ndarray, axis=0):
    """Cheaply approximate the minimum pairwise distance along one column of ``x``.

    Uses the absolute difference between the first two rows in column ``axis``
    as a proxy for the true minimum distance.

    Args:
        x: 2-D array with at least two rows.
        axis: column index to compare.

    Returns:
        Non-negative scalar distance.
    """
    # Bug fix: the original referenced a global ``x_train`` instead of the
    # ``x`` parameter, silently ignoring the argument.
    approx_min_dist = np.abs((x[(0, axis)] - x[(1, axis)]))
    return approx_min_dist
.parametrize('update,expected', [(None, {}), (['a=5'], {'a': 5}), (['foo.bar=6'], {'foo': {'bar': 6}}), (['a=9', 'b=0'], {'a': 9, 'b': 0}), (["hello='world'"], {'hello': 'world'}), (['hello="world"'], {'hello': 'world'}), (['f=23.5'], {'f': 23.5}), (['n=None'], {'n': None}), (['t=True'], {'t': True}), (['f=False'], {'f': False})])
def test_get_config_updates(update, expected):
    """Each CLI-style assignment parses into the expected nested dict with no named updates."""
    parsed = get_config_updates(update)
    assert parsed == (expected, [])
def compute_scores_and_write_to_csv(target_filepattern, prediction_filepattern, output_filename, scorer, aggregator, delimiter='\n'):
    """Score predictions against targets and write results to a CSV file.

    With an ``aggregator``, per-example scores are folded into aggregate
    statistics before writing; otherwise raw per-example scores are written.
    """
    targets = _glob(target_filepattern)
    predictions = _glob(prediction_filepattern)
    scores = _compute_scores(targets, predictions, scorer, delimiter)
    if not aggregator:
        _write_scores_to_csv(output_filename, scores)
        return
    for score in scores:
        aggregator.add_scores(score)
    _write_aggregates_to_csv(output_filename, aggregator.aggregate())
def test_get_observations_at():
    """Verify sim.get_observations_at reproduces the start-pose observations
    after the agent has moved, and honors keep_agent_at_new_pose."""
    config = get_config()
    if (not os.path.exists(config.SIMULATOR.SCENE)):
        pytest.skip('Please download Habitat test data to data folder.')
    # Configure an RGB+depth agent with no task sensors.
    config.defrost()
    config.TASK.SENSORS = []
    config.SIMULATOR.AGENT_0.SENSORS = ['RGB_SENSOR', 'DEPTH_SENSOR']
    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:
        valid_start_position = [(- 1.3731), 0.08431, 8.60692]
        expected_pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, expected_pointgoal)
        start_rotation = [0, 0, 0, 1]
        # Single fixed episode so the start state is deterministic.
        env.episode_iterator = iter([NavigationEpisode(episode_id='0', scene_id=config.SIMULATOR.SCENE, start_position=valid_start_position, start_rotation=start_rotation, goals=[NavigationGoal(position=goal_position)])])
        obs = env.reset()
        start_state = env.sim.get_agent_state()
        # Step away from the start; whenever the pose changed, the new
        # observations must differ from the start observations.
        for _ in range(100):
            new_obs = env.step(sample_non_stop_action(env.action_space))
            for (key, val) in new_obs.items():
                agent_state = env.sim.get_agent_state()
                if (not (np.allclose(agent_state.position, start_state.position) and np.allclose(agent_state.rotation, start_state.rotation))):
                    assert (not np.allclose(val, obs[key]))
        # Peeking at the start pose must reproduce the original observations.
        obs_at_point = env.sim.get_observations_at(start_state.position, start_state.rotation, keep_agent_at_new_pose=False)
        for (key, val) in obs_at_point.items():
            assert np.allclose(val, obs[key])
        # With keep_agent_at_new_pose=True the agent is left at the queried pose.
        obs_at_point = env.sim.get_observations_at(start_state.position, start_state.rotation, keep_agent_at_new_pose=True)
        for (key, val) in obs_at_point.items():
            assert np.allclose(val, obs[key])
        agent_state = env.sim.get_agent_state()
        assert np.allclose(agent_state.position, start_state.position)
        assert np.allclose(agent_state.rotation, start_state.rotation)
def prettyprint(dct):
    """Pretty-print a dict to stdout: each value indented one tab under its quoted key.

    String values are printed verbatim; all other values via ``repr``.

    Args:
        dct: mapping to display.
    """
    print('{')
    for key, val in dct.items():
        print(" '{}':".format(key))
        # repr(val) instead of val.__repr__() — same output, idiomatic builtin.
        text = val if isinstance(val, str) else repr(val)
        print(textwrap.indent(text, ' \t'))
    print('}')
def test_digits_two_stage_object():
    """MaxCoverageSelection with an explicit TwoStageGreedy optimizer matches the reference ranking, gains and subset."""
    selector = MaxCoverageSelection(100, optimizer=TwoStageGreedy())
    selector.fit(X_digits)
    assert_array_equal(selector.ranking[:4], digits_ranking[:4])
    assert_array_almost_equal(selector.gains[:4], digits_gains[:4], 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def get_mvdr_vector(psd_s: ComplexTensor, psd_n: ComplexTensor, reference_vector: torch.Tensor, eps: float=1e-15) -> ComplexTensor:
    """Compute the MVDR (minimum variance distortionless response) beamforming vector.

    Args:
        psd_s: target-speech PSD matrix; last two dims are (C, C) — presumably
            (..., F, C, C), inferred from the einsum subscripts below.
        psd_n: noise PSD matrix, same shape.  NOTE(review): regularized
            IN PLACE via ``+=`` — the caller's tensor is mutated.
        reference_vector: reference-channel selector of shape (..., C).
        eps: diagonal-loading and trace-regularization constant.

    Returns:
        Beamforming weights with subscripts (..., f, e), i.e. one C-dim
        weight vector per frequency.
    """
    C = psd_n.size((- 1))
    # Diagonal loading: psd_n += eps * I, broadcast over all leading dims.
    eye = torch.eye(C, dtype=psd_n.dtype, device=psd_n.device)
    shape = ([1 for _ in range((psd_n.dim() - 2))] + [C, C])
    eye = eye.view(*shape)
    psd_n += (eps * eye)
    # numerator = psd_n^{-1} @ psd_s
    numerator = FC.einsum('...ec,...cd->...ed', [psd_n.inverse(), psd_s])
    # Normalize by the trace, then select the reference channel's column.
    ws = (numerator / (FC.trace(numerator)[(..., None, None)] + eps))
    beamform_vector = FC.einsum('...fec,...c->...fe', [ws, reference_vector])
    return beamform_vector
def generate_import_column(name, dtype):
    """Build a COPY import-column spec mapping ``name`` onto itself with a converted type."""
    column_spec = {
        'inputColumn': name,
        'inputType': typeConverter(dtype),
        'name': name,
        'operation': 'COPY',
    }
    return column_spec
def main():
    """Entry point: render frames from the image directory and write them as a GIF."""
    cli_args = parse_args()
    rendered_frames = create_frame_by_matplotlib(cli_args.image_dir)
    create_gif(rendered_frames, cli_args.out)
class TaggingDataset(Dataset):
    """Base class for audio-tagging datasets.

    Subclasses implement ``get_audio_id``, ``load_audio`` and ``get_tags``
    and must provide a ``file_list`` attribute backing ``__len__``.
    """

    def __init__(self, root: Union[(str, Path)], audio_transform: Callable=None, subset: Optional[str]='training') -> None:
        super().__init__()
        self.subset = subset
        valid_subsets = ['training', 'validation', 'testing']
        assert (subset is None) or (subset in valid_subsets), ('When `subset` not None, it must take a value from ' + "{'training', 'validation', 'testing'}.")
        self.audio_transform = audio_transform
        self._path = os.fspath(root)
        if not os.path.isdir(self._path):
            raise RuntimeError('Dataset not found.')

    def get_audio_id(self):
        """Return the identifier of a clip (subclass hook)."""
        raise NotImplementedError

    def load_audio(self):
        """Load and return a clip's waveform (subclass hook)."""
        raise NotImplementedError

    def get_tags(self):
        """Return a clip's tag label(s) (subclass hook)."""
        raise NotImplementedError

    def __len__(self) -> int:
        return len(self.file_list)

    def __getitem__(self, n: int) -> Tuple[(Tensor, int, str)]:
        waveform = self.load_audio(n)
        label = self.get_tags(n)
        return (waveform, label)

    # NOTE(review): takes ``cls`` but lacks @classmethod — confirm call sites.
    def num_classes(cls):
        raise NotImplementedError
def _create_proportional_tensor(axis_weights):
axis_sums = [weights.sum() for weights in axis_weights]
total_weight = exp((sum((log(axis_sum) for axis_sum in axis_sums)) / len(axis_sums)))
axis_percentages = [(weights / axis_sum) for (weights, axis_sum) in zip(axis_weights, axis_sums)]
shape = tuple(map(len, axis_percentages))
n_cells = np.prod(shape)
tensor = np.empty(n_cells, np.float64)
axis_percentages.reverse()
for cell_idx in range(n_cells):
remainder = cell_idx
frac = 1.0
for percentages in axis_percentages:
bin_idx = (remainder % len(percentages))
remainder //= len(percentages)
frac *= percentages[bin_idx]
val = (frac * total_weight)
tensor.itemset(cell_idx, val)
return tensor.reshape(shape) |
class BaseDataset(data.Dataset, ABC):
    """Abstract base class for datasets.

    Subclasses should override ``__len__`` and ``__getitem__`` and may extend
    ``modify_commandline_options`` to register dataset-specific CLI flags.
    """

    def __init__(self, opt):
        """Store the option namespace; ``opt.dataroot`` is the dataset root dir."""
        self.opt = opt
        self.root = opt.dataroot

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add dataset-specific options; base implementation returns the parser unchanged.

        Declared ``@staticmethod`` (fix): it takes no ``self``/``cls``, but was
        previously a plain method, so an instance call would have passed the
        instance as ``parser``.  Class-level calls behave exactly as before.
        """
        return parser

    def __len__(self):
        """Number of samples; the base implementation is an empty dataset."""
        return 0

    def __getitem__(self, index):
        """Return one data point; must be implemented by subclasses."""
        pass
def mkdir_if_missing(dirname):
    """Create ``dirname`` (including parents) unless it already exists.

    Tolerates the race where another process creates the directory between
    the existence check and ``makedirs``.
    """
    if osp.exists(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as e:
        # Re-raise anything other than "already exists".
        if e.errno != errno.EEXIST:
            raise
def train(train_loader, model, optimizer):
    """Run one training epoch and return the per-scale average L1 losses.

    Args:
        train_loader: loader whose dataset exposes ``scale_max`` (one loss
            meter is kept per scale).
        model: model taking (inp, coords, sample_coord, cell) and returning a
            list of predictions.
        optimizer: optimizer stepped once per batch.

    Returns:
        List of per-scale averaged loss values (floats).
    """
    model.train()
    loss_fn = nn.L1Loss()
    # One running-average meter per training scale.
    train_loss = [utils.Averager() for _ in range(len(train_loader.dataset.scale_max))]
    # Input/GT normalization constants from the global config.
    data_norm = config['data_norm']
    t = data_norm['inp']
    inp_sub = torch.FloatTensor(t['sub']).view(1, (- 1), 1, 1).cuda()
    inp_div = torch.FloatTensor(t['div']).view(1, (- 1), 1, 1).cuda()
    t = data_norm['gt']
    # NOTE(review): gt_sub/gt_div are built but never used below — the GT is
    # normalized with inp_sub/inp_div instead; confirm intended.
    gt_sub = torch.FloatTensor(t['sub']).view(1, 1, (- 1)).cuda()
    gt_div = torch.FloatTensor(t['div']).view(1, 1, (- 1)).cuda()
    optimizer.zero_grad()
    for (idx, batch) in enumerate(tqdm(train_loader, leave=False, desc='train')):
        # Move every tensor (or list of tensors) in the batch to the GPU.
        # NOTE(review): the inner loops rebind ``idx``, shadowing the batch index.
        for (k, v) in batch.items():
            if isinstance(batch[k], list):
                for idx in range(len(batch[k])):
                    batch[k][idx] = v[idx].cuda()
            elif (v is not None):
                batch[k] = v.cuda()
        bs = batch['inp'].shape[0]
        batch_index = torch.arange(bs).unsqueeze(1)  # NOTE(review): unused
        inp = ((batch['inp'] - inp_sub) / inp_div)
        preds = model(inp, batch['coords'], batch['sample_coord'], batch['cell'])
        losses = 0.0
        for idx in range(len(preds)):
            if (idx == (len(preds) - 1)):
                # Sample normalized GT RGB at the queried coordinates for the
                # final prediction.
                sample_coord = batch['sample_coord']
                sample_coord = sample_coord.unsqueeze(2)
                gt = F.grid_sample(((batch['gt'] - inp_sub) / inp_div), sample_coord.flip((- 1)), mode='nearest', align_corners=False).permute(0, 2, 3, 1)
                gt = gt.reshape(bs, (- 1), 3)
            # NOTE(review): ``gt`` is only assigned on the final iteration;
            # earlier iterations reuse a stale value (or fail on the first
            # batch when len(preds) > 1) — confirm intended supervision.
            loss = loss_fn(preds[idx], gt)
            losses += loss
            train_loss[idx].add(loss.item())
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        # Drop graph references promptly to free GPU memory.
        preds = None
        losses = None
    return [train_loss[idx].item() for idx in range(len(train_loss))]
def eval_func(model):
    """Compile and evaluate ``model`` on the test split; return its accuracy.

    When the global ``test_mode`` is 'performance', also prints latency and
    throughput derived from the wall-clock evaluation time.
    """
    (x_train, y_train, x_test, y_test) = build_dataset()
    tic = time.time()
    model.compile(metrics=['accuracy'], run_eagerly=False)
    score = model.evaluate(x_test, y_test)
    toc = time.time()
    if test_mode == 'performance':
        elapsed = toc - tic
        print('Latency: {:.3f} ms'.format(elapsed * 1000))
        print('Throughput: {:.3f} data/sec'.format(1.0 / elapsed))
    # score = [loss, accuracy]
    return score[1]
def save_data(my_array, my_file_name, chunks_value):
    """Persist ``my_array`` under dataset '/x' of an HDF5 file via dask.

    Uses LZF compression with shuffling; ``chunks_value`` controls the dask
    chunking of the array.
    """
    dask_array = da.from_array(my_array, chunks=chunks_value)
    dask_array.to_hdf5(my_file_name, '/x', compression='lzf', shuffle=True)
    return
_config
def model_lifelong_sidetune_double_resnet_taskonomy():
    # Named-config style function: the local ``cfg`` assignment is captured by
    # the experiment framework (sacred-style) — do not add a return.
    # Configures a LifelongSidetuneNetwork with a frozen TaskonomyEncoder base
    # (baked encodings) and a trainable TaskonomyEncoder side network, both
    # initialized from the curvature encoder checkpoint; training sources are
    # (rgb, curvature_encoding) pairs repeated for every taskonomy task.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'TaskonomyEncoder', 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'base_kwargs': {'eval_only': True, 'train': False, 'normalize_outputs': False}, 'use_baked_encoding': True, 'side_class': 'TaskonomyEncoder', 'side_weights_path': '/mnt/models/curvature_encoder.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}, 'training': {'sources': ([['rgb', 'curvature_encoding']] * N_TASKONOMY_TASKS)}}
_module()
class MaskFormer(SingleStageDetector):
    """Implementation of `Per-Pixel Classification is NOT All You Need for
    Semantic Segmentation <https://arxiv.org/abs/2107.06278>`_."""

    def __init__(self, backbone: ConfigType, neck: OptConfigType=None, panoptic_head: OptConfigType=None, panoptic_fusion_head: OptConfigType=None, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None):
        # Bypass SingleStageDetector.__init__ (no bbox_head here) and run the
        # grandparent BaseDetector initialization directly.
        super(SingleStageDetector, self).__init__(data_preprocessor=data_preprocessor, init_cfg=init_cfg)
        self.backbone = MODELS.build(backbone)
        if (neck is not None):
            self.neck = MODELS.build(neck)
        # Inject train/test cfg into the head configs before building them.
        panoptic_head_ = panoptic_head.deepcopy()
        panoptic_head_.update(train_cfg=train_cfg)
        panoptic_head_.update(test_cfg=test_cfg)
        self.panoptic_head = MODELS.build(panoptic_head_)
        panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
        panoptic_fusion_head_.update(test_cfg=test_cfg)
        self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)
        # Mirror the head's class splits for convenient access.
        self.num_things_classes = self.panoptic_head.num_things_classes
        self.num_stuff_classes = self.panoptic_head.num_stuff_classes
        self.num_classes = self.panoptic_head.num_classes
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Dict[(str, Tensor)]:
        """Compute the loss dict from raw images and their data samples."""
        x = self.extract_feat(batch_inputs)
        losses = self.panoptic_head.loss(x, batch_data_samples)
        return losses

    def predict(self, batch_inputs: Tensor, batch_data_samples: SampleList, rescale: bool=True) -> SampleList:
        """Run inference and attach panoptic/instance predictions to the samples."""
        feats = self.extract_feat(batch_inputs)
        (mask_cls_results, mask_pred_results) = self.panoptic_head.predict(feats, batch_data_samples)
        results_list = self.panoptic_fusion_head.predict(mask_cls_results, mask_pred_results, batch_data_samples, rescale=rescale)
        results = self.add_pred_to_datasample(batch_data_samples, results_list)
        return results

    def add_pred_to_datasample(self, data_samples: SampleList, results_list: List[dict]) -> SampleList:
        """Copy 'pan_results'/'ins_results' from each result dict onto its data sample."""
        for (data_sample, pred_results) in zip(data_samples, results_list):
            if ('pan_results' in pred_results):
                data_sample.pred_panoptic_seg = pred_results['pan_results']
            if ('ins_results' in pred_results):
                data_sample.pred_instances = pred_results['ins_results']
            assert ('sem_results' not in pred_results), 'segmantic segmentation results are not supported yet.'
        return data_samples

    def _forward(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Tuple[List[Tensor]]:
        """Raw head outputs without post-processing (for tracing/flops/export)."""
        feats = self.extract_feat(batch_inputs)
        results = self.panoptic_head.forward(feats, batch_data_samples)
        return results
class TestImagePreprocessing(TestCase):
    """Tests for bigdl.orca.data image readers over the bundled resources.

    Fixture layout (relative to ``../resources``): ``cats/`` and ``dogs/``
    hold 6 images each; ``tsg_salt/images|masks`` hold 5 paired PNGs;
    ``VOCdevkit`` holds a small VOC2007 subset.
    """

    def setup_method(self, method):
        # Resolve the shared test-resources directory next to this test file.
        self.resource_path = os.path.join(os.path.split(__file__)[0], '../resources')

    def test_read_images(self):
        """Default backend yields a shard of PIL images usable in rdd transforms."""
        file_path = os.path.join(self.resource_path, 'cats/')
        data_shard = bigdl.orca.data.read_images(file_path)
        collected = data_shard.collect()
        size = (80, 80)
        resized = data_shard.rdd.map((lambda x: x.resize(size)))
        for im in collected:
            self.assertTrue(isinstance(im, PIL.Image.Image))
        self.assertTrue((data_shard.rdd.count() == 6))
        self.assertTrue((resized.count() == 6))

    def test_read_images_pil(self):
        """Explicit 'pillow' backend behaves like the default."""
        file_path = os.path.join(self.resource_path, 'cats/')
        data_shard = bigdl.orca.data.read_images(file_path, backend='pillow')
        collected = data_shard.collect()
        size = (80, 80)
        resized = data_shard.rdd.map((lambda x: x.resize(size)))
        for im in collected:
            self.assertTrue(isinstance(im, PIL.Image.Image))
        self.assertTrue((data_shard.rdd.count() == 6))
        self.assertTrue((resized.count() == 6))

    def test_read_images_pil_withlabel(self):
        """With a label fn, the pillow backend yields (image, label) tuples."""
        file_path = os.path.join(self.resource_path, 'cats/')

        def get_label(file_name):
            # Label 1 for 'dog' in the basename, else 0 (all cats -> 0).
            label = (1 if ('dog' in file_name.split('/')[(- 1)]) else 0)
            return label
        data_shard = bigdl.orca.data.read_images(file_path, get_label, backend='pillow')
        collected = data_shard.collect()
        size = (80, 80)
        resized = data_shard.rdd.map((lambda x: (x[0].resize(size), x[1])))
        for im in collected:
            self.assertTrue((isinstance(im, tuple) and isinstance(im[0], PIL.Image.Image) and (im[1] == 0)))
        self.assertTrue((data_shard.rdd.count() == 6))
        self.assertTrue((resized.count() == 6))

    def test_read_images_spark(self):
        """'spark' backend also yields PIL images."""
        file_path = os.path.join(self.resource_path, 'cats/')
        data_shard = bigdl.orca.data.read_images(file_path, backend='spark')
        collected = data_shard.collect()
        size = (80, 80)
        resized = data_shard.rdd.map((lambda x: x.resize(size)))
        for im in collected:
            self.assertTrue(isinstance(im, PIL.Image.Image))
        self.assertTrue((data_shard.rdd.count() == 6))
        self.assertTrue((resized.count() == 6))

    def test_read_images_spark_withlabel(self):
        """'spark' backend with a label fn on the dogs folder (all labels 1)."""
        file_path = os.path.join(self.resource_path, 'dogs/')

        def get_label(file_name):
            label = (1 if ('dog' in file_name.split('/')[(- 1)]) else 0)
            return label
        data_shard = bigdl.orca.data.read_images(file_path, get_label, backend='spark')
        collected = data_shard.collect()
        for im in collected:
            self.assertTrue((isinstance(im, tuple) and isinstance(im[0], PIL.Image.Image) and (im[1] == 1)))
        self.assertTrue((data_shard.rdd.count() == 6))

    def test_read_images_pil_with_masks(self):
        """Paired image/mask reading yields (image, mask) PIL tuples."""
        image_path = os.path.join(self.resource_path, 'tsg_salt/images')
        target_path = os.path.join(self.resource_path, 'tsg_salt/masks')
        data_shard = bigdl.orca.data.read_images(image_path, target_path=target_path, image_type='.png', target_type='.png')
        print(len(data_shard))
        collected = data_shard.collect()
        for im in collected:
            self.assertTrue((isinstance(im, tuple) and isinstance(im[0], PIL.Image.Image) and isinstance(im[1], PIL.Image.Image)))
        self.assertTrue((data_shard.rdd.count() == 5))

    def test_read_images_spark_with_masks(self):
        """Same as above via the 'spark' backend."""
        image_path = os.path.join(self.resource_path, 'tsg_salt/images')
        target_path = os.path.join(self.resource_path, 'tsg_salt/masks')
        data_shard = bigdl.orca.data.read_images(image_path, target_path=target_path, image_type='.png', target_type='.png', backend='spark')
        print(len(data_shard))
        collected = data_shard.collect()
        for im in collected:
            self.assertTrue((isinstance(im, tuple) and isinstance(im[0], PIL.Image.Image) and isinstance(im[1], PIL.Image.Image)))
        self.assertTrue((data_shard.rdd.count() == 5))

    def test_read_voc(self):
        """read_voc yields (image, ndarray-annotation) tuples, capped by max_samples."""
        from bigdl.orca.data.image.preprocessing import read_voc
        image_path = os.path.join(self.resource_path, 'VOCdevkit')
        data_shard = read_voc(image_path, split_names=[(2007, 'trainval')], max_samples=5)
        collected = data_shard.collect()
        print(collected)
        for im in collected:
            self.assertTrue((isinstance(im, tuple) and isinstance(im[0], PIL.Image.Image) and isinstance(im[1], np.ndarray)))
        self.assertTrue((data_shard.rdd.count() == 5))
def parse_attributes_section(self, section):
    """Parse an 'Attributes' docstring section into formatted field lines."""
    fields = self._consume_fields()
    return self._format_fields('Attributes', fields)
def parse_args():
    """Parse CLI arguments for MMDet detector testing.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment variable
    (when unset) so distributed launchers and plain invocations agree.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--json_out', help='output result file name without extension', type=str)
    parser.add_argument('--eval', type=str, nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Only set LOCAL_RANK when the launcher has not already provided one.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def parse_args():
    """Parse CLI options for generating COCO semi-supervised annotation splits."""
    parser = argparse.ArgumentParser()
    options = [
        ('--data-root', dict(type=str, help='The data root of coco dataset.', default='./data/coco/')),
        ('--out-dir', dict(type=str, help='The output directory of coco semi-supervised annotations.', default='./data/coco/semi_anns/')),
        ('--labeled-percent', dict(type=float, nargs='+', help='The percentage of labeled data in the training set.', default=[1, 2, 5, 10])),
        ('--fold', dict(type=int, help='K-fold cross validation for semi-supervised object detection.', default=5)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
class OffsetGenerator():
    """Precomputes pairwise token-offset vectors for a square patch grid.

    State lives on the class itself (``cls.qk_vec``): call ``initialize``
    once before ``get_qk_vec``.
    """

    @classmethod
    def initialize(cls, n_patch_side, pad_size):
        """Build the (n_tokens, n_tokens, 2) table of key-minus-query grid offsets.

        Fix: both methods take ``cls`` but were plain functions, so the
        intended ``OffsetGenerator.initialize(n, p)`` call passed ``n`` as
        ``cls``; ``@classmethod`` restores class-level invocation.

        Args:
            n_patch_side: patches per image side (before padding).
            pad_size: extra grid rings extrapolated beyond [-1, 1] on each side.
        """
        # Normalized 1-D grid coordinates in [-1, 1].
        # NOTE(review): hard-coded 'cuda' device — fails on CPU-only hosts.
        grid_1d = torch.linspace((- 1), 1, n_patch_side).to('cuda')
        if (pad_size > 0):
            # Extrapolate outward with the same spacing on both ends.
            pad_dist = torch.cumsum((grid_1d[(- 1)] - grid_1d[(- 2)]).repeat(pad_size), dim=0)
            grid_1d = torch.cat([((- 1) - pad_dist).flip(dims=[0]), grid_1d, (1 + pad_dist)])
            n_patch_side += (pad_size * 2)
        n_tokens = (n_patch_side ** 2)
        # 2-D (y, x) coordinates for every token, flattened row-major.
        grid_y = grid_1d.view((- 1), 1).repeat(1, n_patch_side)
        grid_x = grid_1d.view(1, (- 1)).repeat(n_patch_side, 1)
        grid = torch.stack([grid_y, grid_x], dim=(- 1)).view((- 1), 2)
        # Broadcast to all (query, key) pairs and cache their offset vectors.
        grid_q = grid.view((- 1), 1, 2).repeat(1, n_tokens, 1)
        grid_k = grid.view(1, (- 1), 2).repeat(n_tokens, 1, 1)
        cls.qk_vec = (grid_k - grid_q)

    @classmethod
    def get_qk_vec(cls):
        """Return a copy of the cached offset table (requires a prior initialize)."""
        return cls.qk_vec.clone()
class SingleSTG(nn.Module):
    """Stochastic-gates feature selector followed by a two-layer MLP encoder."""

    def __init__(self, input_dim, hidden_dim, sigma):
        super(SingleSTG, self).__init__()
        self.gate = FeatureSelector(input_dim, sigma)
        mlp_layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
        ]
        self.to_latent = nn.Sequential(*mlp_layers)

    def forward(self, x):
        """Gate the features stochastically, then encode."""
        gated = self.gate(x)
        return self.to_latent(gated)

    def test(self, x):
        """Deterministic (test-time) gating, then encode."""
        gated = self.gate.test(x)
        return self.to_latent(gated)
(version='2.0')
class Quantization(Component):
    def __init__(self, conf_fname_or_obj=None):
        """Initialize the quantization component.

        Args:
            conf_fname_or_obj: a ``QuantConf`` instance (used as-is), a
                ``Config`` object (mapped onto a fresh ``QuantConf``), or a
                YAML config path / ``None`` (handed to ``QuantConf`` directly).
        """
        super(Quantization, self).__init__()
        if isinstance(conf_fname_or_obj, QuantConf):
            self.conf = conf_fname_or_obj
        elif isinstance(conf_fname_or_obj, Config):
            self.conf = QuantConf()
            self.conf.map_pyconfig_to_cfg(conf_fname_or_obj)
        else:
            self.conf = QuantConf(conf_fname_or_obj)
        self._init_with_conf()
        # Seed Python and NumPy RNGs so tuning runs are reproducible.
        seed = self.cfg.tuning.random_seed
        random.seed(seed)
        np.random.seed(seed)
        self._calib_dataloader = None
        self._calib_func = None
def _create_eval_dataloader(self, cfg):
if (self._eval_func is None):
if (self._eval_dataloader is None):
eval_dataloader_cfg = deep_get(cfg, 'evaluation.accuracy.dataloader')
if (eval_dataloader_cfg is None):
logger.info("Because both eval_dataloader_cfg and user-defined eval_func are None, automatically setting 'tuning.exit_policy.performance_only = True'.")
deep_set(cfg, 'tuning.exit_policy.performance_only', True)
logger.info('The cfg.tuning.exit_policy.performance_only is: {}'.format(cfg.tuning.exit_policy.performance_only))
else:
if ((deep_get(cfg, 'evaluation.accuracy.iteration') == (- 1)) and ('dummy_v2' in deep_get(cfg, 'evaluation.accuracy.dataloader.dataset', {}))):
deep_set(cfg, 'evaluation.accuracy.iteration', 10)
self._eval_dataloader = create_dataloader(self.framework, eval_dataloader_cfg)
if (os.environ.get('PERFORMANCE_ONLY') in ['0', '1']):
performance_only = bool(int(os.environ.get('PERFORMANCE_ONLY')))
deep_set(cfg, 'tuning.exit_policy.performance_only', performance_only)
logger.info("Get environ 'PERFORMANCE_ONLY={}', force setting 'tuning.exit_policy.performance_only = True'.".format(performance_only))
def _create_calib_dataloader(self, cfg):
approach_cfg = deep_get(cfg, 'quantization.approach')
if ((self._calib_dataloader is None) and (self._calib_func is None)):
if (approach_cfg in ['post_training_static_quant', 'post_training_auto_quant']):
calib_dataloader_cfg = deep_get(cfg, 'quantization.calibration.dataloader')
if ((approach_cfg == 'post_training_auto_quant') and (calib_dataloader_cfg is None)):
logger.error("dataloader is required for 'post_training_auto_quant'. use 'post_training_dynamic_quant' instead if no dataloader provided.")
assert (calib_dataloader_cfg is not None), 'dataloader field of calibration field of quantization section in yaml file should be configured as calib_dataloader property is NOT set!'
if deep_get(calib_dataloader_cfg, 'shuffle'):
logger.warning('Reset `shuffle` field to False when post_training_static_quant is selected.')
deep_set(calib_dataloader_cfg, 'shuffle', False)
elif (approach_cfg == 'quant_aware_training'):
calib_dataloader_cfg = deep_get(cfg, 'quantization.train.dataloader')
assert (calib_dataloader_cfg is not None), 'dataloader field of train field of quantization section in yaml file should be configured as calib_dataloader property is NOT set!'
else:
calib_dataloader_cfg = None
if calib_dataloader_cfg:
self._calib_dataloader = create_dataloader(self.framework, calib_dataloader_cfg)
def pre_process(self):
cfg = self.conf.usr_cfg
assert isinstance(self._model, BaseModel), 'need set your Model for quantization....'
self._create_eval_dataloader(cfg)
self._create_calib_dataloader(cfg)
strategy = cfg.tuning.strategy.name.lower()
if (cfg.quantization.quant_level == 0):
strategy = 'conservative'
logger.info('On the premise that the accuracy meets the conditions, improve the performance.')
if (strategy == 'mse_v2'):
if (not (self.framework.startswith('tensorflow') or (self.framework == 'pytorch_fx'))):
strategy = 'basic'
logger.warning(f'MSE_v2 does not support {self.framework} now, use basic instead.')
logger.warning('Only tensorflow, pytorch_fx is supported by MSE_v2 currently.')
assert (strategy in EXP_STRATEGIES), 'Tuning strategy {} is NOT supported'.format(strategy)
_resume = None
self.resume_file = (os.path.abspath(os.path.expanduser(cfg.tuning.workspace.resume)) if (cfg.tuning.workspace and cfg.tuning.workspace.resume) else None)
if self.resume_file:
assert os.path.exists(self.resume_file), "The specified resume file {} doesn't exist!".format(self.resume_file)
with open(self.resume_file, 'rb') as f:
_resume = pickle.load(f).__dict__
self.strategy = EXP_STRATEGIES[strategy](self._model, self.conf, self._calib_dataloader, self._calib_func, self._eval_dataloader, self._eval_func, _resume, self.hooks)
if getattr(self._calib_dataloader, 'distributed', False):
self.register_hook('on_train_begin', self.strategy.adaptor._pre_hook_for_hvd)
def execute(self):
try:
with time_limit(self.conf.usr_cfg.tuning.exit_policy.timeout):
logger.debug('Dump user yaml configuration:')
logger.debug(self.conf.usr_cfg)
self.strategy.traverse()
except KeyboardInterrupt:
pass
except Exception as e:
logger.error('Unexpected exception {} happened during tuning.'.format(repr(e)))
import traceback
traceback.print_exc()
finally:
if self.strategy.best_qmodel:
logger.info('Specified timeout or max trials is reached! Found a quantized model which meet accuracy goal. Exit.')
self.strategy.deploy_config()
else:
logger.error('Specified timeout or max trials is reached! Not found any quantized model which meet accuracy goal. Exit.')
return self.strategy.best_qmodel
def __call__(self):
return super(Quantization, self).__call__()
fit = __call__
def dataset(self, dataset_type, *args, **kwargs):
from ..data import Datasets
return Datasets(self.framework)[dataset_type](*args, **kwargs)
def calib_dataloader(self):
return self._calib_dataloader
_dataloader.setter
def calib_dataloader(self, dataloader):
from .common import _generate_common_dataloader
self._calib_dataloader = _generate_common_dataloader(dataloader, self.framework)
def metric(self):
assert False, 'Should not try to get the value of `metric` attribute.'
return None
def metric(self, user_metric):
if deep_get(self.conf.usr_cfg, 'evaluation.accuracy.metric'):
logger.warning('Override the value of `metric` field defined in yaml file as user defines the value of `metric` attribute by code.')
from ..metric import METRICS
from .common import Metric as NCMetric
if isinstance(user_metric, NCMetric):
name = user_metric.name
metric_cls = user_metric.metric_cls
metric_cfg = {name: {**user_metric.kwargs}}
else:
for i in ['reset', 'update', 'result']:
assert hasattr(user_metric, i), 'Please realise {} functionin user defined metric'.format(i)
metric_cls = type(user_metric).__name__
name = ('user_' + metric_cls)
metric_cfg = {name: id(user_metric)}
deep_set(self.conf.usr_cfg, 'evaluation.accuracy.metric', metric_cfg)
self.conf.usr_cfg = DotDict(self.conf.usr_cfg)
metrics = METRICS(self.framework)
metrics.register(name, metric_cls)
self._metric = user_metric
def objective(self):
assert False, 'Should not try to get the value of `objective` attribute.'
return None
def objective(self, user_objective):
if (deep_get(self.conf.usr_cfg, 'tuning.multi_objectives.objective') or deep_get(self.conf.usr_cfg, 'tuning.objective')):
logger.warning('Override the value of `objective` field defined in yaml file as user defines the value of `objective` attribute by code.')
user_obj_cfg = ('tuning.objective' if deep_get(self.conf.usr_cfg, 'tuning.objective') else 'tuning.multi_objectives.objective')
from ..objective import objective_custom_registry
objective_cls = type(user_objective)
name = user_objective.__class__.__name__
objective_cfg = (name if deep_get(self.conf.usr_cfg, 'tuning.objective') else [name])
deep_set(self.conf.usr_cfg, user_obj_cfg, objective_cfg)
self.conf.usr_cfg = DotDict(self.conf.usr_cfg)
objective_custom_registry(name, objective_cls)
def postprocess(self, user_postprocess):
assert False, 'Should not try to get the value of `postprocess` attribute.'
return None
def postprocess(self, user_postprocess):
from .common import Postprocess as NCPostprocess
assert isinstance(user_postprocess, NCPostprocess), 'please initialize a neural_compressor.experimental.common.Postprocess and set....'
postprocess_cfg = {user_postprocess.name: {**user_postprocess.kwargs}}
if deep_get(self.conf.usr_cfg, 'evaluation.accuracy.postprocess'):
logger.warning('Override the value of `postprocess` field defined in yaml file as user defines the value of `postprocess` attribute by code.')
deep_set(self.conf.usr_cfg, 'evaluation.accuracy.postprocess.transform', postprocess_cfg)
from neural_compressor.data import TRANSFORMS
postprocesses = TRANSFORMS(self.framework, 'postprocess')
postprocesses.register(user_postprocess.name, user_postprocess.postprocess_cls)
def q_func(self):
assert False, 'Should not try to get the value of `q_func` attribute.'
return None
_func.setter
def q_func(self, user_q_func):
self._calib_func = user_q_func
calib_func = q_func
def model(self):
return self._model
def model(self, user_model):
approach_cfg = deep_get(self.cfg, 'quantization.approach')
if (not self.framework):
self.framework = get_model_fwk_name(user_model)
if ((self.framework == 'tensorflow') and (approach_cfg == 'quant_aware_training')):
if (type(user_model) == str):
self._model = TensorflowQATModel(user_model)
else:
self._model = TensorflowQATModel(user_model._model)
else:
Component.model.__set__(self, user_model)
def __repr__(self):
return 'Quantization' |
class TestAutoRoundLinear(unittest.TestCase):
    """Smoke tests for AutoRound weight-only quantization on opt-125m.

    FIX: ``setUpClass`` must be a classmethod — unittest invokes it as
    ``cls.setUpClass()`` with no instance, so the original plain
    ``def setUpClass(self)`` raised TypeError and the fixtures were never
    created. Also renamed the local ``round`` which shadowed the builtin.
    """

    @classmethod
    def setUpClass(cls):
        # Shared, read-only fixtures: load the model/tokenizer once per class.
        model_name = 'facebook/opt-125m'
        cls.model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, torch_dtype='auto', trust_remote_code=True)
        cls.model = cls.model.eval()
        cls.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

    def test_signround(self):
        # Tiny run (few iters, short seq) just to exercise the quantize path.
        autoround = AutoRound(self.model, self.tokenizer, device='cpu', iters=5, seqlen=8, n_samples=1, group_size=7)
        autoround.quantize()

    def test_Adamround(self):
        autoround = AutoOPTRound(self.model, self.tokenizer, device='cpu', iters=2, seqlen=8, n_samples=1, scheme='sym')
        autoround.quantize()
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), name=None):
    """Conv2D -> BatchNormalization -> ReLU building block.

    Args:
        x: input tensor.
        filters: number of convolution filters.
        num_row, num_col: kernel height and width.
        padding: padding mode passed to Conv2D.
        strides: convolution strides.
        name: optional base name; '_conv' / '_bn' suffixes are derived for the
            sub-layers and the activation keeps the bare name.

    Returns:
        Output tensor after convolution, batch norm, and ReLU activation.
    """
    conv_name = None if name is None else name + '_conv'
    bn_name = None if name is None else name + '_bn'
    # Normalize over the channel axis, which depends on the data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    out = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding,
                 use_bias=False, name=conv_name)(x)
    out = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(out)
    return Activation('relu', name=name)(out)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.