code stringlengths 101 5.91M |
|---|
def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
    """Test whether a matrix is positive semidefinite (PSD).

    A matrix is PSD iff it is Hermitian and all of its eigenvalues are
    non-negative (within ``atol``).

    Args:
        mat: matrix-like object to test.
        rtol: relative tolerance (``RTOL_DEFAULT`` when None).
        atol: absolute tolerance (``ATOL_DEFAULT`` when None).

    Returns:
        bool: True if ``mat`` is positive semidefinite.
    """
    if atol is None:
        atol = ATOL_DEFAULT
    if rtol is None:
        rtol = RTOL_DEFAULT
    # A PSD matrix must first be Hermitian; this also rules out non-square input.
    if not is_hermitian_matrix(mat, rtol=rtol, atol=atol):
        return False
    # eigvalsh is valid because the matrix is Hermitian. A single vectorized
    # comparison replaces the original element-by-element Python loop.
    vals = np.linalg.eigvalsh(mat)
    return bool(np.all(vals >= -atol))
def _test():
    """Smoke-test the RoR-3 variants: parameter counts, forward and backward."""
    import torch
    pretrained = False
    # (constructor, expected number of classes) pairs.
    models = [
        (ror3_56_cifar10, 10),
        (ror3_56_cifar100, 100),
        (ror3_56_svhn, 10),
        (ror3_110_cifar10, 10),
        (ror3_110_cifar100, 100),
        (ror3_110_svhn, 10),
        (ror3_164_cifar10, 10),
        (ror3_164_cifar100, 100),
        (ror3_164_svhn, 10),
    ]
    # Expected trainable-parameter count per constructor.
    expected_counts = {
        ror3_56_cifar10: 762746,
        ror3_56_cifar100: 768596,
        ror3_56_svhn: 762746,
        ror3_110_cifar10: 1637690,
        ror3_110_cifar100: 1643540,
        ror3_110_svhn: 1637690,
        ror3_164_cifar10: 2512634,
        ror3_164_cifar100: 2518484,
        ror3_164_svhn: 2512634,
    }
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        assert weight_count == expected_counts[model]
        # One forward/backward pass on a CIFAR-sized input.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, num_classes)
@_UTILS.register_module()
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining (OHEM) sampler.

    When more positive/negative candidates exist than requested, keeps the
    candidates with the highest classification loss ("hard" examples), as
    measured by a no-grad forward pass through the bbox head.

    Bug fix: ``_UTILS.register_module()`` was a bare statement in the original
    source, which registers nothing; it must decorate the class.

    Args:
        num: total number of samples.
        pos_fraction: fraction of samples that should be positive.
        context: detection head context (single-stage, or cascade when it
            exposes ``num_stages``/``current_stage``).
        neg_pos_ub: upper bound on the negative:positive ratio.
        add_gt_as_proposals: whether to add ground truths as proposals.
        loss_key: key of the loss dict used to rank hardness.
    """

    def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add_gt_as_proposals=True, loss_key='loss_cls', **kwargs):
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        self.context = context
        # Cascade heads (context with `num_stages`) keep one bbox head per stage.
        if not hasattr(self.context, 'num_stages'):
            self.bbox_head = self.context.bbox_head
        else:
            self.bbox_head = self.context.bbox_head[self.context.current_stage]
        self.loss_key = loss_key

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Return the ``num_expected`` entries of ``inds`` with highest loss."""
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            if not hasattr(self.context, 'num_stages'):
                bbox_results = self.context._bbox_forward(feats, rois)
            else:
                bbox_results = self.context._bbox_forward(self.context.current_stage, feats, rois)
            cls_score = bbox_results['cls_score']
            # Per-RoI classification loss (reduction 'none') ranks hardness.
            loss = self.bbox_head.loss(cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')[self.loss_key]
            _, topk_loss_inds = loss.topk(num_expected)
        return inds[topk_loss_inds]

    def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Sample up to ``num_expected`` positives; hard-mine when over-subscribed."""
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats)

    def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
        """Sample up to ``num_expected`` negatives; hard-mine when over-subscribed."""
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        # All negatives share the "background" label (== num_classes).
        neg_labels = assign_result.labels.new_empty(neg_inds.size(0)).fill_(self.bbox_head.num_classes)
        return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats)
def enable_dropout(m: torch.nn.Module) -> None:
    """Switch every Dropout submodule inside 'LinearBlock' modules to train mode.

    Leaves the rest of the model untouched (e.g. for MC-dropout style use).
    """
    linear_blocks = (mod for mod in m.modules() if type(mod).__name__ == 'LinearBlock')
    for block in linear_blocks:
        for sub in block.modules():
            if type(sub).__name__.startswith('Dropout'):
                sub.train()
def disable_running_stats(model):
    """Freeze batch-norm running statistics by zeroing their momentum.

    The previous momentum is stashed on each module as ``backup_momentum``
    so it can be restored later.
    """
    def _disable(module):
        if not isinstance(module, _BatchNorm):
            return
        module.backup_momentum = module.momentum
        module.momentum = 0

    model.apply(_disable)
def test(model, target_test_loader):
    """Evaluate ``model`` on ``target_test_loader`` and return its accuracy.

    Args:
        model: torch module producing class logits.
        target_test_loader: DataLoader yielding (data, target) batches.

    Returns:
        Accuracy in [0, 1] as a double tensor.
    """
    model.eval()
    correct = 0
    # Compute the dataset length once (it was previously recomputed — and the
    # per-batch CrossEntropyLoss was computed but never used; both removed).
    len_target_dataset = len(target_test_loader.dataset)
    with torch.no_grad():
        for data, target in target_test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            s_output = model(data)
            pred = torch.max(s_output, 1)[1]
            correct += torch.sum(pred == target)
    acc = correct.double() / len_target_dataset
    return acc
def minimizer_local(args):
    """Minimise the local posterior with Nelder-Mead.

    Args:
        args: tuple of (changing_parameter, identifier, global_parameters,
            errors, elements).

    Returns:
        The optimised parameter vector (``res.x``).
    """
    from scipy.optimize import minimize
    from .cem_function import posterior_function_local_for_minimization
    from .parameter import ModelParameters

    model_params = ModelParameters()
    changing_parameter, identifier, global_parameters, errors, elements = args
    result = minimize(
        fun=posterior_function_local_for_minimization,
        x0=changing_parameter,
        args=(identifier, global_parameters, errors, elements),
        method='Nelder-Mead',
        tol=model_params.tol_minimization,
        options={'maxiter': model_params.maxiter_minimization},
    )
    if model_params.verbose:
        print(result.message)
    return result.x
class ConsistencyModelPipeline(DiffusionPipeline):
    """Pipeline for unconditional or class-conditional image generation with
    consistency models, driven by a CM stochastic iterative scheduler.
    """

    model_cpu_offload_seq = 'unet'

    def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
        # Consistency models ship without a safety checker.
        self.safety_checker = None

    def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None):
        """Create (or move) the initial latents and scale them by the
        scheduler's initial noise sigma."""
        shape = (batch_size, num_channels, height, width)
        if isinstance(generator, list) and (len(generator) != batch_size):
            raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.')
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device=device, dtype=dtype)
        # Scale to the noise level the scheduler expects at step 0.
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def postprocess_image(self, sample: torch.FloatTensor, output_type: str='pil'):
        """Map raw samples from [-1, 1] to the requested output format
        ('pt' tensor, 'np' array, or 'pil' images)."""
        if output_type not in ['pt', 'np', 'pil']:
            raise ValueError(f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']")
        # [-1, 1] -> [0, 1]
        sample = ((sample / 2) + 0.5).clamp(0, 1)
        if output_type == 'pt':
            return sample
        # NCHW -> NHWC numpy
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == 'np':
            return sample
        sample = self.numpy_to_pil(sample)
        return sample

    def prepare_class_labels(self, batch_size, device, class_labels=None):
        """Normalise ``class_labels`` to an int tensor on ``device`` for
        class-conditional UNets; draws random labels when None. Returns None
        for unconditional models."""
        if self.unet.config.num_class_embeds is not None:
            if isinstance(class_labels, list):
                class_labels = torch.tensor(class_labels, dtype=torch.int)
            elif isinstance(class_labels, int):
                assert batch_size == 1, 'Batch size must be 1 if classes is an int'
                class_labels = torch.tensor([class_labels], dtype=torch.int)
            elif class_labels is None:
                # No labels requested: sample them uniformly at random.
                class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,))
            class_labels = class_labels.to(device)
        else:
            class_labels = None
        return class_labels

    def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps):
        """Validate call arguments; raises ValueError on inconsistencies."""
        if (num_inference_steps is None) and (timesteps is None):
            raise ValueError('Exactly one of `num_inference_steps` or `timesteps` must be supplied.')
        if (num_inference_steps is not None) and (timesteps is not None):
            logger.warning(f'Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied; `timesteps` will be used over `num_inference_steps`.')
        if latents is not None:
            expected_shape = (batch_size, 3, img_size, img_size)
            if latents.shape != expected_shape:
                raise ValueError(f'The shape of latents is {latents.shape} but is expected to be {expected_shape}.')
        if (callback_steps is None) or ((callback_steps is not None) and ((not isinstance(callback_steps, int)) or (callback_steps <= 0))):
            raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.')

    # Bug fix: these two calls were bare statements in the original source,
    # which would execute (and fail) at class-definition time — they must
    # decorate __call__ (no-grad execution + example-docstring injection).
    @_grad()
    @_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, batch_size: int=1, class_labels: Optional[Union[(torch.Tensor, List[int], int)]]=None, num_inference_steps: int=1, timesteps: List[int]=None, generator: Optional[Union[(torch.Generator, List[torch.Generator])]]=None, latents: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[([int, int, torch.FloatTensor], None)]]=None, callback_steps: int=1):
        """Generate images.

        Returns an ImagePipelineOutput (or a plain tuple when
        ``return_dict=False``).
        """
        img_size = self.unet.config.sample_size
        device = self._execution_device
        self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps)
        sample = self.prepare_latents(batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents)
        class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels)
        # Custom timesteps take precedence over num_inference_steps.
        if timesteps is not None:
            self.scheduler.set_timesteps(timesteps=timesteps, device=device)
            timesteps = self.scheduler.timesteps
            num_inference_steps = len(timesteps)
        else:
            self.scheduler.set_timesteps(num_inference_steps)
            timesteps = self.scheduler.timesteps
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                scaled_sample = self.scheduler.scale_model_input(sample, t)
                model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0]
                sample = self.scheduler.step(model_output, t, sample, generator=generator)[0]
                progress_bar.update()
                if (callback is not None) and ((i % callback_steps) == 0):
                    callback(i, t, sample)
        image = self.postprocess_image(sample, output_type=output_type)
        # Release any CPU-offload hooks before returning.
        self.maybe_free_model_hooks()
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
def _locobot_camera_action_space():
    """Action space for the LoCoBot camera: pan, tilt, and joint pan/tilt."""
    def _unbounded(n):
        # Unbounded continuous command of dimension n.
        return spaces.Box(low=-np.inf, high=np.inf, shape=(n,))

    return spaces.Dict({
        'set_pan': _unbounded(1),
        'set_tilt': _unbounded(1),
        'set_pan_tilt': _unbounded(2),
    })
def get_run_nums(ex_dir):
    """List run entries under ``ex_dir``, skipping bookkeeping directories."""
    excluded = {'_sources', '.ipynb_checkpoints'}
    return [entry for entry in os.listdir(ex_dir) if entry not in excluded]
class RopchainJob(job_class):
    """Job that builds a ROP chain for a binary using the `ropper` tool."""

    def __init__(self):
        super().__init__()
        # Record which script defines this job.
        self.script_file = __file__
        self.rop_tool = 'ropper'

    def run_rop_tool(self):
        """Locate a read/write section and run ropper against the binary."""
        rw_address = self.find_rw_section(self.binary)
        tool = Ropper(self.binary, self.input, self, self.ropchain, self.bad_chars)
        tool.run(self.timeout)
def convert_net(net_name, conf, enable_micro):
    """Convert a model described by ``conf`` into a MACE graph definition.

    Builds a ConverterOption from the model config, runs the platform-specific
    front-end converter (TensorFlow/Caffe/ONNX/MegEngine/Keras/PyTorch),
    applies the MACE graph transformer, and finally runs a device-specific
    back-end converter for Hexagon/HTA/APU runtimes.

    Args:
        net_name: network name, used as the option name.
        conf: dict-like model configuration keyed by ModelKeys entries.
        enable_micro: whether MACE Micro conversion is enabled.

    Returns:
        The converted (and transformed) output graph definition.
    """
    option = cvt.ConverterOption()
    option.name = net_name
    option.order = conf.get(ModelKeys.order, 0)
    if (ModelKeys.quantize_stat in conf):
        option.quantize_stat = conf[ModelKeys.quantize_stat]
    else:
        option.quantize_stat = False
    # Copy optional conversion switches straight from the config when present.
    if (ModelKeys.graph_optimize_options in conf):
        option.transformer_option = conf[ModelKeys.graph_optimize_options]
    if (ModelKeys.winograd in conf):
        option.winograd = conf[ModelKeys.winograd]
    if (ModelKeys.quantize in conf):
        option.quantize = conf[ModelKeys.quantize]
    if (ModelKeys.quantize_schema in conf):
        option.quantize_schema = conf[ModelKeys.quantize_schema]
    if (ModelKeys.quantize_large_weights in conf):
        option.quantize_large_weights = conf[ModelKeys.quantize_large_weights]
    if (ModelKeys.quantize_range_file in conf):
        option.quantize_range_file = conf[ModelKeys.quantize_range_file]
    if (ModelKeys.change_concat_ranges in conf):
        option.change_concat_ranges = conf[ModelKeys.change_concat_ranges]
    if (ModelKeys.cl_mem_type in conf):
        option.cl_mem_type = conf[ModelKeys.cl_mem_type]
    if (ModelKeys.platform in conf):
        option.platform = conf[ModelKeys.platform]
    if (ModelKeys.runtime in conf):
        option.device = conf[ModelKeys.runtime]
        if (option.device == DeviceType.CPU_GPU):
            # CPU_GPU models are converted with the CPU device setting.
            option.device = DeviceType.CPU
        option.device = option.device.value
    if option.quantize_stat:
        # Statistics collection runs on the float graph, so disable quantize.
        option.quantize = False
    option.enable_micro = enable_micro
    option.data_type = conf[ModelKeys.data_type]
    # Describe every input tensor (name/alias/shape/dtype/format/range).
    for i in range(len(conf[ModelKeys.input_tensors])):
        input_node = cvt.NodeInfo()
        input_node.name = conf[ModelKeys.input_tensors][i]
        if (ModelKeys.input_aliases in conf):
            input_node.alias = conf[ModelKeys.input_aliases][i]
        else:
            input_node.alias = input_node.name
        input_node.shape = conf[ModelKeys.input_shapes][i]
        input_node.data_type = conf[ModelKeys.input_data_types][i]
        input_node.data_format = conf[ModelKeys.input_data_formats][i]
        if ((input_node.data_format == DataFormat.NCHW) and (len(input_node.shape) == 4)):
            # MACE runs NHWC internally; transpose 4-D NCHW shapes.
            input_node.shape = transpose_shape(input_node.shape, [0, 2, 3, 1])
            input_node.data_format = DataFormat.NHWC
        input_node.range = conf[ModelKeys.input_ranges][i]
        option.add_input_node(input_node)
    # Describe every output tensor symmetrically.
    for i in range(len(conf[ModelKeys.output_tensors])):
        output_node = cvt.NodeInfo()
        output_node.name = conf[ModelKeys.output_tensors][i]
        if (ModelKeys.output_aliases in conf):
            output_node.alias = conf[ModelKeys.output_aliases][i]
        else:
            output_node.alias = output_node.name
        output_node.shape = conf[ModelKeys.output_shapes][i]
        output_node.data_type = conf[ModelKeys.output_data_types][i]
        output_node.data_format = conf[ModelKeys.output_data_formats][i]
        if ((output_node.data_format == DataFormat.NCHW) and (len(output_node.shape) == 4)):
            output_node.shape = transpose_shape(output_node.shape, [0, 2, 3, 1])
            output_node.data_format = DataFormat.NHWC
        option.add_output_node(output_node)
    # Check nodes default to the output nodes when not configured explicitly.
    if (ModelKeys.check_tensors in conf):
        for i in range(len(conf[ModelKeys.check_tensors])):
            check_node = cvt.NodeInfo()
            check_node.name = conf[ModelKeys.check_tensors][i]
            check_node.shape = conf[ModelKeys.check_shapes][i]
            option.add_check_node(check_node)
    else:
        option.check_nodes = option.output_nodes
    option.build()
    print('Transform model to one that can better run on device')
    platform = conf[ModelKeys.platform]
    # Dispatch to the platform-specific front-end converter. Imports are local
    # so only the framework actually used needs to be installed.
    if (platform == Platform.TENSORFLOW):
        from transform import tensorflow_converter
        converter = tensorflow_converter.TensorflowConverter(option, conf['model_file_path'])
    elif (platform == Platform.CAFFE):
        from transform import caffe_converter
        converter = caffe_converter.CaffeConverter(option, conf['model_file_path'], conf['weight_file_path'])
    elif (platform == Platform.ONNX):
        from transform import onnx_converter
        converter = onnx_converter.OnnxConverter(option, conf['model_file_path'])
    elif (platform == Platform.MEGENGINE):
        from transform import megengine_converter
        converter = megengine_converter.MegengineConverter(option, conf['model_file_path'])
    elif (platform == Platform.KERAS):
        from transform import keras_converter
        converter = keras_converter.KerasConverter(option, conf['model_file_path'])
    elif (platform == Platform.PYTORCH):
        from transform import pytorch_converter
        converter = pytorch_converter.PytorchConverter(option, conf['model_file_path'])
    else:
        mace_check(False, ('Mace do not support platorm %s yet.' % platform))
    (output_graph_def, converter_info) = converter.run()
    # Graph-level optimization/transformation pass.
    mace_transformer = transformer.Transformer(option, output_graph_def, converter_info)
    (output_graph_def, quantize_activation_info) = mace_transformer.run()
    runtime = conf[ModelKeys.runtime]
    # Device-specific back-ends for DSP/NPU runtimes.
    if (runtime in [DeviceType.HEXAGON, DeviceType.HTA]):
        from transform import hexagon_converter
        converter = hexagon_converter.HexagonConverter(option, output_graph_def, quantize_activation_info)
        output_graph_def = converter.run()
    elif (runtime == DeviceType.APU):
        mace_check((platform == Platform.TENSORFLOW), 'apu only support model from tensorflow')
        from transform import apu_converter
        converter = apu_converter.ApuConverter(option, output_graph_def, quantize_activation_info)
        output_graph_def = converter.run()
    return output_graph_def
def walk_data(nsteps: int, params: P, data: D, key: PRNGKey, metrop_step_fn: MetropolisStep[(P, D)]) -> Tuple[(chex.Numeric, D, PRNGKey)]:
    """Run ``nsteps`` Metropolis steps on ``data`` and average the accept prob.

    Returns:
        (mean_accept_prob, final_data, final_key).
    """
    def _scan_body(carry, _):
        accept_total, cur_data, cur_key = carry
        accept_prob, new_data, new_key = metrop_step_fn(params, cur_data, cur_key)
        return ((accept_total + accept_prob, new_data, new_key), None)

    (accept_sum, final_data, final_key), _ = jax.lax.scan(
        _scan_body, (0.0, data, key), xs=None, length=nsteps)
    return ((accept_sum / nsteps), final_data, final_key)
class LinformerAttention(nn.Module):
    """Linformer attention: keys/values are linearly projected from sequence
    length S down to a fixed dimension ``k``, giving O(S*k) cost instead of
    the quadratic O(S^2) of full attention.

    Args:
        seq_len: maximum supported key/value sequence length.
        k: projected sequence dimension.
        share_kv: if True, keys and values share one projection matrix.
        softmax_temp: softmax temperature; None means 1/sqrt(head_dim),
            resolved in forward.
        attention_dropout: dropout applied to the attention weights.
    """

    def __init__(self, seq_len, k=256, share_kv=False, softmax_temp=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.seq_len = seq_len
        self.share_kv = share_kv
        # (seq_len, k) learned down-projection for keys (and values if shared).
        self.proj_k = nn.Parameter(torch.empty(seq_len, k, device=device, dtype=dtype))
        if (not share_kv):
            self.proj_v = nn.Parameter(torch.empty(seq_len, k, device=device, dtype=dtype))
        self.softmax_temp = softmax_temp
        self.dropout = nn.Dropout(attention_dropout)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialise the projection matrices ~ N(0, 1/k)."""
        dim = self.proj_k.shape[(- 1)]
        std = (1 / math.sqrt(dim))
        nn.init.normal_(self.proj_k, mean=0.0, std=std)
        if (not self.share_kv):
            nn.init.normal_(self.proj_v, mean=0.0, std=std)

    def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
        """Compute Linformer attention.

        Args:
            query: (B, T, H, E) tensor.
            key: (B, S_k, H, D) tensor; S_k must be <= seq_len.
            value: (B, S_v, H, D) tensor; S_v must be <= seq_len.
            attn_mask: unsupported — the projection mixes key positions.
            key_padding_mask: optional padding mask; padded positions are
                zeroed before projecting.
            need_weights: if True, also return the (B, H, T, k) attention.

        Returns:
            (output, attn) where output is (B, T, H, D) and attn is None
            unless ``need_weights``.
        """
        if (attn_mask is not None):
            raise NotImplementedError('Linformer does not support attn_mask')
        (B, T, H, E) = query.shape
        (_, S_k, _, D) = key.shape
        (_, S_v, _, D) = value.shape
        softmax_temp = (self.softmax_temp or (1 / math.sqrt(E)))
        assert ((S_k <= self.seq_len) and (S_v <= self.seq_len)), f'the sequence length of the key / value must be at most {self.seq_len}'
        if ((key_padding_mask is not None) and (not key_padding_mask.all_ones)):
            # Zero padded positions so they do not leak into the k-dim mix.
            key = key.masked_fill(rearrange((~ key_padding_mask.bool_matrix), 'b s -> b s 1 1'), 0.0)
            value = value.masked_fill(rearrange((~ key_padding_mask.bool_matrix), 'b s -> b s 1 1'), 0.0)
        # Project S -> k; the temperature is folded into the key projection.
        key = torch.einsum('bshd,sk->bkhd', key, (self.proj_k[:S_k] * softmax_temp))
        # With share_kv, values reuse the key projection matrix.
        value = torch.einsum('bshe,sk->bkhe', value, (self.proj_k[:S_v] if self.share_kv else self.proj_v[:S_v]))
        QK = torch.einsum('bthe,bkhe->bhtk', query, key)
        attn = torch.softmax(QK, dim=(- 1))
        A = self.dropout(attn)
        output = torch.einsum('bhtk,bkhd->bthd', A, value)
        return (output, (attn if need_weights else None))
class TestTorchModel(unittest.TestCase):
    """Round-trip test: compile a traced BERT-mini model, save/reload the
    graph, and check the reloaded graph's output node wiring.
    """

    # Bug fix: unittest invokes setUpClass/tearDownClass on the class itself,
    # so they must be classmethods; plain instance methods raise a TypeError
    # when the framework calls them with no instance.
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_1(self):
        pt_file = '/tf_dataset2/inc-ut/nlptoolkit_ut_model/bert_mini_fp32.pt'
        if is_win():
            pt_file = 'D:\\dataset\\nlptoolkit_ut_model\\bert_mini_fp32.pt'
        self.assertTrue(os.path.exists(pt_file), 'INT8 IR model is not found, please set your own model path!')
        ids = torch.LongTensor([[1, 2, 3]])
        tok = torch.zeros_like(ids)
        att = torch.ones_like(ids)
        traced_model = torch.jit.load(pt_file)
        example_in = torch.rand(8, 128)
        # Reference output from the original traced model.
        ref_out = traced_model(ids, tok, att, ids)[0].detach().numpy()
        graph = compile(pt_file)
        graph.save(file_name)
        # Reload from the serialized conf/weights and verify output wiring.
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'), load_weight=True)
        self.assertTrue((newgraph.nodes[(- 1)].name == 'output_data'))
        self.assertTrue((newgraph.nodes[(- 1)].input_tensors[(- 1)].name == '268'))
        shutil.rmtree(file_name)
class RandomCropVideo(RandomCrop):
    """Randomly crop every frame of a video clip to a fixed ``size``."""

    def __init__(self, size):
        # Accept a single number (square crop) or an explicit (h, w) pair.
        self.size = (int(size), int(size)) if isinstance(size, numbers.Number) else size

    def __call__(self, clip):
        """Crop ``clip`` at a location sampled by the parent's get_params."""
        i, j, h, w = self.get_params(clip, self.size)
        return F.crop(clip, i, j, h, w)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(size={self.size})'
class pspnet_res18(nn.Module):
    """PSPNet with a ResNet-18 (d8) backbone where only the final classifier
    convolution of the decode head is trainable."""

    def __init__(self, num_classes=19):
        super(pspnet_res18, self).__init__()
        config_path = './IFR/configs/_base_/models/pspnet_r18-d8.py'
        cfg = Config.fromfile(config_path)
        self.model = build_segmentor(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
        self.fix_backbone()

    def fix_backbone(self):
        """Freeze all parameters except the decode head's classifier conv."""
        for param in self.parameters():
            param.requires_grad = False
        for param in self.model.decode_head.conv_seg.parameters():
            param.requires_grad = True

    def forward(self, x):
        # Features are extracted without gradients; only cls_seg is trained.
        with torch.no_grad():
            feat = self.model.backbone(x)
            feat = self.model.decode_head(feat, return_feat=True)
        pred = self.model.decode_head.cls_seg(feat)
        return pred
def unique_months(arr):
    """Return the sorted unique 'YYYY-MM' prefixes of the given date strings."""
    return sorted({d[:7] for d in arr})
class DepthWrapper():
    """ROS wrapper that registers depth point clouds with GICP and feeds
    between-pose factors into a GTSAM-based pose-graph optimizer.

    Keeps a sliding submap of recent clouds, adds sequential and
    skip-connection ICP factors, and republishes clouds in the map frame.
    """

    def __init__(self, optimizer: Optimizer):
        self.cloud_publisher = rospy.Publisher('/transformed_depth_clouds', PointCloud2, queue_size=5)
        rospy.wait_for_service('preprocess_cloud')
        self.preprocess_cloud = rospy.ServiceProxy('preprocess_cloud', PreprocessCloud)
        # Index of the next pose/state to be added to the factor graph.
        self.state_index = 1
        self.optimizer = optimizer
        self.submap_clouds = []
        self.correspondence_threshold = 1.0
        self.icp_noise_model = gtsam.noiseModel.Diagonal.Sigmas(np.ones((6,)))
        # Extrinsic transform from the robot base to the depth camera frame
        # (identity until initialize_params reads it from TF).
        self.baseTdepth = gtsam.Pose3()
        self.depth_lock = Lock()

    def preprocess_measurement(self, msg: PointCloud2, min_range=0, max_range=np.inf):
        """Run the external preprocessing service on ``msg`` and return the
        cloud as a 3xN numpy array.

        NOTE(review): min_range/max_range are accepted but not used here —
        presumably filtering happens inside the service; confirm.
        """
        start = time.time()
        response = self.preprocess_cloud(msg)
        point_cloud = np.array([response.x, response.y, response.z])
        end = time.time()
        print(f'preprocessing time: {(end - start)} s')
        self.publish_transformed_cloud(point_cloud, 0)
        return point_cloud

    def create_depth_factor(self, a: int, b: int, cloud_a: np.ndarray, cloud_b: np.ndarray, aTb_estimate=None):
        """Align ``cloud_b`` to ``cloud_a`` with GICP and build a between
        factor for poses ``a`` -> ``b``.

        Returns:
            (factor, wTb_estimate): the ICP between-factor and the implied
            world-frame estimate of pose ``b``.
        """
        if (not aTb_estimate):
            if self.optimizer.results.exists(X(b)):
                # Both poses already estimated: use their current relative pose.
                wTa = self.optimizer.results.atPose3(X(a))
                wTb = self.optimizer.results.atPose3(X(b))
                aTb_estimate = wTa.between(wTb)
            elif ((a == 0) and (b == 1)):
                # Very first pair: start from identity.
                aTb_estimate = gtsam.Pose3()
            else:
                # Fall back to the previous pair's relative pose
                # (constant-motion assumption).
                wTp = self.optimizer.results.atPose3(X((a - 1)))
                wTq = self.optimizer.results.atPose3(X((b - 1)))
                aTb_estimate = wTp.between(wTq)
        aTb_matrix = pygicp.align_points(cloud_a.T, cloud_b.T, max_correspondence_distance=self.correspondence_threshold, initial_guess=aTb_estimate.matrix(), k_correspondences=15, num_threads=4)
        aTb = gtsam.Pose3(aTb_matrix)
        factor = gtsam.BetweenFactorPose3(X(a), X(b), aTb, self.icp_noise_model)
        wTa = self.optimizer.results.atPose3(X(a))
        wTb_estimate = wTa.compose(aTb)
        return (factor, wTb_estimate)

    def depth_callback(self, msg: PointCloud2, imu=None):
        """Handle a new depth cloud: preprocess, register against the last
        cloud, add the factor, optimize, and maintain the submap."""
        aTb_estimate = None
        (index_a, index_b) = ((self.state_index - 1), self.state_index)
        if (imu and (len(self.submap_clouds) > 0)):
            # Seed ICP with the IMU's relative-pose estimate when available.
            aTb_estimate = imu.create_and_add_factor(index_a, index_b)
        min_range = rospy.get_param('/depth/min_range')
        max_range = rospy.get_param('/depth/max_range')
        cloud_b = self.preprocess_measurement(msg, min_range=min_range, max_range=max_range)
        # Express the cloud in the robot base frame.
        cloud_b = self.baseTdepth.transformFrom(cloud_b)
        if (len(self.submap_clouds) == 0):
            # First cloud: nothing to register against yet.
            self.submap_clouds.append(cloud_b)
            return
        cloud_a = self.submap_clouds[(- 1)]
        (factor, wTb_estimate) = self.create_depth_factor(index_a, index_b, cloud_a, cloud_b, aTb_estimate)
        self.optimizer.add_factor(factor, (X(index_b), wTb_estimate))
        self.optimizer.optimize()
        self.submap_clouds.append(cloud_b)
        # Snapshot the state under the lock before the slower skip-connection
        # and publishing work.
        with self.depth_lock:
            current_state = self.state_index
            submap = self.submap_clouds.copy()
        if (len(submap) > 2):
            self.create_skip_connections(submap, current_state)
            self.optimizer.optimize()
        if (len(submap) == rospy.get_param('/depth/submap_length')):
            # Submap full: publish its oldest cloud in the map frame.
            self.publish_transformed_cloud(submap[0], ((current_state - len(submap)) + 1))
        self.state_index += 1

    def create_skip_connections(self, submap, current_index):
        """Add ICP factors from older submap clouds to the current pose."""
        submap_length = len(submap)
        for i in range((submap_length - 2)):
            # Connect the current pose to each cloud older than the previous one.
            index_a = ((current_index - i) - 2)
            index_b = current_index
            self.create_skip_connection(submap, index_a, index_b)

    def create_skip_connection(self, clouds, index_a, index_b):
        """Register the cloud at pose ``index_a`` against the newest cloud and
        add the resulting between-factor (no new initial estimate)."""
        (cloud_a, cloud_b) = (clouds[(- ((index_b - index_a) + 1))], clouds[(- 1)])
        (factor, _) = self.create_depth_factor(index_a, index_b, cloud_a, cloud_b)
        self.optimizer.add_factor(factor)

    def publish_transformed_cloud(self, bTcloud, index):
        """Publish ``bTcloud`` (base-frame points) transformed into the map
        frame using the optimized pose at ``index``."""
        wTb = self.optimizer.results.atPose3(X(index))
        wTcloud = wTb.transformFrom(bTcloud)
        # 3xN -> Nx3 for the PointCloud2 builder.
        wTcloud = wTcloud.T
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = 'map'
        pcd = point_cloud2.create_cloud_xyz32(header, wTcloud)
        self.cloud_publisher.publish(pcd)

    def initialize_params(self):
        """Read ROS parameters (submap length, ICP noise, thresholds) and the
        base->depth-camera extrinsics from TF."""
        self.submap_clouds = deque([], rospy.get_param('/depth/submap_length'))
        icp_noise_sigmas = rospy.get_param('/depth/icp_noise')
        # Indices 3-5 are converted to radians and placed first — presumably
        # matching gtsam Pose3's rotation-first tangent ordering; confirm.
        self.icp_noise_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([np.deg2rad(icp_noise_sigmas[3]), np.deg2rad(icp_noise_sigmas[4]), np.deg2rad(icp_noise_sigmas[5]), icp_noise_sigmas[0], icp_noise_sigmas[1], icp_noise_sigmas[2]]))
        self.correspondence_threshold = rospy.get_param('/depth/correspondence_threshold')
        self.method = rospy.get_param('/depth/registration_method')
        tf_buffer = tf2_ros.Buffer()
        tf2_ros.TransformListener(tf_buffer)
        # Give the TF listener a moment to fill the buffer.
        rospy.sleep(1)
        baseTdepth = tf_buffer.lookup_transform('base_link', 'camera_depth_optical_frame', rospy.Time())
        translation = np.array([baseTdepth.transform.translation.x, baseTdepth.transform.translation.y, baseTdepth.transform.translation.z])
        quaternion = np.array([baseTdepth.transform.rotation.w, baseTdepth.transform.rotation.x, baseTdepth.transform.rotation.y, baseTdepth.transform.rotation.z])
        self.baseTdepth = gtsam.Pose3(gtsam.Rot3.Quaternion(*quaternion), translation)
def load_checkpoint_to_model(checkpoint, model):
    """Load ``checkpoint`` (a state dict) into ``model`` via a temporary file.

    Round-tripping through disk allows the (possibly large) in-memory
    checkpoint object to be freed before the state dict is materialised
    again for loading.

    Args:
        checkpoint: serialisable state-dict-like object.
        model: torch module to load into (non-strict).
    """
    with tempfile.NamedTemporaryFile(delete=False) as file:
        torch.save(checkpoint, file.name)
    # Release the in-memory copy before re-loading from disk.
    del checkpoint
    try:
        model.load_state_dict(torch.load(file.name), strict=False)
    finally:
        # Bug fix: always remove the temp file, even if loading raises.
        os.remove(file.name)
def _load_library(filename, lib='op', load_fn=None):
    """Load a TensorFlow custom-op (or file-system plugin) shared library.

    Resolves ``filename`` relative to the *caller's* source file, optionally
    adds a second candidate under the TFPLUS_DATAPATH environment variable,
    then tries each candidate path in order and returns the first
    successfully loaded handle.

    Args:
        filename: library file name, possibly without the platform suffix.
        lib: 'op' to use ``tf.load_op_library``; anything else loads a
            file-system plugin.
        load_fn: optional loader overriding the default behavior.

    Returns:
        The loaded library handle.

    Raises:
        NotImplementedError: if no candidate path could be loaded.
    """
    # NOTE: sys._getframe(1) deliberately inspects the caller's frame so the
    # library path is resolved relative to the module that called us.
    f = inspect.getfile(sys._getframe(1))
    f = os.path.join(os.path.dirname(f), filename)
    suffix = get_suffix()
    if os.path.exists((f + suffix)):
        f = (f + suffix)
    filenames = [f]
    # Optionally also look under TFPLUS_DATAPATH using the same relative path.
    datapath = os.environ.get('TFPLUS_DATAPATH')
    if (datapath is not None):
        f = os.path.join(datapath, os.path.relpath(f, os.path.dirname(filename)))
        suffix = get_suffix()
        if os.path.exists((f + suffix)):
            f = (f + suffix)
        filenames.append(f)
    # Default loader: op library, or a file-system plugin wrapper that
    # signals success by returning True.
    load_fn = (load_fn or (tf.load_op_library if (lib == 'op') else (lambda f: (tf.compat.v1.load_file_system_library(f) is None))))
    errs = []
    for f in filenames:
        try:
            l = load_fn(f)
            if (l is not None):
                return l
        except errors.NotFoundError as e:
            # Remember the error and try the next candidate path.
            errs.append(str(e))
    raise NotImplementedError(('unable to open file: ' + '{}, from paths: {}\ncaused by: {}'.format(filename, filenames, errs)))
def validate(val_loader, model, criterion, args):
    """Evaluate ``model`` over ``val_loader`` and return top-1 accuracy in [0, 1].

    Timing starts after ``args.warmup_iter`` iterations; the loop stops early
    after ``args.iter`` timed iterations when ``args.iter`` > 0.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, prefix='Test: ')
    model.eval()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # Only time iterations past the warm-up phase.
            timed = i >= args.warmup_iter
            if timed:
                start = time.time()
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(input)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            batch = input.size(0)
            losses.update(loss.item(), batch)
            top1.update(acc1[0], batch)
            top5.update(acc5[0], batch)
            if timed:
                batch_time.update(time.time() - start)
            if i % args.print_freq == 0:
                progress.print(i)
            if args.iter > 0 and i >= args.warmup_iter + args.iter - 1:
                break
    print('Batch size = %d' % args.batch_size)
    print('Accuracy: {top1:.5f} {top5:.5f}'.format(top1=(top1.avg / 100), top5=(top5.avg / 100)))
    return top1.avg / 100
class IBertPreTrainedModel(metaclass=DummyObject):
    """Placeholder model class that raises unless the torch backend is present."""

    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Adam_LBFGS(Optimizer):
    """Optimizer that runs Adam for the first ``switch_epoch`` steps, then
    switches to L-BFGS (a common schedule for PINN-style training).

    Args:
        params: iterable of parameters to optimise.
        switch_epoch: step count at which to switch from Adam to L-BFGS.
        adam_param: kwargs for torch.optim.Adam
            (default: {'lr': 0.001, 'betas': (0.9, 0.999)}).
        lbfgs_param: kwargs for torch.optim.LBFGS
            (default: {'lr': 1, 'max_iter': 20}).
    """

    def __init__(self, params, switch_epoch=10000, adam_param=None, lbfgs_param=None):
        # Bug fix: dict literals as default arguments are mutable and shared
        # across all instances; use None sentinels instead.
        if adam_param is None:
            adam_param = {'lr': 0.001, 'betas': (0.9, 0.999)}
        if lbfgs_param is None:
            lbfgs_param = {'lr': 1, 'max_iter': 20}
        self.params = list(params)
        self.switch_epoch = switch_epoch
        self.adam = torch.optim.Adam(self.params, **adam_param)
        self.lbfgs = torch.optim.LBFGS(self.params, **lbfgs_param)
        super().__init__(self.params, defaults={})
        self.state['current_step'] = 0

    def step(self, closure=None):
        """Advance one step, delegating to Adam or (after the switch) L-BFGS."""
        self.state['current_step'] += 1
        if self.state['current_step'] < self.switch_epoch:
            self.adam.step(closure)
        else:
            self.lbfgs.step(closure)
        if self.state['current_step'] == self.switch_epoch:
            print(f'Switch to LBFGS at epoch {self.switch_epoch}')
def train_neighbor_model(embedding_data, K=500):
    """Fit and persist a K-nearest-neighbour model over the embeddings.

    Args:
        embedding_data: array-like of embedding vectors.
        K: number of neighbours the fitted model returns.

    Returns:
        The fitted NearestNeighbors model (also saved to
        'models/neighbor_model.joblib').
    """
    # Bug fix: honour the K parameter (it was previously ignored in favour of
    # the hard-coded 500, which matched only the default).
    neighbor_model = NearestNeighbors(n_neighbors=K, algorithm='kd_tree', n_jobs=(- 1))
    neighbor_model.fit(embedding_data)
    dump(neighbor_model, 'models/neighbor_model.joblib')
    return neighbor_model
def test_chained_config_scopes_can_access_preset():
    """Chained config scopes must see preset values and each other's results."""
    # NOTE: the unused locals below are intentional — config scopes are
    # evaluated from their source, and the assignments become config entries.
    def cfg1(c):
        a = (10 + c)

    def cfg2(a, c):
        b = ((a * 2) + c)

    final_cfg, summary = chain_evaluate_config_scopes([cfg1, cfg2], preset={'c': 32})
    # All preset and derived keys must be present with the chained values.
    assert set(final_cfg.keys()) == {'a', 'b', 'c'}
    assert final_cfg['a'] == 42
    assert final_cfg['b'] == 116
    assert final_cfg['c'] == 32
def wresnet38(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, out_internals=False, lr_mult=1, reuse=None):
    """Build a Wide ResNet-38 backbone.

    Returns the final BN+ReLU feature map, and when ``out_internals`` is
    True also the outputs of the last three blocks (before bn7).
    """
    prefix = '' if name is None else name
    taps = []
    # Stem plus three strided stages.
    x = wResStem(x, 64, momentum, eps, use_global_stats, bn_data=True, name=prefix, lr_mult=lr_mult, reuse=reuse)
    x = wResBlock(x, 3, 128, 2, 1, False, 0, momentum, eps, use_global_stats, prefix + '2', lr_mult, reuse)
    x = wResBlock(x, 3, 256, 2, 1, False, 0, momentum, eps, use_global_stats, prefix + '3', lr_mult, reuse)
    x = wResBlock(x, 6, 512, 2, 1, False, 0, momentum, eps, use_global_stats, prefix + '4', lr_mult, reuse)
    # Dilated stages (stride 1); tap each output as an internal feature.
    x = wResBlock(x, 3, 1024, 1, 2, False, 0, momentum, eps, use_global_stats, prefix + '5', lr_mult, reuse, mid_filter=512, fst_dilate=1)
    taps.append(x)
    x = wResBlock(x, 1, 2048, 1, 4, True, 0.3, momentum, eps, use_global_stats, prefix + '6', lr_mult, reuse)
    taps.append(x)
    x = wResBlock(x, 1, 4096, 1, 4, True, 0.5, momentum, eps, use_global_stats, prefix + '7', lr_mult, reuse)
    taps.append(x)
    x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=prefix + 'bn7', lr_mult=lr_mult, reuse=reuse)
    return (x, taps) if out_internals else x
def resolve_act_layer(kwargs, default='relu'):
    """Pop 'act_layer' from ``kwargs``, resolving string names to layer classes.

    Non-string values (already-resolved layers, or None) pass through as-is.
    """
    act_layer = kwargs.pop('act_layer', default)
    return get_act_layer(act_layer) if isinstance(act_layer, str) else act_layer
class TestCombineValidSubsets(unittest.TestCase):
    """End-to-end checks for combining vs. separating validation subsets."""

    def _train(self, extra_flags):
        """Train a tiny LM with a duplicated valid split; return log messages."""
        with self.assertLogs() as logs:
            with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
                create_dummy_data(data_dir, num_examples=20)
                preprocess_lm_data(data_dir)
                # Duplicate the "valid" split as "valid1".
                shutil.copyfile(f'{data_dir}/valid.bin', f'{data_dir}/valid1.bin')
                shutil.copyfile(f'{data_dir}/valid.idx', f'{data_dir}/valid1.idx')
                train_language_model(
                    data_dir,
                    'transformer_lm',
                    ['--max-update', '0', '--log-format', 'json'] + extra_flags,
                    run_validation=False,
                )
        return [record.message for record in logs.records]

    def test_combined(self):
        """--combine-valid-subsets merges valid/valid1: no per-subset ppl keys."""
        flags = ['--combine-valid-subsets', '--required-batch-size-multiple', '1']
        logs = self._train(flags)
        assert any('valid1' in message for message in logs)
        assert not any('valid1_ppl' in message for message in logs)

    def test_subsets(self):
        """Explicit --valid-subset reports a ppl entry for every subset."""
        flags = ['--valid-subset', 'valid,valid1', '--required-batch-size-multiple', '1']
        logs = self._train(flags)
        assert any('valid_ppl' in message for message in logs)
        assert any('valid1_ppl' in message for message in logs)
def test_group_reid():
    """Test group_reid's estimate of the temporal noise covariance on
    simulated data, with and without signal support, for both the default
    and the 'AR' estimation methods."""
    n_samples = 30
    n_features = 50
    n_times = 10
    sigma = 1.0
    rho = 0.9
    # Ground-truth covariance: Toeplitz correlation with geometric decay.
    corr = toeplitz(np.geomspace(1, (rho ** (n_times - 1)), n_times))
    cov = (np.outer(sigma, sigma) * corr)
    support_size = 2
    (X, Y, beta, noise) = multivariate_temporal_simulation(n_samples=n_samples, n_features=n_features, n_times=n_times, support_size=support_size, sigma=sigma, rho_noise=rho)
    # Even with a single iteration the estimate should be in the right ballpark.
    (cov_hat, _) = group_reid(X, Y, tol=0.001, max_iter=1)
    error_ratio = (cov_hat / cov)
    assert_almost_equal(np.max(error_ratio), 1.0, decimal=0)
    assert_almost_equal(np.log(np.min(error_ratio)), 0.0, decimal=1)
    # AR-based estimation of the serial correlation.
    (cov_hat, _) = group_reid(X, Y, method='AR')
    error_ratio = (cov_hat / cov)
    assert_almost_equal(np.max(error_ratio), 1.0, decimal=0)
    assert_almost_equal(np.log(np.min(error_ratio)), 0.0, decimal=0)
    # Repeat with an empty support (pure-noise response).
    support_size = 0
    (X, Y, beta, noise) = multivariate_temporal_simulation(n_samples=n_samples, n_features=n_features, n_times=n_times, support_size=support_size, sigma=sigma, rho_noise=rho, seed=1)
    (cov_hat, _) = group_reid(X, Y)
    error_ratio = (cov_hat / cov)
    assert_almost_equal(np.max(error_ratio), 1.0, decimal=0)
    assert_almost_equal(np.log(np.min(error_ratio)), 0.0, decimal=1)
    # Non-stationary estimation without fitting Y.
    (cov_hat, _) = group_reid(X, Y, fit_Y=False, stationary=False)
    error_ratio = (cov_hat / cov)
    assert_almost_equal(np.max(error_ratio), 1.0, decimal=0)
    assert_almost_equal(np.log(np.min(error_ratio)), 0.0, decimal=0)
    (cov_hat, _) = group_reid(X, Y, method='AR')
    error_ratio = (cov_hat / cov)
    assert_almost_equal(np.max(error_ratio), 1.0, decimal=0)
    assert_almost_equal(np.log(np.min(error_ratio)), 0.0, decimal=1)
class SuperMobileSPADE(nn.Module):
    """SPADE normalization block with searchable ("super") conv layers.

    Parses a ``config_text`` like ``'spadesyncbatch3x3'`` to pick the
    parameter-free normalization and the kernel size, then modulates the
    normalized activations with gamma/beta maps predicted from a
    segmentation map.
    """

    def __init__(self, config_text, norm_nc, label_nc, nhidden=128):
        super(SuperMobileSPADE, self).__init__()
        assert config_text.startswith('spade')
        # e.g. 'spadesyncbatch3x3' -> norm type 'syncbatch', kernel size 3
        parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)
        norm_type = str(parsed.group(1))
        kernel = int(parsed.group(2))
        if norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif norm_type == 'syncbatch':
            self.param_free_norm = SuperSynchronizedBatchNorm2d(norm_nc, affine=False)
        elif norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError(('%s is not a recognized param-free norm type in SPADE' % norm_type))
        padding = kernel // 2
        # Shared trunk plus two heads predicting the modulation maps.
        self.mlp_shared = nn.Sequential(SuperConv2d(label_nc, nhidden, kernel_size=kernel, padding=padding), nn.ReLU())
        self.mlp_gamma = SuperSeparableConv2d(nhidden, norm_nc, kernel_size=kernel, padding=padding)
        self.mlp_beta = SuperSeparableConv2d(nhidden, norm_nc, kernel_size=kernel, padding=padding)

    def forward(self, x, segmap, config, verbose=False):
        """Normalize ``x`` and modulate it with maps predicted from ``segmap``.

        ``config['hidden']`` selects the active channel count of the shared
        conv; the gamma/beta heads are sliced to match ``x``'s channels.
        """
        normalized = self.param_free_norm(x, config)
        # Resize the segmentation map to the spatial size of x.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        active_channels = config['hidden']
        hidden = self.mlp_shared[0](segmap, {'channel': active_channels})
        hidden = self.mlp_shared[1](hidden)
        gamma = self.mlp_gamma(hidden, {'channel': x.shape[1]})
        beta = self.mlp_beta(hidden, {'channel': x.shape[1]})
        return normalized * (1 + gamma) + beta
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """Evaluate text-detection results against ground truth with IoU and TIoU.

    Parameters
    ----------
    gtFilePath : str
        Path to a zip file containing one ground-truth file per sample.
    submFilePath : str
        Path to a zip file containing one detection file per sample.
    evaluationParams : dict
        Settings: 'GT_SAMPLE_NAME_2_ID', 'DET_SAMPLE_NAME_2_ID', 'CRLF',
        'LTRB', 'CONFIDENCES', 'PER_SAMPLE_RESULTS', 'IOU_CONSTRAINT',
        'AREA_PRECISION_CONSTRAINT'.

    Returns
    -------
    dict
        {'calculated', 'Message', 'method', 'per_sample', 'iouMethod',
        'tiouMethod'} with precision/recall/hmean under the plain, IoU-weighted
        and TIoU-weighted conventions.

    Python 3 fixes applied: ``dict.items()`` instead of ``iteritems()``,
    ``range`` instead of ``xrange``, floor division for array indices, and
    ``iouRecall``/``iouPrecision`` are now defined on the ``numGtCare == 0``
    branch (previously a NameError).
    """
    # Py3 fix: dict.iteritems() does not exist in Python 3.
    for (module, alias) in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points):
        """Build a Polygon from a flat [x1, y1, x2, y2, ...] coordinate list."""
        num_points = len(points)
        resBoxes = np.empty([1, num_points], dtype='float32')
        # Py3 fix: floor division keeps the indices integral.
        for inp in range(0, num_points, 2):
            resBoxes[0, inp // 2] = float(points[inp])
            resBoxes[0, (inp // 2) + (num_points // 2)] = float(points[inp + 1])
        pointMat = resBoxes[0].reshape([2, num_points // 2]).T
        return plg.Polygon(pointMat)

    def rectangle_to_polygon(rect):
        """Convert an axis-aligned Rectangle into a 4-point Polygon."""
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(rect.xmin)
        resBoxes[0, 4] = int(rect.ymax)
        resBoxes[0, 1] = int(rect.xmin)
        resBoxes[0, 5] = int(rect.ymin)
        resBoxes[0, 2] = int(rect.xmax)
        resBoxes[0, 6] = int(rect.ymin)
        resBoxes[0, 3] = int(rect.xmax)
        resBoxes[0, 7] = int(rect.ymax)
        pointMat = resBoxes[0].reshape([2, 4]).T
        return plg.Polygon(pointMat)

    def rectangle_to_points(rect):
        """Return rectangle corners as a flat point list."""
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD, pG):
        """Area of the union of detection pD and ground-truth pG."""
        areaA = pD.area()
        areaB = pG.area()
        return (areaA + areaB) - get_intersection(pD, pG)

    def get_intersection_over_union(pD, pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except Exception:  # degenerate polygons (zero-area union)
            return 0

    def funcCt(x):
        # Penalty for the fraction of GT area *not* covered (TIoU recall).
        if x <= 0.01:
            return 1
        else:
            return 1 - x

    def get_text_intersection_over_union_recall(pD, pG):
        """TIoU recall: IoU scaled by how completely pD covers pG."""
        try:
            Ct = pG.area() - get_intersection(pD, pG)
            assert (Ct >= 0) and (Ct <= pG.area()), 'Invalid Ct value'
            assert pG.area() > 0, 'Invalid Gt'
            return (get_intersection(pD, pG) * funcCt((Ct * 1.0) / pG.area())) / get_union(pD, pG)
        except Exception:
            return 0

    def funcOt(x):
        # Penalty for detection area spilling over *other* GT boxes (TIoU precision).
        if x <= 0.01:
            return 1
        else:
            return 1 - x

    def get_text_intersection_over_union_precision(pD, pG, gtNum, gtPolys, gtDontCarePolsNum):
        """TIoU precision: IoU scaled by how much of pD covers other GT boxes."""
        Ot = 0
        try:
            inside_pG = pD & pG
            gt_union_inside_pD = None
            gt_union_inside_pD_and_pG = None
            count_initial = 0
            # Py3 fix: xrange() does not exist in Python 3.
            for i in range(len(gtPolys)):
                # NOTE(review): this checks gtNum (the current GT), not i,
                # against the don't-care list — kept as in the reference
                # TIoU implementation; confirm intent.
                if (i != gtNum) and (gtNum not in gtDontCarePolsNum):
                    if not (get_intersection(pD, gtPolys[i]) == 0):
                        if count_initial == 0:
                            gt_union_inside_pD = gtPolys[i]
                            gt_union_inside_pD_and_pG = inside_pG & gtPolys[i]
                            count_initial = 1
                            continue
                        gt_union_inside_pD = gt_union_inside_pD | gtPolys[i]
                        inside_pG_i = inside_pG & gtPolys[i]
                        gt_union_inside_pD_and_pG = gt_union_inside_pD_and_pG | inside_pG_i
            if gt_union_inside_pD is not None:
                pD_union_with_other_gt = pD & gt_union_inside_pD
                Ot = pD_union_with_other_gt.area() - gt_union_inside_pD_and_pG.area()
                if Ot <= 1e-10:
                    Ot = 0
            else:
                Ot = 0
            assert (Ot >= 0) and (Ot <= pD.area())
            assert pD.area() > 0
            return (get_intersection(pD, pG) * funcOt((Ot * 1.0) / pD.area())) / get_union(pD, pG)
        except Exception:
            return 0

    def get_intersection(pD, pG):
        """Area of the intersection of pD and pG (0 for empty intersection)."""
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def get_intersection_three(pD, pG, pGi):
        """Area of the triple intersection pD & pG & pGi."""
        pInt = pD & pG
        pInt_3 = pInt & pGi
        if len(pInt_3) == 0:
            return 0
        return pInt_3.area()

    def compute_ap(confList, matchList, numGtCare):
        """Average precision over detections sorted by descending confidence."""
        correct = 0
        AP = 0
        if len(confList) > 0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct) / (n + 1)
            if numGtCare > 0:
                AP /= numGtCare
        return AP

    perSampleMetrics = {}
    matchedSum = 0
    matchedSum_iou = 0
    matchedSum_tiouGt = 0
    matchedSum_tiouDt = 0
    matchedSum_cutGt = 0
    matchedSum_coverOtherGt = 0
    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
    numGlobalCareGt = 0
    numGlobalCareDet = 0
    arrGlobalConfidences = []
    arrGlobalMatches = []
    totalNumGtPols = 0
    totalNumDetPols = 0
    # NOTE(review): filename typo ('samle') kept for backward compatibility
    # with downstream consumers of this report file.
    fper_ = open('per_samle_result.txt', 'w')
    for resFile in gt:
        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        recall = 0
        precision = 0
        hmean = 0
        detMatched = 0
        detMatched_iou = 0
        detMatched_tiouGt = 0
        detMatched_tiouDt = 0
        detMatched_cutGt = 0
        detMatched_coverOtherGt = 0
        iouMat = np.empty([1, 1])
        gtPols = []
        detPols = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCarePolsNum = []
        detDontCarePolsNum = []
        pairs = []
        detMatchedNums = []
        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0
        evaluationLog = ''
        (pointsList, _, transcriptionsList) = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, False)
        # Load GT polygons; transcription '###' marks don't-care regions.
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = (transcription == '###')
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols) - 1)
        evaluationLog += ('GT polygons: ' + str(len(gtPols))) + ((' (' + str(len(gtDontCarePolsNum)) + " don't care)\n") if len(gtDontCarePolsNum) > 0 else '\n')
        if resFile in subm:
            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
            (pointsList, confidencesList, _) = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile, evaluationParams['CRLF'], evaluationParams['LTRB'], False, evaluationParams['CONFIDENCES'])
            for n in range(len(pointsList)):
                points = pointsList[n]
                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                # A detection mostly inside a don't-care GT is itself don't-care.
                if len(gtDontCarePolsNum) > 0:
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol, detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if precision > evaluationParams['AREA_PRECISION_CONSTRAINT']:
                            detDontCarePolsNum.append(len(detPols) - 1)
                            break
            evaluationLog += ('DET polygons: ' + str(len(detPols))) + ((' (' + str(len(detDontCarePolsNum)) + " don't care)\n") if len(detDontCarePolsNum) > 0 else '\n')
            if (len(gtPols) > 0) and (len(detPols) > 0):
                outputShape = [len(gtPols), len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols), np.int8)
                detRectMat = np.zeros(len(detPols), np.int8)
                tiouRecallMat = np.empty(outputShape)
                tiouPrecisionMat = np.empty(outputShape)
                tiouGtRectMat = np.zeros(len(gtPols), np.int8)
                tiouDetRectMat = np.zeros(len(detPols), np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)
                        tiouRecallMat[gtNum, detNum] = get_text_intersection_over_union_recall(pD, pG)
                        tiouPrecisionMat[gtNum, detNum] = get_text_intersection_over_union_precision(pD, pG, gtNum, gtPols, gtDontCarePolsNum)
                # Greedy one-to-one matching above the IoU threshold.
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if (gtRectMat[gtNum] == 0) and (detRectMat[detNum] == 0) and (gtNum not in gtDontCarePolsNum) and (detNum not in detDontCarePolsNum):
                            if iouMat[gtNum, detNum] > evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                detMatched += 1
                                detMatched_iou += iouMat[gtNum, detNum]
                                detMatched_tiouGt += tiouRecallMat[gtNum, detNum]
                                detMatched_tiouDt += tiouPrecisionMat[gtNum, detNum]
                                # A TIoU value differing from IoU means the match
                                # cut the GT / covered other GT respectively.
                                if iouMat[gtNum, detNum] != tiouRecallMat[gtNum, detNum]:
                                    detMatched_cutGt += 1
                                if iouMat[gtNum, detNum] != tiouPrecisionMat[gtNum, detNum]:
                                    detMatched_coverOtherGt += 1
                                pairs.append({'gt': gtNum, 'det': detNum})
                                detMatchedNums.append(detNum)
                                evaluationLog += ('Match GT #' + str(gtNum) + ' with Det #' + str(detNum) + '\n')
            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        match = detNum in detMatchedNums
                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)
                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)
        numGtCare = len(gtPols) - len(gtDontCarePolsNum)
        numDetCare = len(detPols) - len(detDontCarePolsNum)
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
            sampleAP = precision
            # Bug fix: iouRecall/iouPrecision were undefined on this branch and
            # raised NameError below; mirror the recall/precision convention.
            iouRecall = float(1)
            iouPrecision = float(0) if numDetCare > 0 else float(1)
            tiouRecall = float(1)
            tiouPrecision = float(0) if numDetCare > 0 else float(1)
        else:
            recall = float(detMatched) / numGtCare
            precision = 0 if numDetCare == 0 else float(detMatched) / numDetCare
            iouRecall = float(detMatched_iou) / numGtCare
            iouPrecision = 0 if numDetCare == 0 else float(detMatched_iou) / numDetCare
            tiouRecall = float(detMatched_tiouGt) / numGtCare
            tiouPrecision = 0 if numDetCare == 0 else float(detMatched_tiouDt) / numDetCare
            if evaluationParams['CONFIDENCES'] and evaluationParams['PER_SAMPLE_RESULTS']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare)
        hmean = 0 if (precision + recall) == 0 else ((2.0 * precision) * recall) / (precision + recall)
        tiouHmean = 0 if (tiouPrecision + tiouRecall) == 0 else ((2.0 * tiouPrecision) * tiouRecall) / (tiouPrecision + tiouRecall)
        iouHmean = 0 if (iouPrecision + iouRecall) == 0 else ((2.0 * iouPrecision) * iouRecall) / (iouPrecision + iouRecall)
        matchedSum += detMatched
        matchedSum_iou += detMatched_iou
        matchedSum_tiouGt += detMatched_tiouGt
        matchedSum_tiouDt += detMatched_tiouDt
        matchedSum_cutGt += detMatched_cutGt
        matchedSum_coverOtherGt += detMatched_coverOtherGt
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare
        if evaluationParams['PER_SAMPLE_RESULTS']:
            perSampleMetrics[resFile] = {'precision': precision, 'recall': recall, 'hmean': hmean, 'iouPrecision': iouPrecision, 'iouRecall': iouRecall, 'iouHmean': iouHmean, 'tiouPrecision': tiouPrecision, 'tiouRecall': tiouRecall, 'tiouHmean': tiouHmean, 'pairs': pairs, 'AP': sampleAP, 'iouMat': ([] if len(detPols) > 100 else iouMat.tolist()), 'gtPolPoints': gtPolPoints, 'detPolPoints': detPolPoints, 'gtDontCare': gtDontCarePolsNum, 'detDontCare': detDontCarePolsNum, 'evaluationParams': evaluationParams, 'evaluationLog': evaluationLog}
            fper_.writelines((resFile + '\t"IoU: (P: {:.3f}. R: {:.3f}. F: {:.3f})",\t"TIoU: (P: {:.3f}. R: {:.3f}. F: {:.3f})".\n'.format(precision, recall, hmean, tiouPrecision, tiouRecall, tiouHmean)))
        # Idiom fix: the original wrapped these in a pointless try/raise.
        totalNumGtPols += len(gtPols)
        totalNumDetPols += len(detPols)
    fper_.close()
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)
    print('num_gt, num_det: ', numGlobalCareGt, totalNumDetPols)
    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet
    methodHmean = 0 if (methodRecall + methodPrecision) == 0 else ((2 * methodRecall) * methodPrecision) / (methodRecall + methodPrecision)
    methodRecall_iou = 0 if numGlobalCareGt == 0 else float(matchedSum_iou) / numGlobalCareGt
    methodPrecision_iou = 0 if numGlobalCareDet == 0 else float(matchedSum_iou) / numGlobalCareDet
    iouMethodHmean = 0 if (methodRecall_iou + methodPrecision_iou) == 0 else ((2 * methodRecall_iou) * methodPrecision_iou) / (methodRecall_iou + methodPrecision_iou)
    methodRecall_tiouGt = 0 if numGlobalCareGt == 0 else float(matchedSum_tiouGt) / numGlobalCareGt
    methodPrecision_tiouDt = 0 if numGlobalCareDet == 0 else float(matchedSum_tiouDt) / numGlobalCareDet
    tiouMethodHmean = 0 if (methodRecall_tiouGt + methodPrecision_tiouDt) == 0 else ((2 * methodRecall_tiouGt) * methodPrecision_tiouDt) / (methodRecall_tiouGt + methodPrecision_tiouDt)
    methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean}
    iouMethodMetrics = {'iouPrecision': methodPrecision_iou, 'iouRecall': methodRecall_iou, 'iouHmean': iouMethodHmean}
    tiouMethodMetrics = {'tiouPrecision': methodPrecision_tiouDt, 'tiouRecall': methodRecall_tiouGt, 'tiouHmean': tiouMethodHmean}
    print('Origin:')
    print('recall: ', round(methodRecall, 4), 'precision: ', round(methodPrecision, 4), 'hmean: ', round(methodHmean, 4))
    print('TIoU-metric:')
    print('tiouRecall:', round(methodRecall_tiouGt, 3), 'tiouPrecision:', round(methodPrecision_tiouDt, 3), 'tiouHmean:', round(tiouMethodHmean, 3))
    resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics, 'iouMethod': iouMethodMetrics, 'tiouMethod': tiouMethodMetrics}
    return resDict
def PNBI_np(pred, true, mask_value=None):
    """Proportion of positive-bias predictions (PNBI) on numpy arrays.

    Parameters
    ----------
    pred, true : np.ndarray
        Prediction and ground-truth arrays of the same shape.
    mask_value : float, optional
        If given, only positions where ``true > mask_value`` are evaluated.

    Returns
    -------
    float
        Fraction of evaluated positions where ``pred > true``.
        Note: if the mask removes every element the mean of an empty
        array is NaN (with a runtime warning), as in the original.
    """
    # Bug fix: identity comparison with None ('!= None' -> 'is not None').
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    bias = pred - true
    # (bias > 0) is already a boolean array; np.where(cond, True, False)
    # in the original was redundant.
    return (bias > 0).mean()
_lr_scheduler('manual')
class ManualSchedule(LegacyFairseqLRScheduler):
    """LR scheduler driven by explicit per-epoch / per-update lookup tables.

    ``--epoch2lr`` and ``--update2lr`` are dict literals whose string keys
    may be a single index ('5'), a comma-separated list ('1,3,5'), or an
    inclusive range ('2-10'); values are learning rates.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        self.epoch2lr = self.parse_manuallr_args(args.epoch2lr)
        self.update2lr = self.parse_manuallr_args(args.update2lr)
        logger.info(' ManualSchedule epoch2lr={}'.format(self.epoch2lr))
        logger.info(' ManualSchedule update2lr={}'.format(self.update2lr))
        # Initial LR: epoch table takes precedence, then update table,
        # then the generic --lr argument.
        if 1 in self.epoch2lr:
            self.lr = self.epoch2lr[1]
        elif 1 in self.update2lr:
            self.lr = self.update2lr[1]
        else:
            self.lr = args.lr[0]
        self.optimizer.set_lr(self.lr)

    def parse_manuallr_args(self, lr_args_str):
        """Parse a dict-literal string into ``{int index: float lr}``.

        Raises
        ------
        ValueError
            If the string does not evaluate to a dict.
        """
        lr_dict = ast.literal_eval(lr_args_str.replace(' ', ''))
        if not isinstance(lr_dict, dict):
            # Bug fix: original message was garbled ('must be abel to evaluated').
            raise ValueError('epoch2lr/update2lr must evaluate to a dict')
        lr_args = {}
        logger.info(' after parsing input dictionary lr_dict = {}'.format(lr_dict))
        for (key, val) in lr_dict.items():
            if ',' in key:
                # '1,3,7': the same LR for each listed index.
                for k in key.split(','):
                    lr_args[int(k)] = float(val)
            elif '-' in key:
                # '2-10': inclusive range of indices.
                s = int(key.split('-')[0])
                e = int(key.split('-')[1])
                for k in range(s, e + 1, 1):
                    lr_args[k] = float(val)
            else:
                lr_args[int(key)] = float(val)
        return lr_args

    def add_args(parser):
        """Add scheduler-specific arguments (signature kept as in SOURCE)."""
        parser.add_argument('--epoch2lr', type=str, metavar='DICT', default='{}', help='a dictionary used to set lr for each epoch manually')
        parser.add_argument('--update2lr', type=str, metavar='DICT', default='{}', help='a dictionary used to set lr for each update manually')

    def state_dict(self):
        """Only the current LR needs to be checkpointed."""
        return {'lr': self.lr}

    def load_state_dict(self, state_dict):
        if 'lr' in state_dict:
            self.lr = state_dict['lr']

    def get_next_lr(self, epoch):
        """LR for ``epoch``: entry with the largest key <= epoch, else current LR."""
        manual_keys = [k for k in self.epoch2lr if k <= epoch]
        if manual_keys:
            manual_lr = self.epoch2lr[max(manual_keys)]
        else:
            logger.warning(' epoch={} does not exist in manual lr input. epoch2lr={}...'.format(epoch, list(self.epoch2lr.items())[:min(10, (len(self.epoch2lr.keys()) - 1))]))
            manual_lr = self.optimizer.get_lr()
        return manual_lr

    def step_begin_epoch(self, epoch):
        """Update the LR at the beginning of ``epoch`` from the epoch table."""
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the LR after ``num_updates`` optimizer steps from the update table."""
        manual_keys = [k for k in self.update2lr if k <= num_updates]
        if manual_keys:
            manual_lr = self.update2lr[max(manual_keys)]
        else:
            # Bug fix: warning used the label 'epoch=' for an update count.
            logger.warning('num_updates={} does not exist in manual lr input update2lr={}...'.format(num_updates, list(self.update2lr.items())[:min(10, (len(self.update2lr.keys()) - 1))]))
            manual_lr = self.optimizer.get_lr()
        self.optimizer.set_lr(manual_lr)
        return self.optimizer.get_lr()
_module()
class Equalize(ColorTransform):
    """Histogram-equalize the image; magnitude is accepted but unused."""

    def _transform_img(self, results: dict, mag: float) -> None:
        """Equalize ``results['img']`` in place, preserving its dtype."""
        src = results['img']
        equalized = mmcv.imequalize(src)
        results['img'] = equalized.astype(src.dtype)
def comparison_negative(logical_line):
    """Yield pycodestyle offenses for negated membership/identity tests.

    E713: ``not x in y`` should be ``x not in y``.
    E714: ``not x is y`` should be ``x is not y``.
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if not match:
        return
    pos = match.start(1)
    if match.group(2) == 'in':
        yield pos, "E713 test for membership should be 'not in'"
    else:
        yield pos, "E714 test for object identity should be 'is not'"
def get_all_registered_configs() -> Dict[(str, BaseConfig)]:
    """Return the configs registered for this framework (empty dict if none)."""
    framework_configs = registered_configs.get(FRAMEWORK_NAME, {})
    return framework_configs
def build_categorical_aleatoric_loss(samples):
    """Build a Monte-Carlo categorical loss with learned aleatoric noise.

    ``y_pred[..., 0]`` carries the logits and ``y_pred[..., 1]`` the noise
    scale; the loss averages ``samples`` noisy softmax draws in log space.
    """
    def categorical_aleatoric_loss(y_true, y_pred):
        """Return the summed MC estimate of the distorted categorical loss."""
        logits = y_pred[..., 0]
        sigma = y_pred[..., 1]
        draws = []
        for _ in range(samples):
            noise = K.random_normal(K.shape(sigma))
            probs = activations.softmax(logits + sigma * noise)
            true_class_prob = K.max(probs * y_true, axis=-1)
            draws.append(true_class_prob - K.logsumexp(probs, axis=-1))
        stacked = K.concatenate(draws)
        # log-mean-exp over the MC draws.
        loss = K.logsumexp(stacked, axis=-1) - K.log(float(samples))
        return K.sum(loss)
    return categorical_aleatoric_loss
class SpeakerClassifier(nn.Module):
    """Convolutional speaker classifier over 1-D feature sequences.

    Stacks padded Conv1d blocks with leaky-ReLU, instance norm and dropout,
    then projects to ``n_class`` logits and flattens.
    """

    def __init__(self, c_in=512, c_h=512, n_class=8, dp=0.1, ns=0.01):
        super(SpeakerClassifier, self).__init__()
        (self.dp, self.ns) = (dp, ns)
        # NOTE: layers are created in the original order so that weight
        # initialization consumes the RNG identically.
        self.conv1 = nn.Conv1d(c_in, c_h, kernel_size=5)
        self.conv2 = nn.Conv1d(c_h, c_h, kernel_size=5)
        self.conv3 = nn.Conv1d(c_h, c_h, kernel_size=5)
        self.conv4 = nn.Conv1d(c_h, c_h, kernel_size=5)
        self.conv5 = nn.Conv1d(c_h, c_h, kernel_size=5)
        self.conv6 = nn.Conv1d(c_h, c_h, kernel_size=5)
        self.conv7 = nn.Conv1d(c_h, c_h // 2, kernel_size=3)
        self.conv8 = nn.Conv1d(c_h // 2, c_h // 4, kernel_size=3)
        self.conv9 = nn.Conv1d(c_h // 4, n_class, kernel_size=16)
        self.drop1 = nn.Dropout(p=dp)
        self.drop2 = nn.Dropout(p=dp)
        self.drop3 = nn.Dropout(p=dp)
        self.drop4 = nn.Dropout(p=dp)
        self.ins_norm1 = nn.InstanceNorm1d(c_h)
        self.ins_norm2 = nn.InstanceNorm1d(c_h)
        self.ins_norm3 = nn.InstanceNorm1d(c_h)
        self.ins_norm4 = nn.InstanceNorm1d(c_h // 4)

    def conv_block(self, x, conv_layers, after_layers, res=True):
        """Run padded convs + leaky-ReLU, then norm/dropout; optional residual."""
        h = x
        for conv in conv_layers:
            h = F.leaky_relu(pad_layer(h, conv), negative_slope=self.ns)
        for post in after_layers:
            h = post(h)
        return (h + x) if res else h

    def forward(self, x):
        """Return flattened per-speaker logits for input ``x``."""
        h = self.conv_block(x, [self.conv1, self.conv2], [self.ins_norm1, self.drop1], res=False)
        h = self.conv_block(h, [self.conv3, self.conv4], [self.ins_norm2, self.drop2], res=True)
        h = self.conv_block(h, [self.conv5, self.conv6], [self.ins_norm3, self.drop3], res=True)
        h = self.conv_block(h, [self.conv7, self.conv8], [self.ins_norm4, self.drop4], res=False)
        h = self.conv9(h)
        return h.view(h.size(0), -1)
_tf2
class TestTCNForecaster(TestCase):
    """Tests for the Keras/TF2 TCNForecaster: fit/predict/evaluate,
    tsdataset integration, save/load, customized loss/metric, distributed
    training and post-training quantization."""

    def setUp(self):
        """Create a small forecaster reused by most tests."""
        from bigdl.chronos.forecaster.tf.tcn_forecaster import TCNForecaster
        self.forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=10, output_feature_num=2, num_channels=([15] * 7))

    def tearDown(self):
        del self.forecaster

    def test_tcn_forecaster_fit_predict_evaluate(self):
        """Basic fit/predict/evaluate round-trip on numpy data."""
        (train_data, _, test_data) = create_data()
        self.forecaster.fit(train_data, epochs=2, batch_size=32)
        yhat = self.forecaster.predict(test_data[0], batch_size=32)
        assert (yhat.shape == (400, 2, 2))
        mse = self.forecaster.evaluate(test_data, batch_size=32, multioutput='raw_values')
        assert (mse[0].shape == test_data[1].shape[1:])

    def test_tcn_forecaster_fit_predict_evaluate_normalization(self):
        """Same round-trip with input normalization enabled."""
        (train_data, _, test_data) = create_data()
        forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=10, output_feature_num=2, num_channels=([15] * 7), normalization=True)
        forecaster.fit(train_data, epochs=2, batch_size=32)
        yhat = forecaster.predict(test_data[0], batch_size=32)
        assert (yhat.shape == (400, 2, 2))
        mse = forecaster.evaluate(test_data, batch_size=32, multioutput='raw_values')
        assert (mse[0].shape == test_data[1].shape[1:])

    def test_tcn_forecaster_evaluate(self):
        """evaluate() must agree across tf.data, tsdataset and numpy inputs."""
        (train_tsdata, _, test_tsdata) = create_tsdataset()
        forecaster = TCNForecaster.from_tsdataset(train_tsdata, past_seq_len=24, future_seq_len=5)
        forecaster.fit(train_tsdata, epochs=1, batch_size=32)
        test = test_tsdata.to_tf_dataset(batch_size=32)
        metrics = forecaster.evaluate(test, multioutput='uniform_average')
        metrics_tsdata = forecaster.evaluate(test_tsdata, multioutput='uniform_average')
        np.testing.assert_almost_equal(metrics, metrics_tsdata, decimal=5)
        test_data = test_tsdata.to_numpy()
        metrics_data = forecaster.evaluate(test_data, multioutput='uniform_average')
        np.testing.assert_almost_equal(metrics_data, metrics_tsdata, decimal=5)

    def test_tcn_forecaster_fit_tf_data(self):
        """fit/predict should accept tf.data datasets."""
        (train_data, _, test_data) = create_data(tf_data=True)
        self.forecaster.fit(train_data, epochs=2, batch_size=32)
        yhat = self.forecaster.predict(test_data)
        assert (yhat.shape == (400, 2, 2))

    def test_tcn_forecaster_save_load(self):
        """Predictions must survive a save/load cycle."""
        (train_data, _, test_data) = create_data()
        self.forecaster.fit(train_data, epochs=2)
        yhat = self.forecaster.predict(test_data[0])
        with tempfile.TemporaryDirectory() as tmp_dir_file:
            tmp_dir_file = os.path.join(tmp_dir_file, 'tcn.ckpt')
            self.forecaster.save(tmp_dir_file)
            self.forecaster.load(tmp_dir_file)
            from bigdl.chronos.model.tf2.TCN_keras import TemporalConvNet
            assert isinstance(self.forecaster.internal, TemporalConvNet)
        load_model_yhat = self.forecaster.predict(test_data[0])
        assert (yhat.shape == (400, 2, 2))
        np.testing.assert_almost_equal(yhat, load_model_yhat, decimal=5)

    def test_tcn_customized_loss_metric(self):
        """A user-supplied loss and metric should train, save and load."""
        (train_data, _, test_data) = create_data(tf_data=True)
        loss = tf.keras.losses.MeanSquaredError()

        def customized_metric(y_true, y_pred):
            # Bug fix: the original passed the tensors to the
            # MeanSquaredError *constructor* (which takes reduction/name)
            # and then called .numpy() on the loss object. Instantiate the
            # loss, then call it on (y_true, y_pred).
            return tf.keras.losses.MeanSquaredError()(
                tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred)).numpy()

        from bigdl.chronos.forecaster.tf.tcn_forecaster import TCNForecaster
        self.forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=10, output_feature_num=2, num_channels=([15] * 7), loss=loss, metrics=[customized_metric], lr=0.01)
        self.forecaster.fit(train_data, epochs=2, batch_size=32)
        yhat = self.forecaster.predict(test_data)
        with tempfile.TemporaryDirectory() as tmp_dir_file:
            tmp_dir_file = os.path.join(tmp_dir_file, 'tcn.ckpt')
            self.forecaster.save(tmp_dir_file)
            self.forecaster.load(tmp_dir_file)
            from bigdl.chronos.model.tf2.TCN_keras import TemporalConvNet
            assert isinstance(self.forecaster.internal, TemporalConvNet)
        load_model_yhat = self.forecaster.predict(test_data)
        assert (yhat.shape == (400, 2, 2))
        np.testing.assert_almost_equal(yhat, load_model_yhat, decimal=5)

    def test_tcn_from_tsdataset(self):
        """from_tsdataset should work with both pre-rolled and raw datasets."""
        (train, _, test) = create_tsdataset(roll=True)
        tcn = TCNForecaster.from_tsdataset(train, num_channels=([16] * 2))
        tcn.fit(train, epochs=2, batch_size=32)
        yhat = tcn.predict(test, batch_size=32)
        test.roll(lookback=tcn.model_config['past_seq_len'], horizon=tcn.model_config['future_seq_len'])
        (_, y_test) = test.to_numpy()
        assert (yhat.shape == y_test.shape)
        del tcn
        (train, _, test) = create_tsdataset(roll=False)
        tcn = TCNForecaster.from_tsdataset(train, past_seq_len=24, future_seq_len=5, num_channels=([16] * 2))
        tcn.fit(train, epochs=2, batch_size=32)
        yhat = tcn.predict(test, batch_size=None)
        test.roll(lookback=tcn.model_config['past_seq_len'], horizon=tcn.model_config['future_seq_len'])
        (_, y_test) = test.to_numpy()
        assert (yhat.shape == y_test.shape)

    _distributed
    def test_tcn_forecaster_distributed(self):
        """Distributed fit/predict/evaluate must match local after to_local()."""
        from bigdl.orca import init_orca_context, stop_orca_context
        (train_data, val_data, test_data) = create_data()
        init_orca_context(cores=4, memory='4g')
        forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=10, output_feature_num=2, kernel_size=3, lr=0.001, distributed=True)
        forecaster.fit(train_data, epochs=2)
        distributed_pred = forecaster.predict(test_data[0])
        distributed_eval = forecaster.evaluate(val_data)
        model = forecaster.get_model()
        from bigdl.chronos.model.tf2.TCN_keras import TemporalConvNet
        assert isinstance(model, TemporalConvNet)
        with tempfile.TemporaryDirectory() as tmp_file_name:
            name = os.path.join(tmp_file_name, 'tcn.ckpt')
            test_pred_save = forecaster.predict(test_data[0])
            forecaster.save(name)
            forecaster.load(name)
            test_pred_load = forecaster.predict(test_data[0])
        np.testing.assert_almost_equal(test_pred_save, test_pred_load)
        forecaster.to_local()
        local_pred = forecaster.predict(test_data[0])
        local_eval = forecaster.evaluate(val_data)
        np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5)
        stop_orca_context()

    _distributed
    def test_tcn_forecaster_distributed_illegal_input(self):
        """Distributed mode rejects tf.data input for fit/evaluate."""
        from bigdl.orca import init_orca_context, stop_orca_context
        init_orca_context(cores=4, memory='4g')
        forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=2, output_feature_num=2, kernel_size=3, lr=0.001, distributed=True)
        (train_data, _, test_data) = create_data(tf_data=True)
        (ts_train, _, ts_test) = create_tsdataset(roll=False)
        (_, y_test) = ts_test.roll(lookback=10, horizon=2).to_numpy()
        forecaster.fit(ts_train, epochs=2)
        yhat = forecaster.predict(ts_test)
        assert (yhat.shape == y_test.shape)
        res = forecaster.evaluate(ts_test)
        with pytest.raises(RuntimeError):
            forecaster.fit(train_data)
        with pytest.raises(RuntimeError):
            forecaster.evaluate(test_data)
        stop_orca_context()

    _inference
    def test_tcn_keras_forecaster_quantization(self):
        """INT8 quantization should produce an accelerated model with the
        expected prediction shape."""
        (train_data, _, test_data) = create_data()
        forecaster = TCNForecaster(past_seq_len=10, future_seq_len=2, input_feature_num=10, output_feature_num=2)
        forecaster.fit(train_data, epochs=1)
        forecaster.quantize(input_data=train_data[0], target_data=train_data[1])
        assert forecaster.accelerated_model
        assert (forecaster.accelerate_method == 'tensorflow_int8')
        pred_q = forecaster.predict(test_data[0], quantize=True)
        eval_q = forecaster.evaluate(test_data, quantize=True)
        assert (pred_q.shape == test_data[1].shape)
class TwoWayABlock(nn.Module):
    """Inception-ResNet "A"-style residual unit with three parallel branches.

    The branch outputs are concatenated (32 + 32 + 64 = 128 channels) and
    projected back to the 384 input channels by a 1x1 conv with no activation.
    """

    def __init__(self):
        super(TwoWayABlock, self).__init__()
        in_channels = 384
        self.branches = Concurrent()
        # Branches are registered in the original order so parameter names
        # and initialization RNG consumption are unchanged.
        self.branches.add_module('branch1', ConvSeqBranch(in_channels=in_channels, out_channels_list=(32, 48, 64), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1)))
        self.branches.add_module('branch2', ConvSeqBranch(in_channels=in_channels, out_channels_list=(32, 32), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1)))
        self.branches.add_module('branch3', Conv1x1Branch(in_channels=in_channels, out_channels=32))
        self.conv = conv1x1_block(in_channels=128, out_channels=in_channels, activation=None)

    def forward(self, x):
        """Run the parallel branches and merge them back to 384 channels."""
        merged = self.branches(x)
        return self.conv(merged)
def get_available_video_models():
    """List the callable, public, lowercase-named entries of ``models.video``."""
    available = []
    for name, obj in models.video.__dict__.items():
        # Model factories are lowercase functions; skip classes and privates.
        if callable(obj) and name[0].lower() == name[0] and name[0] != '_':
            available.append(name)
    return available
def test_list_insert():
    """list.insert must shift tracked subscript symbols to their new slots."""
    def check(value, expected_name):
        # Assert the symbol for `value` carries the expected readable name.
        sym = lookup_symbol(value)
        assert sym.readable_name == expected_name, ('got %s' % sym.readable_name)
        return sym

    run_cell('lst = [0, 1, 2, 4, 5, 6]')
    check(2, 'lst[2]')
    check(4, 'lst[3]')
    assert lookup_symbol(3) is None
    run_cell('lst.insert(3, 3)')
    sym_two = check(2, 'lst[2]')
    assert sym_two.obj == 2
    sym_three = check(3, 'lst[3]')
    assert sym_three.obj == 3
    sym_four = check(4, 'lst[4]')
    assert sym_four.obj == 4
    assert sym_four.containing_namespace.lookup_data_symbol_by_name_this_indentation(4, is_subscript=True) is sym_four
class NoopProgressBar(BaseProgressBar):
    """Progress bar that yields items untouched and renders nothing."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super().__init__(iterable, epoch, prefix)

    def __iter__(self):
        yield from self.iterable

    def log(self, stats, tag=None, step=None):
        """Intentionally a no-op."""
        pass

    def print(self, stats, tag=None, step=None):
        """Intentionally a no-op."""
        pass
class BertTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def add_special_tokens_single_sentence(self, token_ids):
return (([self._convert_token_to_id(self.cls_token)] + token_ids) + [self._convert_token_to_id(self.sep_token)])
def add_special_tokens_sentences_pair(self, *token_ids):
sep = [self._convert_token_to_id(self.sep_token)]
cls = [self._convert_token_to_id(self.cls_token)]
return ((((cls + token_ids[0]) + sep) + token_ids[1]) + sep)
def save_vocabulary(self, vocab_path):
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
index = token_index
writer.write((token + u'\n'))
index += 1
return (vocab_file,)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
    """Load a pretrained tokenizer, forcing ``do_lower_case`` to match the
    cased/uncased variant of known checkpoints (with a warning on mismatch)."""
    if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES:
        wants_lower = kwargs.get('do_lower_case', True)
        is_cased = '-cased' in pretrained_model_name_or_path
        if is_cased and wants_lower:
            logger.warning('The pre-trained model you are loading is a cased model but you have not set `do_lower_case` to False. We are setting `do_lower_case=False` for you but you may want to check this behavior.')
            kwargs['do_lower_case'] = False
        elif (not is_cased) and (not wants_lower):
            logger.warning('The pre-trained model you are loading is an uncased model but you have set `do_lower_case` to False. We are setting `do_lower_case=True` for you but you may want to check this behavior.')
            kwargs['do_lower_case'] = True
    return super(BertTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# NOTE(review): ``_module()`` looks like a mangled registry decorator
# (presumably ``@MODELS.register_module()``) -- confirm against upstream mmdet.
_module()
class StandardRoIHead(BaseRoIHead):
    """Simplest two-stage RoI head: a bbox branch plus an optional mask branch."""

    def init_assigner_sampler(self) -> None:
        """Build the train-time bbox assigner and sampler from ``train_cfg``."""
        self.bbox_assigner = None
        self.bbox_sampler = None
        if self.train_cfg:
            self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner)
            # The sampler receives this head as ``context`` (used e.g. by OHEM).
            self.bbox_sampler = TASK_UTILS.build(self.train_cfg.sampler, default_args=dict(context=self))

    def init_bbox_head(self, bbox_roi_extractor: ConfigType, bbox_head: ConfigType) -> None:
        """Build the bbox RoI feature extractor and the bbox head."""
        self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)
        self.bbox_head = MODELS.build(bbox_head)

    def init_mask_head(self, mask_roi_extractor: ConfigType, mask_head: ConfigType) -> None:
        """Build the mask head; reuse the bbox RoI extractor when none is given."""
        if (mask_roi_extractor is not None):
            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
            self.share_roi_extractor = False
        else:
            # No dedicated mask extractor: share RoI features with the bbox branch.
            self.share_roi_extractor = True
            self.mask_roi_extractor = self.bbox_roi_extractor
        self.mask_head = MODELS.build(mask_head)

    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList=None) -> tuple:
        """Dummy forward over the raw heads (no loss / post-processing).

        Returns a tuple of (cls_score, bbox_pred[, mask_preds]) depending on
        which branches are present.
        """
        results = ()
        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
        rois = bbox2roi(proposals)
        if self.with_bbox:
            bbox_results = self._bbox_forward(x, rois)
            results = (results + (bbox_results['cls_score'], bbox_results['bbox_pred']))
        if self.with_mask:
            # Cap at 100 rois to keep this dummy forward pass cheap.
            mask_rois = rois[:100]
            mask_results = self._mask_forward(x, mask_rois)
            results = (results + (mask_results['mask_preds'],))
        return results

    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: List[DetDataSample]) -> dict:
        """Assign/sample proposals per image, then compute bbox (and mask) losses."""
        assert (len(rpn_results_list) == len(batch_data_samples))
        outputs = unpack_gt_instances(batch_data_samples)
        (batch_gt_instances, batch_gt_instances_ignore, _) = outputs
        num_imgs = len(batch_data_samples)
        sampling_results = []
        for i in range(num_imgs):
            rpn_results = rpn_results_list[i]
            # Rename ``bboxes`` to ``priors`` as expected by assigner/sampler.
            rpn_results.priors = rpn_results.pop('bboxes')
            assign_result = self.bbox_assigner.assign(rpn_results, batch_gt_instances[i], batch_gt_instances_ignore[i])
            sampling_result = self.bbox_sampler.sample(assign_result, rpn_results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x])
            sampling_results.append(sampling_result)
        losses = dict()
        if self.with_bbox:
            bbox_results = self.bbox_loss(x, sampling_results)
            losses.update(bbox_results['loss_bbox'])
        if self.with_mask:
            # Mask branch may reuse the bbox RoI features (shared extractor case).
            mask_results = self.mask_loss(x, sampling_results, bbox_results['bbox_feats'], batch_gt_instances)
            losses.update(mask_results['loss_mask'])
        return losses

    def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
        """Extract RoI features for ``rois`` and run the bbox head."""
        bbox_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
        if self.with_shared_head:
            bbox_feats = self.shared_head(bbox_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_feats)
        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        return bbox_results

    def bbox_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult]) -> dict:
        """Run the bbox branch on sampled priors and attach its loss dict."""
        rois = bbox2roi([res.priors for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)
        bbox_loss_and_target = self.bbox_head.loss_and_target(cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg)
        bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
        return bbox_results

    def mask_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult], bbox_feats: Tensor, batch_gt_instances: InstanceList) -> dict:
        """Run the mask branch on positive samples and attach its loss dict."""
        if (not self.share_roi_extractor):
            # Dedicated extractor: only positive rois need features.
            pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
            mask_results = self._mask_forward(x, pos_rois)
        else:
            # Shared extractor: select positive rows out of the bbox features.
            pos_inds = []
            device = bbox_feats.device
            for res in sampling_results:
                pos_inds.append(torch.ones(res.pos_priors.shape[0], device=device, dtype=torch.uint8))
                pos_inds.append(torch.zeros(res.neg_priors.shape[0], device=device, dtype=torch.uint8))
            pos_inds = torch.cat(pos_inds)
            mask_results = self._mask_forward(x, pos_inds=pos_inds, bbox_feats=bbox_feats)
        mask_loss_and_target = self.mask_head.loss_and_target(mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg)
        mask_results.update(loss_mask=mask_loss_and_target['loss_mask'])
        return mask_results

    def _mask_forward(self, x: Tuple[Tensor], rois: Tensor=None, pos_inds: Optional[Tensor]=None, bbox_feats: Optional[Tensor]=None) -> dict:
        """Run the mask head on either fresh RoI features or shared bbox features.

        Exactly one of ``rois`` or (``pos_inds`` + ``bbox_feats``) must be given.
        """
        assert ((rois is not None) ^ ((pos_inds is not None) and (bbox_feats is not None)))
        if (rois is not None):
            mask_feats = self.mask_roi_extractor(x[:self.mask_roi_extractor.num_inputs], rois)
            if self.with_shared_head:
                mask_feats = self.shared_head(mask_feats)
        else:
            assert (bbox_feats is not None)
            mask_feats = bbox_feats[pos_inds]
        mask_preds = self.mask_head(mask_feats)
        mask_results = dict(mask_preds=mask_preds, mask_feats=mask_feats)
        return mask_results

    def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool=False) -> InstanceList:
        """Predict detection results from proposals (one InstanceData per image)."""
        proposals = [res.bboxes for res in rpn_results_list]
        rois = bbox2roi(proposals)
        if (rois.shape[0] == 0):
            # No proposals at all: emit empty results of the right type.
            return empty_instances(batch_img_metas, rois.device, task_type='bbox', box_type=self.bbox_head.predict_box_type, num_classes=self.bbox_head.num_classes, score_per_cls=(rcnn_test_cfg is None))
        bbox_results = self._bbox_forward(x, rois)
        cls_scores = bbox_results['cls_score']
        bbox_preds = bbox_results['bbox_pred']
        # Split the batched outputs back into per-image chunks.
        num_proposals_per_img = tuple((len(p) for p in proposals))
        rois = rois.split(num_proposals_per_img, 0)
        cls_scores = cls_scores.split(num_proposals_per_img, 0)
        if (bbox_preds is not None):
            if isinstance(bbox_preds, torch.Tensor):
                bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
            else:
                # Some heads return a non-tensor structure; let the head split it.
                bbox_preds = self.bbox_head.bbox_pred_split(bbox_preds, num_proposals_per_img)
        else:
            bbox_preds = ((None,) * len(proposals))
        result_list = self.bbox_head.predict_by_feat(rois=rois, cls_scores=cls_scores, bbox_preds=bbox_preds, batch_img_metas=batch_img_metas, rcnn_test_cfg=rcnn_test_cfg, rescale=rescale)
        return result_list

    def predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: InstanceList, rescale: bool=False) -> InstanceList:
        """Predict instance masks for already-detected boxes in ``results_list``."""
        bboxes = [res.bboxes for res in results_list]
        mask_rois = bbox2roi(bboxes)
        if (mask_rois.shape[0] == 0):
            results_list = empty_instances(batch_img_metas, mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary)
            return results_list
        mask_results = self._mask_forward(x, mask_rois)
        mask_preds = mask_results['mask_preds']
        # Split the batched mask predictions back per image.
        num_mask_rois_per_img = [len(res) for res in results_list]
        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
        results_list = self.mask_head.predict_by_feat(mask_preds=mask_preds, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale)
        return results_list
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the character-level CANINE tokenizer.

    NOTE(review): the bare ``_property`` / ``_torch`` / ``_tokenizers``
    statements below look like mangled decorators (presumably ``@cached_property``
    / ``@require_torch`` / ``@require_tokenizers``) -- confirm against upstream.
    """

    tokenizer_class = CanineTokenizer
    # CANINE has no rust (fast) tokenizer implementation.
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Save a default tokenizer so get_tokenizer() can reload it from disk.
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    _property
    def canine_tokenizer(self):
        """Pretrained tokenizer used by the integration tests."""
        return CanineTokenizer.from_pretrained('google/canine-s')

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # Shrink the unicode vocab to keep the generic mixin tests fast.
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    _torch
    def test_prepare_batch_integration(self):
        """Batch encoding pads to the longest sequence and adds CLS/SEP codepoints."""
        tokenizer = self.canine_tokenizer
        src_text = ['Life is like a box of chocolates.', "You never know what you're gonna get."]
        # 57344 / 57345 are the private-use CLS / SEP codepoints; trailing 0s are padding.
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    _torch
    def test_encoding_keys(self):
        """The encoding exposes input_ids, attention_mask and token_type_ids."""
        tokenizer = self.canine_tokenizer
        src_text = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertIn('token_type_ids', batch)

    _torch
    def test_max_length_integration(self):
        """Target-side encoding honors max_length padding/truncation."""
        tokenizer = self.canine_tokenizer
        tgt_text = ["What's the weater?", "It's about 25 degrees."]
        with tokenizer.as_target_tokenizer():
            targets = tokenizer(tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors='pt')
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_save_and_load_tokenizer(self):
        """Round-trips the tokenizer through save_pretrained/from_pretrained."""
        # Safety check on the default model_max_length used below.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Round-trip with no extra configuration.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwanted,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        # Round-trip with an added special token and a custom model_max_length.
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwanted,running'
                additional_special_tokens = tokenizer.additional_special_tokens
                # Private-use-area codepoint used as an extra special token.
                new_additional_special_token = chr(57351)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                # from_pretrained kwargs override the serialized configuration.
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        """An added CLS token encodes to one id and is dropped when skipping specials."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                (input_text, ids) = self.get_clean_sequence(tokenizer)
                # Private-use-area codepoint used as the CLS token.
                SPECIAL_TOKEN = 57349
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode((ids + encoded_special_token), clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, (input_encoded + special_token_id))
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue((special_token not in decoded))

    def test_tokenize_special_tokens(self):
        """Special tokens added via either API tokenize to themselves, unsplit."""
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                SPECIAL_TOKEN_1 = chr(57349)
                SPECIAL_TOKEN_2 = chr(57350)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    _tokenizers
    def test_added_token_serializable(self):
        """An AddedToken special token survives save_pretrained/from_pretrained."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                NEW_TOKEN = 57350
                new_token = chr(NEW_TOKEN)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    # NOTE(review): instance-level from_pretrained -- presumably
                    # intended as a smoke test that reloading does not raise.
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """Extra special tokens injected into the saved config are honored on load."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for (tokenizer_class, tokenizer_utils) in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                NEW_TOKEN = 57350
                new_token_1 = chr(NEW_TOKEN)
                # Inject the token directly into the serialized config files.
                special_tokens_map['additional_special_tokens'] = [new_token_1]
                tokenizer_config['additional_special_tokens'] = [new_token_1]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual([new_token_1], tokenizer_without_change_in_init.convert_ids_to_tokens(tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])))
                # Tokens passed as AddedToken kwargs at init time also work.
                NEW_TOKEN = 57351
                new_token_2 = chr(NEW_TOKEN)
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                self.assertEqual([new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])))

    _tokenizers
    def test_encode_decode_with_spaces(self):
        """Encode/decode round-trip preserves the input text (modulo casing)."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input = 'hello world'
                if self.space_between_special_tokens:
                    output = '[CLS] hello world [SEP]'
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    # The generic mixin tests below do not apply to a character-level
    # tokenizer with no conventional vocabulary, so they are disabled.
    def test_add_tokens_tokenizer(self):
        pass

    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
def main():
    """Manage stale issues on huggingface/diffusers.

    - Closes issues whose last comment is the stale bot's, inactive > 7 days,
      at least 30 days old, and carrying no exempt label.
    - Re-opens (and un-labels) stale issues that a human replied to.
    - Comments on and labels issues inactive > 23 days and at least 30 days old.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=(lambda i: i.created_at), reverse=True)
        last_comment = (comments[0] if (len(comments) > 0) else None)
        # Fetch labels once per issue; compare by lowercase *name*.
        # BUGFIX: ``'stale' in issue.get_labels()`` compared a str against
        # PyGithub Label objects and was always False, so stale issues were
        # never re-opened.
        label_names = [label.name.lower() for label in issue.get_labels()]
        exempt = any((name in LABELS_TO_EXEMPT) for name in label_names)
        if ((last_comment is not None) and (last_comment.user.login == 'github-actions[bot]') and ((dt.now(timezone.utc) - issue.updated_at).days > 7) and ((dt.now(timezone.utc) - issue.created_at).days >= 30) and (not exempt)):
            issue.edit(state='closed')
        elif (('stale' in label_names) and (last_comment is not None) and (last_comment.user.login != 'github-actions[bot]')):
            issue.edit(state='open')
            issue.remove_from_labels('stale')
        elif (((dt.now(timezone.utc) - issue.updated_at).days > 23) and ((dt.now(timezone.utc) - issue.created_at).days >= 30) and (not exempt)):
            issue.create_comment('This issue has been automatically marked as stale because it has not had recent activity. If you think this still needs to be addressed please comment on this thread.\n\nPlease note that issues that do not follow the [contributing guidelines]( are likely to be ignored.')
            issue.add_to_labels('stale')
class SmoothedValue():
    """Track a series of values and expose smoothed statistics.

    The last ``window_size`` values are kept in a deque (for median/avg/max/
    value), while ``total``/``count`` accumulate over all updates (for the
    global average).
    """

    def __init__(self, window_size=20, fmt=None):
        if (fmt is None):
            fmt = '{median:.4f} ({global_avg:.4f})'
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, num=1):
        """Record *value*, counted *num* times toward the global average."""
        self.deque.append(value)
        self.count += num
        self.total += (value * num)

    def synchronize_between_processes(self):
        """All-reduce count/total across distributed workers (deque stays local)."""
        if (not is_dist_avail_and_initialized()):
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    # BUGFIX: the accessors below must be properties -- ``__str__`` interpolates
    # them as plain values, so without @property it formatted bound-method reprs.
    @property
    def median(self):
        """Median of the windowed values."""
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        """Mean of the windowed values."""
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return (self.total / self.count)

    @property
    def max(self):
        """Maximum of the windowed values."""
        return max(self.deque)

    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[(- 1)]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
# NOTE(review): the two lines below look like mangled registry decorators
# (presumably ``@DATASETS.register_module()``) -- kept as-is.
_module()
_DATASETS.register_module()
class S3DISSegDataset(_S3DISSegDataset):
    """Concatenation wrapper over several S3DIS area annotation files.

    Accepts a single ann file or a list of them; builds one child dataset per
    file and concatenates their data infos and scene indices into this one.
    """

    def __init__(self, data_root, ann_files, pipeline=None, classes=None, palette=None, modality=None, test_mode=False, ignore_index=None, scene_idxs=None):
        ann_files = self._check_ann_files(ann_files)
        scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))
        # Initialize as the first-area dataset so all base attributes exist.
        super().__init__(data_root=data_root, ann_file=ann_files[0], pipeline=pipeline, classes=classes, palette=palette, modality=modality, test_mode=test_mode, ignore_index=ignore_index, scene_idxs=scene_idxs[0])
        datasets = [_S3DISSegDataset(data_root=data_root, ann_file=ann_files[i], pipeline=pipeline, classes=classes, palette=palette, modality=modality, test_mode=test_mode, ignore_index=ignore_index, scene_idxs=scene_idxs[i]) for i in range(len(ann_files))]
        self.concat_data_infos([dst.data_infos for dst in datasets])
        self.concat_scene_idxs([dst.scene_idxs for dst in datasets])
        if (not self.test_mode):
            self._set_group_flag()

    def concat_data_infos(self, data_infos):
        """Flatten the per-dataset info lists into one list."""
        self.data_infos = [info for one_data_infos in data_infos for info in one_data_infos]

    def concat_scene_idxs(self, scene_idxs):
        """Concatenate per-dataset scene indices, offsetting each chunk so ids stay unique."""
        self.scene_idxs = np.array([], dtype=np.int32)
        offset = 0
        for one_scene_idxs in scene_idxs:
            self.scene_idxs = np.concatenate([self.scene_idxs, (one_scene_idxs + offset)]).astype(np.int32)
            offset = (np.unique(self.scene_idxs).max() + 1)

    @staticmethod
    def _duplicate_to_list(x, num):
        # BUGFIX: restored @staticmethod -- without it, bound calls shifted the
        # arguments (``self`` was bound to ``x`` and the payload to ``num``),
        # producing garbage lists.
        return [x for _ in range(num)]

    def _check_ann_files(self, ann_file):
        """Ensure ann_file is a list/tuple."""
        if (not isinstance(ann_file, (list, tuple))):
            ann_file = self._duplicate_to_list(ann_file, 1)
        return ann_file

    def _check_scene_idxs(self, scene_idx, num):
        """Normalize scene_idx into a per-dataset list of length ``num``."""
        if (scene_idx is None):
            return self._duplicate_to_list(scene_idx, num)
        # A single str (e.g. a path template) is replicated per dataset.
        if isinstance(scene_idx, str):
            return self._duplicate_to_list(scene_idx, num)
        # Already a per-dataset list of paths or index arrays.
        if isinstance(scene_idx[0], str):
            return scene_idx
        if isinstance(scene_idx[0], (list, tuple, np.ndarray)):
            return scene_idx
        # A single flat index list: replicate it per dataset.
        return self._duplicate_to_list(scene_idx, num)
class CarsDataModule(pl.LightningDataModule):
    """Lightning data module wrapping the train/val/test CarsDataset splits."""

    def __init__(self, args):
        super().__init__()
        self.data_root = args.data_root
        self.batch_size = args.batch_size
        self.num_workers = args.num_workers

        def build_split(split):
            # Flip augmentation is disabled for every split.
            return CarsDataset(args.data_root, args.resolution, split, use_flip=False, use_rescale=args.use_rescale)

        self.train_dataset = build_split('train')
        self.val_dataset = build_split('val')
        self.test_dataset = build_split('test')

    def _make_loader(self, dataset, shuffle):
        """Shared DataLoader configuration for all three splits."""
        return DataLoader(dataset, batch_size=self.batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=True)

    def train_dataloader(self):
        return self._make_loader(self.train_dataset, True)

    def val_dataloader(self):
        return self._make_loader(self.val_dataset, False)

    def test_dataloader(self):
        return self._make_loader(self.test_dataset, False)
def parameters():
    """Default TransT tracker parameters: GPU enabled, no debug visualization."""
    params = TrackerParams()
    for attr, value in (('debug', 0), ('visualization', False), ('use_gpu', True)):
        setattr(params, attr, value)
    params.net = NetWithBackbone(net_path='transt.pth', use_gpu=params.use_gpu)
    return params
return params |
def save_csv(f_name, names, *args):
    """Write columns *args (with headers *names*) to CSV file *f_name*.

    Columns that are ``None`` are skipped (their header too).  All remaining
    columns are zipped row-wise, so output is truncated to the shortest column.
    """
    # Pair each header with its column, dropping the None placeholders.
    valid = [(names[i], col) for i, col in enumerate(args) if col is not None]
    # BUGFIX: used Python-2 ``xrange`` (NameError on Python 3); also open with
    # newline='' as the csv module requires to avoid blank lines on Windows.
    with open(f_name, 'w', newline='') as f:
        w = csv.writer(f)
        w.writerow([name for name, _ in valid])
        w.writerows(zip(*[col for _, col in valid]))
def download_train_test_url_txt():
    """Fetch the FashionVideo train/test URL list files, raising on any failure."""
    global FashionVideo_root_dir, FashionVideo_train_url_txt, FashionVideo_test_url_txt, TRAIN_URL, TEST_URL
    jobs = ((TRAIN_URL, FashionVideo_train_url_txt), (TEST_URL, FashionVideo_test_url_txt))
    for url, target in jobs:
        ok = download_from_url_to_file(url, target)
        if (not ok) or (not os.path.exists(target)):
            raise_error(f'Download {url} failed.')
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
logger = logging.getLogger(__name__)
logger.info('Remapping conv weights for deformable conv weights')
layer_keys = sorted(state_dict.keys())
for (ix, stage_with_dcn) in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if (not stage_with_dcn):
continue
for old_key in layer_keys:
pattern = '.*layer{}.*conv2.*'.format(ix)
r = re.match(pattern, old_key)
if (r is None):
continue
for param in ['weight', 'bias']:
if (old_key.find(param) is (- 1)):
continue
new_key = old_key.replace('conv2.{}'.format(param), 'conv2.conv.{}'.format(param))
logger.info('pattern: {}, old_key: {}, new_key: {}'.format(pattern, old_key, new_key))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict |
class SegformerDecodeHead(metaclass=DummyObject):
    # Auto-generated placeholder used when the torch backend is unavailable:
    # any instantiation raises a helpful error via requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def main():
    """Run SingleGAN translation over the test set and write an HTML gallery."""
    opt = TestOptions().parse()
    # Evaluation settings: deterministic, one image at a time.
    opt.no_flip = True
    opt.batchSize = 1
    model = SingleGAN()
    model.initialize(opt)
    data_loader = CreateDataLoader(opt)
    web_dir = os.path.join(opt.results_dir, 'test')
    webpage = html.HTML(web_dir, 'task {}'.format(opt.name))
    # Only the first ``how_many`` inputs are translated.
    for idx, batch in enumerate(islice(data_loader, opt.how_many)):
        print(('process input image %3.3d/%3.3d' % (idx, opt.how_many)))
        all_images, all_names = model.translation(batch)
        img_path = ('image%3.3i' % idx)
        save_images(webpage, all_images, all_names, img_path, None, width=opt.fineSize)
    webpage.save()
def get_final_epoch(config):
    """Return ``total_epochs`` from config file *config* under ./configs/."""
    cfg_path = './configs/' + config
    return mmcv.Config.fromfile(cfg_path).total_epochs
class Validator(object):
    """Collects per-step state/action samples from MCTS paths and plots them.

    Args:
        state_vars: names of state attributes to record at each step.
        action_vars: names of action attributes to record at each step.
        plots: list of (x_key, y_key) pairs to draw with matplotlib.
        rows, cols: subplot grid layout used when plotting.
    """

    def __init__(self, state_vars, action_vars, plots=None, rows=1, cols=1):
        # BUGFIX: ``plots`` was a shared mutable default argument; also removed
        # a duplicate ``self.best_thread`` assignment (a list immediately
        # overwritten by 0).
        self.threads = []
        self.best_thread = 0
        self.next_thread = 0
        self.state_vars = state_vars
        self.action_vars = action_vars
        self.plots = [] if plots is None else plots
        self.rows = rows
        self.cols = cols

    def __call__(self, policies, node):
        """Process the path extracted for ``node`` and remember the best thread."""
        path = policies.extract(node)
        self.process(node)
        self.best_thread = self.process_path(path)

    def process(self, node, paths=None):
        """Descend through a set of MCTS nodes and collect data from each of
        them, adding each one to the appropriate "thread" tracking that
        simulated trajectory.

        If thread is none, there's no parent. Otherwise we can copy data from
        the parent.  (Not implemented yet.)
        """
        pass

    def process_path(self, path):
        """Flatten a node path into per-step samples and draw the configured plots."""
        import matplotlib.pyplot as plt
        data = []
        for node in path:
            for (s, a) in node.traj:
                sample = {}
                current_state_vars = vars(s)
                current_action_vars = vars(a)
                for var in self.state_vars:
                    sample[('state.%s' % var)] = current_state_vars[var]
                for var in self.action_vars:
                    sample[('action.%s' % var)] = current_action_vars[var]
                data.append(sample)
        if (len(self.plots) > 0):
            plt.figure()
            for (i, (x, y)) in enumerate(self.plots):
                xdata = [sample[x] for sample in data]
                ydata = [sample[y] for sample in data]
                plt.subplot(self.rows, self.cols, (i + 1))
                plt.plot(xdata, ydata)
                plt.xlabel(x)
                plt.ylabel(y)
# NOTE(review): ``_module`` here (and ``_fp16()`` / ``_fp32(...)`` below) look
# like mangled decorators (likely a registry decorator plus ``@auto_fp16()`` /
# ``@force_fp32(...)``) -- confirm against the upstream mmdet source.
_module
class FusedSemanticHead(nn.Module):
    """Multi-level fused semantic segmentation head.

    Laterally projects every input level, fuses them at ``fusion_level``
    resolution, runs a small conv stack, and predicts per-pixel class logits
    plus a semantic embedding feature map.
    """

    def __init__(self, num_ins, fusion_level, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2, conv_cfg=None, norm_cfg=None):
        super(FusedSemanticHead, self).__init__()
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.ignore_label = ignore_label
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        # One 1x1 lateral conv per input level.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(ConvModule(self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False))
        # 3x3 conv stack applied after fusion.
        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = (self.in_channels if (i == 0) else conv_out_channels)
            self.convs.append(ConvModule(in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.conv_embedding = ConvModule(conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
        self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)

    def init_weights(self):
        """Kaiming-initialize the final logits conv."""
        kaiming_init(self.conv_logits)

    _fp16()
    def forward(self, feats):
        """Fuse all levels at ``fusion_level`` resolution; return (logits, embedding)."""
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[(- 2):])
        for (i, feat) in enumerate(feats):
            if (i != self.fusion_level):
                # Resize each other level to the fusion resolution and add it.
                feat = F.interpolate(feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)
        for i in range(self.num_convs):
            x = self.convs[i](x)
        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return (mask_pred, x)

    _fp32(apply_to=('mask_pred',))
    def loss(self, mask_pred, labels):
        """Weighted cross-entropy semantic loss (``ignore_label`` pixels skipped)."""
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        loss_semantic_seg *= self.loss_weight
        return loss_semantic_seg
class conv2DBatchNorm(nn.Module):
    """A convolution followed by 2D batch normalization (no activation)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, bias):
        super().__init__()
        # NOTE(review): ``conv`` (lowercase) is presumably a module-level conv
        # factory -- confirm it exists; an ``nn.Conv2d`` may have been intended.
        self.conv = conv(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
        # NOTE(review): ``factor`` is not defined in this block -- presumably a
        # module-level constant.  If factor != 1 the BatchNorm channel count
        # will not match the conv output channels; verify against the globals.
        self.batchnorm = nn.BatchNorm2d((out_channels * factor))

    def forward(self, x):
        x = self.conv(x)
        outputs = self.batchnorm(x)
        return outputs
def test_moduledict_weight_init():
    """ModuleDict.init_weights honors each child's own init_cfg.

    Verifies constant init of per-child configs, and that a container-level
    init_cfg does NOT override children that already carry their own init_cfg
    (the asserts after the second build still expect the children's 0/1/2/3
    constants, not the container's 4/5).
    """
    models_cfg = dict(foo_conv_1d=dict(type='FooConv1d', init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)), foo_conv_2d=dict(type='FooConv2d', init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0)))
    layers = {name: build_from_cfg(cfg, COMPONENTS) for (name, cfg) in models_cfg.items()}
    modeldict = ModuleDict(layers)
    modeldict.init_weights()
    # Each child is initialized from its own init_cfg.
    assert torch.equal(modeldict['foo_conv_1d'].conv1d.weight, torch.full(modeldict['foo_conv_1d'].conv1d.weight.shape, 0.0))
    assert torch.equal(modeldict['foo_conv_1d'].conv1d.bias, torch.full(modeldict['foo_conv_1d'].conv1d.bias.shape, 1.0))
    assert torch.equal(modeldict['foo_conv_2d'].conv2d.weight, torch.full(modeldict['foo_conv_2d'].conv2d.weight.shape, 2.0))
    assert torch.equal(modeldict['foo_conv_2d'].conv2d.bias, torch.full(modeldict['foo_conv_2d'].conv2d.bias.shape, 3.0))
    # Rebuild with a container-level init_cfg: children keep their own configs.
    layers = {name: build_from_cfg(cfg, COMPONENTS) for (name, cfg) in models_cfg.items()}
    modeldict = ModuleDict(layers, init_cfg=dict(type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    modeldict.init_weights()
    assert torch.equal(modeldict['foo_conv_1d'].conv1d.weight, torch.full(modeldict['foo_conv_1d'].conv1d.weight.shape, 0.0))
    assert torch.equal(modeldict['foo_conv_1d'].conv1d.bias, torch.full(modeldict['foo_conv_1d'].conv1d.bias.shape, 1.0))
    assert torch.equal(modeldict['foo_conv_2d'].conv2d.weight, torch.full(modeldict['foo_conv_2d'].conv2d.weight.shape, 2.0))
    assert torch.equal(modeldict['foo_conv_2d'].conv2d.bias, torch.full(modeldict['foo_conv_2d'].conv2d.bias.shape, 3.0))
class BNTranspose(nn.Module):
    """Elementwise learnable scaling by a per-feature ``weight`` parameter.

    NOTE(review): ``weight`` is created from uninitialized storage and
    ``reset_parameters`` is a no-op -- presumably callers load weights before
    use; confirm.
    """

    def __init__(self, num_features):
        super(BNTranspose, self).__init__()
        self.num_features = num_features
        self.weight = nn.Parameter(torch.Tensor(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Intentionally left empty (see class note).
        pass

    def forward(self, input):
        return input * self.weight

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.num_features})'
def english_cleaners(text, table=None):
    """Normalization pipeline for English text.

    Applies ASCII conversion, lowercasing, number and abbreviation expansion,
    optional punctuation removal (when a translation *table* is given), and
    whitespace collapsing.
    """
    for transform in (convert_to_ascii, lowercase, expand_numbers, expand_abbreviations):
        text = transform(text)
    if table is not None:
        text = remove_punctuation(text, table)
    return collapse_whitespace(text)
def get_human_info(split):
    """Return per-subject frame-sampling info for a ZJU-MoCap-style data split.

    Each entry maps a subject id to ``begin_i`` (start frame), ``i_intv``
    (frame interval) and ``ni`` (number of frames).  For the ``'test'`` split
    the selection depends on ``cfg.test_mode``.

    Raises:
        ValueError: on an unknown ``split`` or ``cfg.test_mode``.  (Previously
        these cases fell through to an UnboundLocalError.)
    """
    # Unused ``data_root``/``data_name`` locals removed.
    if (split == 'train'):
        human_info = {'CoreView_313': {'begin_i': 0, 'i_intv': 1, 'ni': 60}, 'CoreView_315': {'begin_i': 0, 'i_intv': 6, 'ni': 400}, 'CoreView_377': {'begin_i': 0, 'i_intv': 30, 'ni': 300}, 'CoreView_386': {'begin_i': 0, 'i_intv': 6, 'ni': 300}, 'CoreView_390': {'begin_i': 700, 'i_intv': 6, 'ni': 300}, 'CoreView_392': {'begin_i': 0, 'i_intv': 6, 'ni': 300}, 'CoreView_396': {'begin_i': 810, 'i_intv': 5, 'ni': 270}}
    elif (split == 'test'):
        if (cfg.test_mode == 'model_o_motion_o'):
            # Seen models, seen motion: same frame ranges as training, denser sampling.
            human_info = {'CoreView_313': {'begin_i': 0, 'i_intv': 1, 'ni': 60}, 'CoreView_315': {'begin_i': 0, 'i_intv': 1, 'ni': 400}, 'CoreView_377': {'begin_i': 0, 'i_intv': 1, 'ni': 300}, 'CoreView_386': {'begin_i': 0, 'i_intv': 1, 'ni': 300}, 'CoreView_390': {'begin_i': 700, 'i_intv': 1, 'ni': 300}, 'CoreView_392': {'begin_i': 0, 'i_intv': 1, 'ni': 300}, 'CoreView_396': {'begin_i': 810, 'i_intv': 1, 'ni': 270}}
        elif (cfg.test_mode == 'model_o_motion_x'):
            # Seen models, unseen motion: frame ranges disjoint from training.
            human_info = {'CoreView_313': {'begin_i': 60, 'i_intv': 1, 'ni': 1000}, 'CoreView_315': {'begin_i': 400, 'i_intv': 1, 'ni': 1000}, 'CoreView_377': {'begin_i': 300, 'i_intv': 1, 'ni': 317}, 'CoreView_386': {'begin_i': 300, 'i_intv': 1, 'ni': 346}, 'CoreView_390': {'begin_i': 0, 'i_intv': 1, 'ni': 700}, 'CoreView_392': {'begin_i': 300, 'i_intv': 1, 'ni': 256}, 'CoreView_396': {'begin_i': 1080, 'i_intv': 1, 'ni': 270}}
        elif (cfg.test_mode == 'model_x_motion_x'):
            # Unseen models and unseen motion.
            human_info = {'CoreView_387': {'begin_i': 0, 'i_intv': 1, 'ni': 654}, 'CoreView_393': {'begin_i': 0, 'i_intv': 1, 'ni': 658}, 'CoreView_394': {'begin_i': 0, 'i_intv': 1, 'ni': 859}}
        else:
            raise ValueError(f'Unknown cfg.test_mode: {cfg.test_mode!r}')
    else:
        raise ValueError(f'Unknown split: {split!r}')
    return human_info
def split_speaker(root, num_train):
    """Randomly partition the speaker directories under `root` into a train
    set of `num_train` speakers and a test set of the rest, logging the split
    to the module-level `log_speaker_path` file."""
    speakers = os.listdir(root)
    random.shuffle(speakers)
    print(len(speakers))
    with open(log_speaker_path, 'w', encoding='utf-8') as log_file:
        log_file.write('train:\n')
        log_file.writelines((name + ' ') for name in speakers[:num_train])
        log_file.write('\n')
        log_file.write('test:\n')
        log_file.writelines((name + ' ') for name in speakers[num_train:])
class simam_module(torch.nn.Module):
    """SimAM: a parameter-free spatial attention module.

    Scores each activation by its squared deviation from the channel-wise
    spatial mean and gates the input with a sigmoid of that energy.
    """

    def __init__(self, channels=None, e_lambda=0.0001):
        super(simam_module, self).__init__()
        # `channels` is accepted for interface compatibility but unused —
        # the module has no learnable parameters.
        # NOTE: attribute name 'activaton' (sic) kept for external references.
        self.activaton = nn.Sigmoid()
        self.e_lambda = e_lambda  # numerical-stability term in the denominator

    def __repr__(self):
        s = (self.__class__.__name__ + '(')
        s += ('lambda=%f)' % self.e_lambda)
        return s

    @staticmethod
    def get_module_name():
        # Fixed: was missing @staticmethod, so calling it on an instance
        # raised TypeError (it takes no `self`).
        return 'simam'

    def forward(self, x):
        (b, c, h, w) = x.size()
        n = ((w * h) - 1)  # spatial size minus one (unbiased variance count)
        # Squared deviation of every activation from its channel's spatial mean.
        x_minus_mu_square = (x - x.mean(dim=[2, 3], keepdim=True)).pow(2)
        # Inverse-energy attention weight per activation (SimAM closed form).
        y = ((x_minus_mu_square / (4 * ((x_minus_mu_square.sum(dim=[2, 3], keepdim=True) / n) + self.e_lambda))) + 0.5)
        return (x * self.activaton(y))
def split_combined_args(kwargs):
    """Expand combined CLI arguments of the form '__k1__k2' -> 'v1;v2' into
    individual key/value entries, keeping every original entry as well."""
    expanded = dict(kwargs)
    for combined_key, combined_value in kwargs.items():
        if not combined_key.startswith('__'):
            continue
        keys = combined_key.split('__')[1:]
        values = combined_value.split(';')
        assert (len(keys) == len(values)), f"Combined arguments should have equal number of keys and values. Keys are separated by '__' and Values are separated with ';'. For example, '__n_bins__lr=256;0.001. Given (keys,values) is ({keys}, {values})"
        expanded.update(zip(keys, values))
    return expanded
def test_config_build_detector():
    """Smoke-test: build a detector and its optimizer from every mmdet config.

    Walks all *.py files under the config directory (skipping '_base_'
    inheritance fragments), builds each model with pretrained weights
    disabled, and checks hook wiring, optimizer construction, and — for
    two-stage models — the roi_head flags and structure.
    """
    from mmcv import Config
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    import glob
    config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
    # '_base_' files are partial configs meant only for inheritance; skip them.
    config_fpaths = [p for p in config_fpaths if (p.find('_base_') == (- 1))]
    config_names = [relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = Config.fromfile(config_fpath)
        config_mod.model  # touch the attribute so a malformed config fails early
        print(f'Building detector, config_fpath = {config_fpath}')
        # Avoid downloading pretrained weights during the test.
        if ('pretrained' in config_mod.model):
            config_mod.model['pretrained'] = None
        detector = build_detector(config_mod.model)
        assert (detector is not None)
        _check_numclasscheckhook(detector, config_mod)
        optimizer = build_optimizer(detector, config_mod.optimizer)
        assert isinstance(optimizer, torch.optim.Optimizer)
        # Two-stage detectors: roi_head must expose bbox (and matching mask) heads.
        if ('roi_head' in config_mod.model.keys()):
            assert (detector.roi_head.with_bbox and detector.with_bbox)
            assert (detector.roi_head.with_mask == detector.with_mask)
            head_config = config_mod.model['roi_head']
            _check_roi_head(head_config, detector.roi_head)
def save_wav_full(save_dir, data_root, json_root):
    """Apply per-segment gain adjustments to raw stems and save full tracks.

    For every song JSON under `json_root`, loads each referenced raw stem,
    scales the [start_sec, end_sec) region of each stem by the segment's gain
    (dB, via `db2linear`), and writes the adjusted stems under
    '<save_dir>/full/<song_name>/'. Uses the module-level sample rate `sr`.
    """
    json_paths = glob.glob(f'{json_root}/*.json')
    os.makedirs(save_dir, exist_ok=True)
    for json_path in tqdm.tqdm(json_paths):
        song_name = os.path.basename(json_path).replace('.json', '')
        with open(json_path, 'r') as json_file:
            segments_info = json.load(json_file)
        # Collect the set of stem names referenced by any segment.
        name_wav_dict = {}
        song_name_list = []
        for (key, item) in segments_info.items():
            for (gain_key, gain_item) in item['gain_adjustment'].items():
                song_name_list.append(gain_key)
        song_name_list = list(set(song_name_list))
        print(song_name_list)
        for segment_name in song_name_list:
            name_wav_dict[segment_name] = librosa.load(f'{data_root}/{song_name}/{song_name}_RAW/{segment_name}.wav', sr=None, mono=True)[0]
        for (key, item) in segments_info.items():
            # Hoist the sample-index conversion out of the inner loop — the
            # segment boundaries are the same for every stem it touches.
            start = librosa.time_to_samples(item['start_sec'], sr=sr)
            end = librosa.time_to_samples(item['end_sec'], sr=sr)
            for (gain_key, gain_item) in item['gain_adjustment'].items():
                name_wav_dict[gain_key][start:end] = (name_wav_dict[gain_key][start:end] * db2linear(gain_item, eps=0.0))
        os.makedirs(f'{save_dir}/full/{song_name}', exist_ok=True)
        for segment_name in song_name_list:
            sf.write(f'{save_dir}/full/{song_name}/{segment_name}.wav', name_wav_dict[segment_name], sr)
def parse_sample(input_files):
    """Load, resize (nearest-neighbor) and one-hot-encode each input image.

    Returns the single encoded image when the pipeline expects one input
    (module-level `n_inputs` == 1), otherwise a tuple of encoded images.
    """
    encoded = []
    for path in input_files:
        image = utils.load_image_op(path)
        image = utils.resize_image_op(image, image_shape_original, conf.image_shape, interpolation=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        encoded.append(utils.one_hot_encode_image_op(image, conf.one_hot_palette_input))
    if n_inputs == 1:
        return encoded[0]
    return tuple(encoded)
def p2():
    """Fixture: two ragged batches of per-node two-class probability pairs."""
    return [
        [[[0.5, 0.5], [1.0, 0.0]],
         [[0.5, 0.5], [0.5, 0.5]]],
        [[[0.5, 0.5], [0.7, 0.3], [0.1, 0.9]],
         [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]],
    ]
def train_autokeras(classes, alldata, labels, mtype, jsonfile, problemtype, default_features):
    """Train an autokeras MlpModule on `alldata`/`labels` and archive the
    resulting model folder under '<problemtype>_models'.

    Args:
        mtype: 'c' for classification or 'r' for regression; anything else
            raises ValueError (previously fell through to UnboundLocalError).
        jsonfile: source json filename; its stem names the model folder.
    """
    modelname = (jsonfile[0:(- 5)] + ('_autokeras_%s' % default_features))
    TEST_FOLDER = modelname
    (x_train, x_test, y_train, y_test) = train_test_split(alldata, labels, train_size=0.75, test_size=0.25)
    # autokeras' MlpModule expects trailing singleton dims on inputs/targets.
    x_train = x_train.reshape((x_train.shape + (1,)))
    y_train = y_train.reshape(((y_train.shape + (1,)) + (1,)))
    x_test = x_test.reshape((x_test.shape + (1,)))
    y_test = y_test.reshape(((y_test.shape + (1,)) + (1,)))
    print(x_train.shape)
    print(y_train.shape)
    tensor_x = torch.stack([torch.Tensor(i) for i in x_train])
    tensor_y = torch.stack([torch.Tensor(i) for i in y_train])
    my_dataset = utils.TensorDataset(tensor_x, tensor_y)
    training_data = utils.DataLoader(my_dataset)
    tensor_x = torch.stack([torch.Tensor(i) for i in x_test])
    tensor_y = torch.stack([torch.Tensor(i) for i in y_test])
    my_dataset = utils.TensorDataset(tensor_x, tensor_y)
    test_data = utils.DataLoader(my_dataset)
    print(test_data)
    input_shape = x_train[0].shape
    n_output_node = 1
    if (mtype == 'c'):
        mlpModule = MlpModule(loss=classification_loss, metric=Accuracy, searcher_args={}, path=TEST_FOLDER, verbose=True)
    elif (mtype == 'r'):
        mlpModule = MlpModule(loss=regression_loss, metric=MSE, searcher_args={}, path=TEST_FOLDER, verbose=True)
    else:
        # Fail fast instead of hitting UnboundLocalError on `mlpModule` below.
        raise ValueError("mtype must be 'c' (classification) or 'r' (regression), got %r" % (mtype,))
    timelimit = 60
    print(('training MLP model for %s hours' % (timelimit / (60 * 60))))
    mlpModule.fit(n_output_node, input_shape, training_data, test_data, time_limit=timelimit)
    mlpModule.final_fit(training_data, test_data, trainer_args=None, retrain=False)
    cur_dir2 = os.getcwd()
    models_dir = (problemtype + '_models')
    # Create the archive directory if missing, then move the model folder in.
    # (Replaces a bare try/except around os.chdir that masked real errors.)
    os.makedirs(models_dir, exist_ok=True)
    os.chdir(models_dir)
    shutil.copytree(((cur_dir2 + '/') + TEST_FOLDER), ((os.getcwd() + '/') + TEST_FOLDER))
    shutil.rmtree(((cur_dir2 + '/') + TEST_FOLDER))
def test_categorical_new():
    """Round-trip check for the CategoricalNew codec on random 4-way data."""
    rand = np.random.RandomState(2)
    prec = 4
    data_shape = (20, 3, 5)
    n_cats = 4
    # Strictly positive weights, then normalised to per-row distributions.
    flat_weights = rand.random((np.prod(data_shape), n_cats)) + 1
    probs = flat_weights / flat_weights.sum(axis=-1, keepdims=True)
    samples = np.reshape([rand.choice(n_cats, p=row) for row in probs], data_shape)
    cat_weights = np.reshape(flat_weights, data_shape + (n_cats,))
    check_codec(data_shape, cs.CategoricalNew(cat_weights, prec), samples)
def _change_reference_point(algorithm: SMPSORP):
    """Interactively read new reference points from stdin and push them into a
    running SMPSORP instance.

    NOTE(review): the `while True` loop never breaks — presumably this is run
    on a background thread, prompting the user repeatedly; confirm at the
    call site.
    """
    number_of_reference_points = len(algorithm.reference_points)
    number_of_objectives = algorithm.problem.number_of_objectives
    while True:
        print(f'Enter {number_of_reference_points}-points of dimension {number_of_objectives}: ')
        read = [float(x) for x in input().split()]
        # Chunk the flat list of floats into points of `number_of_objectives` each.
        reference_points = []
        for i in range(0, len(read), number_of_objectives):
            reference_points.append(read[i:(i + number_of_objectives)])
        algorithm.update_reference_point(reference_points)
class SparseDispatcher(object):
    """Routes a batch of examples to experts according to a sparse gates
    matrix (TF1-style mixture-of-experts helper).

    `gates` is a [batch, num_experts] tensor; example b is sent to expert e
    iff gates[b, e] > 0.
    """

    def __init__(self, num_experts, gates):
        self._gates = gates
        self._num_experts = num_experts
        # Indices of all nonzero gate entries, ordered by expert then batch
        # (the transpose puts experts first so Dispatch's split is per-expert).
        where = tf.to_int32(tf.where((tf.transpose(gates) > 0)))
        (self._expert_index, self._batch_index) = tf.unstack(where, num=2, axis=1)
        # Number of examples routed to each expert.
        self._part_sizes_tensor = tf.reduce_sum(tf.to_int32((gates > 0)), [0])
        # Gate value for each (example, expert) pair, in dispatch order.
        self._nonzero_gates = tf.gather(tf.reshape(self._gates, [(- 1)]), ((self._batch_index * num_experts) + self._expert_index))

    def Dispatch(self, inp):
        # Replicate each example once per expert it is routed to, then split
        # into one tensor per expert.
        inp = tf.gather(inp, self._batch_index)
        return tf.split(inp, self._part_sizes_tensor, 0)

    def Combine(self, expert_out, multiply_by_gates=True):
        # Concatenate the expert outputs and sum them back per source example,
        # optionally weighting each contribution by its gate value.
        stitched = ConvertGradientToTensor(tf.concat(expert_out, 0))
        if multiply_by_gates:
            stitched *= tf.expand_dims(self._nonzero_gates, 1)
        combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0])
        return combined

    def ExpertToGates(self):
        # Per-expert list of the gate values of the examples dispatched to it.
        return tf.split(self._nonzero_gates, self._part_sizes_tensor, 0)

    def part_sizes(self):
        return self._part_sizes_tensor
class PFNN(NN):
    """Parallel fully-connected network: after some depth the network splits
    into one independent sub-network per output component.

    Entries of `layer_sizes` are either ints (shared layers) or lists/tuples
    of ints (one width per output, i.e. parallel sub-layers). Once split, the
    streams never rejoin. `split_mask` optionally gates the shared input seen
    by each sub-network at the split point.

    NOTE(review): `torch.as_tensor(None)` raises, so the default
    split_mask=None appears to crash in __init__ — confirm against callers.
    """

    def __init__(self, layer_sizes, activation, kernel_initializer, split_mask=None):
        super().__init__()
        self.activation = activation_dict[activation]
        initializer = initializer_dict[kernel_initializer]
        initializer_zero = initializer_dict['zeros']
        self.split_mask = torch.as_tensor(split_mask)
        if (len(layer_sizes) <= 1):
            raise ValueError('must specify input and output sizes')
        if (not isinstance(layer_sizes[0], int)):
            raise ValueError('input size must be integer')
        if (not isinstance(layer_sizes[(- 1)], int)):
            raise ValueError('output size must be integer')
        n_output = layer_sizes[(- 1)]

        def make_linear(n_input, n_output):
            # Linear layer with the configured weight init and zeroed bias.
            linear = torch.nn.Linear(n_input, n_output, dtype=config.real(torch))
            initializer(linear.weight)
            initializer_zero(linear.bias)
            return linear
        self.layers = torch.nn.ModuleList()
        for i in range(1, (len(layer_sizes) - 1)):
            prev_layer_size = layer_sizes[(i - 1)]
            curr_layer_size = layer_sizes[i]
            if isinstance(curr_layer_size, (list, tuple)):
                # Parallel sub-layers: exactly one per network output.
                if (len(curr_layer_size) != n_output):
                    raise ValueError('number of sub-layers should equal number of network outputs')
                if isinstance(prev_layer_size, (list, tuple)):
                    # parallel -> parallel: stream j feeds stream j.
                    self.layers.append(torch.nn.ModuleList([make_linear(prev_layer_size[j], curr_layer_size[j]) for j in range(n_output)]))
                else:
                    # shared -> parallel: fan the shared layer out to each stream.
                    self.layers.append(torch.nn.ModuleList([make_linear(prev_layer_size, curr_layer_size[j]) for j in range(n_output)]))
            else:
                # Shared layer; a parallel layer can never feed back into one.
                if (not isinstance(prev_layer_size, int)):
                    raise ValueError('cannot rejoin parallel subnetworks after splitting')
                self.layers.append(make_linear(prev_layer_size, curr_layer_size))
        # Output layer: one scalar head per stream, or a single shared head.
        if isinstance(layer_sizes[(- 2)], (list, tuple)):
            self.layers.append(torch.nn.ModuleList([make_linear(layer_sizes[(- 2)][j], 1) for j in range(n_output)]))
        else:
            self.layers.append(make_linear(layer_sizes[(- 2)], n_output))

    def forward(self, inputs):
        x = inputs
        if (self._input_transform is not None):
            x = self._input_transform(x)
        for layer in self.layers[:(- 1)]:
            if isinstance(layer, torch.nn.ModuleList):
                if isinstance(x, list):
                    # Already split: sub-layer j processes stream j.
                    x = [self.activation(f(x_)) for (f, x_) in zip(layer, x)]
                elif (self.split_mask is not None):
                    # First split with masking: each stream sees a masked input.
                    x = [self.activation(f((x * self.split_mask[i]))) for (i, f) in enumerate(layer)]
                else:
                    # First split: every stream sees the full shared input.
                    x = [self.activation(f(x)) for f in layer]
            else:
                x = self.activation(layer(x))
        # Final layer: concatenate the scalar heads, or apply the shared head.
        if isinstance(x, list):
            x = torch.cat([f(x_) for (f, x_) in zip(self.layers[(- 1)], x)], dim=1)
        else:
            x = self.layers[(- 1)](x)
        if (self._output_transform is not None):
            x = self._output_transform(inputs, x)
        return x
class TranAD_SelfConditioning(nn.Module):
    """TranAD ablation: transformer anomaly detector with self-conditioning
    only (the conditioning signal `c` is fixed to zeros in both passes).

    Two decoders reconstruct the target window from a shared encoder memory.
    Relies on module-level `lr` and `PositionalEncoding`.
    """

    def __init__(self, feats):
        super(TranAD_SelfConditioning, self).__init__()
        self.name = 'TranAD_SelfConditioning'
        self.lr = lr  # module-level learning-rate constant
        self.batch = 128
        self.n_feats = feats
        self.n_window = 10
        self.n = (self.n_feats * self.n_window)
        # Model width is 2*feats because src is concatenated with condition c.
        self.pos_encoder = PositionalEncoding((2 * feats), 0.1, self.n_window)
        encoder_layers = TransformerEncoderLayer(d_model=(2 * feats), nhead=feats, dim_feedforward=16, dropout=0.1)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        decoder_layers1 = TransformerDecoderLayer(d_model=(2 * feats), nhead=feats, dim_feedforward=16, dropout=0.1)
        self.transformer_decoder1 = TransformerDecoder(decoder_layers1, 1)
        decoder_layers2 = TransformerDecoderLayer(d_model=(2 * feats), nhead=feats, dim_feedforward=16, dropout=0.1)
        self.transformer_decoder2 = TransformerDecoder(decoder_layers2, 1)
        # Project back to feature width with a sigmoid (data assumed in [0,1]
        # — TODO confirm normalisation upstream).
        self.fcn = nn.Sequential(nn.Linear((2 * feats), feats), nn.Sigmoid())

    def encode(self, src, c, tgt):
        # Concatenate the window with the conditioning signal along features.
        src = torch.cat((src, c), dim=2)
        src = (src * math.sqrt(self.n_feats))
        src = self.pos_encoder(src)
        memory = self.transformer_encoder(src)
        # Duplicate target features so tgt matches the 2*feats model width.
        tgt = tgt.repeat(1, 1, 2)
        return (tgt, memory)

    def forward(self, src, tgt):
        # Self-conditioning ablation: the condition is all zeros in both passes.
        c = torch.zeros_like(src)
        x1 = self.fcn(self.transformer_decoder1(*self.encode(src, c, tgt)))
        x2 = self.fcn(self.transformer_decoder2(*self.encode(src, c, tgt)))
        return (x1, x2)
class Cider():
    """Thin wrapper computing the CIDEr captioning metric via CiderScorer."""

    def __init__(self, test=None, refs=None, n=4, sigma=6.0):
        # `test`/`refs` are accepted for interface compatibility but unused.
        self._n = n          # maximum n-gram size
        self._sigma = sigma  # gaussian length-penalty width

    def compute_score(self, gts, res):
        """Score hypotheses `res` against references `gts` (both keyed by
        image id); returns (corpus_score, per_image_scores)."""
        assert (gts.keys() == res.keys())
        scorer = CiderScorer(n=self._n, sigma=self._sigma)
        for image_id in gts.keys():
            hypothesis = res[image_id]
            references = gts[image_id]
            # Exactly one hypothesis sentence, at least one reference.
            assert (type(hypothesis) is list)
            assert (len(hypothesis) == 1)
            assert (type(references) is list)
            assert (len(references) > 0)
            scorer += (hypothesis[0], references)
        return scorer.compute_score()

    def method(self):
        """Name of this metric."""
        return 'CIDEr'
def get_stem_fun(stem_type):
    """Look up the stem-module class for `stem_type`.

    Raises AssertionError with a descriptive message for unsupported types.
    """
    stem_funs = {'res_stem_cifar': ResStemCifar, 'res_stem_in': ResStemIN, 'simple_stem_in': SimpleStemIN}
    # Membership test directly on the dict — `.keys()` was redundant.
    assert (stem_type in stem_funs), "Stem type '{}' not supported".format(stem_type)
    return stem_funs[stem_type]
def tf_efficientnet_l2_ns(pretrained=False, **kwargs):
    """EfficientNet-L2 NoisyStudent (TF port): forces TF-style batch-norm
    epsilon and 'same' padding before delegating to the generic builder."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet('tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
class PSPDec(nn.Module):
    """One PSPNet pyramid-pooling branch: average-pool by `downsize`, reduce
    channels with a 1x1 conv + BN + ReLU, then bilinearly upsample to `upsize`."""

    def __init__(self, in_features, out_features, downsize, upsize=(64, 128)):
        super(PSPDec, self).__init__()
        stages = [
            nn.AvgPool2d(downsize, stride=downsize),
            nn.Conv2d(in_features, out_features, 1, bias=False),
            nn.BatchNorm2d(out_features, momentum=0.95),
            nn.ReLU(inplace=True),
            nn.Upsample(size=upsize, mode='bilinear'),
        ]
        self.features = nn.Sequential(*stages)

    def forward(self, x):
        return self.features(x)
class CBBNorm2d(_CBBNorm):
    """Conditional BatchNorm variant restricted to 4D (NCHW) inputs."""

    def _check_input_dim(self, input):
        # Only conv-style batches are valid: (N, C, H, W).
        dims = input.dim()
        if dims != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(dims))
def main():
    """Tiny smoke-run of evaluate_all on a hand-built score/ground-truth pair."""
    scores_by_user = {1: {11: 3, 12: 4, 13: 5, 14: 6, 15: 7}}
    ground_truth = {1: [11, 15]}
    evaluate_all(scores_by_user, ground_truth, 5)
def test_sgd_classification_small_example():
    """SGD-trained FMClassification should nearly perfectly fit the toy problem."""
    (w0, w, V, y, X) = get_test_problem(task='classification')
    X_test = X.copy()
    X_train = sp.csc_matrix(X)
    model = sgd.FMClassification(n_iter=1000, init_stdev=0.1, l2_reg_w=0, l2_reg_V=0, rank=2, step_size=0.1)
    model.fit(X_train, y)
    predictions = model.predict(X_test)
    print(predictions)
    assert metrics.accuracy_score(y, predictions) > 0.95
class PytorchONNXRuntimeINCMetic(ONNXRuntimeINCMetic):
    """INC metric adapter that hands stacked results back as torch tensors."""

    def stack(self, preds, labels):
        # Parent stacks into numpy arrays; convert both to torch tensors.
        stacked_preds, stacked_labels = super().stack(preds, labels)
        return (torch.from_numpy(stacked_preds), torch.from_numpy(stacked_labels))

    def to_scalar(self, tensor):
        # Unwrap a 0-dim tensor to a plain Python number.
        return tensor.item()
class Transweather_base(nn.Module):
    """Base Transweather restoration model: transformer encoder, convolutional
    projection decoder, and a tanh-activated 3-channel output head."""

    def __init__(self, path=None, **kwargs):
        super(Transweather_base, self).__init__()
        self.Tenc = Tenc()                     # transformer encoder
        self.convproj = convprojection_base()  # convolutional projection decoder
        self.clean = ConvLayer(8, 3, kernel_size=3, stride=1, padding=1)
        self.active = nn.Tanh()
        if path is not None:
            self.load(path)

    def forward(self, x):
        features = self.Tenc(x)
        decoded = self.convproj(features)
        return self.active(self.clean(decoded))

    def load(self, path):
        """Load checkpoint weights, stripping any 'module.' DataParallel prefix."""
        checkpoint = torch.load(path, map_location=(lambda storage, loc: storage))
        model_state_dict_keys = self.state_dict().keys()  # kept from original (unused)
        checkpoint_state_dict_noprefix = strip_prefix_if_present(checkpoint['state_dict'], 'module.')
        # strict=False: tolerate missing/extra keys between checkpoint and model.
        self.load_state_dict(checkpoint_state_dict_noprefix, strict=False)
        del checkpoint
        torch.cuda.empty_cache()
def ProcessRoundDescriptor(segment, parent_node_name, affix, edge_attributes=None):
    """Emit the dot-graph edge for a 'Round(...)' forwarding descriptor.

    Returns a single-element list with the edge statement; raises if the
    descriptor unexpectedly carries sub-segments.
    """
    label = 'Round ({0})'.format(segment['arguments'][1])
    style = None
    if edge_attributes is not None:
        # Prepend any caller-supplied label and pick up an optional edge style.
        if 'label' in edge_attributes:
            label = '{0} {1}'.format(edge_attributes['label'], label)
        if 'style' in edge_attributes:
            style = 'style={0}'.format(edge_attributes['style'])
    attr_string = 'label="{0}"'.format(label)
    if style is not None:
        attr_string += ' {0}'.format(style)
    source = GetDotNodeName(segment['arguments'][0])['node']
    target = GetDotNodeName(parent_node_name)['node']
    dot_graph = ['{0}->{1} [ {2} ]'.format(source, target, attr_string)]
    if segment['sub_segments']:
        raise Exception('Round can just deal with forwarding descriptor, no sub-segments allowed')
    return dot_graph
def build_dataset(args, rank=0, is_test=False):
    """Construct the train / validation / augmentation VLN environments.

    Returns (train_env, val_envs, aug_env); aug_env is None when --aug is not
    given. `rank` offsets the random seed and selects this worker's shard of
    each validation split under distributed training. `is_test` is currently
    unused here.
    """
    tok = get_tokenizer(args)
    # Training may use augmented image features; validation uses clean ones.
    feat_db_train = ImageFeaturesDB(args.img_ft_file, args.image_feat_size, args.img_aug_ft_file)
    feat_db_val = ImageFeaturesDB(args.img_ft_file, args.image_feat_size)
    if (args.dataset == 'r2r_back'):
        dataset_class = R2RBackBatch
    else:
        dataset_class = R2RBatch
    train_instr_data = construct_instrs(args.anno_dir, args.dataset, ['train'], tokenizer=tok, max_instr_len=args.max_instr_len)
    train_env = dataset_class(feat_db_train, train_instr_data, args.connectivity_dir, batch_size=args.batch_size, angle_feat_size=args.angle_feat_size, seed=(args.seed + rank), sel_data_idxs=None, name='train')
    if (args.aug is not None):
        aug_instr_data = construct_instrs(args.anno_dir, args.dataset, [args.aug], tokenizer=tok, max_instr_len=args.max_instr_len)
        aug_env = dataset_class(feat_db_train, aug_instr_data, args.connectivity_dir, batch_size=args.batch_size, angle_feat_size=args.angle_feat_size, seed=(args.seed + rank), sel_data_idxs=None, name='aug')
    else:
        aug_env = None
    val_env_names = ['val_train_seen', 'val_seen']
    # r4r (outside of --test) evaluates on a sampled unseen split instead.
    if (args.test or (args.dataset != 'r4r')):
        val_env_names.append('val_unseen')
    else:
        val_env_names.append('val_unseen_sampled')
    if args.submit:
        # Challenge-submission splits depend on the dataset flavour.
        if (args.dataset == 'r2r'):
            val_env_names.append('test')
        elif (args.dataset == 'rxr'):
            val_env_names.extend(['test_challenge_public', 'test_standard_public'])
    val_envs = {}
    for split in val_env_names:
        val_instr_data = construct_instrs(args.anno_dir, args.dataset, [split], tokenizer=tok, max_instr_len=args.max_instr_len)
        # Under multi-GPU, each rank evaluates a disjoint shard of the split.
        val_env = dataset_class(feat_db_val, val_instr_data, args.connectivity_dir, batch_size=args.batch_size, angle_feat_size=args.angle_feat_size, seed=(args.seed + rank), sel_data_idxs=(None if (args.world_size < 2) else (rank, args.world_size)), name=split)
        val_envs[split] = val_env
    return (train_env, val_envs, aug_env)
def main():
    """Build a vocabulary file, either from raw text corpora ('text' mode) or
    from a torch field file saved during preprocessing ('field' mode)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-file_type', default='text', choices=['text', 'field'], required=True, help="Options for vocabulary creation.\n The default is 'text' where the user passes\n a corpus or a list of corpora files for which\n they want to create a vocabulary from.\n If choosing the option 'field', we assume\n the file passed is a torch file created during\n the preprocessing stage of an already\n preprocessed corpus. The vocabulary file created\n will just be the vocabulary inside the field\n corresponding to the argument 'side'.")
    parser.add_argument('-file', type=str, nargs='+', required=True)
    parser.add_argument('-out_file', type=str, required=True)
    parser.add_argument('-side', type=str)
    opt = parser.parse_args()
    vocabulary = {}
    if (opt.file_type == 'text'):
        # Count token frequencies across all input corpora.
        print('Reading input file...')
        for batch in read_files_batch(opt.file):
            for sentence in batch:
                for w in sentence:
                    if (w in vocabulary):
                        vocabulary[w] += 1
                    else:
                        vocabulary[w] = 1
        print('Writing vocabulary file...')
        # Fixed: write UTF-8 explicitly — the 'field' branch below already
        # encodes UTF-8, and the locale default could fail on non-ASCII tokens.
        with open(opt.out_file, 'w', encoding='utf-8') as f:
            # Most frequent tokens first; only the token is written.
            for (w, count) in sorted(vocabulary.items(), key=(lambda x: x[1]), reverse=True):
                f.write('{0}\n'.format(w))
    else:
        import torch
        from onmt.inputters.inputter import _old_style_vocab
        print('Reading input file...')
        if (not (len(opt.file) == 1)):
            raise ValueError("If using -file_type='field', only pass one argument for -file.")
        vocabs = torch.load(opt.file[0])
        voc = dict(vocabs)[opt.side]
        # Support both the legacy vocab object and the newer field layouts.
        if _old_style_vocab(voc):
            word_list = voc.itos
        else:
            try:
                word_list = voc[0][1].base_field.vocab.itos
            except AttributeError:
                word_list = voc[0][1].vocab.itos
        print('Writing vocabulary file...')
        with open(opt.out_file, 'wb') as f:
            for w in word_list:
                f.write(u'{0}\n'.format(w).encode('utf-8'))
class BatchSampler(Sampler):
    """Sampler producing batches of (anchor, positive) index pairs — k=2
    elements from the same class — for metric learning on OxfordDataset.

    `batch_expansion_rate` lets expand_batch() grow the batch size up to
    `batch_size_limit` during training.
    """

    def __init__(self, dataset: OxfordDataset, batch_size: int, batch_size_limit: int=None, batch_expansion_rate: float=None):
        if (batch_expansion_rate is not None):
            assert (batch_expansion_rate > 1.0), 'batch_expansion_rate must be greater than 1'
            assert (batch_size <= batch_size_limit), 'batch_size_limit must be greater or equal to batch_size'
        self.batch_size = batch_size
        self.batch_size_limit = batch_size_limit
        self.batch_expansion_rate = batch_expansion_rate
        self.dataset = dataset
        self.k = 2  # elements sampled together from the same class
        # A batch needs at least two pairs to be useful for pair/triplet mining.
        if (self.batch_size < (2 * self.k)):
            self.batch_size = (2 * self.k)
            print('WARNING: Batch too small. Batch size increased to {}.'.format(self.batch_size))
        self.batch_idx = []
        # Element indices available for sampling (dict used as an ordered set).
        self.elems_ndx = {}
        for ndx in self.dataset.queries:
            self.elems_ndx[ndx] = True

    def __iter__(self):
        # Regenerate batches every epoch so the pairing is reshuffled.
        self.generate_batches()
        for batch in self.batch_idx:
            (yield batch)

    def __len__(self):
        # Fixed: was mistyped as `__len`, so len(sampler) raised TypeError.
        return len(self.batch_idx)

    def expand_batch(self):
        """Grow batch_size by batch_expansion_rate, capped at batch_size_limit."""
        if (self.batch_expansion_rate is None):
            print('WARNING: batch_expansion_rate is None')
            return
        if (self.batch_size >= self.batch_size_limit):
            return
        old_batch_size = self.batch_size
        self.batch_size = int((self.batch_size * self.batch_expansion_rate))
        self.batch_size = min(self.batch_size, self.batch_size_limit)
        print('=> Batch size increased from: {} to {}'.format(old_batch_size, self.batch_size))

    def generate_batches(self):
        """Rebuild self.batch_idx as batches of same-class index pairs."""
        self.batch_idx = []
        unused_elements_ndx = copy.deepcopy(self.elems_ndx)
        current_batch = []
        assert (self.k == 2), 'sampler can sample only k=2 elements from the same class'
        while True:
            if ((len(current_batch) >= self.batch_size) or (len(unused_elements_ndx) == 0)):
                # Flush only if the batch holds at least two pairs.
                if (len(current_batch) >= (2 * self.k)):
                    assert ((len(current_batch) % self.k) == 0), 'Incorrect bach size: {}'.format(len(current_batch))
                    self.batch_idx.append(current_batch)
                    current_batch = []
                if (len(unused_elements_ndx) == 0):
                    break
            selected_element = random.choice(list(unused_elements_ndx))
            unused_elements_ndx.pop(selected_element)
            positives = self.dataset.get_positives_ndx(selected_element)
            if (len(positives) == 0):
                # No positives: the element cannot form a pair, skip it.
                continue
            # Prefer an unused positive; otherwise reuse one already consumed.
            unused_positives = [e for e in positives if (e in unused_elements_ndx)]
            if (len(unused_positives) > 0):
                second_positive = random.choice(unused_positives)
                unused_elements_ndx.pop(second_positive)
            else:
                second_positive = random.choice(positives)
            current_batch += [selected_element, second_positive]
        for batch in self.batch_idx:
            assert ((len(batch) % self.k) == 0), 'Incorrect bach size: {}'.format(len(batch))
# NOTE(review): the tuples below look like the argument lists of stripped
# `@click.option(...)` decorators for the `main` command that follows (the
# bare `()` would be `@click.command()`). As plain expression statements they
# evaluate to tuples and have no effect — presumably an artifact of source
# extraction; confirm against the original training script before relying on
# these CLI defaults.
()
('--outdir', help='Where to save the results', metavar='DIR', required=True)
('--cfg', help='Base configuration', type=click.Choice(['fastgan', 'fastgan_lite', 'stylegan2']), required=True)
('--data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True)
('--gpus', help='Number of GPUs to use', metavar='INT', type=click.IntRange(min=1), required=True)
('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), required=True)
('--target', help='Discriminator target', metavar='FLOAT', type=float, default=0.6, required=True)
('--d_pos', help='Diffusion adding position', metavar='STR', type=str, default='first')
('--noise_sd', help='Diffusion noise standard deviation', metavar='FLOAT', type=float, default=0.5)
('--ada_kimg', help='# kimgs needed to push diffusion to maximum level', type=int, default=100)
('--cond', help='Train conditional model', metavar='BOOL', type=bool, default=False, show_default=True)
('--mirror', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=True, show_default=True)
('--resume', help='Resume from given network pickle', metavar='[PATH|URL]', type=str)
('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1))
('--cbase', help='Capacity multiplier', metavar='INT', type=click.IntRange(min=1), default=32768, show_default=True)
('--cmax', help='Max. feature maps', metavar='INT', type=click.IntRange(min=1), default=512, show_default=True)
('--glr', help='G learning rate [default: varies]', metavar='FLOAT', type=click.FloatRange(min=0), default=0.0002, show_default=True)
('--dlr', help='D learning rate', metavar='FLOAT', type=click.FloatRange(min=0), default=0.0002, show_default=True)
('--map-depth', help='Mapping network depth [default: varies]', metavar='INT', type=click.IntRange(min=1))
('--desc', help='String to include in result dir name', metavar='STR', type=str)
('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
('--kimg', help='Total training duration', metavar='KIMG', type=click.IntRange(min=1), default=25000, show_default=True)
('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=4, show_default=True)
('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=50, show_default=True)
('--seed', help='Random seed', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
('--fp32', help='Disable mixed-precision', metavar='BOOL', type=bool, default=False, show_default=True)
('--nobench', help='Disable cuDNN benchmarking', metavar='BOOL', type=bool, default=False, show_default=True)
('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=3, show_default=True)
('-n', '--dry-run', help='Print training options and exit', is_flag=True)
('--restart_every', help='Time interval in seconds to restart code', metavar='INT', type=int, default=9999999, show_default=True)
def main(**kwargs):
    """Assemble the training config from CLI options and launch ProjectedGAN
    training; exits with code 3 when training should be restarted to resume.
    """
    opts = dnnlib.EasyDict(kwargs)
    c = dnnlib.EasyDict()  # the full training configuration passed downstream
    c.G_kwargs = dnnlib.EasyDict(class_name=None, z_dim=64, w_dim=128, mapping_kwargs=dnnlib.EasyDict())
    c.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0, 0.99], eps=1e-08)
    c.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0, 0.99], eps=1e-08)
    c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, prefetch_factor=2)
    c.target = opts.target
    c.ada_kimg = opts.ada_kimg
    # Dataset options.
    (c.training_set_kwargs, dataset_name) = init_dataset_kwargs(data=opts.data)
    if (opts.cond and (not c.training_set_kwargs.use_labels)):
        raise click.ClickException('--cond=True requires labels specified in dataset.json')
    c.training_set_kwargs.use_labels = opts.cond
    c.training_set_kwargs.xflip = opts.mirror
    # Hyperparameters and settings taken straight from the CLI.
    c.num_gpus = opts.gpus
    c.batch_size = opts.batch
    c.batch_gpu = (opts.batch_gpu or (opts.batch // opts.gpus))
    c.G_kwargs.channel_base = opts.cbase
    c.G_kwargs.channel_max = opts.cmax
    c.G_kwargs.mapping_kwargs.num_layers = 2
    c.G_opt_kwargs.lr = ((0.002 if (opts.cfg == 'stylegan2') else 0.0025) if (opts.glr is None) else opts.glr)
    c.D_opt_kwargs.lr = opts.dlr
    c.metrics = opts.metrics
    c.total_kimg = opts.kimg
    c.kimg_per_tick = opts.tick
    c.image_snapshot_ticks = c.network_snapshot_ticks = opts.snap
    c.random_seed = c.training_set_kwargs.random_seed = opts.seed
    c.data_loader_kwargs.num_workers = opts.workers
    # Sanity checks.
    if ((c.batch_size % c.num_gpus) != 0):
        raise click.ClickException('--batch must be a multiple of --gpus')
    if ((c.batch_size % (c.num_gpus * c.batch_gpu)) != 0):
        raise click.ClickException('--batch must be a multiple of --gpus times --batch-gpu')
    if any(((not metric_main.is_valid_metric(metric)) for metric in c.metrics)):
        raise click.ClickException('\n'.join((['--metrics can only contain the following values:'] + metric_main.list_valid_metrics())))
    # EMA half-life scales with batch size.
    c.ema_kimg = ((c.batch_size * 10) / 32)
    # Base-configuration specific settings.
    if (opts.cfg == 'stylegan2'):
        c.G_opt_kwargs.lr = c.D_opt_kwargs.lr = (0.002 if (c.training_set_kwargs.resolution >= 1024) else 0.0025)
        c.G_kwargs.class_name = 'pg_modules.networks_stylegan2.Generator'
        c.G_kwargs.fused_modconv_default = 'inference_only'
        use_separable_discs = True
    elif (opts.cfg in ['fastgan', 'fastgan_lite']):
        c.G_kwargs = dnnlib.EasyDict(class_name='pg_modules.networks_fastgan.Generator', cond=opts.cond, synthesis_kwargs=dnnlib.EasyDict())
        c.G_kwargs.synthesis_kwargs.lite = (opts.cfg == 'fastgan_lite')
        c.G_opt_kwargs.lr = c.D_opt_kwargs.lr = 0.0002
        use_separable_discs = False
    # Resume from an existing pickle (EMA ramp-up disabled when resuming).
    if (opts.resume is not None):
        c.resume_pkl = opts.resume
        c.ema_rampup = None
    c.restart_every = opts.restart_every
    # Performance-related toggles.
    if opts.fp32:
        c.G_kwargs.num_fp16_res = 0
        c.G_kwargs.conv_clamp = None
    if opts.nobench:
        c.cudnn_benchmark = False
    # Human-readable run-directory description.
    desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-d_pos-{opts.d_pos}-noise_sd-{opts.noise_sd}'
    if opts.d_pos:
        desc += f'-target{opts.target}'
    if opts.ada_kimg:
        desc += f'-ada_kimg{opts.ada_kimg}'
    if (opts.desc is not None):
        desc += f'-{opts.desc}'
    # Projected-GAN loss and discriminator configuration.
    c.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.ProjectedGANLoss')
    c.D_kwargs = dnnlib.EasyDict(class_name='pg_modules.discriminator.ProjectedDiscriminator', diffaug=True, interp224=(c.training_set_kwargs.resolution < 224), backbone_kwargs=dnnlib.EasyDict())
    c.D_kwargs.backbone_kwargs.d_pos = opts.d_pos
    c.D_kwargs.backbone_kwargs.noise_sd = opts.noise_sd
    c.D_kwargs.backbone_kwargs.cout = 64
    c.D_kwargs.backbone_kwargs.expand = True
    c.D_kwargs.backbone_kwargs.proj_type = 2
    c.D_kwargs.backbone_kwargs.num_discs = 4
    c.D_kwargs.backbone_kwargs.separable = use_separable_discs
    c.D_kwargs.backbone_kwargs.cond = opts.cond
    # Launch, then check whether training finished; exit code 3 signals the
    # outer wrapper to restart and resume from the last snapshot.
    launch_training(c=c, desc=desc, outdir=opts.outdir, dry_run=opts.dry_run)
    last_snapshot = misc.get_ckpt_path(c.run_dir)
    if os.path.isfile(last_snapshot):
        with dnnlib.util.open_url(last_snapshot) as f:
            cur_nimg = legacy.load_network_pkl(f)['progress']['cur_nimg'].item()
        if ((cur_nimg // 1000) < c.total_kimg):
            print('Restart: exit with code 3')
            exit(3)
def get_num_classes(dataset: str):
    """Return the number of target classes for a dataset name.

    Raises ValueError for an unknown name (the original silently returned
    None, deferring the failure to the caller).
    """
    class_counts = {'imagenet': 1000, 'cifar10': 10, 'mnist': 10}
    try:
        return class_counts[dataset]
    except KeyError:
        raise ValueError('unknown dataset: {!r}'.format(dataset)) from None
class MMIFrameScorer(PartialScorerInterface):
def __init__(self, lang, device, idim, sos_id, rank, use_segment, char_list, weight_path):
self.lang = lang
self.device = device
self.lexicon = Lexicon(lang)
self.oov = self.oovid = open((self.lang / 'oov.txt')).read().strip()
self.graph_compiler = MmiTrainingGraphCompiler(self.lexicon, self.device, self.oov)
self.phone_ids = self.lexicon.phone_symbols()
self.lo = torch.nn.Linear(idim, (len(self.phone_ids) + 1))
self.lm_scores = None
for i in range(10):
try:
self.load_weight(rank, weight_path)
except:
print(f'{i}-th trail to load MMI matrix weight but fail')
self.P = create_bigram_phone_lm(self.phone_ids)
self.P.set_scores_stochastic_(self.lm_scores)
self.char_list = char_list
self.eos = sos_id
self.blank = 0
self.logzero = (- 10000)
def load_weight(self, rank, path):
ckpt_path = os.path.join(path, f'mmi_param.{rank}.pth')
ckpt_dict = torch.load(ckpt_path)
for v in ckpt_dict.values():
v.requires_grad = False
self.lm_scores = ckpt_dict['lm_scores']
lo_dict = {'weight': ckpt_dict['lo.1.weight'], 'bias': ckpt_dict['lo.1.bias']}
self.lo.load_state_dict(lo_dict)
def init_state(self, x):
texts = ['<UNK>']
(_, den) = self.graph_compiler.compile(texts, self.P, replicate_den=True)
nnet_output = self.lo(x.unsqueeze(0))
supervision = torch.Tensor([[0, 0, nnet_output.size(1)]]).int()
dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision)
den_scores = step_intersect(den, dense_fsa_vec)[0].unsqueeze(0)
prev_score = torch.Tensor([0.0]).to(torch.float32)
return (nnet_output, den_scores, prev_score)
def select_state(self, states, j):
(nnet_output_single, den_scores, prev_scores) = states
return (nnet_output_single, den_scores, prev_scores[j])
def score(**kargs):
raise NotImplementedError
def score_partial(self, y, next_tokens, state, hs_pad):
(nnet_output_single, den_scores, prev_score) = state
batch_size = len(next_tokens)
ys = torch.cat([y.unsqueeze(0).repeat(batch_size, 1), next_tokens.unsqueeze(1)], dim=1)
texts = [' '.join([self.char_list[tid] for tid in text[1:]]) for text in ys]
texts = [text.replace('<eos>', '').strip() for text in texts]
(num, _) = self.graph_compiler.compile(texts, self.P, replicate_den=False)
supervision = torch.stack([torch.arange(batch_size), torch.zeros(batch_size), (torch.ones(batch_size) * nnet_output_single.size(1))], dim=1).to(torch.int32)
dense_fsa_vec = k2.DenseFsaVec(nnet_output_single.repeat(batch_size, 1, 1), supervision)
num_scores = torch.stack(step_intersect(num, dense_fsa_vec), dim=0)
tot_scores_frame = (num_scores - den_scores)
tot_scores = torch.logsumexp(tot_scores_frame, dim=(- 1))
eos_pos = torch.where((next_tokens == self.eos))[0]
if (len(eos_pos) > 0):
tot_scores[eos_pos] = tot_scores_frame[(eos_pos.item(), (- 1))]
blk_pos = torch.where((next_tokens == self.blank))[0]
if (len(blk_pos) > 0):
tot_scores[blk_pos] = self.logzero
tok_scores = (tot_scores - prev_score)
state = (nnet_output_single, den_scores, tot_scores)
return (tok_scores, state)
def final_score(self, state):
    """Final score contribution at end-of-sentence; this scorer adds none."""
    return 0
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, lr_scheduler: torch.optim.lr_scheduler, max_norm: float=0, k_one2many: int=1, lambda_one2many: float=1.0, use_wandb: bool=False, use_fp16: bool=False, scaler: torch.cuda.amp.GradScaler=None):
    """Train `model` for one epoch and return the epoch-averaged metrics.

    Supports optional mixed precision (`use_fp16` with `scaler`), gradient
    clipping (`max_norm`), hybrid one-to-one / one-to-many loss weighting
    (`k_one2many`, `lambda_one2many`), and wandb logging from rank 0.
    Exits the process if the loss becomes non-finite.
    Returns a dict mapping metric name -> global average over the epoch.
    """
    model.train()
    criterion.train()
    # window_size=1 so the console shows the most recent value of each meter.
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    # Prefetcher pulls the next batch while the current one is being processed.
    prefetcher = data_prefetcher(data_loader, device, prefetch=True)
    (samples, targets) = prefetcher.next()
    for idx in metric_logger.log_every(range(len(data_loader)), print_freq, header):
        optimizer.zero_grad()
        # autocast is a no-op when use_fp16 is False.
        with torch.cuda.amp.autocast(enabled=use_fp16):
            outputs = model(samples)
            # k_one2many > 0 enables the hybrid one-to-many auxiliary loss path.
            if (k_one2many > 0):
                loss_dict = train_hybrid(outputs, targets, k_one2many, criterion, lambda_one2many)
            else:
                loss_dict = criterion(outputs, targets)
            weight_dict = criterion.weight_dict
            # Only losses that have a configured weight contribute to backprop.
            losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # Reduce across distributed workers for logging purposes only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort rather than continue training on NaN/inf loss.
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        if use_fp16:
            scaler.scale(losses).backward()
            # Unscale before clipping so max_norm applies to the true gradients.
            scaler.unscale_(optimizer)
            if (max_norm > 0):
                grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            else:
                grad_total_norm = utils.get_total_grad_norm(model.parameters(), norm_type=2)
            scaler.step(optimizer)
            scaler.update()
        else:
            losses.backward()
            if (max_norm > 0):
                grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            else:
                grad_total_norm = utils.get_total_grad_norm(model.parameters(), norm_type=2)
            optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(grad_norm=grad_total_norm)
        (samples, targets) = prefetcher.next()
        # NOTE(review): the scheduler steps once per *iteration*, not per epoch —
        # presumably an iteration-based schedule; confirm against the scheduler config.
        lr_scheduler.step()
        # Only rank 0 logs to wandb, every print_freq iterations.
        if (use_wandb and ((idx % print_freq) == 0) and (dist.get_rank() == 0)):
            log_data = dict(loss=loss_value, lr=optimizer.param_groups[0]['lr'], grad_norm=grad_total_norm, **loss_dict_reduced_scaled)
            log_data = {('train/' + k): v for (k, v) in log_data.items()}
            wandb.log(data=log_data, step=((epoch * len(data_loader)) + idx))
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
.parametrize('func,args', [(foo, [1]), (bariza, [1, 2, 3, 4]), (complex_function_name, [1, 2, 3, 4]), (old_name, [1, 2]), (renamed, [1, 2])])
def test_construct_arguments_with_unexpected_args_raises_typeerror(func, args):
    """Passing more positional args than `func` accepts must raise TypeError
    whose message mentions the unexpected argument(s)."""
    pattern = re.compile('.*unexpected.*')
    with pytest.raises(TypeError) as excinfo:
        Signature(func).construct_arguments(args, {}, {})
    message = excinfo.value.args[0]
    assert pattern.match(message)
class ImagesFromDataList(data.Dataset):
    """Dataset over an in-memory list of image tensors.

    Applies the optional transform and returns each image with a leading
    batch dimension (unbatched (C, H, W) tensors become (1, C, H, W)).

    :param images: non-empty list of image tensors.
    :param transform: optional callable applied to each image.
    :raises RuntimeError: if `images` is empty.
    """

    def __init__(self, images, transform=None):
        if (len(images) == 0):
            raise RuntimeError('Dataset contains 0 images!')
        self.images = images
        self.transform = transform

    def __getitem__(self, index):
        img = self.images[index]
        if (self.transform is not None):
            img = self.transform(img)
        # BUG FIX: the previous check `len(img.size())` was truthy for any
        # non-scalar tensor, so *every* item was unsqueezed — an already
        # batched 4-D tensor became 5-D. Only unbatched (C, H, W) tensors
        # need the extra batch dimension.
        if len(img.size()) == 3:
            img = img.unsqueeze(0)
        return img

    def __len__(self):
        return len(self.images)

    def __repr__(self):
        fmt_str = (('Dataset ' + self.__class__.__name__) + '\n')
        fmt_str += '    Number of images: {}\n'.format(self.__len__())
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
class CloudpickleWrapper():
    """Make an arbitrary callable picklable by serializing it with cloudpickle.

    Useful for sending lambdas/closures to subprocess workers: pickling the
    wrapper serializes `fn` via cloudpickle (which handles closures that the
    stdlib pickle cannot), and unpickling restores it with plain pickle.
    """

    def __init__(self, fn: Callable):
        self.fn = fn

    def __getstate__(self):
        # Imported lazily so cloudpickle is only required when pickling.
        import cloudpickle
        return cloudpickle.dumps(self.fn)

    def __setstate__(self, ob):
        # cloudpickle output is a regular pickle stream, so stdlib pickle suffices.
        import pickle
        self.fn = pickle.loads(ob)

    def __call__(self, *args, **kwargs):
        # Generalized (backward-compatible): the original accepted no
        # arguments, restricting the wrapper to nullary callables.
        return self.fn(*args, **kwargs)
def get_incomplete_data(voxels, p=0.3):
    """Randomly keep a fraction `p` of the voxels and impute the rest.

    A binary mask with exactly ``int(N * p)`` ones is shuffled over the
    volume; masked-out voxels are replaced by the mean of the kept ones.

    :param voxels: input voxel array of any shape.
    :param p: fraction of voxels to keep (default 0.3).
    :return: tuple ``(corrupted_voxels, mask)`` with the same shape as input.
    """
    total = int(np.prod(voxels.shape))
    num_kept = int(total * p)
    # Exactly `num_kept` ones, randomly placed.
    flat_mask = np.append(np.ones(num_kept), np.zeros(total - num_kept))
    np.random.shuffle(flat_mask)
    mask = flat_mask.reshape(*voxels.shape)
    # Mean over the voxels that survive the mask.
    kept_mean = np.sum(voxels * mask) / np.sum(mask)
    corrupted = (voxels * mask) + ((np.ones(shape=voxels.shape) * kept_mean) * (1 - mask))
    return (corrupted, mask)
def data_transforms(dataset_type='train', normlize_type='-1-1'):
    """Return the preprocessing pipeline for the given split.

    Train and validation currently share the same pipeline: resize to 97%,
    reshape, normalize with `normlize_type`, and cast the dtype.

    :param dataset_type: 'train' or 'val'; any other value raises KeyError.
    :param normlize_type: normalization scheme forwarded to Normalize.
    """
    steps = [ReSize(size=0.97), Reshape(), Normalize(normlize_type), Retype()]
    transforms = {split: Compose(list(steps)) for split in ('train', 'val')}
    return transforms[dataset_type]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.