code stringlengths 101 5.91M |
|---|
# NOTE(review): presumably a registration decorator for the class below — the
# leading `@` (and possibly part of the registry name, e.g. ROI_HEADS_REGISTRY)
# looks lost in extraction; as written the returned decorator is discarded.
# TODO confirm against the upstream project.
_HEADS_REGISTRY.register()
class PointRendROIHeads(StandardROIHeads):
    """ROI heads with PointRend mask refinement.

    A coarse mask head first predicts a low-resolution mask from a regular
    grid of sampled points; a point head then refines the logits at
    adaptively selected (most uncertain) point locations.
    """

    def __init__(self, cfg, input_shape):
        super().__init__(cfg, input_shape)
        self._init_mask_head(cfg, input_shape)

    def _init_mask_head(self, cfg, input_shape):
        # Build the coarse mask head; also triggers point-head construction.
        self.mask_on = cfg.MODEL.MASK_ON
        if (not self.mask_on):
            return
        self.mask_coarse_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES
        self.mask_coarse_side_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        # Map feature name -> scale (1 / stride) relative to the input image.
        self._feature_scales = {k: (1.0 / v.stride) for (k, v) in input_shape.items()}
        in_channels = np.sum([input_shape[f].channels for f in self.mask_coarse_in_features])
        self.mask_coarse_head = build_mask_head(cfg, ShapeSpec(channels=in_channels, width=self.mask_coarse_side_size, height=self.mask_coarse_side_size))
        self._init_point_head(cfg, input_shape)

    def _init_point_head(self, cfg, input_shape):
        # Build the point head that refines the coarse mask predictions.
        self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON
        if (not self.mask_point_on):
            return
        # Point head predicts per-class logits, so class counts must agree.
        assert (cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES)
        self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
        self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
        self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
        self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
        # Inference-time subdivision schedule.
        self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
        self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
        in_channels = np.sum([input_shape[f].channels for f in self.mask_point_in_features])
        # The point head consumes per-point feature vectors (1x1 "images").
        self.mask_point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))

    def _forward_mask(self, features, instances):
        """Training: return the mask loss dict for foreground proposals.
        Inference: attach refined mask predictions to `instances`."""
        if (not self.mask_on):
            return ({} if self.training else instances)
        if self.training:
            (proposals, _) = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            mask_coarse_logits = self._forward_mask_coarse(features, proposal_boxes)
            losses = {'loss_mask': mask_rcnn_loss(mask_coarse_logits, proposals)}
            losses.update(self._forward_mask_point(features, mask_coarse_logits, proposals))
            return losses
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            mask_coarse_logits = self._forward_mask_coarse(features, pred_boxes)
            mask_logits = self._forward_mask_point(features, mask_coarse_logits, instances)
            mask_rcnn_inference(mask_logits, instances)
            return instances

    def _forward_mask_coarse(self, features, boxes):
        """Run the coarse mask head on a regular grid of points per box."""
        # One regular side_size x side_size grid per box.
        # NOTE(review): np.sum over a generator is unreliable on modern NumPy —
        # presumably a list comprehension was intended; confirm.
        point_coords = generate_regular_grid_point_coords(np.sum((len(x) for x in boxes)), self.mask_coarse_side_size, boxes[0].device)
        mask_coarse_features_list = [features[k] for k in self.mask_coarse_in_features]
        features_scales = [self._feature_scales[k] for k in self.mask_coarse_in_features]
        (mask_features, _) = point_sample_fine_grained_features(mask_coarse_features_list, features_scales, boxes, point_coords)
        return self.mask_coarse_head(mask_features)

    def _forward_mask_point(self, features, mask_coarse_logits, instances):
        """Refine coarse mask logits with the point head.

        Training: returns the point-loss dict. Inference: returns upsampled
        mask logits refined at the most uncertain grid points."""
        if (not self.mask_point_on):
            return ({} if self.training else mask_coarse_logits)
        mask_features_list = [features[k] for k in self.mask_point_in_features]
        features_scales = [self._feature_scales[k] for k in self.mask_point_in_features]
        if self.training:
            proposal_boxes = [x.proposal_boxes for x in instances]
            gt_classes = cat([x.gt_classes for x in instances])
            # Point selection is not differentiable; keep it out of the graph.
            with torch.no_grad():
                point_coords = get_uncertain_point_coords_with_randomness(mask_coarse_logits, (lambda logits: calculate_uncertainty(logits, gt_classes)), self.mask_point_train_num_points, self.mask_point_oversample_ratio, self.mask_point_importance_sample_ratio)
            (fine_grained_features, point_coords_wrt_image) = point_sample_fine_grained_features(mask_features_list, features_scales, proposal_boxes, point_coords)
            coarse_features = point_sample(mask_coarse_logits, point_coords, align_corners=False)
            point_logits = self.mask_point_head(fine_grained_features, coarse_features)
            return {'loss_mask_point': roi_mask_point_loss(point_logits, instances, point_coords_wrt_image)}
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            pred_classes = cat([x.pred_classes for x in instances])
            # Nothing to refine when no instances were detected.
            if (len(pred_classes) == 0):
                return mask_coarse_logits
            mask_logits = mask_coarse_logits.clone()
            for subdivions_step in range(self.mask_point_subdivision_steps):
                # Double the resolution, then re-predict the uncertain points.
                mask_logits = interpolate(mask_logits, scale_factor=2, mode='bilinear', align_corners=False)
                (H, W) = mask_logits.shape[(- 2):]
                # If the point budget can cover the *next* upsampled grid
                # entirely, skip refinement at this intermediate step.
                if ((self.mask_point_subdivision_num_points >= ((4 * H) * W)) and (subdivions_step < (self.mask_point_subdivision_steps - 1))):
                    continue
                uncertainty_map = calculate_uncertainty(mask_logits, pred_classes)
                (point_indices, point_coords) = get_uncertain_point_coords_on_grid(uncertainty_map, self.mask_point_subdivision_num_points)
                (fine_grained_features, _) = point_sample_fine_grained_features(mask_features_list, features_scales, pred_boxes, point_coords)
                coarse_features = point_sample(mask_coarse_logits, point_coords, align_corners=False)
                point_logits = self.mask_point_head(fine_grained_features, coarse_features)
                # Scatter the refined logits back into the upsampled mask.
                (R, C, H, W) = mask_logits.shape
                point_indices = point_indices.unsqueeze(1).expand((- 1), C, (- 1))
                mask_logits = mask_logits.reshape(R, C, (H * W)).scatter_(2, point_indices, point_logits).view(R, C, H, W)
            return mask_logits
def get_random_entry_subset(entry_tuples, manualseed, subset_numimgs):
    """Return a reproducible random subset of `subset_numimgs` entries.

    Args:
        entry_tuples: sequence of candidate entries.
        manualseed: seed fed to `random.seed` for reproducibility.
        subset_numimgs: number of entries to sample (without replacement).

    Returns:
        A new list with `subset_numimgs` randomly chosen entries.
    """
    logger.debug('using manual random seed: {}'.format(manualseed))
    random.seed(manualseed)
    entry_random_subset = random.sample(entry_tuples, subset_numimgs)
    # Bug fix: the original format string had two placeholders but three
    # arguments (the subset itself was silently dropped), and reported the
    # subset length instead of the total population size.
    logger.debug('subselected {} random images from total of {}: {}'.format(subset_numimgs, len(entry_tuples), entry_random_subset))
    return entry_random_subset
def display_as_slider(*img_viewers):
    """Show the images of one or more viewers behind a single index slider.

    Each viewer must expose a `filenames` list; the slider range is taken
    from the first viewer and the image at the chosen index is displayed
    from every viewer in order.
    """
    def load_img(index=0):
        for img_viewer in img_viewers:
            # Bug fix: the original leaked the file handle
            # (open(...).read() with no close); use a context manager.
            with open(img_viewer.filenames[index], 'rb') as fh:
                display(Image(fh.read()))
    interact(load_img, index=(0, (len(img_viewers[0].filenames) - 1)))
class SMPL_DATA(data.Dataset):
    """Dataset of SMPL mesh pairs for pose transfer.

    Each sample pairs a random "identity" mesh with a random "pose" mesh and
    also returns the ground-truth mesh (the identity subject in the pose
    mesh's pose). Meshes are read from '<path>/<subject>_<pose>.obj'.
    """

    def __init__(self, train, npoints=6890, shuffle_point=False):
        # `train` is stored but not consulted in __getitem__ below.
        self.train = train
        self.shuffle_point = shuffle_point
        # npoints: number of mesh vertices (6890 is presumably the SMPL
        # vertex count — TODO confirm).
        self.npoints = npoints
        self.path = './smpl_data/'

    def __getitem__(self, index):
        # NOTE: `index` is ignored; every access draws a fresh random pair of
        # subject ids (0..15) and pose ids (200..599).
        identity_mesh_i = np.random.randint(0, 16)
        identity_mesh_p = np.random.randint(200, 600)
        pose_mesh_i = np.random.randint(0, 16)
        pose_mesh_p = np.random.randint(200, 600)
        identity_mesh = pymesh.load_mesh(((((self.path + str(identity_mesh_i)) + '_') + str(identity_mesh_p)) + '.obj'))
        pose_mesh = pymesh.load_mesh(((((self.path + str(pose_mesh_i)) + '_') + str(pose_mesh_p)) + '.obj'))
        # Ground truth: the identity subject rendered in the target pose.
        gt_mesh = pymesh.load_mesh(((((self.path + str(identity_mesh_i)) + '_') + str(pose_mesh_p)) + '.obj'))
        pose_points = pose_mesh.vertices
        identity_points = identity_mesh.vertices
        identity_faces = identity_mesh.faces
        gt_points = gt_mesh.vertices
        # Center each point cloud on its bounding-box midpoint.
        pose_points = (pose_points - ((pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2))
        pose_points = torch.from_numpy(pose_points.astype(np.float32))
        identity_points = (identity_points - ((identity_mesh.bbox[0] + identity_mesh.bbox[1]) / 2))
        identity_points = torch.from_numpy(identity_points.astype(np.float32))
        gt_points = (gt_points - ((gt_mesh.bbox[0] + gt_mesh.bbox[1]) / 2))
        gt_points = torch.from_numpy(gt_points.astype(np.float32))
        # Random vertex permutations (applied only when shuffle_point is set;
        # random_sample is returned either way so callers can undo it).
        random_sample = np.random.choice(self.npoints, size=self.npoints, replace=False)
        random_sample2 = np.random.choice(self.npoints, size=self.npoints, replace=False)
        new_face = identity_faces
        if self.shuffle_point:
            pose_points = pose_points[random_sample2]
            identity_points = identity_points[random_sample]
            gt_points = gt_points[random_sample]
            # Remap face vertex indices to the permuted vertex order.
            face_dict = {}
            for i in range(len(random_sample)):
                face_dict[random_sample[i]] = i
            new_f = []
            for i in range(len(identity_faces)):
                new_f.append([face_dict[identity_faces[i][0]], face_dict[identity_faces[i][1]], face_dict[identity_faces[i][2]]])
            new_face = np.array(new_f)
        return (pose_points, random_sample, gt_points, identity_points, new_face)

    def __len__(self):
        # Fixed epoch length; samples are drawn randomly in __getitem__.
        return 4000
class FasterRCNNNASFeatureExtractor(faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
    """Faster R-CNN feature extractor backed by NASNet-A (large).

    First-stage (RPN) features are the concatenation of the Cell_10 and
    Cell_11 endpoints; the box classifier resumes the remaining NASNet cells
    on the cropped proposal features.
    """

    def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0):
        # The NASNet detection endpoint only supports an output stride of 16.
        if (first_stage_features_stride != 16):
            raise ValueError('`first_stage_features_stride` must be 16.')
        super(FasterRCNNNASFeatureExtractor, self).__init__(is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay)

    def preprocess(self, resized_inputs):
        # Map pixel values from [0, 255] to [-1, 1] as NASNet expects.
        return (((2.0 / 255.0) * resized_inputs) - 1.0)

    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Extract RPN features: channel-concat of Cell_10 and Cell_11."""
        del scope
        if (len(preprocessed_inputs.get_shape().as_list()) != 4):
            raise ValueError(('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape()))
        with slim.arg_scope(nasnet_large_arg_scope_for_detection(is_batch_norm_training=self._train_batch_norm)):
            (_, end_points) = nasnet.build_nasnet_large(preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_11')
        # The concatenated pair is split back apart in
        # _extract_box_classifier_features.
        rpn_feature_map = tf.concat([end_points['Cell_10'], end_points['Cell_11']], 3)
        # Restore the static batch dimension, which the NASNet builder drops.
        batch = preprocessed_inputs.get_shape().as_list()[0]
        shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
        rpn_feature_map_shape = ([batch] + shape_without_batch)
        rpn_feature_map.set_shape(rpn_feature_map_shape)
        return rpn_feature_map

    def _extract_box_classifier_features(self, proposal_feature_maps, scope):
        """Run the remaining NASNet cells on cropped proposal features."""
        del scope
        # Undo the concat performed in _extract_proposal_features.
        (hidden_previous, hidden) = tf.split(proposal_feature_maps, 2, axis=3)
        hparams = nasnet._large_imagenet_config(is_training=self._is_training)
        total_num_cells = (hparams.num_cells + 2)
        total_num_cells += 2
        normal_cell = nasnet_utils.NasNetANormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
        reduction_cell = nasnet_utils.NasNetAReductionCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
        with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training):
            with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):
                with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format):
                    # Resume the cell stack where the proposal extractor
                    # stopped (after Cell_11).
                    start_cell_num = 12
                    true_cell_num = 15
                    with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
                        net = _build_nasnet_base(hidden_previous, hidden, normal_cell=normal_cell, reduction_cell=reduction_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num)
        proposal_classifier_features = net
        return proposal_classifier_features

    def restore_from_classification_checkpoint_fn(self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope):
        """Map detection-scoped variables to the EMA names stored in NASNet
        classification checkpoints.

        Returns:
            Dict of checkpoint variable name -> model variable.
        """
        variables_to_restore = {}
        for variable in tf.global_variables():
            if variable.op.name.startswith(first_stage_feature_extractor_scope):
                var_name = variable.op.name.replace((first_stage_feature_extractor_scope + '/'), '')
                # Classification checkpoints store the EMA copies.
                var_name += '/ExponentialMovingAverage'
                variables_to_restore[var_name] = variable
            if variable.op.name.startswith(second_stage_feature_extractor_scope):
                var_name = variable.op.name.replace((second_stage_feature_extractor_scope + '/'), '')
                var_name += '/ExponentialMovingAverage'
                variables_to_restore[var_name] = variable
        return variables_to_restore
class BinaryAccuracy(ZooKerasCreator, JavaValue):
    """Validation metric measuring binary classification accuracy.

    Thin JVM-backed wrapper: construction just forwards to the Java side
    with no extra arguments beyond the numeric type tag.
    """

    def __init__(self, bigdl_type='float'):
        super().__init__(None, bigdl_type)
# NOTE(review): presumably a module-registration decorator for the class below
# (e.g. `@PIPELINES.register_module()`) whose leading `@` and registry name
# were lost in extraction; as written the call result is discarded. TODO confirm.
_module()
class MultiRotateAugOCR():
    """Test-time augmentation that applies `transforms` to several rotated
    copies of the input image.

    Only multiples of 90 degrees are supported; 0 degrees is always included.
    Unless `force_rotate` is set, landscape images (height <= width) are not
    rotated at all.
    """

    def __init__(self, transforms, rotate_degrees=None, force_rotate=False):
        self.transforms = Compose(transforms)
        self.force_rotate = force_rotate
        if (rotate_degrees is not None):
            # Accept a single int or a list of ints.
            self.rotate_degrees = (rotate_degrees if isinstance(rotate_degrees, list) else [rotate_degrees])
            assert mmcv.is_list_of(self.rotate_degrees, int)
            for deg in self.rotate_degrees:
                assert (0 <= deg < 360)
                assert ((deg % 90) == 0)
            # The un-rotated view is always part of the augmentation set.
            if (0 not in self.rotate_degrees):
                self.rotate_degrees.append(0)
        else:
            self.rotate_degrees = [0]

    def __call__(self, results):
        (ori_height, ori_width) = results['img_shape'][:2]
        # Rotate only portrait images unless explicitly forced.
        if self.force_rotate or (ori_height > ori_width):
            rotate_degrees = self.rotate_degrees
        else:
            rotate_degrees = [0]
        # Number of counter-clockwise quarter turns per supported angle.
        quarter_turns = {90: 1, 180: 2, 270: 3}
        aug_data = []
        for degree in set(rotate_degrees):
            _results = results.copy()
            if degree in quarter_turns:
                _results['img'] = np.rot90(_results['img'], quarter_turns[degree])
            aug_data.append(self.transforms(_results))
        # Transpose list-of-dicts into dict-of-lists.
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for (key, val) in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(transforms={self.transforms}, '
                f'rotate_degrees={self.rotate_degrees})')
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
    """Collect LAMB debug statistics from the optimizer state and emit one
    histogram per statistic ('weight_norm', 'adam_norm', 'trust_ratio').

    `token_count` is used as the global step for the event writer.
    """
    stats = collections.defaultdict(list)
    tracked_keys = ('weight_norm', 'adam_norm', 'trust_ratio')
    for group in optimizer.param_groups:
        for param in group['params']:
            state = optimizer.state[param]
            # Only statistics the optimizer actually recorded are gathered.
            for key in tracked_keys:
                if (key in state):
                    stats[key].append(state[key])
    for (key, values) in stats.items():
        event_writer.add_histogram(f'lamb/{key}', torch.tensor(values), token_count)
def score_cooked(allcomps, n=4, ground=0, smooth=1):
    """Aggregate per-sentence BLEU statistics and compute BLEU scores.

    Args:
        allcomps: list of per-sentence stat dicts with keys 'testlen',
            'reflen', 'guess' (proposed n-gram counts) and 'correct'
            (matched n-gram counts); the latter two are length-n lists.
        n: maximum n-gram order.
        ground: unused; kept for interface compatibility.
        smooth: if 1, add-one smoothing is applied for orders k > 0.

    Returns:
        List of (n + 1) floats: overall BLEU (brevity penalty applied)
        followed by the per-order precision scores.
    """
    totalcomps = {'testlen': 0, 'reflen': 0, 'guess': ([0] * n), 'correct': ([0] * n)}
    # Pool the statistics over all sentences.
    for comps in allcomps:
        for key in ['testlen', 'reflen']:
            totalcomps[key] += comps[key]
        for key in ['guess', 'correct']:
            for k in range(n):
                totalcomps[key][k] += comps[key][k]
    logbleu = 0.0
    all_bleus = []
    for k in range(n):
        correct = totalcomps['correct'][k]
        guess = totalcomps['guess'][k]
        addsmooth = 0
        if ((smooth == 1) and (k > 0)):
            addsmooth = 1
        # sys.float_info.min guards against log(0).
        logbleu += (math.log(((correct + addsmooth) + sys.float_info.min)) - math.log(((guess + addsmooth) + sys.float_info.min)))
        if (guess == 0):
            # Bug fix: this line was truncated to `append((- ))` (a syntax
            # error). Restore a very negative log-score so the final exp()
            # flushes this order's precision to 0.0.
            all_bleus.append(-10000)
        else:
            all_bleus.append((math.log((correct + sys.float_info.min)) - math.log(guess)))
    logbleu /= float(n)
    all_bleus.insert(0, logbleu)
    # Brevity penalty (in log space, clipped at 0).
    brevPenalty = min(0, (1 - (float((totalcomps['reflen'] + 1)) / (totalcomps['testlen'] + 1))))
    for i in range(len(all_bleus)):
        if (i == 0):
            all_bleus[i] += brevPenalty
        all_bleus[i] = math.exp(all_bleus[i])
    return all_bleus
def test_doi_subdivisions():
    """A DOI with dotted sub-divisions must be extracted from the line."""
    ref_line = u'[10] A. Smith et al., "Introduction to Particle Physics", 2017, Springer Publishing, ISBN: , DOI: 10.978.819252/12214.'
    parsed = get_references(ref_line)[0]
    first_ref = parsed[0]
    assert (first_ref['doi'] == [u'doi:10.978.819252/12214'])
    assert (first_ref['linemarker'] == [u'10'])
class SharedEncoder(super_sac.nets.Encoder):
    """Two-layer MLP encoder mapping obs_dict['obs'] (dim) back to (dim)."""

    def __init__(self, dim):
        super().__init__()
        self._dim = dim
        self.fc0 = nn.Linear(dim, 128)
        self.fc1 = nn.Linear(128, dim)

    def embedding_dim(self):
        # The embedding size equals the input dimension.
        return self._dim

    def forward(self, obs_dict):
        hidden = F.relu(self.fc0(obs_dict['obs']))
        return F.relu(self.fc1(hidden))
def dobldobl_decomposition(deg):
    """Retrieve the witness-point decomposition computed in double-double
    precision from PHCpack.

    :param deg: degree of the witness set.
    :return: list of (witness points, trace-sum difference) per component.
    """
    from phcpy.phcpy2c3 import py2c_factor_number_of_dobldobl_components
    from phcpy.phcpy2c3 import py2c_factor_witness_points_of_dobldobl_component
    from phcpy.phcpy2c3 import py2c_factor_dobldobl_trace_sum_difference as dtf
    nbcmp = py2c_factor_number_of_dobldobl_components()
    result = []
    # Components are indexed from 1 in the PHCpack C interface.
    for i in range(1, (nbcmp + 1)):
        compnt = py2c_factor_witness_points_of_dobldobl_component(deg, i)
        tsd = dtf(deg, i, len(compnt), compnt)
        # SECURITY NOTE: eval() parses the string the C layer returns; only
        # acceptable because the input comes from PHCpack itself, never from
        # an untrusted source.
        result.append((eval(compnt), tsd))
    return result
def get(data_path, seed, fixed_order=False, pc_valid=0):
    """Load MNIST and split it into 5 binary tasks (digits 0/1, 2/3, ..., 8/9).

    Args:
        data_path: directory where torchvision stores/downloads MNIST.
        seed: unused; kept for interface compatibility with sibling loaders.
        fixed_order: unused; kept for interface compatibility.
        pc_valid: unused; kept for interface compatibility.

    Returns:
        (data, taskcla, size) where data[t]['train'|'test'|'valid'] holds
        'x' (float tensor, N x 1 x 28 x 28) and 'y' (LongTensor of 0/1
        within-task labels), taskcla lists (task, ncla) pairs, and size is
        the input shape [1, 28, 28].
    """
    size = [1, 28, 28]
    mean = (0.1307,)
    std = (0.3081,)
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    dat = {
        'train': datasets.MNIST(data_path, train=True, download=True, transform=transform),
        'test': datasets.MNIST(data_path, train=False, download=True, transform=transform),
    }
    data = {}
    for t in range(5):
        # Task t covers digits 2t and 2t+1, relabelled to 0/1.
        data[t] = {'name': 'mnist-{}-{}'.format(2 * t, (2 * t) + 1), 'ncla': 2}
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        for t in range(5):
            data[t][s] = {'x': [], 'y': []}
        for (image, target) in loader:
            digit = target.item()
            # divmod maps digit -> (task index, within-task binary label),
            # replacing the original ten-branch if chain.
            (task, label) = divmod(digit, 2)
            data[task][s]['x'].append(image)
            data[task][s]['y'].append(label)
    # Stack the per-sample tensors into single dense tensors per split.
    for t in range(5):
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
    # NOTE: the "validation" split is a straight copy of the training split,
    # exactly as in the original implementation (pc_valid is ignored).
    for t in list(data.keys()):
        data[t]['valid'] = {'x': data[t]['train']['x'].clone(), 'y': data[t]['train']['y'].clone()}
    taskcla = []
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
def conv3x3(in_planes, out_planes, kernel=3, strd=1, dilation=1, padding=1, bias=False):
    """Convenience wrapper building a (by default) 3x3 2-D convolution.

    All geometry parameters fall through to nn.Conv2d unchanged.
    """
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel,
        dilation=dilation,
        stride=strd,
        padding=padding,
        bias=bias,
    )
    return layer
def graph_propagation_soft(edges, score, max_sz, step=0.1, **kwargs):
    """Cluster a weighted graph by iteratively raising an affinity threshold,
    then diffuse labels onto the vertices that were split off.

    Args:
        edges: (E, 2) array of node-id pairs.
        score: (E,) affinity score per edge.
        max_sz: maximum allowed connected-component size.
        step: fraction of the remaining (1 - th) range by which the
            threshold is raised on each iteration.
        **kwargs: forwarded to `diffusion`.

    Returns:
        (label, label_fusion) as produced by `diffusion`.
    """
    edges = np.sort(edges, axis=1)
    th = score.min()
    # Affinity lookup keyed by the (sorted) node pair of each edge.
    score_dict = {}
    for (i, e) in enumerate(edges):
        score_dict[(e[0], e[1])] = score[i]
    nodes = np.sort(np.unique(edges.flatten()))
    # Bug fix: `np.int` was removed in NumPy 1.24; use the builtin int dtype.
    mapping = ((- 1) * np.ones((nodes.max() + 1), dtype=int))
    mapping[nodes] = np.arange(nodes.shape[0])
    link_idx = mapping[edges]
    vertex = [Data(n) for n in nodes]
    for (l, s) in zip(link_idx, score):
        vertex[l[0]].add_link(vertex[l[1]], s)
    # Initial components under the size constraint only.
    (comps, remain) = connected_components_constraint(vertex, max_sz)
    first_vertex_idx = np.array([mapping[n.name] for c in comps for n in c])
    fusion_vertex_idx = np.setdiff1d(np.arange(nodes.shape[0]), first_vertex_idx, assume_unique=True)
    components = comps[:]
    # Keep raising the threshold until every vertex lands in a component.
    while remain:
        th = (th + ((1 - th) * step))
        (comps, remain) = connected_components_constraint(remain, max_sz, score_dict, th)
        components.extend(comps)
    label_dict = {}
    for (i, c) in enumerate(components):
        for n in c:
            label_dict[n.name] = i
    log('Propagation ...')
    prop_vertex = [vertex[idx] for idx in fusion_vertex_idx]
    (label, label_fusion) = diffusion(prop_vertex, label_dict, score_dict, **kwargs)
    return (label, label_fusion)
class KerasONNXRuntimeModel(ONNXRuntimeModel, KerasOptimizedModel):
    """Keras model accelerated through ONNX Runtime.

    Wraps either a tf.keras.Model (exported to ONNX via tf2onnx) or an
    existing .onnx file, exposing the Keras optimized-model interface.
    """

    def __init__(self, model, input_spec=None, onnxruntime_session_options=None, **export_kwargs):
        """
        :param model: a tf.keras.Model to export, or a path to an .onnx file.
        :param input_spec: tf.TensorSpec, sequence of specs, or a dict
            mapping input name -> spec (required for subclassed models).
        :param onnxruntime_session_options: options for the ORT session.
        :param export_kwargs: extra kwargs for tf2onnx.convert.from_keras.
        """
        KerasOptimizedModel.__init__(self)
        with TemporaryDirectory() as tmpdir:
            if isinstance(model, tf.keras.Model):
                invalidInputError((hasattr(model, 'input_shape') or (input_spec is not None)), 'Subclassed model must specify `input_spec` parameter.')
                # 'arg' mode: inputs passed positionally; 'kwarg': by name.
                self._mode = 'arg'
                if (input_spec is None):
                    input_spec = tf.TensorSpec(model.input_shape, model.dtype)
                elif isinstance(input_spec, dict):
                    input_spec = [tf.TensorSpec.from_spec(spec, name=name) for (name, spec) in input_spec.items()]
                    self._mode = 'kwarg'
                if isinstance(input_spec, tf.TensorSpec):
                    input_spec = (input_spec,)
                # Record the numpy dtype of each declared input so calls can
                # be converted in preprocess().
                if (self._mode == 'arg'):
                    self._inputs_dtypes = [spec.dtype.as_numpy_dtype for spec in input_spec]
                else:
                    self._inputs_dtypes = {spec.name: spec.dtype.as_numpy_dtype for spec in input_spec}
                onnx_path = os.path.join(tmpdir, 'tmp.onnx')
                tf2onnx.convert.from_keras(model, input_signature=input_spec, output_path=onnx_path, **export_kwargs)
            else:
                # `model` is assumed to be a path to an existing ONNX file.
                onnx_path = model
            ONNXRuntimeModel.__init__(self, onnx_path, session_options=onnxruntime_session_options)

    def preprocess(self, args: Sequence[Any], kwargs: Dict[(str, Any)]):
        """Convert call arguments to numpy arrays of the export dtypes."""
        invalidInputError((self.ortsess is not None), 'Please create an instance by InferenceOptimizer.trace()')
        inputs = (args if (self._mode == 'arg') else kwargs)
        inputs = convert_all(inputs, 'numpy', self._inputs_dtypes)
        return inputs

    def forward(self, inputs: Union[(Sequence[Any], Dict[(str, Any)])]):
        # Positional inputs go through forward_step; name-keyed inputs can be
        # fed to the ONNX Runtime session directly.
        if isinstance(inputs, Sequence):
            return self.forward_step(*inputs)
        else:
            return self.ortsess.run(None, inputs)

    def postprocess(self, outputs: Sequence[Any]):
        """Convert ORT outputs to tf float32 tensors; unwrap singletons."""
        outputs = convert_all(outputs, types='tf', dtypes=tf.float32)
        if (len(outputs) == 1):
            outputs = outputs[0]
        return outputs

    def status(self):
        # NOTE(review): presumably decorated with @property upstream — it is
        # read as `self.status[...]` in _save below; confirm.
        status = super().status
        status.update({'onnx_path': 'onnx_saved_model.onnx', 'attr_path': 'onnx_saved_model_attr.pkl', 'compile_path': 'onnx_saved_model_compile.pkl', 'intra_op_num_threads': self.session_options.intra_op_num_threads, 'inter_op_num_threads': self.session_options.inter_op_num_threads})
        return status

    def _load(path):
        """Load a saved model directory produced by _save.

        NOTE(review): takes `path` in place of `self` — presumably decorated
        with @staticmethod upstream; confirm against the original project.
        """
        status = KerasONNXRuntimeModel._load_status(path)
        if status.get('onnx_path', None):
            onnx_path = Path(status['onnx_path'])
            invalidInputError((onnx_path.suffix == '.onnx'), "Path of onnx model must be with '.onnx' suffix.")
        else:
            invalidInputError(False, "nano_model_meta.yml must specify 'onnx_path' for loading.")
        onnx_path = (Path(path) / status['onnx_path'])
        onnxruntime_session_options = onnxruntime.SessionOptions()
        onnxruntime_session_options.intra_op_num_threads = status['intra_op_num_threads']
        onnxruntime_session_options.inter_op_num_threads = status['inter_op_num_threads']
        # NOTE(review): `input_sample` is not a declared __init__ parameter;
        # it is swallowed by **export_kwargs and unused on the load-from-path
        # branch — looks vestigial, confirm.
        model = KerasONNXRuntimeModel(model=str(onnx_path), input_sample=None, onnxruntime_session_options=onnxruntime_session_options)
        with open((Path(path) / status['attr_path']), 'rb') as f:
            attrs = SafePickle.load(f)
        for (attr_name, attr_value) in attrs.items():
            setattr(model, attr_name, attr_value)
        # Restore compile settings (loss/metrics) if they were saved.
        if os.path.exists((Path(path) / status['compile_path'])):
            with open((Path(path) / status['compile_path']), 'rb') as f:
                kwargs = SafePickle.load(f)
            model.compile(**kwargs)
        return model

    def _save(self, path, compression='fp32'):
        """Save the ONNX model, wrapper attributes and (if compiled) the
        compile configuration under `path`."""
        path = Path(path)
        path.mkdir(exist_ok=True)
        self._dump_status(path)
        super()._save_model((path / self.status['onnx_path']))
        attrs = {'_mode': self._mode, '_inputs_dtypes': self._inputs_dtypes}
        with open((path / self.status['attr_path']), 'wb') as f:
            SafePickle.dump(attrs, f)
        if self._is_compiled:
            # Re-serialize the user's compile() arguments so load can replay them.
            kwargs = {'run_eagerly': self._run_eagerly, 'steps_per_execution': int(self._steps_per_execution)}
            if (self.compiled_loss is not None):
                kwargs['loss'] = self.compiled_loss._user_losses
                kwargs['loss_weights'] = self.compiled_loss._user_loss_weights
            if (self.compiled_metrics is not None):
                user_metric = self.compiled_metrics._user_metrics
                if isinstance(user_metric, (list, tuple)):
                    kwargs['metrics'] = [m._name for m in user_metric]
                else:
                    kwargs['metrics'] = user_metric._name
                weighted_metrics = self.compiled_metrics._user_weighted_metrics
                if (weighted_metrics is not None):
                    if isinstance(weighted_metrics, (list, str)):
                        kwargs['weighted_metrics'] = [m._name for m in weighted_metrics]
                    else:
                        kwargs['weighted_metrics'] = weighted_metrics._name
            with open((path / self.status['compile_path']), 'wb') as f:
                SafePickle.dump(kwargs, f)
def type_of_target(y, input_name=''):
    """Determine the target type of `y`: 'binary', 'multiclass',
    'multilabel-indicator' or 'continuous', with a '-multioutput' suffix
    for 2-D targets with more than one column.

    Raises ValueError for non-array-like or sparse-pandas inputs.
    """
    # y must be array-like (sequence, sparse, or __array__) but not a string.
    valid = ((isinstance(y, Sequence) or issparse(y) or hasattr(y, '__array__')) and (not isinstance(y, str)))
    if (not valid):
        raise ValueError(('Expected array-like (array or non-string sequence), got %r' % y))
    sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
    if sparse_pandas:
        raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
    if is_multilabel(y):
        return 'multilabel-indicator'
    check_y_kwargs = dict(accept_sparse=True, allow_nd=True, force_all_finite=False, ensure_2d=False, ensure_min_samples=0, ensure_min_features=0)
    # Delegated validation helpers (warn / raise on legacy or invalid input).
    sklearn_catch_warnings(y, check_y_kwargs)
    sklearn_check_old_format(y)
    sklearn_check_invalid_inputs(y)
    # NOTE(review): from here on `y` is assumed to expose .ndim/.dtype/.shape
    # (presumably converted inside the helpers above) — confirm.
    if ((y.ndim == 2) and (y.shape[1] > 1)):
        suffix = '-multioutput'
    else:
        suffix = ''
    if (y.dtype.kind == 'f'):
        # Float targets with any non-integral value are continuous.
        data = (y.data if issparse(y) else y)
        if np.any((data != np.floor(data))):
            _assert_all_finite(data)
            return ('continuous' + suffix)
    first_row = (y[0] if (not issparse(y)) else y.getrow(0).data)
    # More than two distinct values (or wide 2-D rows) -> multiclass.
    if ((np.unique(y).shape[0] > 2) or ((y.ndim == 2) and (len(first_row) > 1))):
        return ('multiclass' + suffix)
    else:
        return 'binary'
class ScaledSolar(AbuModel):
    """Abundance model that linearly interpolates between Big Bang (scale 0)
    and solar (scale 1) mass fractions, with special handling for scale > 1.
    """

    # Version tag recorded in the output comment block.
    version = '10000'

    def _abu_massfrac_raw(self, scale):
        """Return normalized mass fractions for metallicity factor `scale`."""
        # Linear mix of solar and BBN abundances.
        scaled = ((self.sun * scale) + (self.bbn * (1 - scale)))
        if (scale > 1.0):
            # Super-solar: deplete species exponentially instead of letting
            # them go negative, balancing each change against He4.
            (jj,) = np.argwhere((scaled.iso == isotope.ion('He4')))
            # NOTE(review): `bbn` is computed here but not used below — confirm.
            bbn = ((self.sun * 0) + self.bbn)
            for j in np.argwhere((scaled.abu < self.sun.abu)).flat:
                scaled.abu[jj] += scaled.abu[j]
                scaled.abu[j] = (self.sun.abu[j] * np.exp(((scale - 1) * (1 - (self.bbn.abu[j] / self.sun.abu[j])))))
                scaled.abu[jj] -= scaled.abu[j]
        scaled.normalize()
        return scaled.abu

    def __init__(self, scale=1, **kw):
        """Create a scaled-solar abundance set.

        :param scale: metallicity scale factor (1 = solar).
        Recognized keyword options: silent, solar (solar data set name),
        zero (BBN data set name), check (validate matching ion lists).
        """
        silent = kw.setdefault('silent', False)
        self.setup_logger(silent=silent)
        solar = kw.pop('solar', None)
        if (solar is None):
            solar = SolAbu.default
        zero = kw.pop('zero', None)
        if (zero is None):
            zero = BBNAbu.default
        self.sun = SolAbu(solar)
        self.bbn = BBNAbu(zero)
        check = kw.get('check', False)
        if check:
            # Both data sets must cover exactly the same ion list.
            assert (len(self.bbn) == len(self.sun) == len(self.ions))
        super().__init__(scale, **kw)
        self.is_sorted = self.sun.is_sorted
        self.comment = ('Version {:6s} - {:s}'.format(self.version, (time.asctime(time.gmtime()) + ' UTC')), 'Scaled solar abundances: {:g} solar'.format(scale), 'Sun: {:s} - {:s}'.format(solar, self.sun.filename), 'BBN: {:s} - {:s}'.format(zero, self.bbn.filename), 'X = {:8G}, Y = {:8G}, Z = {:8G}'.format(*self.XYZ()))
        self.logger.info('X = {:8G}, Y = {:8G}, Z = {:8G}'.format(*self.XYZ()))
        self.close_logger()
def test_numeration_not_finding_year2():
    """The trailing year must be parsed even when pages resemble years."""
    ref_line = u'[138] Y.-B. Park, R. Mnig, and C. A. Volkert, Frequency effect on thermal fatigue damage in Cu interconnects, Thin Solid Films, vol. 515, pp. 3253 3258, 2007.'
    expected = [{'author': [u'Y.-B. Park, R. Mnig, and C. A. Volkert'], 'journal_page': [u'3253-3258'], 'journal_reference': [u'Thin Solid Films 515 (2007) 3253-3258'], 'journal_title': [u'Thin Solid Films'], 'journal_volume': [u'515'], 'journal_year': [u'2007'], 'linemarker': [u'138'], 'year': [u'2007'], 'title': [u'Frequency effect on thermal fatigue damage in Cu interconnects'], 'raw_ref': [ref_line]}]
    parsed = get_references(ref_line)[0]
    assert (parsed == expected)
def li_regularizer(scale, scope=None):
    """Return a TensorFlow "li" regularizer: `scale` times the sum of the L2
    norms of each weight-matrix row (group sparsity on input lines).

    :param scale: float in [0, 1); 0 disables regularization.
    :param scope: NOTE(review): unused — the inner name scope is hard-coded
        ('li_regularizer') and its `as scope` binding shadows this parameter;
        confirm intent.
    :raises ValueError: if scale is an integer, negative, or >= 1.
    """
    import numbers
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import standard_ops
    if isinstance(scale, numbers.Integral):
        raise ValueError(('scale cannot be an integer: %s' % scale))
    if isinstance(scale, numbers.Real):
        if (scale < 0.0):
            raise ValueError(('Setting a scale less than 0 on a regularizer: %g' % scale))
        if (scale >= 1.0):
            raise ValueError(('Setting a scale greater than 1 on a regularizer: %g' % scale))
        if (scale == 0.0):
            logging.info('Scale of 0 disables regularizer.')
            # A no-op regularizer keeps the caller's interface uniform.
            return (lambda _, name=None: None)

    def li(weights, name=None):
        """Apply li regularization: scale * sum of per-row L2 norms."""
        with tf.name_scope('li_regularizer') as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            # NOTE(review): lexicographic string comparison of versions —
            # works for the intended 0.12 cutoff but is not a general
            # version check; confirm acceptable.
            if (tf.__version__ <= '0.12'):
                standard_ops_fn = standard_ops.mul
            else:
                standard_ops_fn = standard_ops.multiply
            return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))), name=scope)
    return li
def test_shufflenetv2_unit():
    """Smoke-test ShuffleNetV2Unit in its stride-2 (downsampling) and
    stride-1 (shape-preserving) configurations."""
    inplanes, planes = 24, 116
    branch_planes = planes // 2
    x = torch.randn(1, inplanes, 56, 56)
    # Stride-2 unit needs an explicit downsample path on the shortcut.
    shortcut = nn.Sequential(
        nn.Conv2d(inplanes, branch_planes, kernel_size=3, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(branch_planes),
        nn.Conv2d(branch_planes, branch_planes, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(branch_planes),
        nn.ReLU(inplace=True),
    )
    model = ShuffleNetV2Unit(inplanes, branch_planes, 2, shortcut)
    print(model)
    outputs = model(x)
    print(outputs.shape)
    assert (outputs.shape == (1, planes, 28, 28))
    # Stride-1 unit keeps the spatial size and needs no downsample.
    model = ShuffleNetV2Unit(branch_planes, branch_planes, 1, None)
    print(model)
    outputs = model(outputs)
    print(outputs.shape)
    assert (outputs.shape == (1, planes, 28, 28))
class TFStats(object):
    """Accumulates term-frequency statistics over (term, doc) pairs, where a
    term is a tuple of words (an n-gram)."""

    def __init__(self):
        # (term, doc) -> raw occurrence count
        self.raw_counts = {}
        # term -> maximum count observed for that term in any single doc
        self.max_counts_for_term = {}

    def get_term_frequency(self, term, doc, weighting_scheme='raw', normalization_factor=0.5):
        """Return the TF of `term` in `doc` under the given weighting scheme.

        Supported schemes: 'binary', 'raw', 'log', 'normalized'.
        Raises KeyError for an unknown scheme.
        """
        if (weighting_scheme == 'binary'):
            return (1 if ((term, doc) in self.raw_counts) else 0)
        if (weighting_scheme == 'raw'):
            return self.raw_counts.get((term, doc), 0)
        if (weighting_scheme == 'log'):
            if ((term, doc) in self.raw_counts):
                return (1 + math.log(self.raw_counts[(term, doc)]))
            return 0
        if (weighting_scheme == 'normalized'):
            # Augmented frequency, damped by normalization_factor to reduce
            # the bias toward longer documents.
            return (normalization_factor + (((1 - normalization_factor) * self.raw_counts.get((term, doc), 0)) / (1.0 + self.max_counts_for_term.get(term, 0))))
        raise KeyError('Unknown tf-weighting-scheme {0}'.format(weighting_scheme))

    def accumulate(self, doc, text, ngram_order):
        """Count all n-grams of order 1..ngram_order in `text` for `doc`.

        NOTE(review): the inner loop runs to len(text), so n-grams that would
        overrun the end are truncated to shorter tuples and counted again at
        that shorter order — verify this matches the intended statistics.
        """
        for n in range(1, (ngram_order + 1)):
            for i in range(len(text)):
                term = tuple(text[i:(i + n)])
                self.raw_counts.setdefault((term, doc), 0)
                self.raw_counts[(term, doc)] += 1

    def compute_term_stats(self, idf_stats=None):
        """Fill max_counts_for_term; optionally feed terms into idf_stats."""
        if (len(self.raw_counts) == 0):
            raise RuntimeError('No (term, doc) found in tf-stats.')
        for (tup, counts) in self.raw_counts.items():
            term = tup[0]
            if (counts > self.max_counts_for_term.get(term, 0)):
                self.max_counts_for_term[term] = counts
            if (idf_stats is not None):
                idf_stats.accumulate(term)

    def __str__(self):
        # One line per (term, doc): '<order> <word...> <doc> <count>'.
        lines = []
        for (tup, counts) in self.raw_counts.items():
            (term, doc) = tup
            lines.append('{order} {term} {doc} {counts}'.format(order=len(term), term=' '.join(term), doc=doc, counts=counts))
        return '\n'.join(lines)

    def read(self, file_handle, ngram_order=None, idf_stats=None):
        """Parse lines of the form '<order> <word...> <doc> <count>' (the
        format emitted by __str__) and merge them into the statistics.
        """
        for line in file_handle:
            parts = line.strip().split()
            # Bug fix: the order field must be parsed as an int; the original
            # compared the raw string against ints, so the assertion below
            # could never hold and the slice arithmetic raised TypeError.
            order = int(parts[0])
            assert ((len(parts) - 3) == order)
            if ((ngram_order is not None) and (order > ngram_order)):
                continue
            term = tuple(parts[1:(order + 1)])
            doc = parts[(- 2)]
            counts = float(parts[(- 1)])
            self.raw_counts[(term, doc)] = counts
            if (counts > self.max_counts_for_term.get(term, 0)):
                self.max_counts_for_term[term] = counts
            if (idf_stats is not None):
                idf_stats.accumulate(term)
        if (len(self.raw_counts) == 0):
            raise RuntimeError('Read no TF stats.')
# NOTE(review): presumably a dataset-registration decorator for the class
# below (e.g. `@register('image-folder')`) whose `@` and function name were
# lost in extraction; as written this bare expression has no effect. TODO confirm.
('image-folder')
class ImageFolder(Dataset):
    """Dataset over the image files of a directory, with optional caching.

    cache modes: 'none' (decode on every access), 'bin' (pre-pickle decoded
    arrays next to the data directory), 'in_memory' (decode everything up
    front into tensors).
    """

    def __init__(self, root_path, split_file=None, split_key=None, first_k=None, repeat=1, cache='none'):
        self.repeat = repeat
        self.cache = cache
        if (split_file is None):
            filenames = sorted(os.listdir(root_path))
        else:
            # The split file is a JSON dict: split_key -> list of file names.
            with open(split_file, 'r') as f:
                filenames = json.load(f)[split_key]
        if (first_k is not None):
            filenames = filenames[:first_k]
        self.files = []
        for filename in filenames:
            file = os.path.join(root_path, filename)
            if (cache == 'none'):
                self.files.append(file)
            elif (cache == 'bin'):
                # Cache decoded images as pickles under '_bin_<dirname>'.
                bin_root = os.path.join(os.path.dirname(root_path), ('_bin_' + os.path.basename(root_path)))
                if (not os.path.exists(bin_root)):
                    os.mkdir(bin_root)
                    print('mkdir', bin_root)
                bin_file = os.path.join(bin_root, (filename.split('.')[0] + '.pkl'))
                if (not os.path.exists(bin_file)):
                    with open(bin_file, 'wb') as f:
                        pickle.dump(imageio.imread(file), f)
                    print('dump', bin_file)
                self.files.append(bin_file)
            elif (cache == 'in_memory'):
                # Decode now; items are stored as CHW float tensors.
                self.files.append(transforms.ToTensor()(Image.open(file).convert('RGB')))

    def __len__(self):
        # Virtual length: each file is presented `repeat` times per epoch.
        return (len(self.files) * self.repeat)

    def __getitem__(self, idx):
        # Wrap around so indices beyond len(files) replay the same files.
        x = self.files[(idx % len(self.files))]
        if (self.cache == 'none'):
            return transforms.ToTensor()(Image.open(x).convert('RGB'))
        elif (self.cache == 'bin'):
            with open(x, 'rb') as f:
                x = pickle.load(f)
            # HWC uint8 -> CHW float in [0, 1].
            x = np.ascontiguousarray(x.transpose(2, 0, 1))
            x = (torch.from_numpy(x).float() / 255)
            return x
        elif (self.cache == 'in_memory'):
            return x
def chunked(seq: Sequence[_T], n: int) -> Iterable[Sequence[_T]]:
    """Split *seq* into consecutive slices of length *n* (last may be shorter)."""
    starts = range(0, len(seq), n)
    return (seq[start:start + n] for start in starts)
class TrainManager(object):
    """Trains a student network, optionally distilling from a frozen teacher.

    train_config keys used: device, name, learning_rate, momentum,
    weight_decay, lambda_student, T_student, epochs, trial_id, is_plane.
    """

    def __init__(self, student, teacher=None, train_loader=None, test_loader=None, train_config=None):
        # BUG FIX: the default used to be a shared mutable dict ({}).
        self.config = train_config if train_config is not None else {}
        self.student = student
        self.teacher = teacher
        self.have_teacher = bool(self.teacher)
        self.device = self.config['device']
        self.name = self.config['name']
        self.optimizer = optim.SGD(self.student.parameters(), lr=self.config['learning_rate'], momentum=self.config['momentum'], weight_decay=self.config['weight_decay'])
        if self.have_teacher:
            # The teacher is inference-only.
            self.teacher.eval()
            self.teacher.train(mode=False)
        self.train_loader = train_loader
        self.test_loader = test_loader

    def train(self):
        """Run the full training loop; returns the best validation accuracy."""
        lambda_ = self.config['lambda_student']
        T = self.config['T_student']
        epochs = self.config['epochs']
        trial_id = self.config['trial_id']
        best_acc = 0
        criterion = nn.CrossEntropyLoss()
        for epoch in range(epochs):
            self.student.train()
            self.adjust_learning_rate(self.optimizer, epoch)
            for data, target in self.train_loader:
                data = data.to(self.device)
                target = target.to(self.device)
                self.optimizer.zero_grad()
                output = self.student(data)
                loss = criterion(output, target)
                if self.have_teacher:
                    # Hinton-style KD: blend hard-label CE with the
                    # temperature-scaled KL term (scaled by T^2).
                    teacher_outputs = self.teacher(data)
                    loss_KD = nn.KLDivLoss()(F.log_softmax(output / T, dim=1), F.softmax(teacher_outputs / T, dim=1))
                    loss = (1 - lambda_) * loss + (lambda_ * T * T) * loss_KD
                loss.backward()
                self.optimizer.step()
            print('epoch {}/{}'.format(epoch, epochs))
            val_acc = self.validate(step=epoch)
            if val_acc > best_acc:
                best_acc = val_acc
                self.save(epoch, name='{}_{}_best.pth.tar'.format(self.name, trial_id))
        return best_acc

    def validate(self, step=0):
        """Evaluate the student on test_loader; prints and returns accuracy (%)."""
        self.student.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for images, labels in self.test_loader:
                images = images.to(self.device)
                labels = labels.to(self.device)
                outputs = self.student(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            # Guard against an empty loader (keeps the old acc=0 behavior).
            acc = (100 * correct) / total if total else 0
            print('{{"metric": "{}_val_accuracy", "value": {}}}'.format(self.name, acc))
            return acc

    def save(self, epoch, name=None):
        """Checkpoint student + optimizer state; the default name encodes the epoch."""
        trial_id = self.config['trial_id']
        state = {'epoch': epoch, 'model_state_dict': self.student.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}
        if name is None:
            name = '{}_{}_epoch{}.pth.tar'.format(self.name, trial_id, epoch)
        torch.save(state, name)

    def adjust_learning_rate(self, optimizer, epoch):
        """Step-decay LR schedule.

        Plane-style models get a flat 0.01; otherwise 0.1 for the first half
        of training, 0.01 until 3/4 of the epochs, then 0.001.
        """
        epochs = self.config['epochs']
        models_are_plane = self.config['is_plane']
        if models_are_plane:
            lr = 0.01
        elif epoch < int(epochs / 2.0):
            # BUG FIX: was `epoch < int(epoch / 2.0)` (always False for
            # epoch >= 0), which silently skipped the first decay stage.
            lr = 0.1
        elif epoch < int((epochs * 3) / 4.0):
            lr = 0.1 * 0.1
        else:
            lr = 0.1 * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
class SigmoidFlow(Flow):
    """Elementwise sigmoid normalizing flow: y = sigmoid(x)."""

    def __init__(self, inverse=False):
        super(SigmoidFlow, self).__init__(inverse)

    def forward(self, input: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Map x -> sigmoid(x); also return the per-sample log|det J|."""
        out = input.sigmoid()
        # log sigmoid'(x) = -(softplus(x) + softplus(-x)); sum over non-batch dims.
        logdet = F.softplus(input) + F.softplus(-input)
        logdet = logdet.view(logdet.size(0), -1).sum(dim=1) * -1.0
        return out, logdet

    def backward(self, input: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Inverse map (logit) with its per-sample log|det J|; eps guards log(0)."""
        eps = 1e-12
        out = torch.log((torch.reciprocal(input + eps) - 1.0) + eps) * -1.0
        logdet = torch.log(input + eps) + torch.log((1.0 - input) + eps)
        logdet = logdet.view(logdet.size(0), -1).sum(dim=1) * -1.0
        return out, logdet

    def init(self, data, init_scale=1.0) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Data-dependent init is a no-op for this flow: just run forward."""
        with torch.no_grad():
            return self.forward(data)

    def extra_repr(self):
        return 'inverse={}'.format(self.inverse)

    # NOTE(review): takes `cls` -- this looks intended as a @classmethod and
    # the decorator may have been lost in extraction; confirm upstream.
    def from_params(cls, params: Dict) -> 'SigmoidFlow':
        return SigmoidFlow(**params)
def test_VisualizeFCAMs():
    """Smoke-test VisualizeFCAMs on one debug image.

    Builds random CAMs, masks and a partial seed map at the image's
    resolution, runs the visualizer once, and prints the elapsed time.
    Requires the data/debug fixtures on disk under root_dir.
    """
    import datetime as dt
    import torch
    import torch.nn.functional as F
    # Fixed seeds so the random CAM/mask tensors are reproducible.
    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    debug_fd = join(root_dir, 'data/debug/input')
    img_pil = Image.open(join(debug_fd, 'Black_Footed_Albatross_0002_55.jpg'), 'r').convert('RGB')
    (w, h) = img_pil.size
    # Low-resolution CAM (1/32 of the image) bilinearly upsampled to full size.
    cam_low = torch.rand(size=(int((h / 32.0)), int((w / 32.0))), dtype=torch.float)
    cam_inter = F.interpolate(input=cam_low.unsqueeze(0).unsqueeze(0), size=(h, w), mode='bilinear', align_corners=True).squeeze()
    cam = cam_inter
    mask_pred = (cam > 0.5).float()
    mask_pred_inter = torch.rand(size=(h, w), dtype=torch.float)
    mask_pred_inter = (mask_pred_inter > 0.3).float()
    # Ground-truth mask binarized at half intensity.
    true_mask = Image.open(join(debug_fd, 'Black_Footed_Albatross_0002_55.png'), 'r').convert('L')
    true_mask = (np.array(true_mask) > (255 / 2.0)).astype(np.uint8)
    debug_out = join(root_dir, 'data/debug/visualization')
    im_recon = torch.rand(size=(1, 3, h, w), dtype=torch.float)
    im_recon = ((im_recon - 0.5) / 0.5)
    im_recon = im_recon.squeeze().numpy()
    # Reconstruction display disabled; the tensor built above is discarded.
    im_recon = None
    seg_ignore_idx = (- 255)
    # Random seed map: confident fg (1.0), confident bg (0.0), rest ignored.
    seed = torch.rand(size=(h, w), dtype=torch.float).numpy()
    seed[np.where((seed > 0.9))] = 1.0
    seed[np.where((seed < 0.1))] = 0.0
    seed[np.where(np.logical_and((0.1 <= seed), (seed <= 0.9)))] = seg_ignore_idx
    visu = VisualizeFCAMs(fdout=debug_out, mask_color_map=get_bin_colormap(), task=constants.F_CL, alpha=100, height_tag=60, multi_label_flag=False, dpi=50, seg_ignore_idx=seg_ignore_idx)
    t0 = dt.datetime.now()
    visu(img_pil=img_pil, true_mask=true_mask, pred_mask=mask_pred.numpy(), mask_pred_inter=mask_pred_inter.numpy(), cam_low=torch.sigmoid(cam_low).numpy(), cam_low_raw=cam_low.numpy(), cam_inter=torch.sigmoid(cam_inter).numpy(), cam_inter_raw=cam_inter.numpy(), cam=torch.sigmoid(cam).numpy(), cam_raw=cam.numpy(), im_recon=im_recon, seed=seed, basefile=('Black_Footed_Albatross_0002_55' + visu.task))
    print('Work time: {}'.format((dt.datetime.now() - t0)))
def write_monomial_map(dim, ind, nbvar):
    """Print the monomial map for (dim, ind, nbvar): the whole list first,
    then one entry per line."""
    entries = monomial_map_strings(dim, ind, nbvar)
    print(entries)
    for entry in entries:
        print(entry)
class Lang():
    """Vocabulary with bidirectional word<->index maps and occurrence counts.

    Indices 0, 1 and 2 are reserved for PADDING, RE_DIGITS and UNKNOWN.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {'RE_DIGITS': 1, 'UNKNOWN': 2, 'PADDING': 0}
        self.word2count = {'RE_DIGITS': 1, 'UNKNOWN': 1, 'PADDING': 1}
        self.index2word = {0: 'PADDING', 1: 'RE_DIGITS', 2: 'UNKNOWN'}
        self.n_words = 3

    def addSentence(self, sentence):
        """Register every space-separated token of *sentence*."""
        tokens = sentence.strip('\n').strip('\r').split(' ')
        for token in tokens:
            self.addWord(token)

    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            index = self.n_words
            self.word2index[word] = index
            self.word2count[word] = 1
            self.index2word[index] = word
            self.n_words += 1
def construct_mask(row_exs: List, col_exs: List=None) -> torch.tensor:
    """Boolean mask over (row, col) example pairs: True where the pair is a
    usable negative.  Pairs sharing a tail entity are masked out, as are
    tails that are known (head, relation) neighbors; the diagonal stays
    True when the rows serve as their own columns."""
    diag_is_positive = col_exs is None
    if col_exs is None:
        col_exs = row_exs
    n_rows, n_cols = len(row_exs), len(col_exs)
    row_tails = torch.LongTensor([entity_dict.entity_to_idx(ex.tail_id) for ex in row_exs])
    if diag_is_positive:
        col_tails = row_tails
    else:
        col_tails = torch.LongTensor([entity_dict.entity_to_idx(ex.tail_id) for ex in col_exs])
    # Pairs with identical tail entities cannot serve as negatives.
    triplet_mask = row_tails.unsqueeze(1) != col_tails.unsqueeze(0)
    if diag_is_positive:
        triplet_mask.fill_diagonal_(True)
    # Also exclude tails that are known neighbors of each row's (head, relation).
    for i in range(n_rows):
        head_id, relation = row_exs[i].head_id, row_exs[i].relation
        neighbor_ids = train_triplet_dict.get_neighbors(head_id, relation)
        if len(neighbor_ids) <= 1:
            continue
        for j in range(n_cols):
            if i == j and diag_is_positive:
                continue
            if col_exs[j].tail_id in neighbor_ids:
                triplet_mask[i][j] = False
    return triplet_mask
class DummyDataset(data.Dataset):
    """Single-item dataset: one tokenized sentence pair with an MLM label at
    position 1 and a next-sentence label, for smoke-testing pipelines."""

    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
        self.sequence_a = 'intel-extension-for-transformers is based in SH'
        self.sequence_b = 'Where is intel-extension-for-transformers based? NYC or SH'
        self.encoded_dict = self.tokenizer(self.sequence_a, self.sequence_b)
        # Ignore-index (-100) everywhere except position 1, the MLM target.
        labels = [-100] * len(self.encoded_dict['input_ids'])
        labels[1] = 17953
        self.encoded_dict['labels'] = labels
        self.encoded_dict['next_sentence_label'] = 0

    def __len__(self):
        return 1

    def __getitem__(self, index):
        return self.encoded_dict
_module()
class TridentFasterRCNN(FasterRCNN):
    """TridentNet detector: a FasterRCNN whose data samples are replicated
    once per trident branch before running the shared pipeline."""

    def __init__(self, backbone: ConfigType, rpn_head: ConfigType, roi_head: ConfigType, train_cfg: ConfigType, test_cfg: ConfigType, neck: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg)
        # Backbone and RoI head must agree on the branching configuration.
        assert self.backbone.num_branch == self.roi_head.num_branch
        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def _replication_factor(self) -> int:
        """All branches during training (or when test_branch_idx is -1),
        otherwise just the single selected branch."""
        return self.num_branch if (self.training or self.test_branch_idx == -1) else 1

    def _forward(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> tuple:
        replicated = batch_data_samples * self._replication_factor()
        return super()._forward(batch_inputs=batch_inputs, batch_data_samples=replicated)

    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> dict:
        replicated = batch_data_samples * self._replication_factor()
        return super().loss(batch_inputs=batch_inputs, batch_data_samples=replicated)

    def predict(self, batch_inputs: Tensor, batch_data_samples: SampleList, rescale: bool=True) -> SampleList:
        replicated = batch_data_samples * self._replication_factor()
        return super().predict(batch_inputs=batch_inputs, batch_data_samples=replicated, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test-time-augmentation path (old-style list-of-lists metas)."""
        x = self.extract_feats(imgs)
        num_branch = self.num_branch if self.test_branch_idx == -1 else 1
        trident_img_metas = [metas * num_branch for metas in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        # NOTE(review): the RoI head receives the original img_metas (not the
        # replicated ones), matching the original code -- confirm intended.
        return self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)
def conv_1x1_bn(inp, oup):
    """1x1 convolution (no bias) -> BatchNorm -> ReLU6, mapping inp -> oup channels."""
    layers = [
        Conv2d(inp, oup, 1, 1, 0, bias=False),
        BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
class RLAv1p_ResNet(nn.Module):
    """ResNet backbone with a recurrent layer-aggregation (RLA) side channel.

    A small hidden state h (rla_channel feature maps) is carried alongside x
    through every residual block, updated from each block's output, and
    concatenated with the final features before the classifier.
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(RLAv1p_ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        self.flops = False  # set True while counting FLOPs (keeps h on CPU)
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The classifier sees backbone features plus the RLA state.
        self.fc = nn.Linear((512 * block.expansion) + rla_channel, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_last_bn:
            # Zero the last BN of each bottleneck so blocks start as identity.
            for m in self.modules():
                if isinstance(m, RLAv1p_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: its blocks, per-block BNs for h, and the two
        convs that fold block outputs into / evolve the RLA state."""
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if (stride != 1) or (self.inplanes != planes * block.expansion):
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        if self.flops:
            # FLOPs counting runs on CPU.
            h = torch.zeros(batch, self.rla_channel, height, width)
        else:
            # BUG FIX: allocate the RLA state on the input's device instead of
            # the hard-coded 'cuda', so CPU inference works as well.
            h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                (x, y, h, identity) = layer(x, h)
                # Fold this block's output into the recurrent state.
                y_out = conv_out(y)
                h = h + y_out
                h = recurrent_conv(h)
                h = bn(h)
                h = self.tanh(h)
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter='_', block_slice_pos=5):
    """Rewrite SGM-style UNet keys (input_blocks/middle_block/output_blocks)
    into diffusers layout (numbered blocks with resnets/attentions/samplers).

    Returns *state_dict* unchanged when no SGM-style keys are present;
    otherwise returns a new dict and raises ValueError if any entry could
    not be converted.  `block_slice_pos` is the delimiter-split position of
    the inner-block index within each key.
    """
    all_keys = list(state_dict.keys())
    sgm_patterns = ['input_blocks', 'middle_block', 'output_blocks']
    is_in_sgm_format = False
    for key in all_keys:
        if any(((p in key) for p in sgm_patterns)):
            is_in_sgm_format = True
            break
    if (not is_in_sgm_format):
        return state_dict
    new_state_dict = {}
    inner_block_map = ['resnets', 'attentions', 'upsamplers']
    # Collect the numeric block ids present for each of the three sections.
    (input_block_ids, middle_block_ids, output_block_ids) = (set(), set(), set())
    for layer in all_keys:
        if ('text' in layer):
            # Text-encoder keys pass through untouched.
            new_state_dict[layer] = state_dict.pop(layer)
        else:
            layer_id = int(layer.split(delimiter)[:block_slice_pos][(- 1)])
            if (sgm_patterns[0] in layer):
                input_block_ids.add(layer_id)
            elif (sgm_patterns[1] in layer):
                middle_block_ids.add(layer_id)
            elif (sgm_patterns[2] in layer):
                output_block_ids.add(layer_id)
            else:
                raise ValueError(f'Checkpoint not supported because layer {layer} not supported.')
    # Group the remaining keys by section and block id.
    input_blocks = {layer_id: [key for key in state_dict if (f'input_blocks{delimiter}{layer_id}' in key)] for layer_id in input_block_ids}
    middle_blocks = {layer_id: [key for key in state_dict if (f'middle_block{delimiter}{layer_id}' in key)] for layer_id in middle_block_ids}
    output_blocks = {layer_id: [key for key in state_dict if (f'output_blocks{delimiter}{layer_id}' in key)] for layer_id in output_block_ids}
    # Down path: block 0 is the stem, so ids are shifted by one; 'op' keys
    # are downsamplers.
    for i in input_block_ids:
        block_id = ((i - 1) // (unet_config.layers_per_block + 1))
        layer_in_block_id = ((i - 1) % (unet_config.layers_per_block + 1))
        for key in input_blocks[i]:
            inner_block_id = int(key.split(delimiter)[block_slice_pos])
            inner_block_key = (inner_block_map[inner_block_id] if ('op' not in key) else 'downsamplers')
            inner_layers_in_block = (str(layer_in_block_id) if ('op' not in key) else '0')
            new_key = delimiter.join(((key.split(delimiter)[:(block_slice_pos - 1)] + [str(block_id), inner_block_key, inner_layers_in_block]) + key.split(delimiter)[(block_slice_pos + 1):]))
            new_state_dict[new_key] = state_dict.pop(key)
    # Middle block: fixed layout resnet-0 / attention-0 / resnet-1.
    for i in middle_block_ids:
        key_part = None
        if (i == 0):
            key_part = [inner_block_map[0], '0']
        elif (i == 1):
            key_part = [inner_block_map[1], '0']
        elif (i == 2):
            key_part = [inner_block_map[0], '1']
        else:
            raise ValueError(f'Invalid middle block id {i}.')
        for key in middle_blocks[i]:
            new_key = delimiter.join(((key.split(delimiter)[:(block_slice_pos - 1)] + key_part) + key.split(delimiter)[block_slice_pos:]))
            new_state_dict[new_key] = state_dict.pop(key)
    # Up path: no stem offset; inner id >= 2 means an upsampler ('0').
    for i in output_block_ids:
        block_id = (i // (unet_config.layers_per_block + 1))
        layer_in_block_id = (i % (unet_config.layers_per_block + 1))
        for key in output_blocks[i]:
            inner_block_id = int(key.split(delimiter)[block_slice_pos])
            inner_block_key = inner_block_map[inner_block_id]
            inner_layers_in_block = (str(layer_in_block_id) if (inner_block_id < 2) else '0')
            new_key = delimiter.join(((key.split(delimiter)[:(block_slice_pos - 1)] + [str(block_id), inner_block_key, inner_layers_in_block]) + key.split(delimiter)[(block_slice_pos + 1):]))
            new_state_dict[new_key] = state_dict.pop(key)
    if (len(state_dict) > 0):
        raise ValueError('At this point all state dict entries have to be converted.')
    return new_state_dict
def write_tt(sentences, stream=sys.stdout):
    """Emit one '<form>\t<label>' line per node and a blank line after each
    sentence.  Labels, per the first negation record of a node: 'C' when the
    record's cue equals the form, 'A' when a cue is present but differs,
    'F' when only a scope is present, 'T' otherwise (incl. no negation)."""
    def label_for(node):
        neg = node.get('negation')
        if neg and len(neg):
            neg = neg[0]
        if not neg:
            return 'T'
        if neg.get('cue') == node['form']:
            return 'C'
        if 'cue' in neg:
            return 'A'
        if 'scope' in neg:
            return 'F'
        return 'T'

    for sentence in sentences:
        for node in sentence['nodes']:
            print('{}\t{}'.format(node['form'], label_for(node)), file=stream)
        print(file=stream)
def download_mnist(dirpath):
    """Download and gunzip the four MNIST archives into dirpath/mnist.

    Skips everything if the target directory already exists.  Relies on
    the external `curl` and `gzip` binaries being available on PATH.
    """
    data_dir = os.path.join(dirpath, 'mnist')
    if os.path.exists(data_dir):
        print('Found MNIST - skip')
        return
    else:
        os.mkdir(data_dir)
    # NOTE(review): the base-URL string literal is truncated in this copy
    # (unterminated quote) -- restore the original MNIST mirror URL here.
    url_base = '
    file_names = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
    for file_name in file_names:
        url = (url_base + file_name).format(**locals())
        print(url)
        out_path = os.path.join(data_dir, file_name)
        # Fetch then decompress each archive via subprocess.
        cmd = ['curl', url, '-o', out_path]
        print('Downloading ', file_name)
        subprocess.call(cmd)
        cmd = ['gzip', '-d', out_path]
        print('Decompressing ', file_name)
        subprocess.call(cmd)
def calc_distance_heuristic(gx, gy, ox, oy, resolution, rr):
    """Dijkstra flood-fill from the goal cell over the obstacle grid.

    Returns the closed set: grid index -> Node whose .cost is the shortest
    grid-path cost from (gx, gy); usable as an exact distance heuristic.
    """
    goal_node = Node(round(gx / resolution), round(gy / resolution), 0.0, -1)
    ox = [iox / resolution for iox in ox]
    oy = [ioy / resolution for ioy in oy]
    obstacle_map, min_x, min_y, max_x, max_y, x_w, y_w = calc_obstacle_map(ox, oy, resolution, rr)
    motion = get_motion_model()
    open_set, closed_set = dict(), dict()
    open_set[calc_index(goal_node, x_w, min_x, min_y)] = goal_node
    priority_queue = [(0, calc_index(goal_node, x_w, min_x, min_y))]
    while priority_queue:
        _, c_id = heapq.heappop(priority_queue)
        if c_id not in open_set:
            continue  # stale queue entry; node already finalized
        current = open_set.pop(c_id)
        closed_set[c_id] = current
        if show_animation:
            plt.plot(current.x * resolution, current.y * resolution, 'xc')
            plt.gcf().canvas.mpl_connect('key_release_event', (lambda event: [(exit(0) if (event.key == 'escape') else None)]))
            if (len(closed_set.keys()) % 10) == 0:
                plt.pause(0.001)
        for move in motion:
            node = Node(current.x + move[0], current.y + move[1], current.cost + move[2], c_id)
            n_id = calc_index(node, x_w, min_x, min_y)
            if n_id in closed_set:
                continue
            if not verify_node(node, obstacle_map, min_x, min_y, max_x, max_y):
                continue
            # Insert the neighbor, or relax it if we found a cheaper path.
            if (n_id not in open_set) or (open_set[n_id].cost >= node.cost):
                open_set[n_id] = node
                heapq.heappush(priority_queue, (node.cost, calc_index(node, x_w, min_x, min_y)))
    return closed_set
def process(args):
    """Train a SentencePiece vocab and emit a raw config YAML for en-<tgt_lang>.

    Reads <data_root>/en-<lang>/<split>_raw_seg.tsv, collects source and
    target text, trains the vocab into the pair directory, then writes
    config_raw.yaml.
    """
    root = Path(args.data_root).absolute()
    lang = args.tgt_lang
    cur_root = root / f'en-{lang}'
    if not cur_root.is_dir():
        print(f'{cur_root.as_posix()} does not exist. Skipped.')
        # BUG FIX: previously fell through after printing "Skipped." and
        # crashed on the missing directory.
        return
    # NOTE(review): `split` is not defined in this function; this body
    # presumably belongs inside a `for split in (...)` loop -- confirm
    # against the caller before relying on it.
    df = load_df_from_tsv(cur_root / f'{split}_raw_seg.tsv')
    train_text = []
    for _, row in df.iterrows():
        train_text.append(row['src_text'])
        train_text.append(row['tgt_text'])
    v_size_str = '' if args.vocab_type == 'char' else str(args.vocab_size)
    spm_filename_prefix = f'spm_{args.vocab_type}{v_size_str}_raw'
    with NamedTemporaryFile(mode='w') as f:
        for t in train_text:
            f.write(t + '\n')
        # BUG FIX: flush buffered text so gen_vocab (which re-opens f.name)
        # sees the full training corpus.
        f.flush()
        gen_vocab(Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size)
    gen_config_yaml_raw(cur_root, spm_filename_prefix + '.model', yaml_filename=f'config_raw.yaml')
def GetResnetTransform():
    """Apply one pre-activation residual transform (BN-ReLU-Conv5x5, twice)
    and add the shortcut.

    NOTE(review): this looks like an extracted fragment -- `x`, `idx`, `j`
    and `filters` are not defined here and nothing is returned; confirm the
    intended enclosing loop/closure before using this function.
    """
    x0 = x
    x = BatchNormalization(momentum=0.9, name=('normalize_%d_%d' % (idx, j)))(x)
    x = Activation('relu', name=('reluA_%d_%d' % (idx, j)))(x)
    x = Conv2D(filters, kernel_size=[5, 5], strides=(1, 1), padding='same', name=('transformA_%d_%d' % (idx, j)))(x)
    x = BatchNormalization(momentum=0.9, name=('normalizeB_%d_%d' % (idx, j)))(x)
    x = Activation('relu', name=('reluB_%d_%d' % (idx, j)))(x)
    x = Conv2D(filters, kernel_size=[5, 5], strides=(1, 1), padding='same', name=('transformB_%d_%d' % (idx, j)))(x)
    # Residual shortcut around the two-conv transform.
    x = Add()([x, x0])
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Port a fairseq X-MOD checkpoint to a HuggingFace Xmod model.

    Loads the fairseq model, derives a matching XmodConfig, copies every
    weight (embeddings, each encoder layer including per-language adapters,
    then either the 'mnli' classification head or the LM head), verifies
    both models produce near-identical outputs on a sample sentence, and
    saves the converted model to *pytorch_dump_folder_path*.

    Raises AssertionError when tensor shapes disagree and Exception when
    the output check fails.
    """
    data_dir = Path('data_bin')
    xmod = FairseqXmodModel.from_pretrained(model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name='xmod_base', arch='xmod_base', task='multilingual_masked_lm', data_name_or_path=str(data_dir), bpe='sentencepiece', sentencepiece_model=str((Path(xmod_checkpoint_path).parent / 'sentencepiece.bpe.model')), src_dict=str((data_dir / 'dict.txt')))
    xmod.eval()
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    # Build the HF config from the fairseq model's hyperparameters.
    config = XmodConfig(vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, 'bottleneck', 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages)
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:', config)
    model = (XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config))
    model.eval()
    # ---- embeddings ----
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    # Token-type embeddings are unused in fairseq; zero them out.
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    # ---- encoder layers ----
    for i in range(config.num_hidden_layers):
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self-attention projections
        self_attn = layer.attention.self
        if (not (xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)))):
            raise AssertionError('Dimensions of self-attention weights do not match.')
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output projection + layer norm
        self_output = layer.attention.output
        if (self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape):
            raise AssertionError('Dimensions of self-attention output weights do not match.')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # feed-forward (fc1 -> intermediate, fc2 -> output)
        intermediate = layer.intermediate
        if (intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape):
            raise AssertionError('Dimensions of intermediate weights do not match.')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        bert_output = layer.output
        if (bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape):
            raise AssertionError('Dimensions of feed-forward weights do not match.')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        # per-language adapters
        if (bert_output.adapter_layer_norm is not None):
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if (sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys())):
            raise AssertionError('Lists of language adapters do not match.')
        # NOTE(review): `adapter` equals `from_adapter` below; the re-lookup
        # is redundant but harmless.
        for (lang_code, adapter) in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
    if (xmod_sent_encoder.layer_norm is not None):
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    # ---- task head ----
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # ---- output check: both models must agree on a sample sentence ----
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    # NOTE(review): both branches print an empty marker -- likely stripped
    # emoji/symbols in this copy of the file.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
def add_tagged_journal_in_place_of_IBID(previous_match):
    """Return the IBID-matched title wrapped in the refextract IBID title
    markers, preceded by the single space the caller expects."""
    return ' {0}{1}{2}'.format(CFG_REFEXTRACT_MARKER_OPENING_TITLE_IBID, previous_match['title'], CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID)
class ProcessMonitor():
    """Collects ray process launch results, splits them into one master and
    its slaves, and surfaces any non-zero remote exit codes."""

    def __init__(self, process_infos, sc, ray_rdd, raycontext, verbose=False):
        self.sc = sc
        self.raycontext = raycontext
        self.verbose = verbose
        self.ray_rdd = ray_rdd
        self.master = []
        self.slaves = []
        self.pgids = []
        self.node_ips = []
        self.process_infos = process_infos
        for info in process_infos:
            self.pgids.append(info.pgid)
            self.node_ips.append(info.node_ip)
            # Exactly one process should carry the master address.
            (self.master if info.master_addr else self.slaves).append(info)
        invalidInputError(len(self.master) == 1, 'We should got 1 master only, but we got {}'.format(len(self.master)))
        self.master = self.master[0]
        if not is_local(self.sc):
            self.print_ray_remote_err_out()

    def print_ray_remote_err_out(self):
        """Fail fast on any process with a non-zero errorcode; when verbose,
        dump every process's captured output."""
        if self.master.errorcode != 0:
            invalidInputError(False, str(self.master))
        for slave in self.slaves:
            if slave.errorcode != 0:
                invalidInputError(False, str(slave))
        if self.verbose:
            print(self.master)
            for slave in self.slaves:
                print(slave)
def generate_data_list():
    """Walk the Caltech test annotations, keep sufficiently-visible 'person'
    boxes, and write one 'img_path,flag,count[,x,y,w,h...]' line per image
    to the data-list file.

    Paths are hard-coded for the original author's machine.
    """
    annotation_root = '/media/heyonghao/HYH-4T-WD/public_dataset/Caltech/Caltech_new_annotations/anno_test_1xnew'
    image_root = '/media/heyonghao/HYH-4T-WD/public_dataset/Caltech/Caltech_data/extracted_data'
    list_file_path = './data_folder/data_list_caltech_test.txt'
    if not os.path.exists(os.path.dirname(list_file_path)):
        os.makedirs(os.path.dirname(list_file_path))
    counter = 0
    # BUG FIX: the output file and every annotation file are now opened via
    # context managers; the old code leaked one handle per annotation file
    # and never closed fout on an exception.
    with open(list_file_path, 'w') as fout:
        for (parent, dirnames, filenames) in os.walk(annotation_root):
            for filename in filenames:
                if not filename.endswith('.txt'):
                    continue
                # Annotation files are named '<set>_<seq>_<img>.txt'.
                filename_splits = filename[:-4].split('_')
                set_name = filename_splits[0]
                seq_name = filename_splits[1]
                img_name = filename_splits[2]
                img_path = os.path.join(image_root, set_name, seq_name, 'images', img_name)
                if not os.path.exists(img_path):
                    print('The corresponding image does not exist! [%s]' % img_path)
                    continue
                line = img_path
                bbox_list = []
                with open(os.path.join(parent, filename), 'r') as fin_anno:
                    for (i, anno) in enumerate(fin_anno):
                        if i == 0:
                            continue  # header line
                        anno = anno.strip('\n').split(' ')
                        if anno[0] != 'person':
                            continue
                        x = math.floor(float(anno[1]))
                        y = math.floor(float(anno[2]))
                        width = math.ceil(float(anno[3]))
                        height = math.ceil(float(anno[4]))
                        width_vis = math.ceil(float(anno[8]))
                        height_vis = math.ceil(float(anno[9]))
                        # Drop boxes that are less than 20% visible.
                        # NOTE(review): assumes width*height > 0 for person rows.
                        if ((width_vis * height_vis) / (width * height)) < 0.2:
                            continue
                        bbox_list.append((x, y, width, height))
                if len(bbox_list) == 0:
                    line += ',0,0'
                else:
                    bbox_line = ''
                    for bbox in bbox_list:
                        bbox_line += ',{},{},{},{}'.format(bbox[0], bbox[1], bbox[2], bbox[3])
                    line += ',1,' + str(len(bbox_list)) + bbox_line
                fout.write(line + '\n')
                counter += 1
    print(counter)
class SpatialGate(nn.Module):
    """Spatial attention gate: reweights feature maps with a per-pixel mask.

    The mask is derived from the channel-wise max and mean of the input,
    concatenated, passed through a 7x7 conv block and squashed by a sigmoid.
    """

    def __init__(self):
        super(SpatialGate, self).__init__()
        # 2 input channels: [channel-max, channel-mean]; 1 output attention map.
        self.conv = conv7x7_block(in_channels=2, out_channels=1, activation=None)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled_max = x.max(dim=1)[0].unsqueeze(1)
        pooled_mean = x.mean(dim=1).unsqueeze(1)
        gate = self.sigmoid(self.conv(torch.cat((pooled_max, pooled_mean), dim=1)))
        return x * gate
class KeypointTarget():
    """Builds CenterNet-style training targets for one tracking target:
    per-cell Gaussian heatmap(s), optional sub-pixel offset maps and an
    object-size map, on an OUTPUT_SIZE x OUTPUT_SIZE grid.
    """

    def __init__(self):
        self.stride = cfg.TRAIN.STRIDE
        # Positive-region radius: 1/8 of the output grid; Gaussian std is half of it.
        self.radius = (cfg.TRAIN.OUTPUT_SIZE / 8)
        self.std = (self.radius / 2)
        if cfg.TRAIN.OFFSETS:
            # keypoints[0/1][i][j] = search-image x/y coordinate represented
            # by grid cell (i, j).
            self.keypoints = np.zeros((2, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.OUTPUT_SIZE), dtype=np.float32)
            for i in range(cfg.TRAIN.OUTPUT_SIZE):
                for j in range(cfg.TRAIN.OUTPUT_SIZE):
                    self.keypoints[0][i][j] = (((cfg.TRAIN.SEARCH_SIZE - 1) / 2) + (self.stride * (j - (cfg.TRAIN.OUTPUT_SIZE // 2))))
                    self.keypoints[1][i][j] = (((cfg.TRAIN.SEARCH_SIZE - 1) / 2) + (self.stride * (i - (cfg.TRAIN.OUTPUT_SIZE // 2))))

    def __call__(self, target, size, neg=False):
        """Return (heatmaps[, offsets], sizes) for `target` (corner box) on a
        `size` x `size` grid; all maps stay zero when `neg` (negative sample).
        """
        # BUG FIX: the original built the stacked list as
        #   [heatmap_label0 for i in range(cfg.TRAIN.STACK)]
        # so every stack level aliased the SAME array and per-level std/radius
        # values (cfg.TRAIN.DIF_STD) overwrote each other. Allocate an
        # independent array per stack level.
        if (cfg.TRAIN.STACK == 0):
            heatmap_label = [np.zeros((1, size, size), dtype=np.float32)]
        else:
            heatmap_label = [np.zeros((1, size, size), dtype=np.float32) for _ in range(cfg.TRAIN.STACK)]
        objsize_label = np.zeros((2, size, size), dtype=np.float32)
        if cfg.TRAIN.OFFSETS:
            offsets_label = np.zeros((2, size, size), dtype=np.float32)
        if neg:
            if cfg.TRAIN.OFFSETS:
                return (heatmap_label, offsets_label, objsize_label)
            else:
                return (heatmap_label, objsize_label)
        (tcx, tcy, tw, th) = corner2center(target)
        # Project the target center from search-image coords onto the grid.
        heat_cx = ((cfg.TRAIN.OUTPUT_SIZE // 2) + ((tcx - ((cfg.TRAIN.SEARCH_SIZE - 1) / 2)) / self.stride))
        heat_cy = ((cfg.TRAIN.OUTPUT_SIZE // 2) + ((tcy - ((cfg.TRAIN.SEARCH_SIZE - 1) / 2)) / self.stride))
        pos_x = round(heat_cx)
        pos_y = round(heat_cy)
        if cfg.TRAIN.DIF_STD:
            # Progressively sharper Gaussians for deeper stack levels.
            std = [self.std, (self.std * 0.9), (self.std * 0.81)]
            radius = [self.radius, self.radius, self.radius]
        else:
            std = [self.std, self.std, self.std]
            radius = [self.radius, self.radius, self.radius]
        for i in range(cfg.TRAIN.OUTPUT_SIZE):
            for j in range(cfg.TRAIN.OUTPUT_SIZE):
                distance = (((i - heat_cy) ** 2) + ((j - heat_cx) ** 2))
                if (math.sqrt(distance) < self.radius):
                    for (idx, hm) in enumerate(heatmap_label):
                        if (math.sqrt(distance) < radius[idx]):
                            hm[(0, i, j)] = np.exp(((- distance) / (2 * (std[idx] ** 2))))
                    if cfg.TRAIN.OFFSETS:
                        if cfg.TRAIN.SAMEOFF:
                            # Same sub-pixel offset for every cell in the region.
                            offsets_label[(0, i, j)] = (((heat_cx - pos_x) * self.stride) / 64)
                            offsets_label[(1, i, j)] = (((heat_cy - pos_y) * self.stride) / 64)
                        else:
                            # Per-cell offset toward the true center, normalized by 64.
                            offsets_label[(0, i, j)] = ((tcx - self.keypoints[(0, i, j)]) / 64)
                            offsets_label[(1, i, j)] = ((tcy - self.keypoints[(1, i, j)]) / 64)
                    if cfg.TRAIN.NORMWH:
                        objsize_label[(0, i, j)] = np.log((tw / 64))
                        objsize_label[(1, i, j)] = np.log((th / 64))
                    else:
                        objsize_label[(0, i, j)] = tw
                        objsize_label[(1, i, j)] = th
                if ((i == pos_y) and (j == pos_x)):
                    # The exact center cell is always a hard positive.
                    for (idx, hm) in enumerate(heatmap_label):
                        hm[(0, i, j)] = 1
        if cfg.TRAIN.OFFSETS:
            return (heatmap_label, offsets_label, objsize_label)
        else:
            return (heatmap_label, objsize_label)
class StorageDevice(Device):
    """Energy storage device with capacity, round-trip efficiency, standby
    loss and per-time-step state-of-charge (SoC) tracking.

    NOTE(review): the extracted source had its decorator lines stripped —
    bare `_coefficient.setter` / `_soc.setter` expression statements and
    duplicate un-decorated getter/setter defs made the class a NameError at
    definition time. The @property/@setter pairs are restored here; the
    un-parenthesized uses of `energy_init` / `round_trip_efficiency` in
    `charge`/`set_energy_balance` confirm they must be properties.
    """

    def __init__(self, capacity: float = None, efficiency: float = None, loss_coefficient: float = None, initial_soc: float = None, **kwargs: Any):
        self.capacity = capacity
        self.loss_coefficient = loss_coefficient
        self.initial_soc = initial_soc
        super().__init__(efficiency=efficiency, **kwargs)

    @property
    def capacity(self) -> float:
        """Maximum amount of energy the device can hold."""
        return self.__capacity

    @capacity.setter
    def capacity(self, capacity: float):
        capacity = 0.0 if capacity is None else capacity
        assert capacity >= 0, 'capacity must be >= 0.'
        self.__capacity = capacity

    @property
    def loss_coefficient(self) -> float:
        """Fraction of stored energy lost per time step while idle."""
        return self.__loss_coefficient

    @loss_coefficient.setter
    def loss_coefficient(self, loss_coefficient: float):
        if loss_coefficient is None:
            self.__loss_coefficient = 0.006  # default standby loss
        else:
            # BUG FIX: the original assertion message wrongly said 'initial_soc'.
            assert 0 <= loss_coefficient <= 1, 'loss_coefficient must be >= 0 and <= 1.'
            self.__loss_coefficient = loss_coefficient

    @property
    def initial_soc(self) -> float:
        """State of charge at episode start, in [0, 1]."""
        return self.__initial_soc

    @initial_soc.setter
    def initial_soc(self, initial_soc: float):
        if initial_soc is None:
            self.__initial_soc = 0.0
        else:
            assert 0.0 <= initial_soc <= 1.0, 'initial_soc must be >= 0.0 and <= 1.0.'
            self.__initial_soc = initial_soc

    @property
    def soc(self) -> np.ndarray:
        """Per-time-step state of charge, in [0, 1]."""
        return self.__soc

    @property
    def energy_init(self) -> float:
        """Usable energy carried over from the previous step, after standby loss."""
        return max(0.0, (self.__soc[(self.time_step - 1)] * self.capacity) * (1 - self.loss_coefficient))

    @property
    def energy_balance(self) -> np.ndarray:
        """Grid-side energy charged (+) / discharged (-) per time step."""
        return self.__energy_balance

    @property
    def round_trip_efficiency(self) -> float:
        """One-way efficiency: square root of the device efficiency."""
        return self.efficiency ** 0.5

    def get_metadata(self) -> Mapping[str, Any]:
        return {**super().get_metadata(), 'capacity': self.capacity, 'loss_coefficient': self.loss_coefficient, 'initial_soc': self.initial_soc, 'round_trip_efficiency': self.round_trip_efficiency}

    def charge(self, energy: float):
        """Charge (energy >= 0) or discharge (energy < 0), clipping the stored
        level to [0, capacity], then update SoC and the energy balance."""
        energy_final = (min((self.energy_init + (energy * self.round_trip_efficiency)), self.capacity) if (energy >= 0) else max(0.0, (self.energy_init + (energy / self.round_trip_efficiency))))
        self.__soc[self.time_step] = (energy_final / max(self.capacity, ZERO_DIVISION_PLACEHOLDER))
        self.__energy_balance[self.time_step] = self.set_energy_balance(energy_final)

    def set_energy_balance(self, energy: float) -> float:
        """Grid-side energy delta implied by reaching stored level *energy*."""
        energy -= self.energy_init
        return (energy / self.round_trip_efficiency) if (energy >= 0) else (energy * self.round_trip_efficiency)

    def autosize(self, demand: Iterable[float], safety_factor: float = None):
        """Set capacity to peak demand scaled by *safety_factor* (default 1.0)."""
        safety_factor = 1.0 if safety_factor is None else safety_factor
        self.capacity = np.nanmax(demand) * safety_factor

    def reset(self):
        super().reset()
        self.__soc = np.zeros(self.episode_tracker.episode_time_steps, dtype='float32')
        self.__soc[0] = self.initial_soc
        self.__energy_balance = np.zeros(self.episode_tracker.episode_time_steps, dtype='float32')
def vocab_parallel_logit_helper(embed, lm_output):
    """Project hidden states onto the (tensor-parallel) vocabulary embedding.

    Accepts either a raw hidden-state tensor or a tuple whose first element
    is that tensor; broadcasts it to the tensor-parallel group before the
    linear projection with the embedding weights.
    """
    if isinstance(lm_output, tuple):
        hidden = lm_output[0]
    elif isinstance(lm_output, torch.Tensor):
        hidden = lm_output
    else:
        raise ValueError(f'Expect lm_output as tensor or tuple but get {type(lm_output)}')
    hidden = copy_to_group(hidden, group=parallel_group('tensor'))
    return F.linear(hidden, embed.weight)
def makeCluster(cluster):
    """Emit a Graphviz `subgraph cluster_<name>` block for one module
    cluster: its member nodes plus all intra-cluster dependency edges.
    """
    print('subgraph cluster_{} {{\n\tcolor={};'.format(cluster, clusterColour[cluster]))
    print('\tlabel = "{}";'.format(cluster))
    # Nodes belonging to this cluster.
    for mod in (m for m in mods if m[0] == cluster):
        printNode(mod)
    # Edges with both endpoints inside the cluster.
    for mod, data in deps.items():
        if mod[0] == cluster:
            for depMod, count in data.items():
                if depMod[0] == cluster:
                    printEdge(mod, depMod, count, True)
    print('}')
def load_train_history(jobs_dir, limit=None):
    """Concatenate per-job model-info frames found under *jobs_dir*.

    If *limit* is given, only job directories whose name contains it are
    considered; jobs that fail to load are reported and skipped.
    """
    job_names = os.listdir(jobs_dir)
    if limit:
        job_names = [name for name in job_names if limit in name]
    frames = []
    for name in job_names:
        try:
            frames.append(load_model_info(jobs_dir, name))
        except (FileNotFoundError, ValueError) as e:
            print('Failed to load job {}: {}'.format(name, str(e)))
    return pandas.concat(frames)
def draw_fig_2(cnndm_spec_name, xsum_spec_name):
    """Render and save Figure 2 (entropy vs. relative position) for the
    given CNN/DM and XSum spec names; writes a PDF named after both specs.
    """
    figure = plt.figure(figsize=(FIG_SIZE_x, ysize_figure2))
    draw_x_rel_postion_y_entropy(dir_datadrive, cnndm_spec_name, xsum_spec_name, SEPS=10, FIG_SIZE_x=GLOBAL_FIGURE_WIDTH)
    figure.tight_layout()
    out_name = f'x_rel_postion_y_entropy{cnndm_spec_name}{xsum_spec_name}.pdf'
    plt.savefig(out_name, dpi=dpi)
    plt.show()
    plt.close()
class Config(dict):
    """Application configuration loaded from a YAML file, with
    attribute-style access (``cfg.section.key``) via ``__getattr__``.
    Nested dicts are wrapped in DictAsMember so chained access works.
    """

    def __init__(self, filename=None):
        assert os.path.exists(filename), "ERROR: Config File doesn't exist."
        try:
            with open(filename, 'r') as f:
                self._cfg_dict = yaml.load(f, Loader)
        except EnvironmentError:
            logger.error('Please check the file with name of "%s"', filename)
        logger.info(' APP CONFIG '.center(80, '-'))
        self.show()
        logger.info(''.center(80, '-'))

    def __getattr__(self, name):
        # BUG FIX: __getattr__ must raise AttributeError for unknown names.
        # The original let KeyError escape, which breaks hasattr(), getattr()
        # with a default, and copy/pickle protocols; it also recursed forever
        # when _cfg_dict itself was never set (failed __init__).
        try:
            cfg_dict = object.__getattribute__(self, '_cfg_dict')
        except AttributeError:
            raise AttributeError(name)
        try:
            value = cfg_dict[name]
        except KeyError:
            raise AttributeError(name) from None
        if isinstance(value, dict):
            value = DictAsMember(value)
        return value

    def show(self, cfg_dict=None, indent=0):
        """Recursively log the config tree; returns the indent level the
        caller should continue with (consumed by the recursive calls)."""
        if cfg_dict is None:
            cfg_dict = self._cfg_dict
        for key in cfg_dict:
            value = cfg_dict[key]
            if isinstance(value, dict):
                str_list = [' '] * indent
                str_list.append(str(key))
                str_list.append(': ')
                logger.info(''.join(str_list))
                indent = indent + 1
                indent = self.show(value, indent)
            else:
                str_list = [' '] * indent
                str_list.append(str(key))
                str_list.append(': ')
                str_list.append(str(value))
                logger.info(''.join(str_list))
        return indent - 1
def normalize_english(text):
    """Replace English words found in `english_dictionary` with their mapped
    values; words not in the dictionary are left untouched."""
    def substitute(match):
        word = match.group()
        # dict.get with the word itself as fallback == membership check + lookup.
        return english_dictionary.get(word, word)
    return re.sub('([A-Za-z]+)', substitute, text)
def require_pytesseract(test_case):
    """Test decorator: skip *test_case* unless PyTesseract is installed."""
    if is_pytesseract_available():
        return test_case
    return unittest.skip('test requires PyTesseract')(test_case)
def TestExitCodeAndOutput(command):
    """Run *command* and assert it exits with code 1 after printing the
    InitGoogleTest usage text."""
    child = gtest_test_utils.Subprocess(command)
    Assert(child.exited)
    AssertEq(1, child.exit_code)
    Assert('InitGoogleTest' in child.output)
class CommonMetricPrinter(EventWriter):
    """Logs common training metrics (ETA, losses, iteration time, lr, GPU
    memory) from the current event storage in one line per call."""

    def __init__(self, max_iter):
        self.logger = logging.getLogger(__name__)
        self._max_iter = max_iter

    def write(self):
        storage = get_event_storage()
        iteration = storage.iter

        data_time, time = None, None
        eta_string = 'N/A'
        try:
            data_time = storage.history('data_time').avg(20)
            time = storage.history('time').global_avg()
            eta_seconds = storage.history('time').median(1000) * (self._max_iter - iteration)
            storage.put_scalar('eta_seconds', eta_seconds, smoothing_hint=False)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        except KeyError:
            # Timing histories are absent during the first iterations.
            pass

        try:
            lr = '{:.6f}'.format(storage.history('lr').latest())
        except KeyError:
            lr = 'N/A'

        max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 if torch.cuda.is_available() else None

        losses = ' '.join('{}: {:.3f}'.format(k, v.median(20)) for k, v in storage.histories().items() if 'loss' in k)
        self.logger.info(
            'eta: {eta} iter: {iter} {losses} {time} {data_time} lr: {lr} {memory}'.format(
                eta=eta_string,
                iter=iteration,
                losses=losses,
                time='time: {:.4f}'.format(time) if time is not None else '',
                data_time='data_time: {:.4f}'.format(data_time) if data_time is not None else '',
                lr=lr,
                memory='max_mem: {:.0f}M'.format(max_mem_mb) if max_mem_mb is not None else '',
            )
        )
class DenseLayer(Layer):
    """Fully-connected layer (Theano) with optional inverted dropout on its
    input and a configurable nonlinearity.

    Weights are initialized N(0, weights_std^2), biases to a constant
    init_bias_value. The previous layer must expose `mb_size` and
    `get_output_shape()`; inputs with more than 2 dims are flattened.
    """

    def __init__(self, input_layer, n_outputs, weights_std, init_bias_value, nonlinearity=rectify, dropout=0.0):
        self.n_outputs = n_outputs
        self.input_layer = input_layer
        self.weights_std = numpy.float32(weights_std)
        self.init_bias_value = numpy.float32(init_bias_value)
        self.nonlinearity = nonlinearity
        self.dropout = dropout
        self.mb_size = self.input_layer.mb_size
        input_shape = self.input_layer.get_output_shape()
        # Flatten everything after the batch axis.
        self.n_inputs = int(numpy.prod(input_shape[1:]))
        self.flatinput_shape = (self.mb_size, self.n_inputs)
        self.W = shared_single(2)  # 2-D shared weight matrix
        self.b = shared_single(1)  # 1-D shared bias vector
        self.trainable = True
        self.params = [self.W, self.b]
        self.bias_params = [self.b]
        self.reset_params()

    def reset_params(self):
        """Re-draw weights and reset biases in place on the shared variables."""
        self.W.set_value((numpy.random.randn(self.n_inputs, self.n_outputs).astype(numpy.float32) * self.weights_std))
        self.b.set_value((numpy.ones(self.n_outputs).astype(numpy.float32) * self.init_bias_value))

    def get_output_shape(self):
        return (self.mb_size, self.n_outputs)

    def output(self, input=None, dropout_active=True, *args, **kwargs):
        """Symbolic output expression; pulls input from the previous layer
        unless one is supplied explicitly.

        With dropout_active, applies inverted dropout (kept units are scaled
        by 1/retain_prob) so no rescaling is needed at inference time.
        """
        if (input is None):
            input = self.input_layer.output(*args, dropout_active=dropout_active, **kwargs)
        if (len(self.input_layer.get_output_shape()) > 2):
            input = input.reshape(self.flatinput_shape)
        if (dropout_active and (self.dropout > 0.0)):
            retain_prob = (1 - self.dropout)
            # Inverted dropout: scale first, then zero out with a binomial mask.
            input = ((input / retain_prob) * srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32'))
        return self.nonlinearity((T.dot(input, self.W) + self.b.dimshuffle('x', 0)))
class MultiHeadAttention(chainer.Chain):
    """Multi-head scaled dot-product attention (Chainer).

    n_units must be divisible by the head count h; each head attends over
    d_k = n_units // h dimensions. The last computed attention weights are
    kept on `self.attn` for inspection.
    """

    def __init__(self, n_units, h=8, dropout=0.1, initialW=None, initial_bias=None):
        super(MultiHeadAttention, self).__init__()
        assert ((n_units % h) == 0)
        # Scaled init: initialW/initial_bias are factories taking a scale kwarg.
        stvd = (1.0 / np.sqrt(n_units))
        with self.init_scope():
            self.linear_q = L.Linear(n_units, n_units, initialW=initialW(scale=stvd), initial_bias=initial_bias(scale=stvd))
            self.linear_k = L.Linear(n_units, n_units, initialW=initialW(scale=stvd), initial_bias=initial_bias(scale=stvd))
            self.linear_v = L.Linear(n_units, n_units, initialW=initialW(scale=stvd), initial_bias=initial_bias(scale=stvd))
            self.linear_out = L.Linear(n_units, n_units, initialW=initialW(scale=stvd), initial_bias=initial_bias(scale=stvd))
        self.d_k = (n_units // h)
        self.h = h
        self.dropout = dropout
        self.attn = None  # attention weights of the last forward pass

    def forward(self, e_var, s_var=None, mask=None, batch=1):
        """Self-attention over e_var, or cross-attention with keys/values
        from s_var when it is given. `mask` (if any) marks valid positions;
        masked-out scores are set to MIN_VALUE before the softmax.
        """
        xp = self.xp
        if (s_var is None):
            # Self-attention: queries, keys and values all come from e_var.
            Q = self.linear_q(e_var).reshape(batch, (- 1), self.h, self.d_k)
            K = self.linear_k(e_var).reshape(batch, (- 1), self.h, self.d_k)
            V = self.linear_v(e_var).reshape(batch, (- 1), self.h, self.d_k)
        else:
            # Cross-attention: keys/values from the source side s_var.
            Q = self.linear_q(e_var).reshape(batch, (- 1), self.h, self.d_k)
            K = self.linear_k(s_var).reshape(batch, (- 1), self.h, self.d_k)
            V = self.linear_v(s_var).reshape(batch, (- 1), self.h, self.d_k)
        # (batch, h, q_len, k_len) attention scores, scaled by sqrt(d_k).
        scores = (F.matmul(F.swapaxes(Q, 1, 2), K.transpose(0, 2, 3, 1)) / np.sqrt(self.d_k))
        if (mask is not None):
            # Broadcast the mask over heads before applying it.
            mask = xp.stack(([mask] * self.h), axis=1)
            scores = F.where(mask, scores, xp.full(scores.shape, MIN_VALUE, 'f'))
        self.attn = F.softmax(scores, axis=(- 1))
        p_attn = F.dropout(self.attn, self.dropout)
        x = F.matmul(p_attn, F.swapaxes(V, 1, 2))
        # Merge heads back: (batch*q_len, h*d_k) for the output projection.
        x = F.swapaxes(x, 1, 2).reshape((- 1), (self.h * self.d_k))
        return self.linear_out(x)
def dump(obj, file=None, file_format=None, **kwargs):
    """Serialize *obj* to a string, a file path, or a file-like object.

    The format is inferred from the filename extension when not given
    explicitly; an explicit format is mandatory when dumping to a string
    (file is None). Raises TypeError for unsupported formats/targets.
    """
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError('file_format must be specified since file is None')
    handler = file_handlers.get(file_format)
    if handler is None:
        raise TypeError('Unsupported format: {}'.format(file_format))
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
# Auto-generated placeholder: stands in for the real TFConvBertModel when the
# TensorFlow backend is unavailable; any use raises an informative error.
class TFConvBertModel(metaclass=DummyObject):
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises immediately if the 'tf' backend is not installed.
        requires_backends(self, ['tf'])
class ReconnectingClient():
    """multiprocessing.connection client that retries until the server
    accepts; usable as a context manager (connection closed on exit)."""

    def __init__(self, address):
        self.conn = None
        self.address = address
        logging.debug('Connecting...')
        self.connect()

    def connect(self):
        """Block until a connection is established, retrying once a second."""
        connected = False
        while not connected:
            try:
                self.conn = multiprocessing.connection.Client(self.address)
                print('Connected')
                connected = True
            except ConnectionRefusedError:
                logging.debug('Connection refused.')
                time.sleep(1)

    def recv(self):
        return self.conn.recv()

    def send(self, data):
        return self.conn.send(data)

    def communicate(self, data):
        """Send *data* and block for the reply."""
        self.conn.send(data)
        return self.conn.recv()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.close()

    def close(self):
        self.conn.close()
class VQA():
    """Lightweight API over VQA annotation/question JSON files: indexes by
    question id and image id, supports filtered queries, QA display, and
    loading of result files."""

    def __init__(self, annotation_file=None, question_file=None):
        """Load annotations/questions from JSON and build the lookup indexes.
        Both files are optional so an empty shell can be created (loadRes)."""
        self.dataset = {}
        self.questions = {}
        self.qa = {}
        self.qqa = {}
        self.imgToQA = {}
        if (not (annotation_file is None)) and (not (question_file is None)):
            dataset = json.load(open(annotation_file, 'r'))
            questions = json.load(open(question_file, 'r'))
            self.dataset = dataset
            self.questions = questions
            self.createIndex()

    def createIndex(self):
        """Build qa (question id -> annotation), qqa (question id -> question)
        and imgToQA (image id -> list of annotations)."""
        imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}
        qa = {ann['question_id']: [] for ann in self.dataset['annotations']}
        qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}
        for ann in self.dataset['annotations']:
            imgToQA[ann['image_id']] += [ann]
            qa[ann['question_id']] = ann
        for ques in self.questions['questions']:
            qqa[ques['question_id']] = ques
        self.qa = qa
        self.qqa = qqa
        self.imgToQA = imgToQA

    def info(self):
        """Print the dataset's info block (key: value per line)."""
        # BUG FIX: the original read `self.datset` (typo) and always raised
        # AttributeError.
        for (key, value) in self.dataset['info'].items():
            print(('%s: %s' % (key, value)))

    def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
        """Question ids matching the given image ids / question types /
        answer types (empty filter = no constraint)."""
        imgIds = (imgIds if (type(imgIds) == list) else [imgIds])
        quesTypes = (quesTypes if (type(quesTypes) == list) else [quesTypes])
        ansTypes = (ansTypes if (type(ansTypes) == list) else [ansTypes])
        if (len(imgIds) == len(quesTypes) == len(ansTypes) == 0):
            anns = self.dataset['annotations']
        else:
            if (not (len(imgIds) == 0)):
                anns = sum([self.imgToQA[imgId] for imgId in imgIds if (imgId in self.imgToQA)], [])
            else:
                anns = self.dataset['annotations']
            anns = (anns if (len(quesTypes) == 0) else [ann for ann in anns if (ann['question_type'] in quesTypes)])
            anns = (anns if (len(ansTypes) == 0) else [ann for ann in anns if (ann['answer_type'] in ansTypes)])
        ids = [ann['question_id'] for ann in anns]
        return ids

    def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
        """Image ids matching the given filters.

        NOTE(review): when quesIds are given, `sum([self.qa[qid] ...], [])`
        tries to concatenate qa values, but createIndex stores single
        annotation dicts there — this path appears broken upstream as well;
        behavior preserved unchanged.
        """
        quesIds = (quesIds if (type(quesIds) == list) else [quesIds])
        quesTypes = (quesTypes if (type(quesTypes) == list) else [quesTypes])
        ansTypes = (ansTypes if (type(ansTypes) == list) else [ansTypes])
        if (len(quesIds) == len(quesTypes) == len(ansTypes) == 0):
            anns = self.dataset['annotations']
        else:
            if (not (len(quesIds) == 0)):
                anns = sum([self.qa[quesId] for quesId in quesIds if (quesId in self.qa)], [])
            else:
                anns = self.dataset['annotations']
            anns = (anns if (len(quesTypes) == 0) else [ann for ann in anns if (ann['question_type'] in quesTypes)])
            anns = (anns if (len(ansTypes) == 0) else [ann for ann in anns if (ann['answer_type'] in ansTypes)])
        ids = [ann['image_id'] for ann in anns]
        return ids

    def loadQA(self, ids=[]):
        """Load annotations for a list of ids (or a single int id)."""
        if (type(ids) == list):
            return [self.qa[id] for id in ids]
        elif (type(ids) == int):
            return [self.qa[ids]]

    def showQA(self, anns):
        """Print question and answers for each annotation; 0 if empty."""
        if (len(anns) == 0):
            return 0
        for ann in anns:
            quesId = ann['question_id']
            print(('Question: %s' % self.qqa[quesId]['question']))
            for ans in ann['answers']:
                print(('Answer %d: %s' % (ans['answer_id'], ans['answer'])))

    def loadRes(self, resFile, quesFile):
        """Build a result VQA object from a result annotation list and the
        question file; validates that every question id is covered."""
        res = VQA()
        res.questions = json.load(open(quesFile))
        res.dataset['info'] = copy.deepcopy(self.questions['info'])
        res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])
        res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])
        res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])
        res.dataset['license'] = copy.deepcopy(self.questions['license'])
        time_t = datetime.datetime.utcnow()
        anns = resFile
        assert (type(anns) == list), 'results is not an array of objects'
        annsQuesIds = [ann['question_id'] for ann in anns]
        assert (set(annsQuesIds) == set(self.getQuesIds())), 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'
        for ann in anns:
            quesId = ann['question_id']
            if (res.dataset['task_type'] == 'Multiple Choice'):
                assert (ann['answer'] in self.qqa[quesId]['multiple_choices']), 'predicted answer is not one of the multiple choices'
            # Copy image/type metadata from the ground-truth annotation.
            qaAnn = self.qa[quesId]
            ann['image_id'] = qaAnn['image_id']
            ann['question_type'] = qaAnn['question_type']
            ann['answer_type'] = qaAnn['answer_type']
        res.dataset['annotations'] = anns
        res.createIndex()
        return res
def imputing_missing_features(df, target_name):
    """Impute missing values: categorical columns get the string 'None',
    numerical columns get their most frequent value (mode). Returns df."""
    for col in get_category_columns(df, target_name):
        df[col] = df[col].fillna('None')
    for col in get_numerical_columns(df, target_name):
        most_frequent = df[col].mode()[0]
        df[col] = df[col].fillna(most_frequent)
    return df
# NOTE(review): the line above this function originally read `.export` — a
# decorator whose leading name was stripped during source extraction. It was
# a SyntaxError as written and has been removed; restore the real decorator
# from the upstream source if needed.
def get_lang_tok(lang: str, lang_tok_style: str, spec: str=LangTokSpec.main.value) -> str:
    """Format *lang* as a language token in the requested style.

    mbart style -> "[lang]"; multilingual style -> "__lang__". Specs ending
    in 'dae'/'mined' add a matching suffix to the language code first.

    Raises KeyError for an unknown *lang_tok_style*.
    """
    TOKEN_STYLES: Dict[str, str] = {
        LangTokStyle.mbart.value: '[{}]',
        LangTokStyle.multilingual.value: '__{}__',
    }
    if spec.endswith('dae'):
        lang = f'{lang}_dae'
    elif spec.endswith('mined'):
        lang = f'{lang}_mined'
    return TOKEN_STYLES[lang_tok_style].format(lang)
def main():
    """AutoGAN-style training driver: builds searched generator/discriminator,
    optionally resumes from a checkpoint, then alternates training epochs
    with periodic FID/IS validation, tracking the best FID and saving a
    checkpoint (with an EMA copy of the generator) every epoch.
    """
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)
    # Inception graph is required for IS/FID evaluation.
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)
    # SECURITY NOTE(review): eval() on CLI-provided model names executes
    # arbitrary code if args are untrusted; acceptable only for trusted use.
    gen_net = eval((('models_search.' + args.gen_model) + '.Generator'))(args=args).cuda()
    dis_net = eval((('models_search.' + args.dis_model) + '.Discriminator'))(args=args).cuda()
    # Freeze the searched architecture at its final stage.
    gen_net.set_arch(args.arch, cur_stage=2)
    dis_net.cur_stage = 2
    def weights_init(m):
        # Conv weights per args.init_type; BatchNorm to N(1, 0.02), bias 0.
        classname = m.__class__.__name__
        if (classname.find('Conv2d') != (- 1)):
            if (args.init_type == 'normal'):
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif (args.init_type == 'orth'):
                nn.init.orthogonal_(m.weight.data)
            elif (args.init_type == 'xavier_uniform'):
                nn.init.xavier_uniform(m.weight.data, 1.0)
            else:
                raise NotImplementedError('{} unknown inital type'.format(args.init_type))
        elif (classname.find('BatchNorm2d') != (- 1)):
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)
    gen_net.apply(weights_init)
    dis_net.apply(weights_init)
    gen_optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), gen_net.parameters()), args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), dis_net.parameters()), args.d_lr, (args.beta1, args.beta2))
    # Linear LR decay over max_iter * n_critic optimizer steps.
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, (args.max_iter * args.n_critic))
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, (args.max_iter * args.n_critic))
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train
    # Precomputed FID statistics per supported dataset.
    if (args.dataset.lower() == 'cifar10'):
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif (args.dataset.lower() == 'stl10'):
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)
    # Epoch budget scaled by the critic update ratio; max_iter overrides it.
    args.max_epoch = (args.max_epoch * args.n_critic)
    if args.max_iter:
        args.max_epoch = np.ceil(((args.max_iter * args.n_critic) / len(train_loader)))
    # Fixed latent batch for reproducible sample grids during validation.
    fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 10000.0
    if args.load_path:
        # Resume: restore nets, optimizers, EMA generator params and paths.
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # Fresh run: create log directories under the experiment name.
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {'writer': SummaryWriter(args.path_helper['log_path']), 'train_global_steps': (start_epoch * len(train_loader)), 'valid_global_steps': (start_epoch // args.val_freq)}
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)), desc='total progress'):
        lr_schedulers = ((gen_scheduler, dis_scheduler) if args.lr_decay else None)
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)
        # Validate with the EMA generator weights, then restore the live ones.
        if ((epoch and ((epoch % args.val_freq) == 0)) or (epoch == (int(args.max_epoch) - 1))):
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            (inception_score, fid_score) = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
            logger.info(f'Inception score: {inception_score}, FID score: {fid_score} || epoch {epoch}.')
            load_params(gen_net, backup_param)
            if (fid_score < best_fid):
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False
        # Checkpoint both the live and the EMA generator every epoch.
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({'epoch': (epoch + 1), 'gen_model': args.gen_model, 'dis_model': args.dis_model, 'gen_state_dict': gen_net.state_dict(), 'dis_state_dict': dis_net.state_dict(), 'avg_gen_state_dict': avg_gen_net.state_dict(), 'gen_optimizer': gen_optimizer.state_dict(), 'dis_optimizer': dis_optimizer.state_dict(), 'best_fid': best_fid, 'path_helper': args.path_helper}, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
# NOTE(review): two stripped decorator lines (`.no_cover`, `.timeout(60)`)
# preceded this test in the extracted source — very likely pytest marks such
# as @pytest.mark.no_cover / @pytest.mark.timeout(60). They were syntax
# errors as written and are removed here; restore them from upstream.
def test_te_ppo_point():
    """Smoke test: the te_ppo_point example script runs one epoch cleanly."""
    script = str(EXAMPLES_ROOT_DIR / 'tf/te_ppo_point.py')
    assert subprocess.run(
        [script, '--n_epochs', '1', '--batch_size_per_task', '100'],
        check=False,
    ).returncode == 0
def shared_convl1_bn_lrelu(shape, nb_filters, kernel, stride=(1, 1), **kwargs):
    """Conv2D (L1-regularized, he_uniform init) -> BatchNorm -> LeakyReLU block.

    *stride* may be an int or an (sh, sw) tuple; extra kwargs are forwarded
    to the wrapping Sequential.
    """
    # BUG FIX: the original passed strides=(stride, stride); with the tuple
    # default (1, 1) that produced the invalid nested value ((1, 1), (1, 1)).
    strides = stride if isinstance(stride, (tuple, list)) else (stride, stride)
    conv = Convolution2D(nb_filters, kernel, padding='same',
                         kernel_initializer='he_uniform',
                         kernel_regularizer=l1(0.01),
                         strides=strides, input_shape=shape)
    return Sequential([conv, BatchNormalization(), LeakyReLU()], **kwargs)
def test_image_batch(model, images, new_gopro=False):
    """Run *model* on a batch of image paths, convert predictions to uint8
    HWC images, and fetch matching ground-truth images.

    NOTE(review): `img_transforms`/`size_transform` are built but never
    applied, and the function returns nothing — `gt_images` is computed and
    dropped; preserved pending clarification. Fixed here: the undefined name
    `results` (typo for `results_`) and the `get_gt_img` local rebinding,
    which made `partial(get_gt_img, ...)` an UnboundLocalError.
    """
    img_transforms = transforms.Compose([transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    size_transform = Compose([PadIfNeeded(736, 1280)])
    images_ = read_imgs(images)
    results_ = model(images_)
    results_ = results_.cpu().float().numpy()
    # Map from [-1, 1] (NCHW model output) to [0, 255] NHWC.
    results_ = (np.transpose(results_, (0, 2, 3, 1)) + 1) / 2.0 * 255.0
    crop = CenterCrop(720, 1280)
    results_ = crop(image=results_)['image']
    results_ = results_.astype('uint8')
    get_gt = partial(get_gt_img, new_gopro=new_gopro)
    get_gt_v = np.vectorize(get_gt)
    gt_images = get_gt_v(results_)
def prepare_data(datum, devices: list=None, allocation: list=None):
    """Split one batch across devices for data-parallel training.

    Moves each sample's image/target/mask to its assigned device (grads
    disabled via gradinator), optionally enforces a common size when
    preserving aspect ratio, and returns per-device stacked tensors/lists:
    (split_images, split_targets, split_masks, split_numcrowds).
    """
    with torch.no_grad():
        if (devices is None):
            devices = (['cuda:0'] if args.cuda else ['cpu'])
        if (allocation is None):
            # Even split across devices; the last device takes the remainder.
            allocation = ([(args.batch_size // len(devices))] * (len(devices) - 1))
            allocation.append((args.batch_size - sum(allocation)))
        (images, (targets, masks, num_crowds)) = datum
        cur_idx = 0
        # Transfer samples to their devices in allocation order.
        for (device, alloc) in zip(devices, allocation):
            for _ in range(alloc):
                images[cur_idx] = gradinator(images[cur_idx].to(device))
                targets[cur_idx] = gradinator(targets[cur_idx].to(device))
                masks[cur_idx] = gradinator(masks[cur_idx].to(device))
                cur_idx += 1
        if cfg.preserve_aspect_ratio:
            # Pick one random sample's size and resize every sample (and its
            # annotations) to it so tensors can be stacked.
            (_, h, w) = images[random.randint(0, (len(images) - 1))].size()
            for (idx, (image, target, mask, num_crowd)) in enumerate(zip(images, targets, masks, num_crowds)):
                (images[idx], targets[idx], masks[idx], num_crowds[idx]) = enforce_size(image, target, mask, num_crowd, w, h)
        cur_idx = 0
        (split_images, split_targets, split_masks, split_numcrowds) = [[None for alloc in allocation] for _ in range(4)]
        # Slice the per-sample lists into contiguous per-device chunks.
        for (device_idx, alloc) in enumerate(allocation):
            split_images[device_idx] = torch.stack(images[cur_idx:(cur_idx + alloc)], dim=0)
            split_targets[device_idx] = targets[cur_idx:(cur_idx + alloc)]
            split_masks[device_idx] = masks[cur_idx:(cur_idx + alloc)]
            split_numcrowds[device_idx] = num_crowds[cur_idx:(cur_idx + alloc)]
            cur_idx += alloc
        return (split_images, split_targets, split_masks, split_numcrowds)
def shorten_to_bytes_width(string: str, maximum_bytes: int) -> str:
    """Collapse whitespace in *string* and truncate it so its UTF-8 encoding
    fits within *maximum_bytes*, appending '[...]' when shortened.

    Truncation happens at a word boundary; when no boundary fits, only the
    placeholder is returned. An empty (post-normalization) input yields ''.
    """
    maximum_bytes = max(_MIN_WIDTH, maximum_bytes)
    placeholder = '[...]'
    encoded_placeholder = placeholder.encode().strip()
    # Normalize whitespace before measuring byte width.
    string = _RE_COMBINE_WHITESPACE.sub(' ', string)
    string = _RE_STRIP_WHITESPACE.sub('', string)
    encoded = string.encode()
    if not encoded:
        return ''
    if len(encoded) <= maximum_bytes:
        return string
    # Reserve room for the placeholder, then back off to the last space.
    head = encoded[:maximum_bytes - len(encoded_placeholder)]
    pieces = head.rsplit(b' ', 1)
    if len(pieces) != 2:
        return placeholder
    return b' '.join([pieces[0], encoded_placeholder]).decode()
def get_centering_tf_schema(scale):
    """Transform schema mapping [0, scale] onto [-0.5, 0.5]:
    divide by *scale*, then subtract 0.5."""
    mult_step = {'type': 'scalar-mult', 'value': 1 / scale}
    shift_step = {'type': 'scalar-add', 'value': -0.5}
    return [mult_step, shift_step]
def _check_same_shape(preds, targets):
    """Report an invalid-input error unless preds and targets share a shape."""
    if preds.shape == targets.shape:
        return
    invalidInputError(False, 'preds and targets are expected to have the same shape')
def get_config():
    """Experiment config for the correlated binomial-bridge graph problem:
    a coherent (correlated) TS agent against a misspecified independent one,
    run for 500 steps over 1000 seeds."""
    name = 'graph_correlated'
    n_stages = 20
    mu0 = -0.5
    sigma0 = 1
    sigma_tilde = 1
    make_coherent = functools.partial(CorrelatedBBTS, n_stages, mu0, sigma0, sigma_tilde)
    make_misspecified = functools.partial(IndependentBBTS, n_stages, mu0, sigma0, sigma_tilde)
    agents = collections.OrderedDict([('coherent TS', make_coherent), ('misspecified TS', make_misspecified)])
    make_env = functools.partial(CorrelatedBinomialBridge, n_stages, mu0, sigma0, sigma_tilde)
    environments = collections.OrderedDict([('env', make_env)])
    experiments = collections.OrderedDict([(name, ExperimentNoAction)])
    n_steps = 500
    n_seeds = 1000
    return Config(name, agents, environments, experiments, n_steps, n_seeds)
class DistributionOutput():
    """Base class mapping network outputs onto a torch Distribution.

    NOTE(review): the extracted source had decorator lines stripped.
    `event_shape`, `event_dim` and `value_in_support` are restored as
    properties — `event_dim` calls len(self.event_shape) and distribution()
    passes self.event_dim as an int, both of which require properties —
    and `squareplus` (no self parameter) as a staticmethod.
    """
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each parameter's dimensionality by the event dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        # Multivariate outputs wrap the base distribution in Independent so
        # the last dim becomes an event dim.
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        """Construct the output distribution, optionally affine-transformed
        by *loc* and *scale*."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event sampled from the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of event_shape)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie within the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Projection module from network features to distribution arguments."""
        return ParameterProjection(in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections into the distribution's parameter domain;
        must be implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity mapping (softplus alternative): (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
def scatter_sub(ref, indices, updates, use_locking=True, name=None):
    """Scatter-subtract that routes KV-variable refs to the dedicated v2
    kernel and falls back to the original op for everything else."""
    if not utils.is_kv_variable_op_type(ref.op.type):
        return orignal_scatter_sub(ref, indices, updates, use_locking=use_locking, name=name)
    converted = ops.convert_to_tensor(updates, ref.dtype)
    return gen_kv_variable_ops.kv_variable_scatter_sub_v2(ref.handle, indices, converted, name=name)
class TestGeneration(tf.test.TestCase):
    """Checks WaveNet next-sample prediction: the plain and incremental
    (fast-generation) paths each produce a valid distribution, and the two
    paths agree on the same input sequence."""

    def setUp(self):
        self.net = WaveNetModel(batch_size=1, dilations=[1, 2, 4, 8, 16, 32, 64, 128, 256], filter_width=2, residual_channels=16, dilation_channels=16, quantization_channels=128, skip_channels=32)

    def testGenerateSimple(self):
        # Full (non-incremental) prediction over a random 1000-sample waveform.
        waveform = tf.placeholder(tf.int32)
        np.random.seed(0)
        data = np.random.randint(128, size=1000)
        proba = self.net.predict_proba(waveform)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            proba = sess.run(proba, feed_dict={waveform: data})
        # One probability per quantization channel, each within [0, 1].
        self.assertAllEqual(proba.shape, [128])
        self.assertTrue(np.all(((proba >= 0) & (proba <= (128 - 1)))))

    def testGenerateFast(self):
        # Incremental prediction on a single sample (requires init_ops).
        waveform = tf.placeholder(tf.int32)
        np.random.seed(0)
        data = np.random.randint(128)
        proba = self.net.predict_proba_incremental(waveform)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(self.net.init_ops)
            proba = sess.run(proba, feed_dict={waveform: data})
        self.assertAllEqual(proba.shape, [128])
        self.assertTrue(np.all(((proba >= 0) & (proba <= (128 - 1)))))

    def testCompareSimpleFast(self):
        # Feed the same sequence through both paths and compare the final
        # predicted distributions.
        waveform = tf.placeholder(tf.int32)
        np.random.seed(0)
        data = np.random.randint(128, size=1000)
        proba = self.net.predict_proba(waveform)
        proba_fast = self.net.predict_proba_incremental(waveform)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(self.net.init_ops)
            # Prime the incremental queues with all but the last sample.
            for x in data[:(- 1)]:
                proba_fast_ = sess.run([proba_fast, self.net.push_ops], feed_dict={waveform: x})
            proba_fast_ = sess.run(proba_fast, feed_dict={waveform: data[(- 1)]})
            proba_ = sess.run(proba, feed_dict={waveform: data})
            self.assertAllClose(proba_, proba_fast_)
def load_dataset(root_dir, train=True):
    """Collect (image, label) path lists from <root>/<training|test>.

    Label paths are derived from image basenames: X.tif pairs with
    X_nerve_ann.tif in the sibling 'label' directory. Returns the two
    parallel lists (images, labels).
    """
    sub_dir = 'training' if train else 'test'
    label_dir = os.path.join(root_dir, sub_dir, 'label')
    image_dir = os.path.join(root_dir, sub_dir, 'images')
    images, labels = [], []
    for path in glob.glob(os.path.join(image_dir, '*.tif')):
        stem = os.path.basename(path)
        labels.append(os.path.join(label_dir, stem[:-4] + '_nerve_ann.tif'))
        images.append(os.path.join(image_dir, stem))
    return (images, labels)
def idx_to_sparse(idx, sparse_dim):
    """One-hot encode ``idx`` as an integer pandas Series of length ``sparse_dim``."""
    one_hot = np.zeros(sparse_dim)
    one_hot[int(idx)] = 1
    return pd.Series(one_hot, dtype=int)
def get_args(argv=None):
    """Build and parse the PPO command-line arguments.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] — preserving the original
            zero-argument call signature while allowing tests to pass an
            explicit list.

    Returns:
        argparse.Namespace with all training hyper-parameters.
    """
    parse = argparse.ArgumentParser()
    parse.add_argument('--gamma', type=float, default=0.99, help='the discount factor of RL')
    parse.add_argument('--seed', type=int, default=123, help='the random seeds')
    parse.add_argument('--num-workers', type=int, default=8, help='the number of workers to collect samples')
    parse.add_argument('--env-name', type=str, default='PongNoFrameskip-v4', help='the environment name')
    parse.add_argument('--batch-size', type=int, default=4, help='the batch size of updating')
    parse.add_argument('--lr', type=float, default=0.00025, help='learning rate of the algorithm')
    parse.add_argument('--epoch', type=int, default=4, help='the epoch during training')
    parse.add_argument('--nsteps', type=int, default=128, help='the steps to collect samples')
    parse.add_argument('--vloss-coef', type=float, default=0.5, help='the coefficient of value loss')
    parse.add_argument('--ent-coef', type=float, default=0.01, help='the entropy loss coefficient')
    parse.add_argument('--tau', type=float, default=0.95, help='gae coefficient')
    parse.add_argument('--cuda', action='store_true', help='use cuda do the training')
    # BUG FIX: the original line read `default=,` which is a SyntaxError.
    # 10M frames is a common Atari-PPO budget — TODO confirm against the
    # original training configuration.
    parse.add_argument('--total-frames', type=int, default=int(10e6), help='the total frames for training')
    parse.add_argument('--dist', type=str, default='gauss', help='the distributions for sampling actions')
    parse.add_argument('--eps', type=float, default=1e-05, help='param for adam optimizer')
    parse.add_argument('--clip', type=float, default=0.1, help='the ratio clip param')
    parse.add_argument('--save-dir', type=str, default='saved_models/', help='the folder to save models')
    parse.add_argument('--lr-decay', action='store_true', help='if using the learning rate decay during decay')
    parse.add_argument('--max-grad-norm', type=float, default=0.5, help='grad norm')
    parse.add_argument('--display-interval', type=int, default=10, help='the interval that display log information')
    parse.add_argument('--env-type', type=str, default='atari', help='the type of the environment')
    parse.add_argument('--log-dir', type=str, default='logs', help='the folders to save the log files')
    args = parse.parse_args(argv)
    return args
class ParametricSequential(nn.Sequential):
    """nn.Sequential variant whose forward threads extra keyword arguments
    into every submodule call."""

    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        out = x
        # Unlike nn.Sequential, pass **kwargs along at every step.
        for layer in self._modules.values():
            out = layer(out, **kwargs)
        return out
class BasicBlock(nn.Module):
    """ResNet basic block built on circular-padding 3x3 convolutions.

    Two conv-BN stages with a residual connection; a 1x1 projection shortcut
    is inserted when the spatial stride or channel count changes.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv2d_circular(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv2d_circular(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            # Shapes differ: project the identity path with a strided 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class FSMStage():
    """Declarative description of one stage in a finite-state-machine env.

    Instances are also usable as decorators: applying one to a function
    registers that function as the stage's handler and tags the function so
    the FSM can discover the stage later.
    """

    def __init__(self, stage_id: StageID, acting_agents: Sequence[AgentID], rewarded_agents: Optional[Sequence[AgentID]]=None, next_stages: Optional[Sequence[StageID]]=None, handler: Optional[Callable[([], StageID)]]=None) -> None:
        self.id = stage_id
        self.acting_agents = acting_agents
        self.rewarded_agents = rewarded_agents
        # A falsy next_stages (None or empty) normalizes to a fresh list.
        self.next_stages = (next_stages or [])
        self.handler = handler

    def __call__(self, handler_fn: Callable[(..., Optional[StageID])]):
        # Tag the function with this stage object for later discovery.
        handler_fn._decorator = self
        self.handler = handler_fn
        return handler_fn
def apply_patches(model, args):
    """Apply the requested long-context / RoPE-scaling patch to ``model``.

    At most one scaling strategy is applied, chosen by the first truthy flag
    in this priority order: dynamic_linear, dynamic_ntk, dynamic_part_ntk,
    dynamic_yarn, ntk, linear, part_ntk, yarn, rerope. If ``args.adapter`` is
    set, a PEFT adapter is loaded and merged afterwards.

    Returns:
        The (possibly replaced, after adapter merge) model.

    Raises:
        RuntimeError: when the chosen strategy does not support the model's
            architecture.
    """
    # GPT-NeoX needs its max sequence length patched up-front, unless a
    # custom model implementation handles it itself.
    if ((not args.custom_model) and (not args.custom_model_together) and (not args.custom_model_mistral)):
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            assert (args.gpt_neox_max_length is not None)
            patch_gptneox_for_longer_sequences(model, args.gpt_neox_max_length)
    if args.dynamic_linear:
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            patch_gptneox_for_scaled_rotary_embeddings(model)
        elif ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_scaled_rotary_embeddings(model)
        else:
            # Typo fix: was "dyanmic linear".
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic linear')
    elif args.dynamic_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_scaled_rotary_embeddings(model, ntk=args.dynamic_ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic ntk')
    elif args.dynamic_part_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_part_ntk_rotary_embeddings(model, args.finetuned)
        elif ('RWForCausalLM' in model.config.architectures):
            patch_falcon_for_dynamic_part_ntk_rotary_embeddings(model)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic part ntk')
    elif args.dynamic_yarn:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_dynamic_yarn_rotary_embeddings(model, args.original_max_position_embeddings, args.finetuned)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for dynamic yarn')
    elif args.ntk:
        if ('GPTNeoXForCausalLM' in model.config.architectures):
            patch_gptneox_for_ntk_scaled_rotary_embeddings(model, args.ntk)
        elif ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_ntk_scaled_rotary_embeddings(model, args.ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ntk')
    elif args.linear:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_linear_scaled_rotary_embeddings(model, scale=args.linear)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for linear')
    elif args.part_ntk:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_part_ntk_scaled_rotary_embeddings(model, scale=args.part_ntk)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for part ntk')
    elif args.yarn:
        if ('LlamaForCausalLM' in model.config.architectures):
            patch_llama_for_yarn_scaled_rotary_embeddings(model, scale=args.yarn, original_max_position_embeddings=args.original_max_position_embeddings)
        else:
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for YaRN')
    elif args.rerope:
        if ('LlamaForCausalLM' in model.config.architectures):
            training_length = (args.original_max_position_embeddings if args.original_max_position_embeddings else 4096)
            window = args.rerope
            patch_llama_for_rerope(model, training_length=training_length, window=window)
        else:
            # BUG FIX: this branch previously misreported the feature as
            # "YaRN" (copy-paste from the branch above).
            raise RuntimeError(f'Unsupported architecture {model.config.architectures} for ReRoPE')
    if args.adapter:
        # Merge a PEFT adapter into the base weights and drop the wrapper.
        from peft import PeftModel
        model = PeftModel.from_pretrained(model, args.adapter)
        model = model.merge_and_unload()
    return model
def test_amateur_draft() -> None:
    """Smoke-test the 2019 round-1 amateur draft table: 20 columns, 41 picks."""
    draft = amateur_draft(2019, 1)
    assert draft is not None
    assert not draft.empty
    assert len(draft.columns) == 20
    assert len(draft) == 41
def resnet50_w1a2_imagenet(target_platform=None):
    """Build the ResNet-50 (1-bit weights, 2-bit activations) ImageNet overlay.

    Args:
        target_platform: optional platform name; resolved to the current
            board when None.

    Returns:
        A FINNExampleOverlay configured with the bitfile and runtime weights
        for this network.
    """
    platform = resolve_target_platform(target_platform)
    mode = get_driver_mode()
    name = 'resnet50-w1a2'
    bitfile = find_bitfile(name, platform)
    weights_dir = find_runtime_weights(name, platform)
    return FINNExampleOverlay(bitfile, mode, _imagenet_resnet50_top5inds_io_shape_dict, runtime_weight_dir=weights_dir)
def test__item_volume() -> None:
    """item_volume: any zero dimension gives 0; a 1x2x2 item gives 4."""
    flat_item = Item(1, 0, 1)
    box_item = Item(1, 2, 2)
    assert item_volume(flat_item) == 0
    assert item_volume(box_item) == 4
class ACM(nn.Module):
    """Adaptive Context Module (APCNet).

    Pools the input to a `pool_scale` x `pool_scale` grid, predicts per-pixel
    affinity weights over that grid, and reconstructs a context feature map by
    matrix-multiplying the affinities with the pooled features. A residual
    connection and optional fusion conv finish the block.
    """

    def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, norm_cfg, act_cfg):
        super(ACM, self).__init__()
        self.pool_scale = pool_scale
        self.fusion = fusion
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # 1x1 channel reductions for the pooled branch and the full-res input.
        self.pooled_redu_conv = ConvModule(self.in_channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.input_redu_conv = ConvModule(self.in_channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.global_info = ConvModule(self.channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Predicts pool_scale**2 affinity logits per spatial position.
        self.gla = nn.Conv2d(self.channels, (self.pool_scale ** 2), 1, 1, 0)
        self.residual_conv = ConvModule(self.channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        if self.fusion:
            self.fusion_conv = ConvModule(self.channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)

    def forward(self, x):
        """x: (N, in_channels, H, W) -> (N, channels, H, W)."""
        pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale)
        x = self.input_redu_conv(x)
        pooled_x = self.pooled_redu_conv(pooled_x)
        batch_size = x.size(0)
        # (N, pool_scale**2, channels)
        pooled_x = pooled_x.view(batch_size, self.channels, (- 1)).permute(0, 2, 1).contiguous()
        # Affinity logits from local features plus broadcast global context.
        affinity_matrix = self.gla((x + resize(self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]))).permute(0, 2, 3, 1).reshape(batch_size, (- 1), (self.pool_scale ** 2))
        # torch.sigmoid replaces the deprecated F.sigmoid (identical output).
        affinity_matrix = torch.sigmoid(affinity_matrix)
        z_out = torch.matmul(affinity_matrix, pooled_x)
        z_out = z_out.permute(0, 2, 1).contiguous()
        z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3))
        z_out = self.residual_conv(z_out)
        z_out = F.relu((z_out + x))
        if self.fusion:
            z_out = self.fusion_conv(z_out)
        return z_out
class Exp(ZooKerasLayer):
    """Zoo Keras layer applying the element-wise exponential.

    Args:
        input_shape: optional shape tuple, converted to a list for the
            underlying layer; None means the shape is inferred.
    """

    def __init__(self, input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(Exp, self).__init__(None, shape, **kwargs)
class ConditionalDetrPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Any attempt to construct it raises via requires_backends, directing the
    user to install the missing 'torch' dependency.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def auprc_compute_fn(y_preds, y_targets):
    """Area under the precision-recall curve for tensor predictions/targets.

    Both arguments must be CPU tensors exposing .numpy().
    """
    targets = y_targets.numpy()
    predictions = y_preds.numpy()
    return average_precision_score(targets, predictions)
class MultiScaleArchitecture(Flow):
    """Glow-style multi-scale normalizing flow.

    Stacks `levels` flow blocks connected by squeeze/split operations: between
    levels the data is squeezed (2x2 spatial -> 4x channels) and, for internal
    levels, half the channels are split off and carried straight to the output.
    Optional conditioning input `h` is threaded through every block.
    """
    def __init__(self, flow_step, levels, num_steps, in_channels, factors, hidden_channels, h_channels=0, inverse=False, transform='affine', prior_transform='affine', alpha=1.0, kernel_size=None, coupling_type='conv', h_type=None, activation='relu', normalize=None, num_groups=None):
        super(MultiScaleArchitecture, self).__init__(inverse)
        assert (levels > 1), 'Multi-scale architecture should have at least 2 levels.'
        assert (levels == len(num_steps))
        # First and last (external) levels never split, so pad factors with 0s.
        factors = (([0] + factors) + [0])
        assert (levels == len(factors))
        assert (levels == len(hidden_channels))
        if (normalize == 'group_norm'):
            assert (levels == len(num_groups))
        blocks = []
        self.levels = levels
        # Levels 1..levels-2 are "internal" blocks that split off part of z.
        self.internals = (levels - 2)
        # 'local*' conditioning is squeezed in step with the data at each level.
        self.squeeze_h = ((h_type is not None) and h_type.startswith('local'))
        for level in range(levels):
            hidden_channel = hidden_channels[level]
            n_groups = (num_groups[level] if (normalize == 'group_norm') else None)
            if (level == 0):
                # First external block operates at the input resolution.
                block = MultiScaleExternal(flow_step, num_steps[level], in_channels, hidden_channels=hidden_channel, h_channels=h_channels, transform=transform, alpha=alpha, inverse=inverse, kernel_size=kernel_size, coupling_type=coupling_type, h_type=h_type, activation=activation, normalize=normalize, num_groups=n_groups)
                blocks.append(block)
            elif (level == (levels - 1)):
                # Each squeeze2d(factor=2) multiplies channels by 4.
                in_channels = (in_channels * 4)
                if self.squeeze_h:
                    h_channels = (h_channels * 4)
                block = MultiScaleExternal(flow_step, num_steps[level], in_channels, hidden_channels=hidden_channel, h_channels=h_channels, transform=transform, alpha=alpha, inverse=inverse, kernel_size=kernel_size, coupling_type=coupling_type, h_type=h_type, activation=activation, normalize=normalize, num_groups=n_groups)
                blocks.append(block)
            else:
                in_channels = (in_channels * 4)
                if self.squeeze_h:
                    h_channels = (h_channels * 4)
                block = MultiScaleInternal(flow_step, num_steps[level], in_channels, hidden_channels=hidden_channel, h_channels=h_channels, factor=factors[level], inverse=inverse, kernel_size=kernel_size, transform=transform, prior_transform=prior_transform, alpha=alpha, coupling_type=coupling_type, h_type=h_type, activation=activation, normalize=normalize, num_groups=n_groups)
                blocks.append(block)
                # Internal blocks split their output; only z_channels continue.
                in_channels = block.z_channels
        self.blocks = nn.ModuleList(blocks)
    def sync(self):
        """Propagate sync() to every block (e.g. for data-dependent params)."""
        for block in self.blocks:
            block.sync()
    def forward(self, input: torch.Tensor, h=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Run the flow forward; returns (output, per-sample log-determinant)."""
        logdet_accum = input.new_zeros(input.size(0))
        out = input
        outputs = []
        for (i, block) in enumerate(self.blocks):
            (out, logdet) = block.forward(out, h=h)
            logdet_accum = (logdet_accum + logdet)
            if (i < (self.levels - 1)):
                if (i > 0):
                    # Internal level: stash the split half, carry the rest on.
                    (out1, out2) = split2d(out, block.z_channels)
                    outputs.append(out2)
                    out = out1
                out = squeeze2d(out, factor=2)
                if self.squeeze_h:
                    h = squeeze2d(h, factor=2)
        # Reassemble: undo each squeeze and splice the stashed halves back in
        # (LIFO order matches the nesting of the squeezes above).
        out = unsqueeze2d(out, factor=2)
        for _ in range(self.internals):
            out2 = outputs.pop()
            out = unsqueeze2d(unsplit2d([out, out2]), factor=2)
        assert (len(outputs) == 0)
        return (out, logdet_accum)
    def backward(self, input: torch.Tensor, h=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Invert the flow; mirrors forward() with blocks in reverse order."""
        outputs = []
        out = input
        # Re-create the forward-pass decomposition of the input so each
        # block's backward sees the tensor shape it produced in forward.
        for i in range((self.levels - 1)):
            if (i > 0):
                (out1, out2) = split2d(out, self.blocks[i].z_channels)
                outputs.append(out2)
                out = out1
            out = squeeze2d(out, factor=2)
            if self.squeeze_h:
                h = squeeze2d(h, factor=2)
        logdet_accum = input.new_zeros(input.size(0))
        for (i, block) in enumerate(reversed(self.blocks)):
            if (i > 0):
                out = unsqueeze2d(out, factor=2)
                if self.squeeze_h:
                    h = unsqueeze2d(h, factor=2)
            if (i < (self.levels - 1)):
                out2 = outputs.pop()
                out = unsplit2d([out, out2])
            (out, logdet) = block.backward(out, h=h)
            logdet_accum = (logdet_accum + logdet)
        assert (len(outputs) == 0)
        return (out, logdet_accum)
    def init(self, data: torch.Tensor, h=None, init_scale=1.0) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Data-dependent initialization pass; same structure as forward()."""
        logdet_accum = data.new_zeros(data.size(0))
        out = data
        outputs = []
        for (i, block) in enumerate(self.blocks):
            (out, logdet) = block.init(out, h=h, init_scale=init_scale)
            logdet_accum = (logdet_accum + logdet)
            if (i < (self.levels - 1)):
                if (i > 0):
                    (out1, out2) = split2d(out, block.z_channels)
                    outputs.append(out2)
                    out = out1
                out = squeeze2d(out, factor=2)
                if self.squeeze_h:
                    h = squeeze2d(h, factor=2)
        out = unsqueeze2d(out, factor=2)
        for _ in range(self.internals):
            out2 = outputs.pop()
            out = unsqueeze2d(unsplit2d([out, out2]), factor=2)
        assert (len(outputs) == 0)
        return (out, logdet_accum)
class FiveCrop(object):
    """Crop an image into its four corners and the center.

    Args:
        size: target crop size. An int yields a square (size, size) crop;
            otherwise a (h, w) pair of length 2 is required.
    """

    def __init__(self, size):
        # The original assigned self.size unconditionally before the branch,
        # making the first assignment dead code; only the branch result is kept.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            assert (len(size) == 2), 'Please provide only two dimensions (h, w) for size.'
            self.size = size

    def __call__(self, img):
        # Delegates to the functional implementation; returns the 5 crops.
        return F.five_crop(img, self.size)
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs):
    """Report required headers that are used but not included directly.

    cpplint-style include-what-you-use check: scans for std::string and
    algorithm/template usages and flags files that rely on transitive
    includes. NOTE: uses xrange, so this is Python 2 code.
    """
    # Maps required header -> (line number of first use, symbol needing it).
    required = {}
    for linenum in xrange(clean_lines.NumLines()):
        line = clean_lines.elided[linenum]
        if ((not line) or (line[0] == '#')):
            continue
        matched = _RE_PATTERN_STRING.search(line)
        if matched:
            # 'string' preceded by a non-std '::' is another namespace's type;
            # only bare or std:: qualified uses require <string>.
            prefix = line[:matched.start()]
            if (prefix.endswith('std::') or (not prefix.endswith('::'))):
                required['<string>'] = (linenum, 'string')
        for (pattern, template, header) in _re_pattern_algorithm_header:
            if pattern.search(line):
                required[header] = (linenum, template)
        # Template instantiations need a '<'; cheap early skip otherwise.
        if (not ('<' in line)):
            continue
        for (pattern, template, header) in _re_pattern_templates:
            if pattern.search(line):
                required[header] = (linenum, template)
    # Flatten the include stack into a single {header: linenum} dict.
    include_dict = dict([item for sublist in include_state.include_list for item in sublist])
    header_found = False
    abs_filename = FileInfo(filename).FullName()
    # Emacs flymake copies foo.cc to foo_flymake.cc; map back to the real name.
    abs_filename = re.sub('_flymake\\.cc$', '.cc', abs_filename)
    header_keys = include_dict.keys()
    for header in header_keys:
        (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
        fullpath = (common_path + header)
        if (same_module and UpdateIncludeState(fullpath, include_dict, io)):
            header_found = True
    # A .cc whose own header couldn't be found may inherit includes from it,
    # so suppress the check to avoid false positives.
    if (filename.endswith('.cc') and (not header_found)):
        return
    for required_header_unstripped in required:
        template = required[required_header_unstripped][1]
        if (required_header_unstripped.strip('<>"') not in include_dict):
            error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, ((('Add #include ' + required_header_unstripped) + ' for ') + template))
def eval_score(prediction, ground_truth):
    """Score a predicted answer string against a ground-truth string.

    Returns:
        Tuple (EM, precision, recall, f1) where EM is exact match after
        normalization and precision/recall/f1 are token-overlap metrics.
    """
    # BUG FIX: EM is now initialized too. Previously, when ground_truth was
    # empty but prediction was not, EM was never assigned and the final
    # return raised UnboundLocalError.
    (EM, precision, recall, f1) = (0, 0, 0, 0)
    if (len(ground_truth) == 0):
        if (len(prediction) == 0):
            # Both empty: perfect score.
            (EM, precision, recall, f1) = (1, 1, 1, 1)
    else:
        EM = (normalize_answer(prediction) == normalize_answer(ground_truth))
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(count) times.
    common = (Counter(prediction_tokens) & Counter(ground_truth_tokens))
    num_same = sum(common.values())
    if (num_same != 0):
        precision = ((1.0 * num_same) / len(prediction_tokens))
        recall = ((1.0 * num_same) / len(ground_truth_tokens))
        f1 = (((2 * precision) * recall) / (precision + recall))
    return (EM, precision, recall, f1)
def mask_bg(mask, attention, threshold=0.05):
    """Zero out mask entries wherever attention falls below threshold.

    Modifies ``mask`` in place and also returns it.

    Args:
        mask: tensor to be masked (same shape as ``attention``).
        attention: attention map; positions below ``threshold`` are treated
            as background.
        threshold: background cutoff.
    """
    # Index with the boolean tensor directly; the legacy Variable-era
    # `.data` access is unnecessary on modern PyTorch and behaves the same.
    mask[attention < threshold] = 0.0
    return mask
def find_smallest_n(m, t, max_iters=100):
    """Return the smallest n in (m, m + max_iters) passing check_configuration.

    Prints a warning and returns -1 when no candidate in the window is valid.
    """
    for candidate in range(m + 1, m + max_iters):
        if check_configuration(m, candidate, t):
            return candidate
    print('Failed to find valid N!')
    return -1
def batch_decode(encoded_boxes, box_coder, anchors):
    """Decode a batch of box encodings against a fixed anchor set (TF1 graph code).

    Args:
        encoded_boxes: rank-3 tensor [batch_size, num_anchors, code_size].
        box_coder: object whose decode(boxes, anchors) returns a value
            exposing .get() (a BoxList-style wrapper).
        anchors: anchor collection exposing num_boxes_static().

    Returns:
        Tensor of decoded boxes, stacked along the batch dimension.

    Raises:
        ValueError: if dim 1 of encoded_boxes disagrees with the anchor count.
    """
    encoded_boxes.get_shape().assert_has_rank(3)
    # .value on Dimension is TF1-style static shape access.
    if (encoded_boxes.get_shape()[1].value != anchors.num_boxes_static()):
        raise ValueError(('The number of anchors inferred from encoded_boxes and anchors are inconsistent: shape[1] of encoded_boxes %s should be equal to the number of anchors: %s.' % (encoded_boxes.get_shape()[1].value, anchors.num_boxes_static())))
    # Decode each batch element independently, then restack into one tensor.
    decoded_boxes = tf.stack([box_coder.decode(boxes, anchors).get() for boxes in tf.unstack(encoded_boxes)])
    return decoded_boxes
class FastAdaptiveAvgPool2d(nn.Module):
    """Global average pooling over H and W via mean(), faster than
    nn.AdaptiveAvgPool2d(1).

    Args:
        flatten: when True return (N, C); otherwise keep spatial dims as
            (N, C, 1, 1).
    """

    def __init__(self, flatten=False):
        super(FastAdaptiveAvgPool2d, self).__init__()
        self.flatten = flatten

    def forward(self, x):
        if self.flatten:
            return x.mean((2, 3))
        return x.mean((2, 3), keepdim=True)
def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Create and return a TF1 session capped at a fraction of GPU memory.

    NOTE(review): the ``sess`` parameter is ignored — a brand-new session is
    always created and returned, overwriting the argument; confirm callers do
    not expect their passed-in session to be reused.

    Args:
        sess: unused (see note above).
        gpu_fraction: per-process GPU memory fraction to allocate.
    """
    print((' tensorlayer: GPU MEM Fraction %f' % gpu_fraction))
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    return sess
def cross_entropy_smooth(input, target, size_average=True, label_smoothing=0.1, num_classes=10):
    """Label-smoothed cross entropy (CUDA-only, as in the original).

    Args:
        input: (N, num_classes) unnormalized logits.
        target: (N,) integer class indices.
        size_average: mean over the batch if True, otherwise sum.
        label_smoothing: smoothing mass. Each one-hot target is scaled by
            (1 - label_smoothing) and a flat 0.5 * label_smoothing is added
            everywhere. NOTE(review): the standard formulation adds
            label_smoothing / num_classes, which equals 0.5 * label_smoothing
            only for 2 classes — confirm the 0.5 factor is intentional.
        num_classes: number of classes; new parameter whose default (10)
            preserves the previously hard-coded value.
    """
    y = torch.eye(num_classes).cuda()  # requires a CUDA device, as before
    lb_oh = y[target]
    target = ((lb_oh * (1 - label_smoothing)) + (0.5 * label_smoothing))
    # dim=1 matches the legacy implicit-dim behavior for 2D input and avoids
    # the deprecation warning from nn.LogSoftmax().
    logsoftmax = nn.LogSoftmax(dim=1)
    if size_average:
        return torch.mean(torch.sum(((- target) * logsoftmax(input)), dim=1))
    else:
        return torch.sum(torch.sum(((- target) * logsoftmax(input)), dim=1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.