def readIntsFile(filename):
with open(filename) as f:
array = []
for line in f:
if (line.startswith('%') or line.startswith('#')):
continue
if (len(line.split()) == 0):
continue
array.append([int(x) for x in line.split()])
return array |
class TFT5PreTrainedModel():
def __init__(self, *args, **kwargs):
requires_tf(self)
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_tf(cls) |
def multi_run_histories_summary(run_histories, save_filename=None, metrics='val_binary_accuracy', description_prefix='k_fold_average_', results_prefix='k_fold_results', multi_history_metrics='mean', verbose=1):
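    # Summarize the best score per run across several Keras-style History objects
    # (e.g. one per k-fold split): take the min for loss/error metrics and the max
    # otherwise, reduce the per-run bests with 'mean'/'average', 'min', or 'max',
    # and optionally dump the results dict to a JSON file.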
if isinstance(metrics, str):
metrics = [metrics]
if isinstance(multi_history_metrics, str):
multi_history_metrics = [multi_history_metrics]
results = {}
for (metric, multi_history_metric) in zip(metrics, multi_history_metrics):
best_metric_scores = []
for (history_description, history_object) in six.iteritems(run_histories):
if (('loss' in metric) or ('error' in metric)):
best_score = np.min(history_object.history[metric])
results[((history_description + '_min_') + metric)] = best_score
else:
best_score = np.max(history_object.history[metric])
results[((history_description + '_max_') + metric)] = best_score
best_metric_scores += [best_score]
if ((multi_history_metric == 'mean') or (multi_history_metric == 'average')):
k_fold_average = np.mean(best_metric_scores)
elif (multi_history_metric == 'min'):
k_fold_average = np.min(best_metric_scores)
elif (multi_history_metric == 'max'):
k_fold_average = np.max(best_metric_scores)
else:
raise ValueError(('multi_run_histories_summary(): Unsupported multi_history_metric: ' + str(multi_history_metric)))
result_key = ((((description_prefix + '_') + multi_history_metric) + '_') + metric)
results[result_key] = k_fold_average
if verbose:
print(((str(results_prefix) + ':\n ') + str(results)))
if (save_filename is not None):
with open(save_filename, 'w') as fp:
json.dump(results, fp)
return results |
class TestLDHead(TestCase):
def test_ld_head_loss(self):
s = 256
img_metas = [{'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1}]
train_cfg = Config(dict(assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1), allowed_border=(- 1), pos_weight=(- 1), debug=False))
ld_head = LDHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0), loss_cls=dict(type='QualityFocalLoss', use_sigmoid=True, beta=2.0, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]))
teacher_model = GFLHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_cls=dict(type='QualityFocalLoss', use_sigmoid=True, beta=2.0, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]))
feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16, 32, 64]]
(cls_scores, bbox_preds) = ld_head.forward(feat)
rand_soft_target = teacher_model.forward(feat)[1]
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
batch_gt_instances_ignore = None
empty_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas, rand_soft_target, batch_gt_instances_ignore)
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_ld_loss = sum(empty_gt_losses['loss_ld'])
self.assertGreater(empty_cls_loss.item(), 0, 'cls loss should be non-zero')
self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
self.assertGreaterEqual(empty_ld_loss.item(), 0, 'ld loss should be non-negative')
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
batch_gt_instances_ignore = None
one_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas, rand_soft_target, batch_gt_instances_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
batch_gt_instances_ignore = gt_instances
ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas, rand_soft_target, batch_gt_instances_ignore)
ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
self.assertGreater(ignore_cls_loss.item(), 0, 'cls loss should be non-zero')
self.assertEqual(ignore_box_loss.item(), 0, 'gt bbox ignored loss should be zero')
batch_gt_instances_ignore = InstanceData()
batch_gt_instances_ignore.bboxes = torch.randn(1, 4)
not_ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas, rand_soft_target, batch_gt_instances_ignore)
not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
self.assertGreater(not_ignore_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreaterEqual(not_ignore_box_loss.item(), 0, 'gt bbox not ignored loss should be non-negative') |
class ModelCheckpoint(callbacks.ModelCheckpoint):
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='auto', save_freq='epoch', **kwargs):
super(ModelCheckpoint, self).__init__(filepath=filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only, mode=mode, save_freq=save_freq, **kwargs)
def pass_model(self, model):
if isinstance(model, BaseModel):
self.model = model
else:
raise TypeError('model must be a deepposekit BaseModel class')
def set_model(self, model):
pass |
def unconvert_from_RGB_255(colors):
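    # Map an RGB triple from the 0-255 integer range back to floats in [0, 1].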
un_rgb_color = ((colors[0] / 255.0), (colors[1] / 255.0), (colors[2] / 255.0))
return un_rgb_color |
class Graph(JsonSerializer):
def __init__(self) -> None:
super().__init__()
        self._nodes: Dict[str, Node] = {}
self._edges: List[Edge] = []
def add_node(self, node: Node) -> None:
self._nodes[node.id] = node
def nodes(self) -> List[Node]:
return list(self._nodes.values())
def edges(self) -> List[Edge]:
return self._edges
def add_edge(self, source_id: str, target_id: str) -> bool:
try:
source = self.get_node(source_id)
target = self.get_node(target_id)
except NotFoundException as err:
log.debug(f'Got an error: {str(err)} while attempted to add an Edge from {source_id} to {target_id}')
return False
self._edges.append(Edge(source, target))
return True
def get_node(self, id: str) -> Node:
if (id not in self._nodes):
raise NotFoundException(f'Node id: {id} not found in Graph')
return self._nodes[id]
def highlight_pattern(self, op_name: str, pattern: List[str]) -> None:
source_op = op_name
self.get_node(source_op).highlight = True
for op in pattern[1:]:
target_nodes = self.get_target_nodes(source_op)
for target_node in target_nodes:
if (target_node.label == op):
self.get_node(target_node.id).highlight = True
source_op = target_node.id
                    break
def get_target_nodes(self, op_name: str) -> List[Node]:
target_nodes: List[Node] = []
        for edge in self.edges():
            if edge.source.id == op_name:
                target_nodes.append(edge.target)
return target_nodes |
def calculate_activation_statistics_from_files(files, sess, batch_size=50, verbose=False):
act = get_activations_from_files(files, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return (mu, sigma) |
class Fuse_fea(nn.Module):
def __init__(self):
super(Fuse_fea, self).__init__()
self.convt1 = nn.Conv3d(in_channels=96, out_channels=48, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 2))
self.convt2 = nn.Conv3d(in_channels=48, out_channels=n_class, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 2))
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, feas_ra, feas_rv, feas_va):
feas_rv = torch.sum(feas_rv, 4, keepdim=True)
feas_ra1 = feas_rv.expand((- 1), (- 1), (- 1), (- 1), n_angle)
feas_va = torch.sum(feas_va, 4, keepdim=True)
feas_va = torch.transpose(feas_va, 3, 4)
feas_ra2 = feas_va.expand((- 1), (- 1), (- 1), n_range, (- 1))
feas_ra = torch.cat((feas_ra, feas_ra1, feas_ra2), 1)
x = self.relu(self.convt1(feas_ra))
x = self.sigmoid(self.convt2(x))
return x |
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = SimpleCrossAttnUpBlock2D
block_type = 'up'
    @property
    def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)
def prepare_init_args_and_inputs_for_common(self):
(init_dict, inputs_dict) = super().prepare_init_args_and_inputs_for_common()
init_dict['cross_attention_dim'] = 32
return (init_dict, inputs_dict)
def test_output(self):
expected_slice = [0.2645, 0.148, 0.0909, 0.8044, (- 0.9758), (- 0.9083), 0.0994, (- 1.1453), (- 0.7402)]
super().test_output(expected_slice) |
def image_loader(path):
if isinstance(path, Path):
path = str(path.resolve())
return default_loader(path) |
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
    @parameterized.expand([('cpu',), ('cuda',)])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, image_shapes=[(3, 128, 127), (3, 91, 92)], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
    @parameterized.expand([('cpu',), ('cuda',)])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, image_shapes=[(3, 128, 127), (3, 91, 92)], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
    @parameterized.expand([('cpu',), ('cuda',)])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple) |
class SimpleEngine(Engine):
def __init__(self, run_function: Callable):
super().__init__(process_function=(lambda x, y: None))
self._allowed_events = [Events.STARTED, Events.COMPLETED]
self._run_function = run_function
def run(self, *args, **kwargs):
self._fire_event(Events.STARTED)
self._run_function(*args, **kwargs)
self._fire_event(Events.COMPLETED) |
def split_dataset(fname, ind_arg):
with open(fname, 'r') as read_obj:
header = np.array(['ID', 'Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome'])
ind_1 = ([0] + [(ind + 1) for ind in ind_arg])
        ind_2 = [0] + [ind for ind in range(1, len(header)) if ind not in ind_1]
csv_reader = reader(read_obj)
with open('./diabetes-1.csv', 'w') as write_obj_1:
with open('./diabetes-2.csv', 'w') as write_obj_2:
csv_writer_1 = writer(write_obj_1, delimiter=',')
csv_writer_2 = writer(write_obj_2, delimiter=',')
for (i, row) in enumerate(csv_reader):
if (i == 0):
if (not row[0].isdigit()):
header = np.array((['ID'] + row))
csv_writer_1.writerow(header[ind_1])
csv_writer_2.writerow(header[ind_2])
else:
csv_writer_1.writerow(header[ind_1])
csv_writer_2.writerow(header[ind_2])
row = np.array(([int((i + 1))] + [float(d) for d in row]))
csv_writer_1.writerow(row[ind_1])
csv_writer_2.writerow(row[ind_2])
else:
row = np.array(([int((i + 1))] + [float(d) for d in row]))
csv_writer_1.writerow(row[ind_1])
csv_writer_2.writerow(row[ind_2]) |
def get_mask(mask_root, mask_paths, ignore_path, f_resize_length: Optional[int]=None):
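    # Build a single label mask from per-instance mask files: pixels covered by any
    # instance become 1, pixels covered only by the ignore box become 255, and the
    # rest stay 0 (all masks are resized to rsize x rsize and binarized at 0.5).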
rsize = (RESIZE_LENGTH if (f_resize_length is None) else f_resize_length)
mask_all_instances = []
for mask_path in mask_paths:
mask_file = os.path.join(mask_root, mask_path)
if os.path.isfile(mask_file):
mask = load_mask_image(mask_file, (rsize, rsize))
else:
mask = np.zeros((rsize, rsize), dtype=np.float32)
mask_all_instances.append((mask > 0.5))
mask_all_instances = np.stack(mask_all_instances, axis=0).any(axis=0)
ignore_file = os.path.join(mask_root, ignore_path)
if os.path.isfile(ignore_file):
ignore_box_mask = load_mask_image(ignore_file, (rsize, rsize))
ignore_box_mask = (ignore_box_mask > 0.5)
ignore_mask = np.logical_and(ignore_box_mask, np.logical_not(mask_all_instances))
if np.logical_and(ignore_mask, mask_all_instances).any():
raise RuntimeError('Ignore and foreground masks intersect.')
return (mask_all_instances.astype(np.uint8) + (255 * ignore_mask.astype(np.uint8)))
else:
return mask_all_instances.astype(np.uint8) |
@dataclass
class TFTrainingArguments(TrainingArguments):
framework = 'tf'
tpu_name: Optional[str] = field(default=None, metadata={'help': 'Name of TPU'})
tpu_zone: Optional[str] = field(default=None, metadata={'help': 'Zone of TPU'})
gcp_project: Optional[str] = field(default=None, metadata={'help': 'Name of Cloud TPU-enabled project'})
poly_power: float = field(default=1.0, metadata={'help': 'Power for the Polynomial decay LR scheduler.'})
xla: bool = field(default=False, metadata={'help': 'Whether to activate the XLA compilation or not'})
    @cached_property
    def _setup_strategy(self) -> Tuple['tf.distribute.Strategy', int]:
requires_backends(self, ['tf'])
logger.info('Tensorflow: setting up strategy')
gpus = tf.config.list_physical_devices('GPU')
if self.fp16:
tf.keras.mixed_precision.set_global_policy('mixed_float16')
if self.no_cuda:
strategy = tf.distribute.OneDeviceStrategy(device='/cpu:0')
else:
try:
if self.tpu_name:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name, zone=self.tpu_zone, project=self.gcp_project)
else:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
if self.tpu_name:
raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!")
else:
tpu = None
if tpu:
if self.fp16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)
elif (len(gpus) == 0):
strategy = tf.distribute.OneDeviceStrategy(device='/cpu:0')
elif (len(gpus) == 1):
strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
elif (len(gpus) > 1):
strategy = tf.distribute.MirroredStrategy()
else:
raise ValueError('Cannot find the proper strategy, please check your environment properties.')
return strategy
    @property
    def strategy(self) -> 'tf.distribute.Strategy':
requires_backends(self, ['tf'])
return self._setup_strategy
    @property
    def n_replicas(self) -> int:
requires_backends(self, ['tf'])
return self._setup_strategy.num_replicas_in_sync
    @property
    def should_log(self):
return False
    @property
    def train_batch_size(self) -> int:
if self.per_gpu_train_batch_size:
logger.warning('Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.')
per_device_batch_size = (self.per_gpu_train_batch_size or self.per_device_train_batch_size)
return (per_device_batch_size * self.n_replicas)
    @property
    def eval_batch_size(self) -> int:
if self.per_gpu_eval_batch_size:
logger.warning('Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.')
per_device_batch_size = (self.per_gpu_eval_batch_size or self.per_device_eval_batch_size)
return (per_device_batch_size * self.n_replicas)
    @property
    def n_gpu(self) -> int:
requires_backends(self, ['tf'])
warnings.warn('The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.', FutureWarning)
return self._setup_strategy.num_replicas_in_sync |
@pytest.mark.parametrize(['item', 'location', 'expected_space'], [(Item(2, 3, 4), Location(1, 4, 5), Space(x1=1, x2=3, y1=4, y2=7, z1=5, z2=9)), (Item(4, 1, 6), Location(10, 5, 3), Space(x1=10, x2=14, y1=5, y2=6, z1=3, z2=9))])
def test__space_from_item_and_location(item: Item, location: Location, expected_space: Space) -> None:
space = space_from_item_and_location(item, location)
assert (space == expected_space) |
def F_measure(preds, labels, openset=False, theta=None):
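    # Open-set variant: label -1 marks the unknown class, so precision/recall are
    # computed over known-class hits and misses only; the closed-set branch falls
    # back to sklearn's macro-averaged F1.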
if openset:
true_pos = 0.0
false_pos = 0.0
false_neg = 0.0
for i in range(len(labels)):
true_pos += (1 if ((preds[i] == labels[i]) and (labels[i] != (- 1))) else 0)
false_pos += (1 if ((preds[i] != labels[i]) and (labels[i] != (- 1)) and (preds[i] != (- 1))) else 0)
false_neg += (1 if ((preds[i] != labels[i]) and (labels[i] == (- 1))) else 0)
precision = (true_pos / (true_pos + false_pos))
recall = (true_pos / (true_pos + false_neg))
return (2 * ((precision * recall) / ((precision + recall) + 1e-12)))
else:
return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro') |
def convert_from_interleaved(args):
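    # Build an einsum equation from NumPy-style interleaved arguments
    # (array0, subscripts0, array1, subscripts1, ..., [output_subscripts]),
    # returning the equation string together with the operand arrays.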
nargs = len(args)
arrays = []
inputs = []
for i in range(0, (nargs // 2)):
arrays.append(args[(2 * i)])
inputs.append(args[((2 * i) + 1)])
symbol_map = get_symbol_map(inputs)
eq = ','.join((''.join((symbol_map[ix] for ix in term)) for term in inputs))
if ((nargs % 2) == 1):
eq += f"->{''.join((symbol_map[ix] for ix in args[(- 1)]))}"
return (eq, arrays) |
class PythonFormatter():
standard_header = '# \n# - Open3D: www.open3d.org -\n# \n# Copyright (c) 2018-2023 www.open3d.org\n# SPDX-License-Identifier: MIT\n# \n'
def __init__(self, file_paths, style_config):
self.file_paths = file_paths
self.style_config = style_config
    @staticmethod
    def _check_style(file_path, style_config):
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
is_valid_header = ((len(content) == 0) or content.startswith(PythonFormatter.standard_header))
(_, _, changed) = yapf.yapflib.yapf_api.FormatFile(file_path, style_config=style_config, in_place=False)
return ((not changed), is_valid_header)
    @staticmethod
    def _apply_style(file_path, style_config):
(_, _, _) = yapf.yapflib.yapf_api.FormatFile(file_path, style_config=style_config, in_place=True)
def run(self, apply, no_parallel, verbose):
num_procs = (multiprocessing.cpu_count() if (not no_parallel) else 1)
action_name = ('Applying Python style' if apply else 'Checking Python style')
print(f"{action_name} ({num_procs} process{'es'[:((2 * num_procs) ^ 2)]})")
if verbose:
print('To format:')
for file_path in self.file_paths:
print(f'> {file_path}')
start_time = time.time()
with multiprocessing.Pool(num_procs) as pool:
is_valid_files = pool.map(partial(self._check_style, style_config=self.style_config), self.file_paths)
changed_files = []
wrong_header_files = []
for (is_valid, file_path) in zip(is_valid_files, self.file_paths):
is_valid_style = is_valid[0]
is_valid_header = is_valid[1]
if (not is_valid_style):
changed_files.append(file_path)
if apply:
self._apply_style(file_path, self.style_config)
if (not is_valid_header):
wrong_header_files.append(file_path)
print(f'{action_name} took {(time.time() - start_time):.2f}s')
return (changed_files, wrong_header_files) |
class Reshape(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.view(self.size) |
def greedy_search(gold, test, classify):
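    # Greedily repair `test` toward `gold`: at each step take the successor fix that
    # removes the most parse errors (never accepting a fix that adds errors), record
    # the path of fixes, classify each one, and give up after 100 iterations.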
cur = (test.clone(), {'type': 'init'}, 0)
iters = 0
path = []
while True:
path.append(cur)
if (iters > 100):
return ((0, iters), None)
ctree = cur[0]
cerrors = parse_errors.ParseErrorSet(gold, ctree)
if (len(cerrors) == 0):
final = cur
break
best = None
for (fixes, ntree, info) in successors(ctree, cerrors, gold):
if (not ntree.check_consistency()):
raise Exception('Inconsistent tree! {}'.format(ntree))
nerrors = parse_errors.get_errors(ntree, gold)
change = (len(cerrors) - len(nerrors))
if (change < 0):
continue
if ((best is None) or (change > best[2])):
best = (ntree, info, change)
        if best is None:
            # no successor improves on the current tree; bail out like the iteration cap does
            return ((0, iters), None)
        cur = best
iters += 1
for step in path:
classify(step[1], gold, test)
return ((0, iters), path) |
def fast_walsh_hadamard_torched(x, axis=0, normalize=False):
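    # Fast Walsh-Hadamard transform along `axis`: view the axis of length 2**k as
    # k axes of size 2, then apply k butterfly stages of paired additions and
    # subtractions (O(n log n) instead of a dense matmul).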
orig_shape = x.size()
assert ((axis >= 0) and (axis < len(orig_shape))), ('For a vector of shape %s, axis must be in [0, %d] but it is %d' % (orig_shape, (len(orig_shape) - 1), axis))
h_dim = orig_shape[axis]
h_dim_exp = int(round((np.log(h_dim) / np.log(2))))
assert (h_dim == (2 ** h_dim_exp)), ('hadamard can only be computed over axis with size that is a power of two, but chosen axis %d has size %d' % (axis, h_dim))
working_shape_pre = [int(np.prod(orig_shape[:axis]))]
working_shape_post = [int(np.prod(orig_shape[(axis + 1):]))]
working_shape_mid = ([2] * h_dim_exp)
working_shape = ((working_shape_pre + working_shape_mid) + working_shape_post)
ret = x.view(working_shape)
for ii in range(h_dim_exp):
dim = (ii + 1)
arrs = torch.chunk(ret, 2, dim=dim)
assert (len(arrs) == 2)
        ret = torch.cat((arrs[0] + arrs[1], arrs[0] - arrs[1]), dim=dim)
    if normalize:
        ret = ret / np.sqrt(h_dim)  # torch.sqrt expects a tensor; use np.sqrt for the Python scalar
ret = ret.view(orig_shape)
return ret |
def analyze_ops(graph, print_info=False):
if print_info:
print('')
print('Operations: name -> (type shapes) [size]')
print('')
total_size = 0
for op in graph.get_operations():
op_size = 0
shapes = []
for output in op.outputs:
output_size = (output.get_shape().num_elements() or 0)
if output.get_shape():
shapes.append(tensor_description(output))
op_size += output_size
if print_info:
print(op.name, '\t->', ', '.join(shapes), (('[' + str(op_size)) + ']'))
total_size += op_size
return total_size |
class MultiScaleInternal(Flow):
def __init__(self, flow_step, num_steps, in_channels, hidden_channels, h_channels, factor=2, transform='affine', prior_transform='affine', alpha=1.0, inverse=False, kernel_size=(2, 3), coupling_type='conv', h_type=None, activation='relu', normalize=None, num_groups=None):
super(MultiScaleInternal, self).__init__(inverse)
num_layers = len(num_steps)
assert (num_layers < factor)
self.layers = nn.ModuleList()
self.priors = nn.ModuleList()
channel_step = (in_channels // factor)
for num_step in num_steps:
layer = [flow_step(in_channels, hidden_channels=hidden_channels, h_channels=h_channels, transform=transform, alpha=alpha, inverse=inverse, coupling_type=coupling_type, h_type=h_type, activation=activation, normalize=normalize, num_groups=num_groups, kernel_size=kernel_size) for _ in range(num_step)]
self.layers.append(nn.ModuleList(layer))
prior = MultiScalePrior(in_channels, hidden_channels=hidden_channels, h_channels=h_channels, transform=prior_transform, alpha=alpha, inverse=inverse, factor=factor, coupling_type=coupling_type, h_type=h_type, activation=activation, normalize=normalize, num_groups=num_groups)
self.priors.append(prior)
in_channels = (in_channels - channel_step)
assert (in_channels == prior.z1_channels)
factor = (factor - 1)
self.z_channels = in_channels
assert (len(self.layers) == len(self.priors))
def sync(self):
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
step.sync()
prior.sync()
    def forward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
out = input
logdet_accum = input.new_zeros(input.size(0))
outputs = []
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
(out, logdet) = step.forward(out, h=h)
logdet_accum = (logdet_accum + logdet)
(out, logdet) = prior.forward(out, h=h)
logdet_accum = (logdet_accum + logdet)
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
outputs.append(out)
outputs.reverse()
out = unsplit2d(outputs)
return (out, logdet_accum)
    def backward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
out = input
outputs = []
for prior in self.priors:
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
logdet_accum = out.new_zeros(out.size(0))
for (layer, prior) in zip(reversed(self.layers), reversed(self.priors)):
out2 = outputs.pop()
out = unsplit2d([out, out2])
(out, logdet) = prior.backward(out, h=h)
logdet_accum = (logdet_accum + logdet)
for step in reversed(layer):
(out, logdet) = step.backward(out, h=h)
logdet_accum = (logdet_accum + logdet)
assert (len(outputs) == 0)
return (out, logdet_accum)
    def init(self, data, h=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
out = data
logdet_accum = data.new_zeros(data.size(0))
outputs = []
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
(out, logdet) = step.init(out, h=h, init_scale=init_scale)
logdet_accum = (logdet_accum + logdet)
(out, logdet) = prior.init(out, h=h, init_scale=init_scale)
logdet_accum = (logdet_accum + logdet)
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
outputs.append(out)
outputs.reverse()
out = unsplit2d(outputs)
return (out, logdet_accum) |
class ResNet(nn.Module):
def __init__(self, last_stride, block, layers, num_classes=1000):
scale = 64
self.inplanes = scale
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, scale, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.InstanceNorm2d(scale, affine=True)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, scale, layers[0], stride=1, IN=True)
self.layer2 = self._make_layer(block, (scale * 2), layers[1], stride=2, IN=True)
self.layer3 = self._make_layer(block, (scale * 4), layers[2], stride=2)
self.layer4 = self._make_layer(block, (scale * 8), layers[3], stride=last_stride)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(((scale * 8) * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, IN=False):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, (blocks - 1)):
layers.append(block(self.inplanes, planes))
layers.append(block(self.inplanes, planes, IN=IN))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if ('fc' in i):
continue
self.state_dict()[i].copy_(param_dict[i]) |
@pattern_registry(pattern_type='TorchInnerProductInsertBias')
class TorchInnerProductInsertBias(Pattern):
def __call__(self, model):
if ((model.framework_modeling_config['framework'] != 'torch') or (not util.get_quant_info())):
return model
for node in model.nodes:
if (node.op_type == 'InnerProduct'):
if (len(node.input_tensors) == 2):
weight = node.input_tensors[1].data
bias = Tensor(name=(node.name + '_bias'), source_op=[], dest_op=[node.name], shape=[weight.shape[0]], data=np.zeros(weight.shape[0]).astype(np.float32), dtype='fp32')
node.input_tensors.append(bias)
return model |
def preprocessor(l):
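    # For one PDBbind ligand PDB path: load the ligand molecule (retrying alternate
    # index digits in the filename), UFF-optimize it, extract and de-water the
    # binding pocket from the matching protein PDB, then pickle the tuple
    # (ligand, uff_ligand, pocket, []) keyed by the PDB id.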
pdb_fn = '_'.join(l.split('/')[(- 1)].split('.')[0].split('_')[:(- 2)])
key = pdb_fn.split('_')[0]
data_dir = './'
if os.path.exists(f'{data_dir}/{key}'):
return
ligand_pdb_fn = l
pdb_dir = '../../refined_set'
bs_pdb_fn = f'{pdb_dir}/{key}/{key}_protein.pdb'
m1 = Chem.MolFromPDBFile(ligand_pdb_fn)
    if m1 is None:
        for i in range(2, 10):
            # strings are immutable; rebuild the filename with the alternate index digit
            ligand_pdb_fn = ligand_pdb_fn[:-5] + str(i) + ligand_pdb_fn[-4:]
            if os.path.exists(ligand_pdb_fn):
                m1 = Chem.MolFromPDBFile(ligand_pdb_fn)
                if m1 is not None:
                    break
        if m1 is None:
            print(f'{pdb_fn} no mol generated from pdb')
            return
m1_uff = uff(m1)
if (m1_uff is None):
print(f'{pdb_fn} no uff mol from ligand mol!')
return
m2 = extract(m1, bs_pdb_fn)
if (m2 is None):
print(f'{pdb_fn} no extracted binding pocket!')
return
m2 = remove_water(m2)
if (len(m1.GetConformers()) == 0):
return
if (len(m2.GetConformers()) == 0):
return
with open((data_dir + pdb_fn), 'wb') as fp:
pickle.dump((m1, m1_uff, m2, []), fp, pickle.HIGHEST_PROTOCOL)
return |
class BasicResidualBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
super().__init__()
self.kernel_size = kernel_size
props['conv_op_kwargs']['stride'] = 1
self.stride = stride
self.props = props
self.out_planes = out_planes
self.in_planes = in_planes
if (stride is not None):
kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
kwargs_conv1['stride'] = stride
else:
kwargs_conv1 = props['conv_op_kwargs']
self.conv1 = props['conv_op'](in_planes, out_planes, kernel_size, padding=[((i - 1) // 2) for i in kernel_size], **kwargs_conv1)
self.norm1 = props['norm_op'](out_planes, **props['norm_op_kwargs'])
self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])
if (props['dropout_op_kwargs']['p'] != 0):
self.dropout = props['dropout_op'](**props['dropout_op_kwargs'])
else:
self.dropout = Identity()
self.conv2 = props['conv_op'](out_planes, out_planes, kernel_size, padding=[((i - 1) // 2) for i in kernel_size], **props['conv_op_kwargs'])
self.norm2 = props['norm_op'](out_planes, **props['norm_op_kwargs'])
self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])
if (((self.stride is not None) and any(((i != 1) for i in self.stride))) or (in_planes != out_planes)):
stride_here = (stride if (stride is not None) else 1)
self.downsample_skip = nn.Sequential(props['conv_op'](in_planes, out_planes, 1, stride_here, bias=False), props['norm_op'](out_planes, **props['norm_op_kwargs']))
else:
self.downsample_skip = (lambda x: x)
def forward(self, x):
residual = x
out = self.dropout(self.conv1(x))
out = self.nonlin1(self.norm1(out))
out = self.norm2(self.conv2(out))
residual = self.downsample_skip(residual)
out += residual
return self.nonlin2(out) |
class ProofStepClassificationDatasetCreator(DatasetCreator):
def __init__(self, fp):
super().__init__(fp)
self.seen = set()
def process_dp(self, dp):
(ts, positive_hyps) = get_proof_step_classification_datapoint(dp)
positive_hyps = tuple(map(to_type_annotation, positive_hyps))
result = {'goal': ts, 'classify_locals': positive_hyps}
result_msg = json.dumps(result)
        if positive_hyps and (ts, positive_hyps) not in self.seen:
self.seen.add((ts, positive_hyps))
self.fp.write((result_msg + '\n')) |
def make_seed():
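    # Derive a seed from wall-clock jitter: mix sub-millisecond digits of
    # time.time() with two short, data-dependent sleeps so consecutive calls diverge.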
d = 10000
t = time.time()
sub1 = (int((t * d)) % d)
sub2 = (int((t * (d ** 2))) % d)
s = 0.001
s_inv = (1.0 / s)
time.sleep(((s * sub2) / d))
t2 = time.time()
t2 = (t2 - int(t2))
t2 = (int(((t2 * d) * s_inv)) % d)
time.sleep(((s * sub1) / d))
t3 = time.time()
t3 = (t3 - int(t3))
t3 = (int((((t3 * d) * s_inv) * 10)) % d)
return ((t3 - t2) % d) |
class DataReaderBase(object):
    @classmethod
    def from_opt(cls, opt):
        return cls()
    @classmethod
    def _read_file(cls, path):
        with open(path, 'rb') as f:
            for line in f:
                yield line
    @staticmethod
    def _raise_missing_dep(*missing_deps):
        raise MissingDependencyException('Could not create reader. Be sure to install the following dependencies: ' + ', '.join(missing_deps))
def read(self, data, side, src_dir):
raise NotImplementedError() |
def contrastive_loss(y_c: torch.Tensor, pred_dists: torch.Tensor, margin: int=1) -> torch.Tensor:
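    # Classic contrastive loss: mean over pairs of
    #   y * d^2 + (1 - y) * max(margin - d, 0)^2
    # where y=1 pulls positive pairs together and y=0 pushes negatives past the margin.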
N = pred_dists.shape[0]
pull_losses = (y_c * torch.pow(pred_dists, 2))
zero = torch.zeros(N)
device = y_c.device
zero = zero.to(device)
clamped_dists = torch.max((margin - pred_dists), zero)
push_losses = ((1 - y_c) * torch.pow(clamped_dists, 2))
return torch.mean((pull_losses + push_losses)) |
class GroupsSimpleStationary(GroupsStationary):
monsters = Groups.monsters[:3]
modifiers = Groups.modifiers[:4] |
@dataclass
class DataTrainingArguments:
source_lang: str = field(default=None, metadata={'help': 'Source language id for translation.'})
target_lang: str = field(default=None, metadata={'help': 'Target language id for translation.'})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'}) |
def replace_attr(obj, name: str, value):
torch_attr = getattr(obj, name)
setattr(obj, name, value)
attrs.append((obj, name, torch_attr)) |
class GymEnvironment(BaseEnvironment):
def __init__(self, name: str, **kwargs):
import gym
self.name = name
self.environmet = gym.make(name)
self.action_space = self.environmet.action_space
self.action_num = self.action_space.n
self.shape = kwargs.get('shape', None)
self.binary = kwargs.get('binary', False)
        self.gray = kwargs.get('gray', True)
self.flatten = kwargs.get('flatten', True)
self.max_prob = kwargs.get('max_prob', 1.0)
self.clip_rewards = kwargs.get('clip_rewards', True)
self.episode_step_count = 0
self.obs = None
self.reward = None
assert (0.0 < self.max_prob <= 1.0), 'Maximum spiking probability must be in (0, 1].'
def step(self, action):
(self.obs, self.reward, self.done, info) = self.environmet.step(action)
if self.clip_rewards:
self.reward = np.sign(self.reward)
        # After encoding, 1D observations have shape [time_step, batch_size, length].
        # 2D observations are mono images and will be flattened to 1D.
        # 3D observations are color images: converted to grayscale, then flattened to 1D.
if ((len(self.obs.shape) >= 3) and self.gray):
self.obs = RGBtoGray(self.obs)
if self.binary:
self.obs = GraytoBinary(self.obs)
if (self.shape is not None):
if (self.shape != self.obs.shape):
self.obs = reshape(self.obs, self.shape)
if ((len(self.obs.shape) >= 2) and self.flatten):
self.obs = self.obs.flatten()
info['gym_obs'] = self.obs
self.episode_step_count += 1
return (self.obs, self.reward, self.done, info)
def reset(self):
self.obs = self.environmet.reset()
if ((len(self.obs.shape) >= 3) and self.gray):
self.obs = RGBtoGray(self.obs)
if self.binary:
self.obs = GraytoBinary(self.obs)
if (self.shape is not None):
self.shape = tuple(self.shape)
if (self.shape != self.obs.shape):
self.obs = reshape(self.obs, self.shape)
if ((len(self.obs.shape) >= 2) and self.flatten):
self.obs = self.obs.flatten()
self.episode_step_count = 0
return self.obs
def render(self, mode):
return self.environmet.render(mode)
def seed(self, seed):
self.environmet.seed(seed)
def close(self):
self.environmet.close() |
class FeedWrapper(object):
def __init__(self, feed, **kwargs):
assert isinstance(feed, torch.utils.data.DataLoader)
(self.feed, self.kwargs) = (feed, kwargs)
def __len__(self):
return len(self.feed)
def __iter__(self):
if (not self.kwargs):
(yield from iter(self.feed))
else:
for batch in iter(self.feed):
(yield tuple((b.to(**self.kwargs) for b in batch))) |
class LogConfusionMatrix(Callback):
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
self.ready = True
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if self.ready:
self.preds.append(outputs['preds'])
self.targets.append(outputs['targets'])
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
plt.figure(figsize=(14, 8))
sn.set(font_scale=1.4)
sn.heatmap(confusion_matrix, annot=True, annot_kws={'size': 8}, fmt='g')
experiment.log({f'confusion_matrix/{experiment.name}': wandb.Image(plt)}, commit=False)
plt.clf()
self.preds.clear()
self.targets.clear() |
class TFXLNetForMultipleChoice():
def __init__(self, *args, **kwargs):
requires_tf(self)
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_tf(cls) |
def apply_spectral_norm(m):
from torch.nn.utils import spectral_norm
for layer in m.modules():
if isinstance(layer, nn.Conv2d):
spectral_norm(layer)
elif isinstance(layer, nn.Linear):
spectral_norm(layer)
elif isinstance(layer, nn.Embedding):
spectral_norm(layer) |
def loadMappableLayers(path):
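    # Deserialize all pickled layer partitions in parallel, re-link each layer's
    # postLayer name to the actual Layer object, locate the head of the chain (the
    # layer never referenced as a successor), and return the layers in chain order.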
path = os.path.join(path, 'compiled_partitions')
if (not os.path.exists(path)):
raise FileNotFoundError
filenames = [f for f in os.listdir(path) if (os.path.splitext(f)[1] == '.pickle')]
layers = {}
layerNames = []
with ThreadPoolExecutor() as executor:
futures = [executor.submit(deserializeLayer, path, filename) for filename in filenames]
for future in futures:
layer = future.result()
layers[layer.id] = layer
layerNames.append(layer.id)
if (len(layers) == 0):
raise FileNotFoundError
for layer in layers.values():
postLayerName = layer.postLayer
if isinstance(postLayerName, Layer):
continue
assert isinstance(postLayerName, str)
layer.postLayer = layers[postLayerName]
layerNames.remove(postLayerName)
outLayers = []
assert (len(layerNames) == 1)
postLayer = layers[layerNames[0]]
while True:
outLayers.append(postLayer)
postLayer = postLayer.postLayer
if (postLayer is None):
return outLayers[:(- 1)] |
class EuclideanDistance(Distance):
    def get_distance(self, list1: list, list2: list):
return distance.euclidean(list1, list2) |
class WorldRegistry():
_world_classes = {}
    @classmethod
    def get(cls, world_type: str) -> World:
try:
return cls._world_classes[world_type]
except KeyError:
raise ValueError(f'unknown world type for: {world_type}')
    @classmethod
    def register(cls, world_type: str):
def inner_wrapper(wrapped_class):
cls._world_classes[world_type] = wrapped_class
return wrapped_class
return inner_wrapper |
def parse_messages(messages, functions):
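    # Convert OpenAI-style chat messages (plus optional function specs) into a ReAct
    # prompt: fold the tool descriptions into the system text, stitch
    # Thought/Action/Action Input/Observation turns into assistant messages, and
    # return the final user query together with the (user, assistant) history pairs.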
if all(((m.role != 'user') for m in messages)):
raise HTTPException(status_code=400, detail=f'Invalid request: Expecting at least one user message.')
messages = copy.deepcopy(messages)
default_system = 'You are a helpful assistant.'
system = ''
if (messages[0].role == 'system'):
system = messages.pop(0).content.lstrip('\n').rstrip()
if (system == default_system):
system = ''
if functions:
tools_text = []
tools_name_text = []
for func_info in functions:
name = func_info.get('name', '')
name_m = func_info.get('name_for_model', name)
name_h = func_info.get('name_for_human', name)
desc = func_info.get('description', '')
desc_m = func_info.get('description_for_model', desc)
tool = TOOL_DESC.format(name_for_model=name_m, name_for_human=name_h, description_for_model=desc_m, parameters=json.dumps(func_info['parameters'], ensure_ascii=False))
tools_text.append(tool)
tools_name_text.append(name_m)
tools_text = '\n\n'.join(tools_text)
tools_name_text = ', '.join(tools_name_text)
system += ('\n\n' + REACT_INSTRUCTION.format(tools_text=tools_text, tools_name_text=tools_name_text))
system = system.lstrip('\n').rstrip()
dummy_thought = {'en': '\nThought: I now know the final answer.\nFinal answer: ', 'zh': '\nThought: \nFinal answer: '}
_messages = messages
messages = []
for (m_idx, m) in enumerate(_messages):
(role, content, func_call) = (m.role, m.content, m.function_call)
if content:
content = content.lstrip('\n').rstrip()
if (role == 'function'):
if ((len(messages) == 0) or (messages[(- 1)].role != 'assistant')):
raise HTTPException(status_code=400, detail=f'Invalid request: Expecting role assistant before role function.')
            messages[-1].content += f'\nObservation: {content}'
if (m_idx == (len(_messages) - 1)):
messages[(- 1)].content += '\nThought:'
elif (role == 'assistant'):
if (len(messages) == 0):
raise HTTPException(status_code=400, detail=f'Invalid request: Expecting role user before role assistant.')
last_msg = messages[(- 1)].content
last_msg_has_zh = (len(re.findall('[\\u4e00-\\u9fff]+', last_msg)) > 0)
if (func_call is None):
if functions:
content = (dummy_thought[('zh' if last_msg_has_zh else 'en')] + content)
else:
(f_name, f_args) = (func_call['name'], func_call['arguments'])
if (not content):
if last_msg_has_zh:
content = f'Thought: {f_name} API'
else:
content = f'Thought: I can use {f_name}.'
                content = f'\n{content}\nAction: {f_name}\nAction Input: {f_args}'
if (messages[(- 1)].role == 'user'):
messages.append(ChatMessage(role='assistant', content=content.lstrip('\n').rstrip()))
else:
messages[(- 1)].content += content
elif (role == 'user'):
messages.append(ChatMessage(role='user', content=content.lstrip('\n').rstrip()))
else:
raise HTTPException(status_code=400, detail=f'Invalid request: Incorrect role {role}.')
query = _TEXT_COMPLETION_CMD
if (messages[(- 1)].role == 'user'):
query = messages[(- 1)].content
messages = messages[:(- 1)]
if ((len(messages) % 2) != 0):
raise HTTPException(status_code=400, detail='Invalid request')
history = []
for i in range(0, len(messages), 2):
if ((messages[i].role == 'user') and (messages[(i + 1)].role == 'assistant')):
usr_msg = messages[i].content.lstrip('\n').rstrip()
bot_msg = messages[(i + 1)].content.lstrip('\n').rstrip()
if (system and (i == (len(messages) - 2))):
                usr_msg = f'{system}\nQuestion: {usr_msg}'
system = ''
for t in dummy_thought.values():
t = t.lstrip('\n')
if (bot_msg.startswith(t) and ('\nAction: ' in bot_msg)):
bot_msg = bot_msg[len(t):]
history.append([usr_msg, bot_msg])
else:
raise HTTPException(status_code=400, detail='Invalid request: Expecting exactly one user (or function) role before every assistant role.')
if system:
assert (query is not _TEXT_COMPLETION_CMD)
        query = f'{system}\nQuestion: {query}'
return (query, history) |
class ClsAccuracy(EvalMetric):
def __init__(self, allreduce=False, num_replicas=1):
super(ClsAccuracy, self).__init__('ClsAcc', allreduce, num_replicas)
def update(self, outputs):
with torch.no_grad():
cls_logits = outputs['label_logits']
cls_pred = (cls_logits > 0).long()
label = outputs['label'].long()
keep = (label >= 0)
self.sum_metric += float((cls_pred[keep] == label[keep]).sum().item())
self.num_inst += keep.sum().item() |
def InverseBoxCoxL(num_blocks, **kwargs):
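    # Assemble a flow-block spec: one (inverse Box-Cox, affine) pair per block, with
    # parameters either drawn at random or fixed to identity-like defaults, as
    # configured by common_config(kwargs).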
(set_res, addf0, init_random, constraint) = common_config(kwargs)
block_array = []
for nb in range(num_blocks):
if init_random:
(a_aff, b_aff) = numpy.random.randn(2)
init_lam = (numpy.random.randn(1) + 1.0)
else:
(a_aff, b_aff) = (1.0, 0.0)
init_lam = 5.0
init_bc = {'init_lam': init_lam, 'add_init_f0': addf0, 'constraint': constraint}
init_affine = {'init_a': a_aff, 'init_b': b_aff, 'set_restrictions': set_res}
block = [('inverseboxcox', init_bc), ('affine', init_affine)]
block_array.extend(block)
return block_array |
def train_epoch(model, loader, criterion, optimizer, lr_scheduler, epoch, use_cuda=False):
model.train()
running_loss = 0.0
tic = timer()
for (i, (data, mask, pe, lap_pe, degree, labels)) in enumerate(loader):
if (args.warmup is not None):
iteration = ((epoch * len(loader)) + i)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_scheduler(iteration)
if args.lappe:
sign_flip = torch.rand(lap_pe.shape[(- 1)])
sign_flip[(sign_flip >= 0.5)] = 1.0
sign_flip[(sign_flip < 0.5)] = (- 1.0)
lap_pe = (lap_pe * sign_flip.unsqueeze(0))
if use_cuda:
data = data.cuda()
mask = mask.cuda()
if (pe is not None):
pe = pe.cuda()
if (lap_pe is not None):
lap_pe = lap_pe.cuda()
if (degree is not None):
degree = degree.cuda()
labels = labels.cuda()
optimizer.zero_grad()
output = model(data, mask, pe, lap_pe, degree)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += (loss.item() * len(data))
toc = timer()
n_sample = len(loader.dataset)
epoch_loss = (running_loss / n_sample)
print('Train loss: {:.4f} time: {:.2f}s'.format(epoch_loss, (toc - tic)))
return epoch_loss |
def is_mag(arg1: str) -> bool:
arg1_split = arg1.lower().split('x')
if ((len(arg1_split) != 2) or (arg1_split[1] != '')):
return False
try:
mag = float(arg1_split[0])
except ValueError:
return False
return True |
def get_onnx_model_list():
config_mapping = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING
    model_names = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING
onnx_model_types = [model_type for model_type in config_mapping.keys() if has_onnx(model_type)]
onnx_model_names = [model_names[model_type] for model_type in onnx_model_types]
onnx_model_names.sort(key=(lambda x: x.upper()))
return ('\n'.join([f'- {name}' for name in onnx_model_names]) + '\n') |
class TestLayer(ZooTestCase):
def test_embedding(self):
input_data = np.random.randint(1000, size=(32, 10))
zlayer = ZLayer.Embedding(1000, 64, input_shape=(10,))
klayer = KLayer.Embedding(1000, 64, input_length=10)
self.compare_layer(klayer, zlayer, input_data, WeightsConverter.convert_embedding)
def test_batchnormalization(self):
print('Running batch normal test')
K.set_image_dim_ordering('th')
input_data = np.random.random_sample([2, 5, 32, 32])
zlayer = ZLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
klayer = KLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
self.compare_layer(klayer, zlayer, input_data, WeightsConverter.convert_batchnormalization)
K.set_image_dim_ordering('tf')
input_data2 = np.random.random_sample([2, 32, 32, 4])
zlayer = ZLayer.BatchNormalization(axis=(- 1), dim_ordering='tf', input_shape=(32, 32, 4))
klayer = KLayer.BatchNormalization(axis=(- 1), input_shape=(32, 32, 4))
self.compare_layer(klayer, zlayer, input_data2, WeightsConverter.convert_batchnormalization)
K.set_image_dim_ordering('th')
input_data = np.random.random_sample([2, 5])
zlayer = ZLayer.BatchNormalization(axis=1, input_shape=(5,))
klayer = KLayer.BatchNormalization(axis=1, input_shape=(5,))
self.compare_layer(klayer, zlayer, input_data, WeightsConverter.convert_batchnormalization)
def test_merge_sum(self):
z1 = ZLayer.InputLayer(input_shape=(3, 5))
z2 = ZLayer.InputLayer(input_shape=(3, 5))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='sum')
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode='sum')
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_mul(self):
z1 = ZLayer.InputLayer(input_shape=(3, 5))
z2 = ZLayer.InputLayer(input_shape=(3, 5))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='mul')
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode='mul')
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_ave(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 8))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='ave')
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode='ave')
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_max(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 8))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='max')
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode='max')
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_concat(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 11))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='concat')
k1 = KLayer.InputLayer(input_shape=(2, 5, 11))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode='concat')
input_data = [np.random.random([3, 2, 5, 11]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_dot(self):
z1 = ZLayer.InputLayer(input_shape=(4,))
z2 = ZLayer.InputLayer(input_shape=(4,))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='dot')
k1 = KLayer.InputLayer(input_shape=(4,))
k2 = KLayer.InputLayer(input_shape=(4,))
klayer = KLayer.Merge(layers=[k1, k2], mode='dot')
input_data = [np.random.random([2, 4]), np.random.random([2, 4])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_cos(self):
z1 = ZLayer.InputLayer(input_shape=(3,))
z2 = ZLayer.InputLayer(input_shape=(3,))
zlayer = ZLayer.Merge(layers=[z1, z2], mode='cos')
k1 = KLayer.InputLayer(input_shape=(3,))
k2 = KLayer.InputLayer(input_shape=(3,))
klayer = KLayer.Merge(layers=[k1, k2], mode='cos')
input_data = [np.random.random([2, 3]), np.random.random([2, 3])]
self.compare_layer(klayer, zlayer, input_data)
def convert_two_dense(self, kmodel, weights):
return [weights[2].T, weights[3], weights[0].T, weights[1]]
def test_merge_method_sum(self):
zx1 = ZLayer.Input(shape=(8,))
zx2 = ZLayer.Input(shape=(6,))
zy1 = ZLayer.Dense(10)(zx1)
zy2 = ZLayer.Dense(10)(zx2)
zz = ZLayer.merge([zy1, zy2], mode='sum')
zmodel = ZModel([zx1, zx2], zz, name='graph1')
kx1 = KLayer.Input(shape=(8,))
kx2 = KLayer.Input(shape=(6,))
ky1 = KLayer.Dense(10)(kx1)
ky2 = KLayer.Dense(10)(kx2)
kz = kmerge([ky1, ky2], mode='sum')
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 8]), np.random.random([2, 6])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_merge_method_model_concat(self):
zx1 = ZLayer.Input(shape=(4,))
zx2 = ZLayer.Input(shape=(5,))
zy1 = ZLayer.Dense(6, activation='sigmoid')(zx1)
zbranch1 = ZModel(zx1, zy1)(zx1)
zbranch2 = ZLayer.Dense(8)(zx2)
zz = ZLayer.merge([zbranch1, zbranch2], mode='concat')
zmodel = ZModel([zx1, zx2], zz)
kx1 = KLayer.Input(shape=(4,))
kx2 = KLayer.Input(shape=(5,))
ky1 = KLayer.Dense(6, activation='sigmoid')(kx1)
kbranch1 = KModel(kx1, ky1)(kx1)
kbranch2 = KLayer.Dense(8)(kx2)
kz = KLayer.merge([kbranch1, kbranch2], mode='concat')
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 4]), np.random.random([2, 5])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_merge_method_seq_concat(self):
zx1 = ZLayer.Input(shape=(10,))
zx2 = ZLayer.Input(shape=(10,))
zy1 = ZLayer.Dense(12, activation='sigmoid')(zx1)
zbranch1_node = ZModel(zx1, zy1)(zx1)
zbranch2 = ZSequential()
zbranch2.add(ZLayer.Dense(12, input_dim=10))
zbranch2_node = zbranch2(zx2)
zz = ZLayer.merge([zbranch1_node, zbranch2_node], mode='concat')
zmodel = ZModel([zx1, zx2], zz)
kx1 = KLayer.Input(shape=(10,))
kx2 = KLayer.Input(shape=(10,))
ky1 = KLayer.Dense(12, activation='sigmoid')(kx1)
kbranch1_node = KModel(kx1, ky1)(kx1)
kbranch2 = KSequential()
kbranch2.add(KLayer.Dense(12, input_dim=10))
kbranch2_node = kbranch2(kx2)
kz = KLayer.merge([kbranch1_node, kbranch2_node], mode='concat')
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_reshape(self):
a = np.random.random((2, 2, 3, 4))
i1 = ZLayer.Input(shape=(2, 3, 4))
s = ZLayer.Reshape(((- 1), 2, 12))(i1)
m = ZModel(i1, s)
y = m.predict(a, distributed=False)
def test_regularizer(self):
model = ZSequential()
model.add(ZLayer.Dense(16, W_regularizer=regularizers.l2(0.001), activation='relu', input_shape=(10000,)))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
def test_transformer_forward_backward(self):
layer = ZLayer.TransformerLayer.init(vocab=200, hidden_size=128, n_head=4, seq_len=20)
train_token = np.random.randint(20, size=(2, 20))
train_pos = np.zeros((2, 20), dtype=np.int32)
input = [train_token, train_pos]
self.assert_forward_backward(layer, input)
def test_bert_forward_backward(self):
layer = ZLayer.BERT.init(vocab=200, hidden_size=128, n_head=4, seq_len=20, intermediate_size=20)
train_token = np.random.randint(20, size=(2, 20))
token_type_id = np.zeros((2, 20), dtype=np.int32)
train_pos = np.zeros((2, 20), dtype=np.int32)
mask_attention = np.ones((2, 1, 1, 20), dtype=np.int32)
input = [train_token, token_type_id, train_pos, mask_attention]
self.assert_forward_backward(layer, input) |
def plmodel(**kwvars):
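    # Class decorator for hyperparameter search: wraps a PyTorch Lightning module so
    # plain instantiation fills searchable kwargs with defaults sampled from the
    # config space, while _model_build lets the HPO backend instantiate per trial.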
def registered_class(Cls):
objCls = obj(**kwvars)(Cls)
class PLAutoMdl(Cls):
def __init__(self, **kwargs):
self.kwargs = kwargs
self._lazyobj = objCls(**kwargs)
default_config = self._lazyobj.cs.get_default_configuration().get_dictionary()
super_kwargs = copy.deepcopy(self.kwargs)
kwspaces = copy.deepcopy(self._lazyobj.kwspaces)
for (k, v) in super_kwargs.items():
if ((k in kwspaces) and isinstance(kwspaces[k], NestedSpace)):
sub_config = _strip_config_space(default_config, prefix=k)
super_kwargs[k] = kwspaces[k].sample(**sub_config)
elif (k in default_config):
super_kwargs[k] = default_config[k]
super().__init__(**super_kwargs)
self.backend = create_hpo_backend()
def __repr__(self):
return ('PlAutoMdl -- ' + Cls.__name__)
def _model_build(self, trial):
model = self.backend.instantiate(trial, self._lazyobj)
return model
return PLAutoMdl
return registered_class |
class ATONet(nn.Module):
def __init__(self, opt):
super(ATONet, self).__init__()
fn = opt.feature_num
self.an = opt.angular_num
self.an2 = (self.an * self.an)
self.scale = opt.scale
self.fea_conv0 = nn.Conv2d(1, fn, 3, 1, 1, bias=True)
self.fea_resblock = make_layer(ResidualBlock, fn, opt.layer_num[0])
self.pair_conv0 = nn.Conv2d((2 * fn), fn, 3, 1, 1, bias=True)
self.pair_resblock = make_layer(ResidualBlock, fn, opt.layer_num[1])
self.pair_conv1 = nn.Conv2d(fn, fn, 3, 1, 1, bias=True)
self.fusion_view_conv0 = nn.Conv2d(self.an2, fn, 3, 1, 1, bias=True)
self.fusion_view_resblock = make_layer(ResidualBlock, fn, opt.layer_num[2])
self.fusion_view_conv1 = nn.Conv2d(fn, 1, 3, 1, 1, bias=True)
self.fusion_fea_conv0 = nn.Conv2d(fn, fn, 3, 1, 1, bias=True)
self.fusion_fea_resblock = make_layer(ResidualBlock, fn, opt.layer_num[3])
up = []
for _ in range(int(math.log(self.scale, 2))):
up.append(nn.Conv2d(fn, (4 * fn), 3, 1, 1, bias=True))
up.append(nn.PixelShuffle(2))
up.append(nn.ReLU(inplace=True))
self.upsampler = nn.Sequential(*up)
self.HRconv = nn.Conv2d(fn, (fn // 2), 3, 1, 1, bias=True)
self.conv_last = nn.Conv2d((fn // 2), 1, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, lf_lr, ref_ind):
(N, an2, H, W) = lf_lr.size()
ref_view_lr = lf_lr[(torch.arange(N), ref_ind)].clone().view(N, 1, H, W)
lf_fea = self.relu(self.fea_conv0(lf_lr.view((- 1), 1, H, W)))
lf_fea = self.fea_resblock(lf_fea).view(N, an2, (- 1), H, W)
lf_pair_fea = []
ref_fea = lf_fea[(torch.arange(N), ref_ind)].clone()
for i in range(an2):
aux_fea = lf_fea[(torch.arange(N), i)].clone()
pair_fea = torch.cat([ref_fea, aux_fea], 1)
lf_pair_fea.append(pair_fea)
lf_pair_fea = torch.stack(lf_pair_fea, 1)
lf_pair_fea = self.relu(self.pair_conv0(lf_pair_fea.view((N * an2), (- 1), H, W)))
lf_pair_fea = self.pair_resblock(lf_pair_fea)
lf_fea_aligned = self.pair_conv1(lf_pair_fea)
lf_fea_aligned = torch.transpose(lf_fea_aligned.view(N, an2, (- 1), H, W), 1, 2)
ref_fea_fused = self.relu(self.fusion_view_conv0(lf_fea_aligned.view((- 1), an2, H, W)))
ref_fea_fused = self.fusion_view_resblock(ref_fea_fused)
ref_fea_fused = self.relu(self.fusion_view_conv1(ref_fea_fused))
ref_fea_fused = self.relu(self.fusion_fea_conv0(ref_fea_fused.view(N, (- 1), H, W)))
ref_fea_fused = self.fusion_fea_resblock(ref_fea_fused)
ref_fea_hr = self.upsampler(ref_fea_fused)
out = self.relu(self.HRconv(ref_fea_hr))
out = self.conv_last(out)
base = functional.interpolate(ref_view_lr, scale_factor=self.scale, mode='bilinear', align_corners=False)
out += base
return out |
class MyNano(TorchNano):
def train(self):
seed_everything(42)
model = MyPytorchModule()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
        loss_func = torch.nn.CrossEntropyLoss()
(train_loader, val_loader) = create_dataloaders()
(model, optimizer, (train_loader, val_loader)) = self.setup(model, optimizer, train_loader, val_loader)
num_epochs = 5
for epoch in range(num_epochs):
model.train()
(train_loss, num) = (0, 0)
for (data, target) in train_loader:
optimizer.zero_grad()
output = model(data)
                loss = loss_func(output, target)
self.backward(loss)
optimizer.step()
train_loss += loss.sum()
num += 1
print(f'Train Epoch: {epoch}, loss: {(train_loss / num)}') |
def is_torch_compile_available():
if (not is_torch_available()):
return False
import torch
return hasattr(torch, 'compile') |
def get_pyg_dataset(dataset=[], id_tag='jid', target='', neighbor_strategy='', atom_features='', use_canonize='', name='', line_graph='', cutoff=8.0, max_neighbors=12, classification=False, output_dir='.', tmp_name='dataset', use_lattice=False, use_angle=False, data_from='Jarvis', use_save=False, mean_train=None, std_train=None, now=False):
df = pd.DataFrame(dataset)
vals = df[target].values
if ((target == 'shear modulus') or (target == 'bulk modulus')):
val_list = [vals[i].item() for i in range(len(vals))]
vals = val_list
output_dir = (('./saved_data/' + tmp_name) + 'test_graph_angle.pkl')
print('data range', np.max(vals), np.min(vals))
print(output_dir)
if now:
if (not os.path.exists(output_dir)):
graphs = load_pyg_graphs(df, name=name, neighbor_strategy=neighbor_strategy, use_canonize=use_canonize, cutoff=cutoff, max_neighbors=max_neighbors, use_lattice=use_lattice, use_angle=use_angle)
with open(output_dir, 'wb') as pf:
pk.dump(graphs, pf)
print('save graphs to ', output_dir)
else:
print('loading graphs from ', output_dir)
with open(output_dir, 'rb') as pf:
graphs = pk.load(pf)
else:
print('graphs not saved')
graphs = load_pyg_graphs(df, name=name, neighbor_strategy=neighbor_strategy, use_canonize=use_canonize, cutoff=cutoff, max_neighbors=max_neighbors, use_lattice=use_lattice, use_angle=use_angle)
    # standardize targets with training-set statistics unless the caller supplies them
    if (mean_train is None):
        mean_train = np.mean(vals)
        std_train = np.std(vals)
    data = PygStructureDataset(df, graphs, target=target, atom_features=atom_features, line_graph=line_graph, id_tag=id_tag, classification=classification, neighbor_strategy=neighbor_strategy, mean_train=mean_train, std_train=std_train)
return (data, mean_train, std_train) |
class CubicQuad():
    mul = 8
    # declared as staticmethods so the signatures (which lack self) remain valid
    @staticmethod
    def __call__(t: Tensor) -> Tensor:
        y_sup = (0.5 * (t ** 2))
        y_inf = (((1 / 6) * ((t + 0.5).clamp(min=0) ** 3)) - (1 / 24))
        return torch.where((t >= 0.5), y_sup, y_inf)
    @staticmethod
    def tilde(y: Tensor) -> Tensor:
        # the argument name and the first branch are reconstructions: a single identifier
        # (likely a non-ASCII symbol) was dropped from these lines during extraction
        y_sup = y
        y_inf = (torch.sqrt((2 * y)) - 0.5)
        return torch.where((y >= 0.5), y_sup, y_inf) |
def pretty_print(res):
import numpy as np
pres = dict()
for (k, v) in res.items():
if tf.is_tensor(v):
pres[k] = f'{v.shape}, {v.dtype}, {np.average(v.numpy())}'
else:
pres[k] = v
print(pres)
return pres |
def get_entity_spans_finalize(input_sentences, output_sentences, redirections=None):
return_outputs = []
for (input_, output_) in zip(input_sentences, output_sentences):
input_ = (input_.replace('\xa0', ' ') + ' -')
output_ = (output_.replace('\xa0', ' ') + ' -')
entities = []
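        # state machine over characters: 'o' = outside a mention, 'm' = inside a mention span, 'e' = reading the entity label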
status = 'o'
i = 0
j = 0
while ((j < len(output_)) and (i < len(input_))):
if (status == 'o'):
if ((input_[i] == output_[j]) or ((output_[j] in '()') and (input_[i] in '[]{}'))):
i += 1
j += 1
elif (output_[j] == ' '):
j += 1
elif (input_[i] == ' '):
i += 1
elif (output_[j] == '{'):
entities.append([i, 0, ''])
j += 1
status = 'm'
else:
raise RuntimeError
elif (status == 'm'):
if (input_[i] == output_[j]):
i += 1
j += 1
entities[(- 1)][1] += 1
elif (output_[j] == ' '):
j += 1
elif (input_[i] == ' '):
i += 1
elif (output_[j] == '}'):
j += 1
status = 'e'
else:
raise RuntimeError
elif (status == 'e'):
if (output_[j] == '['):
j += 1
elif (output_[j] != ']'):
entities[(- 1)][2] += output_[j]
j += 1
elif (output_[j] == ']'):
entities[(- 1)][2] = entities[(- 1)][2].replace(' ', '_')
if (len(entities[(- 1)][2]) <= 1):
del entities[(- 1)]
elif (entities[(- 1)][2] == 'NIL'):
del entities[(- 1)]
elif ((redirections is not None) and (entities[(- 1)][2] in redirections)):
entities[(- 1)][2] = redirections[entities[(- 1)][2]]
if (len(entities) > 0):
entities[(- 1)] = tuple(entities[(- 1)])
status = 'o'
j += 1
else:
raise RuntimeError
return_outputs.append(entities)
return return_outputs |
def get_vgg(cut_idx=(- 1), vgg_type='pytorch'):
    f = get_vanilla_vgg_features(cut_idx, vgg_type)
    # `opt_content` is a module-level config; drop every module past the deepest layer it requests
    keys = [x for x in f._modules.keys()]
    max_idx = max((keys.index(x) for x in opt_content['layers'].split(',')))
    for k in keys[(max_idx + 1):]:
        f._modules.pop(k)
    return f |
class LinearMapper(object):
def __init__(self, in_bounds, out_bounds):
(self.in_min, in_max) = in_bounds
(self.out_min, out_max) = out_bounds
self.in_range = (in_max - self.in_min)
self.out_range = (out_max - self.out_min)
def convert(self, value):
return ((((value - self.in_min) * self.out_range) / self.in_range) + self.out_min)
def scale(self, value):
return ((value * self.out_range) / self.in_range) |
class Timer(object):
def __init__(self):
self.total_time = 0.0
self.calls = 0
self.start_time = 0.0
self.diff = 0.0
self.average_time = 0.0
def tic(self):
self.start_time = time.time()
def toc(self, average=True):
self.diff = (time.time() - self.start_time)
self.total_time += self.diff
self.calls += 1
self.average_time = (self.total_time / self.calls)
if average:
return self.average_time
else:
return self.diff
def clear(self):
self.total_time = 0.0
self.calls = 0
self.start_time = 0.0
self.diff = 0.0
self.average_time = 0.0 |
class resnet_base(nn.Module):
def __init__(self):
super(resnet_base, self).__init__()
self.base = models.resnet101(pretrained=True)
def forward(self, x):
for (name, module) in self.base._modules.items():
if (name == 'avgpool'):
break
x = module(x)
out = x
return torch.mean(out.view((- 1), out.size(1), (out.size(2) * out.size(3))), 2) |
class ProcessPool(object):
def __init__(self, num_processes: int=None, interval_sec: int=0):
self.num_processes = (num_processes if (num_processes is not None) else os.cpu_count())
self.interval_sec = interval_sec
def map(self, *args, **kwargs):
return self.run(*args, **kwargs)
def run(self, target, dynamic_param_list: List[tuple], static_param: tuple=None, **kwargs):
static_param = (static_param if static_param else tuple())
if ((dynamic_param_list is None) or (len(dynamic_param_list) == 0)):
return None
if (self.num_processes > 0):
res_list = self._run_async_pool(target, dynamic_param_list=dynamic_param_list, static_param=static_param, chunksize=kwargs.get('chunksize'))
else:
res_list = self._run_sync(target, dynamic_param_list=dynamic_param_list, static_param=static_param)
return res_list
def _run_sync(self, target, dynamic_param_list: List[tuple], static_param: tuple):
result_list = list()
for dp in dynamic_param_list:
out = target(*static_param, *dp)
if (out is not None):
result_list.append(out)
return result_list
def _run_async_pool(self, target, dynamic_param_list: List[tuple], static_param: tuple, chunksize: int):
import multiprocessing as mp
import dill
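        # serialize the target callable with dill so lambdas and closures survive the trip into worker processes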
target = dill.dumps(target)
param_list = [(target, *static_param, *dp) for dp in dynamic_param_list]
if (chunksize is None):
chunksize = max((len(param_list) // self.num_processes), 1)
with mp.Pool(self.num_processes) as pool:
out_list = pool.starmap(func=dill_serialized_execute_func, iterable=param_list, chunksize=chunksize)
res_list = []
for res in out_list:
if (res is not None):
res_list.append(res)
return res_list
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass |
def aspect_ratio_abs(im, aspect_ratio):
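    # resize im to the requested aspect ratio while keeping the total pixel area (approximately) unchanged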
(im_h, im_w) = im.shape[:2]
im_area = (im_h * im_w)
im_ar_w = np.sqrt((im_area * aspect_ratio))
im_ar_h = np.sqrt((im_area / aspect_ratio))
assert np.isclose((im_ar_w / im_ar_h), aspect_ratio)
im_ar = cv2.resize(im, dsize=(int(im_ar_w), int(im_ar_h)))
return im_ar |
# decorator reconstructed; the '@register' prefix was lost in extraction
@register('cnn_lnlstm')
def cnn_lnlstm(nlstm=128, **conv_kwargs):
return cnn_lstm(nlstm, layer_norm=True, **conv_kwargs) |
# decorator prefix reconstructed (lost in extraction); assumed to be the maskrcnn-benchmark backbone registry
@registry.BACKBONES.register('R-50-LPF')
def build_resnet_50_antialiased_backbone(cfg):
filter_size = 3
model = resnet_lpf.resnet50(cfg, filter_size=filter_size)
model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
return model |
class BrainDataset(data.Dataset):
def __init__(self, df, transform=None):
self.df = df
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
image = cv2.imread(self.df.iloc[(idx, 0)])
image = (np.array(image) / 255.0)
mask = cv2.imread(self.df.iloc[(idx, 1)], 0)
mask = (np.array(mask) / 255.0)
if (self.transform is not None):
aug = self.transform(image=image, mask=mask)
image = aug['image']
mask = aug['mask']
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image).type(torch.float32)
image = tt.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(image)
mask = np.expand_dims(mask, axis=(- 1)).transpose((2, 0, 1))
mask = torch.from_numpy(mask).type(torch.float32)
return (image, mask) |
def iters_schedule_grid_search(model, config, n_iter=6, betas_range=(1e-06, 0.01), test_batch_size=2, step=1, path_to_store_schedule=None, save_stats_for_grid=True, verbose=True, n_jobs=1):
device = next(model.parameters()).device
if ('cpu' in str(device)):
show_message('WARNING: running grid search on CPU will be slow.')
show_message('Initializing betas grid...', verbose=verbose)
grid = generate_betas_grid(n_iter, betas_range, verbose=verbose)[::step]
show_message('Initializing utils...', verbose=verbose)
mel_fn = MelSpectrogramFixed(sample_rate=config.data_config.sample_rate, n_fft=config.data_config.n_fft, win_length=config.data_config.win_length, hop_length=config.data_config.hop_length, f_min=config.data_config.f_min, f_max=config.data_config.f_max, n_mels=config.data_config.n_mels, window_fn=torch.hann_window).to(device)
dataset = AudioDataset(config, training=True)
idx = np.random.choice(range(len(dataset)), size=test_batch_size, replace=False)
test_batch = torch.stack([dataset[i] for i in idx]).to(device)
test_mels = mel_fn(test_batch)
show_message('Starting search...', verbose=verbose)
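    # evaluate every candidate betas schedule concurrently in a thread pool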
with ThreadPool(processes=n_jobs) as pool:
process_fn = partial(_betas_estimate, model=model, mels=test_mels, mel_fn=mel_fn)
stats = list(tqdm(pool.imap(process_fn, grid), total=len(grid)))
stats = {i: (grid[i], stats[i]) for i in range(len(stats))}
if save_stats_for_grid:
tmp_stats_path = f'{os.path.dirname(path_to_store_schedule)}/{n_iter}stats.pt'
show_message(f'Saving tmp stats for whole grid to `{tmp_stats_path}`...', verbose=verbose)
torch.save(stats, tmp_stats_path)
    best_idx = np.argmin([value for (_, value) in stats.values()])
best_betas = grid[best_idx]
    if (path_to_store_schedule is not None):
show_message(f'Saving best schedule to `{path_to_store_schedule}`...', verbose=verbose)
torch.save(best_betas, path_to_store_schedule)
return (best_betas, stats) |
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='PyTorch Embedding Learning')
parser.add_argument('--dataset-dir', default='/tmp/fmnist/', help='FashionMNIST dataset directory path')
parser.add_argument('-p', '--labels-per-batch', default=8, type=int, help='Number of unique labels/classes per batch')
parser.add_argument('-k', '--samples-per-label', default=8, type=int, help='Number of samples per label in a batch')
parser.add_argument('--eval-batch-size', default=512, type=int)
parser.add_argument('--epochs', default=10, type=int, metavar='N', help='Number of training epochs to run')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='Number of data loading workers')
parser.add_argument('--lr', default=0.0001, type=float, help='Learning rate')
parser.add_argument('--margin', default=0.2, type=float, help='Triplet loss margin')
parser.add_argument('--print-freq', default=20, type=int, help='Print frequency')
parser.add_argument('--save-dir', default='.', help='Model save directory')
parser.add_argument('--resume', default='', help='Resume from checkpoint')
return parser.parse_args() |
def sepreresnet56_cifar100(num_classes=100, **kwargs):
return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name='sepreresnet56_cifar100', **kwargs) |
class AbsCriterion(Criterion):
def __init__(self, size_average=True, bigdl_type='float'):
super(AbsCriterion, self).__init__(None, bigdl_type, size_average) |
class TrackEpochCallback(LearnerCallback):
_order = (- 20)
def __init__(self, learn: Learner, name: str='epoch', epoch_offset: int=None):
super().__init__(learn)
learn._test_writeable_path()
self.path = ((learn.path / learn.model_dir) / name)
        if (epoch_offset is None):
            if os.path.isfile(self.path):
                with self.path.open('r') as f:
                    try:
                        self.start_epoch = (int(f.read()) + 1)
                    except ValueError:
                        self.start_epoch = 0
            else:
                self.start_epoch = 0
        else:
            # use the explicit offset instead of the persisted epoch counter
            self.start_epoch = epoch_offset
def on_train_begin(self, **kwargs: Any):
return {'epoch': self.start_epoch}
def on_epoch_end(self, epoch, **kwargs: Any) -> None:
with self.path.open('w') as f:
f.write(f'{epoch}')
def restart(self):
os.remove(self.path) |
class autoencoder_vgg5(nn.Module):
def __init__(self):
super(autoencoder_vgg5, self).__init__()
self.encoder = models.vgg19(pretrained=True).features
self.decoder = nn.Sequential(nn.Conv2d(512, 512, 3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(512, 512, 3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(512, 256, 3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(256, 128, 3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(128, 64, 3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(64, 3, 3, stride=1, padding=1), nn.Sigmoid())
def forward(self, x):
feat = []
feat_out = []
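        # indices 3, 8, 17, 26 and 35 of vgg19.features are the relu1_2, relu2_2, relu3_4, relu4_4 and relu5_4 activations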
for i in range(len(self.encoder)):
x = self.encoder[i](x)
if (i == 3):
feat.append(x)
elif (i == 8):
feat.append(x)
elif (i == 17):
feat.append(x)
elif (i == 26):
feat.append(x)
elif (i == 35):
feat.append(x)
for i in range(len(self.decoder)):
x = self.decoder[i](x)
if (i == 1):
(_, _, h, w) = feat[4].shape
x = nn.UpsamplingBilinear2d(size=(h, w))(x)
x = (x + feat[4])
elif (i == 3):
(_, _, h, w) = feat[3].shape
x = nn.UpsamplingBilinear2d(size=(h, w))(x)
x = (x + feat[3])
elif (i == 5):
(_, _, h, w) = feat[2].shape
x = nn.UpsamplingBilinear2d(size=(h, w))(x)
x = (x + feat[2])
feat_out.append(x)
elif (i == 7):
(_, _, h, w) = feat[1].shape
x = nn.UpsamplingBilinear2d(size=(h, w))(x)
x = (x + feat[1])
feat_out.append(x)
elif (i == 9):
(_, _, h, w) = feat[0].shape
x = nn.UpsamplingBilinear2d(size=(h, w))(x)
x = (x + feat[0])
feat_out.append(x)
return (feat_out, x) |
def main(_):
seed = 8964
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
print(' Arguments ')
for key in FLAGS.__flags.keys():
print('{}: {}'.format(key, getattr(FLAGS, key)))
print(' Arguments ')
if (not os.path.exists(FLAGS.checkpoint_dir)):
os.makedirs(FLAGS.checkpoint_dir)
path_arg_log = os.path.join(FLAGS.checkpoint_dir, 'flag.txt')
with open(path_arg_log, 'w') as f:
for key in FLAGS.__flags.keys():
v = '{} : {}'.format(key, getattr(FLAGS, key))
f.write(v)
f.write('\n')
system = MGC_TRAIN(FLAGS)
system.train(FLAGS) |
def count_parameters(model):
table = PrettyTable(['Modules', 'Parameters'])
total_params = 0
for (name, parameter) in model.named_parameters():
if (not parameter.requires_grad):
continue
param = parameter.numel()
table.add_row([name, param])
total_params += param
print(table)
print(f'Total Trainable Params: {total_params}')
return total_params |
def save_config(config, logdir=None):
if logdir:
with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.FastGFile(config_path, 'w') as file_:
yaml.dump(config, file_, default_flow_style=False)
else:
message = 'Start a new run without storing summaries and checkpoints since no logging directory was specified.'
tf.logging.info(message)
return config |
def add_vae_arguments(parser):
for f in dataclasses.fields(Hyperparams):
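        # booleans that default to False become store_true flags; every other field keeps its declared type and default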
kwargs = (dict(action='store_true') if ((f.type is bool) and (not f.default)) else dict(default=f.default, type=f.type))
parser.add_argument(f'--{f.name}', **kwargs, **f.metadata)
return parser |
def validate(args, model, criterion, device, val_dataloader, writer, epoch):
torch.set_grad_enabled(False)
model.eval()
total_loss = 0.0
correct = 0
for (i, data) in enumerate(val_dataloader):
(clips, idxs) = data
inputs = clips.to(device)
targets = idxs.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
total_loss += loss.item()
pts = torch.argmax(outputs, dim=1)
correct += torch.sum((targets == pts)).item()
avg_loss = (total_loss / len(val_dataloader))
avg_acc = (correct / len(val_dataloader.dataset))
writer.add_scalar('val/CrossEntropyLoss', avg_loss, epoch)
writer.add_scalar('val/Accuracy', avg_acc, epoch)
print('[VAL] loss: {:.3f}, acc: {:.3f}'.format(avg_loss, avg_acc))
return avg_loss |
def read_csv(path):
    with open(path) as f1:
        data = f1.readlines()[1:]
    data = [line.strip().split(', ') for line in data]
    return data |
def IPOT_distance_torch_batch_uniform_T(C, bs, n, m, iteration=50):
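    # despite the name, this returns the IPOT transport plan T (uniform marginals) rather than a scalar distance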
C = C.float().cuda()
T = IPOT_torch_batch_uniform(C, bs, n, m, iteration=iteration)
return T |
# decorator prefix reconstructed (lost in extraction); assumed to be flax.struct.dataclass
@flax.struct.dataclass
class FlaxCausalLMOutputWithCrossAttentions(ModelOutput):
logits: jnp.ndarray = None
past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
hidden_states: Optional[Tuple[jnp.ndarray]] = None
attentions: Optional[Tuple[jnp.ndarray]] = None
cross_attentions: Optional[Tuple[jnp.ndarray]] = None |
def double_newton_at_series(pols, lser, idx=1, maxdeg=4, nbr=4, checkin=True, vrblvl=0):
nbsym = number_of_symbols(pols)
if (vrblvl > 0):
print('the polynomials :')
for pol in pols:
print(pol)
print('Number of variables :', nbsym)
if checkin:
if (not checkin_newton_at_series(nbsym, lser, idx)):
return lser
set_double_system(1, lser)
initialize_double_syspool(1, vrblvl)
copy_to_double_syspool(1, vrblvl)
set_double_system(nbsym, pols)
phc = get_phcfun()
apars = int4a2nbr([idx, maxdeg, nbr], (vrblvl > 0))
bbb = pointer(c_int32(vrblvl))
ccc = pointer(c_double(0.0))
vrb = c_int32(vrblvl)
if (vrblvl > 0):
print('-> double_newton_at_series calls phc ...')
print('apars =', nbr2int4a(apars))
retval = phc(694, apars, bbb, ccc, vrb)
fail = (retval > 0)
size = ((- 1) if fail else size_double_syspool(vrblvl))
if (vrblvl > 0):
if (size == (- 1)):
print("An error occurred in the execution of Newton's method.")
else:
print('Computed one series solution.')
copy_from_double_syspool(1, vrblvl)
result = get_double_system()
result = substitute_symbol(result, idx)
clear_double_syspool(vrblvl)
return result |
class Hamburger(nn.Module):
def __init__(self, ham_channels=512, ham_kwargs=dict(), norm_cfg=None):
super().__init__()
self.ham_in = ConvModule(ham_channels, ham_channels, 1, norm_cfg=None, act_cfg=None)
self.ham = NMF2D(ham_kwargs)
self.ham_out = ConvModule(ham_channels, ham_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
def forward(self, x):
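        # 1x1 in-projection, matrix-decomposition (NMF) context modeling, 1x1 out-projection, then residual fusion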
enjoy = self.ham_in(x)
enjoy = F.relu(enjoy, inplace=True)
enjoy = self.ham(enjoy)
enjoy = self.ham_out(enjoy)
ham = F.relu((x + enjoy), inplace=True)
return ham |
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
raise IOError(('fpath has unknown extension: %s' % fpath))
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member) |
def try_once(func):
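    # decorator: run func once, logging and swallowing any exception (the wrapper returns None on failure)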
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.info('ignore error \n{}'.format(str(e)))
print_trace()
return func_wrapper |
class ImageFolder(data.Dataset):
def __init__(self, imgDir, dataTransform, imgSize, isTrain):
self.imgDir = imgDir
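        # probe for '1_3.jpg' to decide whether each sample index has two or three frames on disk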
sample3 = os.path.join(self.imgDir, '1_3.jpg')
self.cycle = (3 if os.path.exists(sample3) else 2)
self.nbImg = (len(os.listdir(self.imgDir)) // self.cycle)
self.isTrain = isTrain
self.dataTransform = dataTransform
self.imgSize = imgSize
def __getitem__(self, index):
idx = np.random.choice(range(1, (self.cycle + 1)), 2, replace=False)
path1 = os.path.join(self.imgDir, '{:d}_{:d}.jpg'.format(index, idx[0]))
path2 = os.path.join(self.imgDir, '{:d}_{:d}.jpg'.format(index, idx[1]))
(I1, I2) = (LoadImg(path1), LoadImg(path2))
if self.isTrain:
(I1, I2) = self.dataTransform(I1, I2, self.imgSize)
else:
(I1, I2) = (self.dataTransform(I1), self.dataTransform(I2))
return {'I1': I1, 'I2': I2}
def __len__(self):
return self.nbImg |
class RMBitbrain(RM):
def __init__(self, size_list, read_list, write_list):
super().__init__()
self.size_list = size_list
self.read_list = read_list
self.write_list = write_list
def ram(self):
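        # index into the Bitbrain trace lists by elapsed intervals, wrapping around each list's length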
size_list_count = ((self.container.env.interval - self.container.startAt) % len(self.size_list))
read_list_count = ((self.container.env.interval - self.container.startAt) % len(self.read_list))
write_list_count = ((self.container.env.interval - self.container.startAt) % len(self.write_list))
return (self.size_list[size_list_count], self.read_list[read_list_count], self.write_list[write_list_count]) |
def count_accuracy(B_bin_true, B_bin_est, check_input=False):
if check_input:
if (B_bin_est == (- 1)).any():
if (not (((B_bin_est == 0) | (B_bin_est == 1)) | (B_bin_est == (- 1))).all()):
raise ValueError('B_bin_est should take value in {0, 1, -1}.')
if ((B_bin_est == (- 1)) & (B_bin_est.T == (- 1))).any():
raise ValueError('Undirected edge should only appear once.')
else:
if (not ((B_bin_est == 0) | (B_bin_est == 1)).all()):
raise ValueError('B_bin_est should take value in {0, 1}.')
if (not is_dag(B_bin_est)):
raise ValueError('B_bin_est should be a DAG.')
d = B_bin_true.shape[0]
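    # entries equal to -1 in B_bin_est mark undirected edges; they match the ground truth in either orientation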
pred_und = np.flatnonzero((B_bin_est == (- 1)))
pred = np.flatnonzero((B_bin_est == 1))
cond = np.flatnonzero(B_bin_true)
cond_reversed = np.flatnonzero(B_bin_true.T)
cond_skeleton = np.concatenate([cond, cond_reversed])
true_pos = np.intersect1d(pred, cond, assume_unique=True)
true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)
true_pos = np.concatenate([true_pos, true_pos_und])
false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)
false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)
false_pos = np.concatenate([false_pos, false_pos_und])
extra = np.setdiff1d(pred, cond, assume_unique=True)
reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)
pred_size = (len(pred) + len(pred_und))
cond_neg_size = (((0.5 * d) * (d - 1)) - len(cond))
fdr = (float((len(reverse) + len(false_pos))) / max(pred_size, 1))
tpr = (float(len(true_pos)) / max(len(cond), 1))
fpr = (float((len(reverse) + len(false_pos))) / max(cond_neg_size, 1))
pred_lower = np.flatnonzero(np.tril((B_bin_est + B_bin_est.T)))
cond_lower = np.flatnonzero(np.tril((B_bin_true + B_bin_true.T)))
extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)
missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)
shd = ((len(extra_lower) + len(missing_lower)) + len(reverse))
return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'shd': shd, 'pred_size': pred_size} |
def _get_weight_shape(w):
with misc.suppress_tracer_warnings():
shape = [int(sz) for sz in w.shape]
misc.assert_shape(w, shape)
return shape |
def get_segment_waveform(path_or_fp, offset, n_frames, normalization=True):
if isinstance(path_or_fp, str):
ext = os.path.splitext(os.path.basename(path_or_fp))[1]
if (ext not in {'.flac', '.wav'}):
raise ValueError(f'Unsupported audio format: {ext}')
(waveform, sample_rate) = torchaudio.load(path_or_fp, frame_offset=offset, num_frames=n_frames)
if (not normalization):
waveform *= (2 ** 15)
return (waveform, sample_rate) |
def expect_token(expected_item, seen_item, what_parsing):
if (seen_item != expected_item):
raise RuntimeError("parsing {0}, expected '{1}' but got '{2}'".format(what_parsing, expected_item, seen_item)) |
def plot_7_day_prediction_errors(metric, all_errors, all_dates):
plt.figure(figsize=(4, 3), dpi=200)
ax = plt.subplot(111)
for method in ['linear', 'advanced_shared_model', 'ensemble']:
if (method != 'ensemble'):
ax.plot(all_errors[method][7][::(- 1)], label=label_name[method], color=color_name[method], linestyle=ls_name[method], linewidth=1.5, alpha=0.8)
else:
ax.plot(all_errors[method][7][::(- 1)], label=label_name[method], color=color_name[method], linestyle=ls_name[method], linewidth=1.5, alpha=1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if (metric == 'mae'):
plt.ylabel('Raw scale MAE', fontsize=15)
plt.yticks([20, 40, 60], fontsize=12)
elif (metric == 'mape'):
plt.ylabel('MAPE', fontsize=15)
plt.yticks([50, 100, 150], fontsize=12)
elif (metric == 'sqrt'):
plt.ylabel('Square root scale MAE', fontsize=15)
plt.yticks([1, 2, 3], fontsize=12)
if (metric == 'mape'):
plt.legend(fontsize=12)
plt.xticks(fontsize=12)
plt.xticks(range(0, error_num_days, 14), all_dates[::(- 1)][range(0, error_num_days, 14)])
plt.xlabel('Date', fontsize=15)
plt.tight_layout()
filename = os.path.join(result_dir, f'over_time_{metric}_jun21.pdf')
plt.savefig(filename) |
class FSMTForConditionalGeneration():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def vgg(x, is_training, config, num_filters=32):
    print(('Input: ' + str(x.get_shape())))
input_layer = tf.expand_dims(x, 3)
bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)
conv1 = tf.compat.v1.layers.conv2d(inputs=bn_input, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
pool1 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv1, pool_size=[4, 1], strides=[2, 2])
    print(('pool1: ' + str(pool1.get_shape())))
do_pool1 = tf.compat.v1.layers.dropout(pool1, rate=0.25, training=is_training)
conv2 = tf.compat.v1.layers.conv2d(inputs=do_pool1, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
pool2 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv2, pool_size=[2, 2], strides=[2, 2])
    print(('pool2: ' + str(pool2.get_shape())))
do_pool2 = tf.compat.v1.layers.dropout(pool2, rate=0.25, training=is_training)
conv3 = tf.compat.v1.layers.conv2d(inputs=do_pool2, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
pool3 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv3, pool_size=[2, 2], strides=[2, 2])
    print(('pool3: ' + str(pool3.get_shape())))
    do_pool3 = tf.compat.v1.layers.dropout(pool3, rate=0.25, training=is_training)
    conv4 = tf.compat.v1.layers.conv2d(inputs=do_pool3, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='4CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
bn_conv4 = tf.compat.v1.layers.batch_normalization(conv4, training=is_training)
pool4 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv4, pool_size=[2, 2], strides=[2, 2])
    print(('pool4: ' + str(pool4.get_shape())))
do_pool4 = tf.compat.v1.layers.dropout(pool4, rate=0.25, training=is_training)
conv5 = tf.compat.v1.layers.conv2d(inputs=do_pool4, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='5CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
bn_conv5 = tf.compat.v1.layers.batch_normalization(conv5, training=is_training)
    pool5 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv5, pool_size=[4, 4], strides=[4, 4])
    print(('pool5: ' + str(pool5.get_shape())))
flat_pool5 = tf.contrib.layers.flatten(pool5)
do_pool5 = tf.compat.v1.layers.dropout(flat_pool5, rate=0.5, training=is_training)
output = tf.compat.v1.layers.dense(inputs=do_pool5, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print(('output: ' + str(output.get_shape())))
return output |
def test_classifier(P, model, loader, criterion, steps, logger=None):
metric_logger = MetricLogger(delimiter=' ')
if (logger is None):
log_ = print
else:
log_ = logger.log
mode = model.training
model.eval()
acc = 0.0
for (n, batch) in enumerate(loader):
if ((n * P.test_batch_size) > P.max_test_task):
break
(train_inputs, train_targets) = batch['train']
train_inputs = train_inputs.to(device, non_blocking=True)
train_targets = train_targets.to(device, non_blocking=True)
(test_inputs, test_targets) = batch['test']
test_inputs = test_inputs.to(device, non_blocking=True)
test_targets = test_targets.to(device, non_blocking=True)
for (task_idx, (train_input, train_target, test_input, test_target)) in enumerate(zip(train_inputs, train_targets, test_inputs, test_targets)):
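            # adapt the parameters on each task's support set, then score the adapted model on its query set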
(params, loss_train) = maml_inner_adapt(model, criterion, train_input, train_target, P.inner_lr, P.inner_steps_test, first_order=True)
with torch.no_grad():
outputs_test = model(test_input, params=params)
loss = criterion(outputs_test, test_target)
if (not P.regression):
acc = accuracy(outputs_test, test_target, topk=(1,))[0].item()
elif (P.dataset == 'shapenet'):
acc = (- degree_loss(outputs_test, test_target).item())
elif (P.dataset == 'pose'):
acc = (- loss.item())
else:
raise NotImplementedError()
metric_logger.meters['loss_train'].update(loss_train.item())
metric_logger.meters['loss'].update(loss.item())
metric_logger.meters['acc'].update(acc)
metric_logger.synchronize_between_processes()
log_((' * [ %.3f] [LossOut %.3f] [LossIn %.3f]' % (metric_logger.acc.global_avg, metric_logger.loss.global_avg, metric_logger.loss_train.global_avg)))
if (logger is not None):
logger.scalar_summary('eval/acc', metric_logger.acc.global_avg, steps)
logger.scalar_summary('eval/loss_test', metric_logger.loss.global_avg, steps)
logger.scalar_summary('eval/loss_train', metric_logger.loss_train.global_avg, steps)
model.train(mode)
return metric_logger.acc.global_avg |
class Actor(nn.Module):
def __init__(self, in_dim, out_dim, hidden_size, layers, activation=nn.ReLU):
super().__init__()
self.feedforward_model = build_model(in_dim, out_dim, layers, hidden_size, activation)
def forward(self, state_features):
x = self.feedforward_model(state_features)
action_dist = OneHotCategorical(logits=x)
action = action_dist.sample()
return (action, x) |
def num_lines(filename):
try:
p = subprocess.check_output(['wc', '-l', filename])
return int(p.decode().strip().split()[0])
except subprocess.CalledProcessError as cpe:
quit(cpe.returncode) |
def test_link_func_mlogit():
predictions = np.array([[0.25, 0.625, 0.125], [0.5, 0.25, 1.25], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, np.nan, 0.0]])
expected = np.array([[(- 0.9162907), 0.0, (- 1.6094379)], [(- 0.9162907), (- 1.6094379), 0.0], [(- np.inf), (- np.inf), 0], [0, (- np.inf), (- np.inf)], [np.nan, np.nan, np.nan]])
result = link_func(predictions, 'mlogit')
np.testing.assert_almost_equal(result, expected) |
def pitching_stats_bref(season: Optional[int]=None) -> pd.DataFrame:
if (season is None):
season = most_recent_season()
str_season = str(season)
start_dt = (str_season + '-03-01')
end_dt = (str_season + '-11-30')
return pitching_stats_range(start_dt, end_dt) |