code stringlengths 101 5.91M |
|---|
class CNNFashion_Mnist(nn.Module):
    """Two-stage CNN classifier for 28x28 single-channel Fashion-MNIST images.

    Each conv stage halves the spatial resolution via max pooling
    (28 -> 14 -> 7), so the classifier sees a 7*7*32 feature vector.
    """

    def __init__(self, args):
        super(CNNFashion_Mnist, self).__init__()
        # Stage 1: 1 -> 16 channels, same-padded 5x5 conv, 2x2 pooling.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Stage 2: 16 -> 32 channels, same recipe.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Linear classifier over the flattened 7x7x32 feature map.
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        hidden = self.layer2(self.layer1(x))
        flat = hidden.view(hidden.size(0), -1)
        return F.log_softmax(self.fc(flat), dim=1)
class RandomIdentitySampler_alignedreid(Sampler):
    """Sampler that yields `num_instances` example indices per identity.

    The data source must be a sequence of (_, pid, _) tuples. Each epoch
    visits every identity once, in random order, emitting `num_instances`
    indices for it (drawn with replacement when the identity has fewer
    examples than requested).
    """

    def __init__(self, data_source, num_instances):
        self.data_source = data_source
        self.num_instances = num_instances
        # Group dataset positions by person id.
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_identities = len(self.pids)

    def __iter__(self):
        ret = []
        # Visit identities in a fresh random order every epoch.
        for identity in torch.randperm(self.num_identities):
            candidates = self.index_dic[self.pids[identity]]
            # Sample with replacement only when there are too few examples.
            need_replacement = len(candidates) < self.num_instances
            chosen = np.random.choice(candidates, size=self.num_instances, replace=need_replacement)
            ret.extend(chosen)
        return iter(ret)

    def __len__(self):
        return self.num_identities * self.num_instances
def main():
    """Spawn one worker process per requested JPEG quality level and wait."""
    print('DeJPEG generator')
    # Quality levels arrive as a comma-separated argv entry.
    jpeg_levels = [int(x) for x in sys.argv[kJPEG_LEVELS].split(',')]
    input_fld = sys.argv[kINPUT_FLD]
    workers = []
    # Launch every quality level in parallel.
    for quality in jpeg_levels:
        worker = Process(target=genDEJPEG, args=(quality, input_fld))
        worker.start()
        workers.append(worker)
    # Wait until all of them have finished.
    for worker in workers:
        worker.join()
    print('Done')
def zero_module(module):
    """Zero out every parameter of `module` in place and return it."""
    for param in module.parameters():
        # detach() so the in-place fill is not recorded by autograd.
        param.detach().zero_()
    return module
def massivesumm_extract_from_url(urls):
    """Download the archive for `urls` and extract the contained dataset."""
    return extract(run(urls))
class DebugOption(ExplicitEnum):
    """Enumerated debug modes accepted by the training arguments."""
    # Detect numerical underflow/overflow during training.
    UNDERFLOW_OVERFLOW = 'underflow_overflow'
    # Emit TPU debug metrics.
    TPU_METRICS_DEBUG = 'tpu_metrics_debug'
def load_lvis_json(json_file, image_root, dataset_name=None):
    """Load an LVIS-format annotation JSON into a list of per-image dicts.

    Args:
        json_file: Path to the LVIS annotation file; resolved through
            PathManager first, so it may be a remote/virtual path.
        image_root: Directory containing the images.
        dataset_name: Optional registered dataset name; when given, its
            instance metadata is looked up and stored in MetadataCatalog.

    Returns:
        A list of dicts, one per image, each carrying file/shape info and an
        'annotations' list of {bbox, bbox_mode, category_id, segmentation}.
    """
    from lvis import LVIS
    json_file = PathManager.get_local_path(json_file)
    timer = Timer()
    lvis_api = LVIS(json_file)
    # Only log when parsing was noticeably slow.
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    if (dataset_name is not None):
        meta = get_lvis_instances_meta(dataset_name)
        MetadataCatalog.get(dataset_name).set(**meta)
    # Sort image ids for deterministic ordering across runs.
    img_ids = sorted(list(lvis_api.imgs.keys()))
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
    # Sanity check: an annotation id must not appear twice.
    ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
    assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info('Loaded {} images in the LVIS format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        file_name = img_dict['file_name']
        if img_dict['file_name'].startswith('COCO'):
            # File names with a 'COCO...' prefix keep only their last 16
            # characters (the numeric id plus extension).
            file_name = file_name[(- 16):]
        record['file_name'] = os.path.join(image_root, file_name)
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        record['not_exhaustive_category_ids'] = img_dict.get('not_exhaustive_category_ids', [])
        record['neg_category_ids'] = img_dict.get('neg_category_ids', [])
        image_id = record['image_id'] = img_dict['id']
        objs = []
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            obj = {'bbox': anno['bbox'], 'bbox_mode': BoxMode.XYWH_ABS}
            # LVIS category ids are 1-based; convert to 0-based.
            obj['category_id'] = (anno['category_id'] - 1)
            segm = anno['segmentation']
            # Each polygon must have an even coordinate count and >= 3 points.
            valid_segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
            assert (len(segm) == len(valid_segm)), 'Annotation contains an invalid polygon with < 3 points'
            assert (len(segm) > 0)
            obj['segmentation'] = segm
            objs.append(obj)
        record['annotations'] = objs
        dataset_dicts.append(record)
    return dataset_dicts
def get_elemental_ref_entries(entries: Sequence[EntryLike], verbose: bool=True) -> dict[(str, Entry)]:
    """Find the lowest-energy elemental reference entry for every element.

    Args:
        entries: Computed entries (dicts are converted via PDEntry.from_dict).
        verbose: If True, print progress and show a tqdm bar.

    Returns:
        Mapping from element symbol to its minimum-energy elemental entry.

    Raises:
        ValueError: If some element has no elemental entry (fewer references
            than dimensions) or if extra references were found (more
            references than dimensions).
    """
    entries = [(PDEntry.from_dict(e) if isinstance(e, dict) else e) for e in entries]
    elements = {elems for entry in entries for elems in entry.composition.elements}
    dim = len(elements)
    if verbose:
        print(f'Sorting {len(entries)} entries with {dim} dimensions...', flush=True)
    # groupby only groups consecutive items, so sort by the same key first.
    entries = sorted(entries, key=(lambda e: e.composition.reduced_composition))
    elemental_ref_entries = {}
    for (composition, entry_group) in tqdm(itertools.groupby(entries, key=(lambda e: e.composition.reduced_composition)), disable=(not verbose), desc='Finding elemental reference entries'):
        if composition.is_element:
            # Only compute the minimum for elemental groups (the group
            # iterator is consumed either way by groupby's advance).
            min_entry = min(entry_group, key=(lambda e: e.energy_per_atom))
            elem_symb = str(composition.elements[0])
            elemental_ref_entries[elem_symb] = min_entry
    # BUG FIX: the two error branches were swapped (len > dim reported
    # "missing", len < dim reported "extra") and the set arithmetic compared
    # Element objects against str keys, so the reported sets were wrong.
    element_symbols = {str(el) for el in elements}
    if (len(elemental_ref_entries) < dim):
        missing = (element_symbols - set(elemental_ref_entries))
        raise ValueError(f'Some terminal entries are missing = {missing!r}')
    if (len(elemental_ref_entries) > dim):
        extra = (set(elemental_ref_entries) - element_symbols)
        raise ValueError(f'There are more terminal element entries than dimensions: {extra}')
    return elemental_ref_entries
def get_dataloader_tiny(datadir, train_bs, test_bs, dataidxs=None, test_idxs=None, cache_train_data_set=None, cache_test_data_set=None, logger=None):
    """Build shuffled train/test DataLoaders over the truncated Tiny-ImageNet data."""
    (tf_train, tf_test) = _data_transforms_tiny()
    dataidxs = np.array(dataidxs)
    logger.info('train_num{} test_num{}'.format(len(dataidxs), len(test_idxs)))
    # Truncated datasets restricted to the given client index sets.
    ds_train = tiny_truncated(datadir, dataidxs=dataidxs, train=True,
                              transform=tf_train, download=True,
                              cache_data_set=cache_train_data_set)
    ds_test = tiny_truncated(datadir, dataidxs=test_idxs, train=False,
                             transform=tf_test, download=True,
                             cache_data_set=cache_test_data_set)
    loader_train = data.DataLoader(dataset=ds_train, batch_size=train_bs, shuffle=True, drop_last=False)
    loader_test = data.DataLoader(dataset=ds_test, batch_size=test_bs, shuffle=True, drop_last=False)
    return (loader_train, loader_test)
def load_conf(conf_path):
    """Parse the YAML configuration file at `conf_path` into Python objects."""
    with open(conf_path, 'r', encoding='utf-8') as stream:
        # safe_load: never instantiate arbitrary Python objects from config.
        return yaml.safe_load(stream.read())
class sensor():
    """Aggregates obstacle odometry from ROS topics into a Clusters message.

    Discovers dynamic/static obstacle marker topics (rediscovered on every
    scenario reset), tracks each obstacle's position, radius, finite-difference
    velocity and label, and republishes the aggregate on /obst_odom at 10 Hz.
    """

    def __init__(self):
        # Guards cluster mutation while a publish cycle is serializing.
        self.update_cluster = True
        self.n_reset = (- 1)
        # topic -> [position, radius, velocity, stamp, label] (dynamic)
        # topic -> [position, radius, velocity, label]        (static)
        self.obstacles_dyn = {}
        self.obstacles_static = {}
        self.cluster = Clusters()
        self.obst_topics_dyn = []
        self.obst_topics_static = []
        self.pub_obst_odom = rospy.Publisher('/obst_odom', Clusters, queue_size=1)
        # Publish aggregated odometry every 0.1 s.
        self.pub_timer = rospy.Timer(rospy.Duration(0.1), self.pub_odom)
        self.sub_reset = rospy.Subscriber('/scenario_reset', Int16, self.cb_reset)
        self.cb_reset(0)

    def cb_reset(self, msg):
        """Scenario reset: forget known topics and rediscover them."""
        self.n_reset += 1
        self.obst_topics_dyn = []
        self.obst_topics_static = []
        self.get_obstacle_topics()

    def update_obstacle_odom(self):
        """(Re)subscribe to every discovered obstacle marker topic."""
        for topic in self.obst_topics_dyn:
            rospy.Subscriber(topic, MarkerArray, self.cb_marker, topic)
        for topic in self.obst_topics_static:
            rospy.Subscriber(topic, MarkerArray, self.cb_marker, topic)
        # Pedestrian agents arrive on one aggregate topic (no topic arg).
        rospy.Subscriber('/pedsim_simulator/simulated_agents', peds.AgentStates, self.cb_marker)

    def get_obstacle_topics(self):
        """Scan published topics for dynamic/static obstacle debug models."""
        topics = rospy.get_published_topics()
        for t_list in topics:
            for t in t_list:
                if ('/debug/model/obstacle_dynamic' in t):
                    self.obst_topics_dyn.append(t)
                elif ('/debug/model/obstacle_circle' in t):
                    self.obst_topics_static.append(t)
        self.update_obstacle_odom()
        print(((' ' + 'reset:') + str(self.n_reset)))
        print('dynamic obstacles:', len(self.obst_topics_dyn))

    def pub_odom(self, event):
        """Timer callback: publish the current cluster snapshot."""
        # Block marker callbacks from writing while we serialize.
        self.update_cluster = False
        self.fill_cluster()
        self.pub_obst_odom.publish(self.cluster)
        self.cluster = Clusters()
        self.update_cluster = True

    def fill_cluster(self):
        """Copy every tracked obstacle into the outgoing Clusters message."""
        for topic in self.obstacles_dyn:
            if (topic in self.obstacles_dyn):
                tmp_point = Point()
                tmp_point.x = self.obstacles_dyn[topic][0].x
                tmp_point.y = self.obstacles_dyn[topic][0].y
                # NOTE(review): z carries the obstacle radius, not a height.
                tmp_point.z = self.obstacles_dyn[topic][1]
                tmp_vel = self.obstacles_dyn[topic][2]
                self.cluster.mean_points.append(tmp_point)
                self.cluster.velocities.append(tmp_vel)
                self.cluster.labels.append(self.obstacles_dyn[topic][4])
        for topic in self.obst_topics_static:
            if (topic in self.obstacles_static):
                tmp_point = Point()
                tmp_point.x = self.obstacles_static[topic][0].x
                tmp_point.y = self.obstacles_static[topic][0].y
                tmp_point.z = self.obstacles_static[topic][1]
                tmp_vel = self.obstacles_static[topic][2]
                self.cluster.mean_points.append(tmp_point)
                self.cluster.velocities.append(tmp_vel)
                self.cluster.labels.append(self.obstacles_static[topic][3])

    def cb_marker(self, msg, topic=None):
        """Marker callback: track position/velocity/label for one obstacle."""
        if self.update_cluster:
            if (type(msg) == MarkerArray):
                v = Vector3()
                m = msg.markers[0]
                pos = m.pose.position
                # Radius = half the marker's x scale (diameter).
                r = (m.scale.x / 2)
                label = 0
                if ('dynamic' in topic):
                    if (topic in self.obstacles_dyn):
                        # Finite-difference velocity vs the previous sample.
                        old_pos = self.obstacles_dyn[topic][0]
                        old_time = self.obstacles_dyn[topic][3].nsecs
                        curr_time = m.header.stamp.nsecs
                        dt = ((curr_time - old_time) * (10 ** (- 9)))
                        if (dt > 0):
                            v.x = round(((pos.x - old_pos.x) / dt), 3)
                            v.y = round(((pos.y - old_pos.y) / dt), 3)
                        # Dynamic labels are offset past all static labels.
                        label = topic.replace('/flatland_server/debug/model/obstacle_dynamic_with_traj_', '')
                        label = ((int(label) + len(self.obst_topics_static)) + 1)
                    self.obstacles_dyn[topic] = [pos, r, v, m.header.stamp, label]
                else:
                    if (topic in self.obstacles_static):
                        label = topic.replace('/flatland_server/debug/model/obstacle_circle_static_', '')
                        label = int(label)
                    self.obstacles_static[topic] = [pos, r, v, label]
class DotDict(dict):
    """Dict whose items are also reachable as attributes (d.key == d['key']).

    Implemented by aliasing the instance __dict__ to the dict itself, so
    attribute and item access share one namespace. Names that collide with
    dict methods (e.g. 'update', 'keys') are rejected to keep the API usable.
    """

    def __init__(self, *a, **kw):
        dict.__init__(self)
        self.update(*a, **kw)
        # Alias: attribute storage *is* the mapping itself.
        self.__dict__ = self

    def __setattr__(self, key, value):
        # Refuse names that would shadow a dict method.
        if (key in dict.__dict__):
            raise AttributeError('This key is reserved for the dict methods.')
        dict.__setattr__(self, key, value)

    def __setitem__(self, key, value):
        if (key in dict.__dict__):
            raise AttributeError('This key is reserved for the dict methods.')
        dict.__setitem__(self, key, value)

    def update(self, *args, **kwargs):
        # Route every pair through __setitem__ so the reserved-name check runs.
        for (k, v) in dict(*args, **kwargs).items():
            self[k] = v

    def __getstate__(self):
        # Pickle support: the mapping itself is the full state.
        return self

    def __setstate__(self, state):
        self.update(state)
        # Re-establish the attribute/item aliasing after unpickling.
        self.__dict__ = self
def main():
    """Post-training-quantize a frozen MobileNetV1 graph using dummy data."""
    # Dummy calibration data matching the model's NHWC input shape.
    dataset = Datasets('tensorflow')['dummy'](shape=(1, 224, 224, 3))
    dataloader = DataLoader(framework='tensorflow', dataset=dataset)
    config = PostTrainingQuantConfig()
    # NOTE(review): the quantized model is neither saved nor returned here.
    q_model = fit(model='./mobilenet_v1_1.0_224_frozen.pb', conf=config, calib_dataloader=dataloader)
class SimpleRewardShaper():
    """Reward shaper that amplifies terminal success rewards.

    Any agent whose raw reward equals 1 is instead given
    `env.max_time_steps`, strengthening the success signal.
    """

    def __init__(self):
        pass

    def reset(self, env):
        """No per-episode state to reset."""
        pass

    def __call__(self, env, observations, action_dict, rewards, dones):
        """Rewrite success rewards in `rewards` in place and return it."""
        for handle, value in rewards.items():
            if value == 1:
                rewards[handle] = env.max_time_steps
        return rewards
def hrnet18(in_channels, num_classes):
    """Build an HRNet with the width-18 `extra_18` config and Kaiming init."""
    model = HighResolutionNet(in_channels=in_channels, num_classes=num_classes, extra=extra_18)
    init_weights(model, 'kaiming')
    return model
class ClientModel(Model):
    """TF1 CNN client model for federated image classification."""

    def __init__(self, seed, lr, num_classes):
        self.num_classes = num_classes
        super(ClientModel, self).__init__(seed, lr)

    def create_model(self):
        """Build the graph: two conv/pool stages, a dense layer, softmax head.

        Returns:
            (features, labels, train_op, eval_metric_ops, loss), where
            eval_metric_ops counts correctly classified examples per batch.
        """
        features = tf.placeholder(tf.float32, shape=[None, (IMAGE_SIZE * IMAGE_SIZE)], name='features')
        labels = tf.placeholder(tf.int64, shape=[None], name='labels')
        # Flattened pixels -> NHWC single-channel image batch.
        input_layer = tf.reshape(features, [(- 1), IMAGE_SIZE, IMAGE_SIZE, 1])
        conv1 = tf.layers.conv2d(inputs=input_layer, filters=32, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
        conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], padding='same', activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
        # Two 2x2 poolings quarter each side; the 7*7 below assumes
        # IMAGE_SIZE == 28 — TODO confirm against the constant's definition.
        pool2_flat = tf.reshape(pool2, [(- 1), ((7 * 7) * 64)])
        dense = tf.layers.dense(inputs=pool2_flat, units=2048, activation=tf.nn.relu)
        logits = tf.layers.dense(inputs=dense, units=self.num_classes)
        predictions = {'classes': tf.argmax(input=logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')}
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        # Count of correct predictions in the batch (not a running metric op).
        eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions['classes']))
        self.trainable_variables = tf.trainable_variables()
        # NOTE(review): hard-coded flag — when truthy, only the final dense
        # layer's kernel and bias (last two variables) are trained.
        USE_PARTIAL_TRAINING = 1
        print('USE_PARTIAL_TRAINING', USE_PARTIAL_TRAINING)
        if USE_PARTIAL_TRAINING:
            self.trainable_variables = self.trainable_variables[(- 2):]
        train_op = self.optimizer.minimize(loss=loss, global_step=tf.train.get_global_step(), var_list=self.trainable_variables)
        return (features, labels, train_op, eval_metric_ops, loss)

    def process_x(self, raw_x_batch):
        """Convert a raw feature batch to a numpy array."""
        return np.array(raw_x_batch)

    def process_y(self, raw_y_batch):
        """Convert a raw label batch to a numpy array."""
        return np.array(raw_y_batch)
def get_api_defs(lib):
    """Load API definitions for the given library ('tf' or 'torch')."""
    # Map each supported library to its definition file.
    filenames = {
        'tf': 'data/api_def_tf.txt',
        'torch': 'data/api_def_torch.txt',
    }
    assert (lib in filenames)
    return _get_api_defs(filenames[lib])
_model
def halonet50ts(pretrained=False, **kwargs):
    """Build the 'halonet50ts' variant via the byoanet model factory."""
    return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs)
class VoxelResBackBone8x(nn.Module):
    """Sparse-convolution voxel backbone with residual blocks, 8x downsampling.

    Four residual stages at strides 1/2/4/8 relative to the input voxel grid,
    plus a final z-compressing sparse conv producing the encoded tensor used
    by downstream BEV heads. Multi-scale features are also kept for FPN-style
    consumers.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # Sparse tensors index (z, y, x): reverse the grid and pad z by 1.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stage 1: stride 1, 16 channels.
        self.conv1 = spconv.SparseSequential(SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'), SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'))
        # Stage 2: stride 2, 32 channels.
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'))
        # Stage 3: stride 4, 64 channels.
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'))
        # Stage 4: stride 8, 128 channels (asymmetric z padding).
        self.conv4 = spconv.SparseSequential(block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'))
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Compress the remaining z extent; spatial (x, y) stride stays 1.
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128

    def forward(self, batch_dict):
        """Encode voxels; adds encoded tensor and multi-scale features to batch_dict."""
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}, 'multi_scale_3d_strides': {'x_conv1': 1, 'x_conv2': 2, 'x_conv3': 4, 'x_conv4': 8}})
        return batch_dict
def natural_keys(text: str):
    """Split `text` into strings and floats for natural (human) sorting.

    Numeric runs (with optional decimals) become floats, so 'file10'
    sorts after 'file2' when used as a sort key.
    """
    def _to_float(token):
        try:
            return float(token)
        except ValueError:
            return token

    # The capture group keeps numeric runs in re.split's output.
    return [_to_float(chunk) for chunk in re.split('[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)]
def add_decola_deta_config(cfg):
    """Register DECOLA's DETA options on a yacs-style config node in place."""
    _C = cfg
    _C.MODEL.DECOLA.DETA = CN()
    # Master switch for DETA; off by default.
    _C.MODEL.DECOLA.DETA.USE_DETA = False
    _C.MODEL.DECOLA.DETA.ASSIGN_FIRST_STAGE = True
    _C.MODEL.DECOLA.DETA.ASSIGN_SECOND_STAGE = True
def load_c_file(c_file_path):
    """Read a C source file as text, falling back to windows-1252 when UTF-8 fails."""
    try:
        with open(c_file_path, encoding='utf-8') as handle:
            return handle.read()
    except UnicodeDecodeError:
        # Some sources were saved with a legacy Windows codepage.
        with open(c_file_path, encoding='windows-1252') as handle:
            return handle.read()
def compile_fn(network, net_config, args):
    """Compile Theano train/validation functions for the reconstruction net.

    Args:
        network: Output lasagne layer of the model.
        net_config: Dict of named input layers ('input', 'mask', 'kspace_input').
        args: Parsed arguments providing lr[0] and l2[0].

    Returns:
        (train_fn, val_fn) — train_fn returns [loss]; val_fn returns
        [loss, pred] without applying updates.
    """
    base_lr = float(args.lr[0])
    l2 = float(args.l2[0])
    input_var = net_config['input'].input_var
    mask_var = net_config['mask'].input_var
    kspace_var = net_config['kspace_input'].input_var
    target_var = T.tensor4('targets')
    pred = lasagne.layers.get_output(network)
    loss_sq = (lasagne.objectives.squared_error(target_var, pred).mean() * 2)
    # BUG FIX: `loss` was only bound inside the `if l2:` branch, raising
    # NameError whenever l2 == 0; default to the data term alone.
    loss = loss_sq
    if l2:
        l2_penalty = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
        loss = (loss_sq + (l2_penalty * l2))
    update_rule = lasagne.updates.adam
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = update_rule(loss, params, learning_rate=base_lr)
    print(' Compiling ... ')
    t_start = time.time()
    train_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss], updates=updates, on_unused_input='ignore')
    # Validation shares the loss graph but applies no parameter updates.
    val_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss, pred], on_unused_input='ignore')
    t_end = time.time()
    print((' ... Done, took %.4f s' % (t_end - t_start)))
    return (train_fn, val_fn)
def cosine_similarity(lfs, rhs):
    """Row-wise cosine similarity between two equally-shaped 2-D tensors."""
    numerator = tf.reduce_sum(lfs * rhs, axis=1)
    lfs_norm = tf.sqrt(tf.reduce_sum(tf.square(lfs), axis=1))
    rhs_norm = tf.sqrt(tf.reduce_sum(tf.square(rhs), axis=1))
    return numerator / (lfs_norm * rhs_norm)
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important attention heads.

    Heads are removed in batches of `masking_amount` of the total until the
    evaluation metric drops below `masking_threshold` * original score.

    Returns:
        The last head mask whose score was still above the threshold
        (the loop variable `head_mask` lags `new_head_mask` by one step).
    """
    (_, head_importance, preds, labels) = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    preds = (np.argmax(preds, axis=1) if (args.output_mode == 'classification') else np.squeeze(preds))
    original_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
    logger.info('Pruning: original score: %f, threshold: %f', original_score, (original_score * args.masking_threshold))
    new_head_mask = torch.ones_like(head_importance)
    # Number of heads to remove per iteration (at least one).
    num_to_mask = max(1, int((new_head_mask.numel() * args.masking_amount)))
    current_score = original_score
    while (current_score >= (original_score * args.masking_threshold)):
        head_mask = new_head_mask.clone()
        # Already-masked heads get infinite importance so they are never re-picked.
        head_importance[(head_mask == 0.0)] = float('Inf')
        # Ascending sort: least-important heads come first.
        current_heads_to_mask = head_importance.view((- 1)).sort()[1]
        if (len(current_heads_to_mask) <= num_to_mask):
            break
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view((- 1))
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        print_2d_tensor(new_head_mask)
        # Re-evaluate importance and score with the tentative mask applied.
        (_, head_importance, preds, labels) = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        preds = (np.argmax(preds, axis=1) if (args.output_mode == 'classification') else np.squeeze(preds))
        current_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
        logger.info('Masking: current score: %f, remaning heads %d (%.1f percents)', current_score, new_head_mask.sum(), ((new_head_mask.sum() / new_head_mask.numel()) * 100))
    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
class BaseProcessScheduler(object):
    """Launches subprocess groups and watches them from a monitor thread.

    Subclasses implement run_process() to populate `all_processes`
    (task name -> list of Popen-like objects); run() then starts a babysitter
    thread that returns on the first failure, on requested exit, or when the
    waiting set completes, and finally kills any remaining processes.
    """

    def __init__(self):
        self._exit_event = threading.Event()   # set by the SIGINT handler
        self._error_event = threading.Event()  # set when any process fails
        self.all_processes = dict()

    def start_monitor_thread_subprocess(self):
        def baby_sitter():
            def inner():
                # Poll every process until one fails, the waiting set
                # completes, or exit is requested (3 s poll interval).
                while True:
                    for (task_name, processes) in self.all_processes.items():
                        for (index, process) in enumerate(processes):
                            if (process.poll() is None):
                                continue
                            if (process.returncode != 0):
                                logger.info('%s %s exit code = %s, pid = %s', task_name, index, process.returncode, process.pid)
                                self._error_event.set()
                                return
                    # NOTE(review): `self.waiting_process` is never defined in
                    # this base class — presumably a subclass must set it
                    # before run(); otherwise this raises AttributeError.
                    if all([(p.poll() is not None) for p in self.waiting_process]):
                        return
                    if self._exit_event.wait(timeout=3):
                        return
            inner()
            if all(map((lambda p: ((p.returncode == 0) or (p.poll() is None))), functools.reduce(operator.concat, self.all_processes.values()))):
                logger.info('All process running successfully')
            # Terminate anything still alive after monitoring ends.
            for (task_name, processes) in self.all_processes.items():
                for process in processes:
                    if (process.poll() is None):
                        logger.info('Kill job_name: %s, process: %s', task_name, process.pid)
                        try:
                            process.kill()
                        except Exception:
                            # Fall back to a raw signal if kill() failed.
                            with contextlib.suppress(Exception):
                                os.system('kill -9 {}'.format(process.pid))
        logger.info('start')
        self._monitor_thread = threading.Thread(target=baby_sitter)
        self._monitor_thread.start()

    def add_signal_handler(self):
        """Install a SIGINT handler that asks the monitor to shut everything down."""
        def int_handler(sig, frame):
            logger.info('Ctrl+C pressed, kill all processes')
            self._exit_event.set()
        signal.signal(signal.SIGINT, int_handler)

    def join(self):
        """Block until the monitor thread has finished."""
        self._monitor_thread.join()

    def run_process(self):
        """Start the subprocesses; must be provided by subclasses."""
        raise NotImplementedError()

    def run(self):
        """Run processes, monitor them, and exit non-zero on failure."""
        self.run_process()
        self.start_monitor_thread_subprocess()
        self.join()
        # A failure exit is only signalled when the user did not interrupt.
        if (self._error_event.is_set() and (not self._exit_event.is_set())):
            sys.exit((- 1))
def create_markers(marker_type):
    """Generate a printable PDF sheet of ArUco markers for `marker_type`.

    Supported types are 'robots', 'cubes', and 'corners'; each repeats its
    marker ids to fill the required number of physical stickers. The PDF is
    written to printouts/markers-<marker_type>.pdf.
    """
    marker_ids = utils.get_marker_ids(marker_type)
    if (marker_type == 'robots'):
        # Five full sets plus four spares.
        marker_ids = ((5 * marker_ids) + marker_ids[:4])
    elif (marker_type == 'cubes'):
        # Six copies of each of the first eight ids (presumably one per
        # cube face — TODO confirm).
        marker_ids = [marker_id for marker_id in marker_ids[:8] for _ in range(6)]
    elif (marker_type == 'corners'):
        marker_ids = (7 * marker_ids)
    output_dir = 'printouts'
    pdf_name = 'markers-{}.pdf'.format(marker_type)
    orientation = 'P'
    sticker_padding_mm = 3
    marker_params = utils.get_marker_parameters()
    paper_params = utils.get_paper_parameters(orientation)
    marker_length_mm = (1000 * marker_params['marker_length'])
    # Pixels per marker so the printed marker comes out at marker_length_mm.
    scale_factor = (((marker_length_mm / paper_params['mm_per_in']) * paper_params['ppi']) / marker_params['marker_length_pixels'])
    sticker_length_mm = marker_params['sticker_length_mm'][marker_type]
    stickers_per_row = int(((paper_params['width_mm'] - (2 * paper_params['padding_mm'])) / (sticker_length_mm + sticker_padding_mm)))
    aruco_dict = cv2.aruco.Dictionary_get(marker_params['dict_id'])
    pdf = FPDF(orientation, 'mm', 'letter')
    pdf.add_page()
    with tempfile.TemporaryDirectory() as tmp_dir_name:
        for (i, marker_id) in enumerate(marker_ids):
            # Render the marker to a temporary PNG, then place it on the page.
            image_path = str((Path(tmp_dir_name) / '{}.png'.format(marker_id)))
            Image.fromarray(cv2.aruco.drawMarker(aruco_dict, marker_id, int((scale_factor * marker_params['marker_length_pixels'])))).save(image_path)
            # Row-major sticker grid positions (1-based cell centers).
            center_x = ((sticker_length_mm + sticker_padding_mm) * ((i % stickers_per_row) + 1))
            center_y = ((sticker_length_mm + sticker_padding_mm) * ((i // stickers_per_row) + 1))
            # Cut outline for the sticker, then the marker centered inside it.
            pdf.rect(x=((center_x - (sticker_length_mm / 2)) - (pdf.line_width / 2)), y=((center_y - (sticker_length_mm / 2)) - (pdf.line_width / 2)), w=(sticker_length_mm + pdf.line_width), h=(sticker_length_mm + pdf.line_width))
            pdf.image(image_path, x=(center_x - (marker_length_mm / 2)), y=(center_y - (marker_length_mm / 2)), w=marker_length_mm, h=marker_length_mm)
    output_dir = Path(output_dir)
    if (not output_dir.exists()):
        output_dir.mkdir(parents=True)
    pdf.output((output_dir / pdf_name))
def create_logger(root_output_path, cfg, image_set):
    """Create output directories and a timestamped file logger.

    Output layout: <root>/<cfg basename>/<image sets joined by '_'>/.

    Args:
        root_output_path: Root directory for all experiment output.
        cfg: Path to the config file; its basename (sans extension) names
            the first directory level.
        image_set: Semicolon-separated image set names.

    Returns:
        (logger, final_output_path) where logger is the root logger at INFO
        level writing to '<cfg>_<timestamp>.log' inside final_output_path.
    """
    # exist_ok avoids the check-then-create race of the original code
    # (os.path.exists followed by os.makedirs).
    os.makedirs(root_output_path, exist_ok=True)
    cfg_name = os.path.basename(cfg).split('.')[0]
    config_output_path = os.path.join(root_output_path, cfg_name)
    os.makedirs(config_output_path, exist_ok=True)
    final_output_path = os.path.join(config_output_path, '_'.join(image_set.split(';')))
    os.makedirs(final_output_path, exist_ok=True)
    log_file = '{}_{}.log'.format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
    head = '%(asctime)-15s %(message)s'
    # basicConfig is a no-op if the root logger is already configured.
    logging.basicConfig(filename=os.path.join(final_output_path, log_file), format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    return (logger, final_output_path)
class Discriminator(nn.Module):
    """Waveform discriminator: a stack of weight-normalized 1-D convolutions.

    The input is progressively downsampled by four stride-4 grouped convs;
    `forward` returns the intermediate feature maps (usable for
    feature-matching losses) together with the final score map.
    """

    def __init__(self):
        super(Discriminator, self).__init__()

        def wn_conv(in_ch, out_ch, kernel_size, stride, padding, groups=1):
            # Weight-normalized Conv1d shared by every stage.
            return nn.utils.weight_norm(nn.Conv1d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups))

        stages = [
            nn.Sequential(wn_conv(1, 16, 15, 1, 7), nn.LeakyReLU()),
            nn.Sequential(wn_conv(16, 64, 41, 4, 20, groups=4), nn.LeakyReLU()),
            nn.Sequential(wn_conv(64, 256, 41, 4, 20, groups=16), nn.LeakyReLU()),
            nn.Sequential(wn_conv(256, 1024, 41, 4, 20, groups=64), nn.LeakyReLU()),
            nn.Sequential(wn_conv(1024, 1024, 41, 4, 20, groups=256), nn.LeakyReLU()),
            nn.Sequential(wn_conv(1024, 1024, 5, 1, 2), nn.LeakyReLU()),
            wn_conv(1024, 1, 3, 1, 1),
        ]
        self.discriminator = nn.ModuleList(stages)

    def forward(self, x):
        """Return (intermediate feature maps, final score map)."""
        features = []
        for stage in self.discriminator:
            x = stage(x)
            features.append(x)
        # All but the last output are features; the last is the score.
        return (features[:(- 1)], features[(- 1)])
class PredictionLossGame():
    """Cooperative game whose value is the negated prediction loss.

    Evaluates a feature-subset extension on (sample, label) and returns
    -loss, so higher is better. Batched coalition matrices S are supported
    by tiling the sample/label to len(S); tiles are cached between calls
    of the same batch size.
    """

    def __init__(self, extension, sample, label, loss):
        # Ensure a leading batch dimension on both sample and label.
        if (sample.ndim == 1):
            sample = sample[np.newaxis]
        if np.isscalar(label):
            label = np.array([label])
        if (loss is utils.crossentropyloss):
            # Cross entropy needs integer class labels for 1-D targets.
            if ((label.ndim <= 1) or (label.shape[1] == 1)):
                if np.issubdtype(label.dtype, np.floating):
                    label = label.astype(int)
        self.extension = extension
        self.sample = sample
        self.label = label
        self.loss = loss
        # One player per input feature column.
        self.players = sample.shape[1]
        # Cached tilings, reused across equally-sized batched calls.
        self.sample_repeat = sample
        self.label_repeat = label

    def __call__(self, S):
        """Evaluate the game for coalition(s) S (1-D for one, 2-D for a batch)."""
        single_eval = (S.ndim == 1)
        if single_eval:
            S = S[np.newaxis]
            input_data = self.sample
            output_label = self.label
        else:
            # Re-tile only when the batch size changed since the last call.
            if (len(S) != len(self.sample_repeat)):
                self.sample_repeat = self.sample.repeat(len(S), 0)
                self.label_repeat = self.label.repeat(len(S), 0)
            input_data = self.sample_repeat
            output_label = self.label_repeat
        # Negate so that a better prediction yields a larger game value.
        output = (- self.loss(self.extension(input_data, S), output_label))
        if single_eval:
            output = output[0]
        return output
class CpuSampler(ParallelSamplerBase):
    """Parallel sampler whose agent runs on CPU in the worker processes.

    Before sampling or evaluation, the master agent's parameters are synced
    into shared memory so workers act with the latest weights.
    """

    def __init__(self, *args, CollectorCls=CpuResetCollector, eval_CollectorCls=CpuEvalCollector, **kwargs):
        super().__init__(*args, CollectorCls=CollectorCls, eval_CollectorCls=eval_CollectorCls, **kwargs)

    def obtain_samples(self, itr):
        # Push current model weights to workers before collecting.
        self.agent.sync_shared_memory()
        return super().obtain_samples(itr)

    def evaluate_agent(self, itr):
        # Same sync before evaluation rollouts.
        self.agent.sync_shared_memory()
        return super().evaluate_agent(itr)
class Conv_block(Module):
    """Conv2d -> BatchNorm2d -> PReLU block with a bias-free convolution."""

    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(Conv_block, self).__init__()
        # Bias is omitted because batch norm immediately re-centers.
        self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)
        self.bn = BatchNorm2d(out_c)
        self.prelu = PReLU(out_c)

    def forward(self, x):
        """Apply convolution, batch normalization, then PReLU."""
        return self.prelu(self.bn(self.conv(x)))
def main():
    """Convert (log-mel) spectrograms to waveforms with Griffin-Lim; write WAVs."""
    parser = get_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
    logging.info(get_commandline_args())
    if (not os.path.exists(args.outdir)):
        os.makedirs(args.outdir)
    for (idx, (utt_id, lmspc)) in enumerate(file_reader_helper(args.rspecifier, args.filetype), 1):
        if (args.n_mels is not None):
            # Log-mel input: invert the mel filterbank to a linear spectrogram.
            spc = logmelspc_to_linearspc(lmspc, fs=args.fs, n_mels=args.n_mels, n_fft=args.n_fft, fmin=args.fmin, fmax=args.fmax)
        else:
            spc = lmspc
        y = griffin_lim(spc, n_fft=args.n_fft, n_shift=args.n_shift, win_length=args.win_length, window=args.window, n_iters=args.iters)
        logging.info(('(%d) %s' % (idx, utt_id)))
        # Scale [-1, 1] float audio up to int16 PCM range before writing.
        write((args.outdir + ('/%s.wav' % utt_id)), args.fs, (y * np.iinfo(np.int16).max).astype(np.int16))
def visualize_registration(src, dst, transformation=np.eye(4)):
    """Visualize a registration: transformed source (red) vs target (green).

    Both clouds are deep-copied, so the caller's geometry is not modified.
    The mutable np.eye(4) default is never written to here, so sharing it
    across calls is safe.
    """
    src_trans = deepcopy(src)
    src_trans.transform(transformation)
    src_trans.paint_uniform_color([1, 0, 0])
    dst_clone = deepcopy(dst)
    dst_clone.paint_uniform_color([0, 1, 0])
    o3d.visualization.draw([src_trans, dst_clone])
def get_blocks(num_layers):
    """Return the four per-stage block specs for a 50/100/152-layer backbone."""
    # Only the unit counts differ between depths; channels are shared.
    unit_counts = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in unit_counts:
        raise ValueError('Invalid number of layers: {}. Must be one of [50, 100, 152]'.format(num_layers))
    (n1, n2, n3, n4) = unit_counts[num_layers]
    return [
        get_block(in_channel=64, depth=64, num_units=n1),
        get_block(in_channel=64, depth=128, num_units=n2),
        get_block(in_channel=128, depth=256, num_units=n3),
        get_block(in_channel=256, depth=512, num_units=n4),
    ]
class SliceData(Dataset):
    """Dataset of individual k-space slices loaded from HDF5 files.

    Each example is one slice of one file, together with padding metadata
    recovered from the (optional) ISMRMRD header.
    """

    def __init__(self, root, transform, challenge, sample_rate=1):
        """
        Args:
            root: Directory containing the HDF5 data files.
            transform: Callable applied to (kspace, mask, target, attrs, fname, slice).
            challenge: 'singlecoil' or 'multicoil'; selects the reconstruction key.
            sample_rate: Fraction of files to keep (randomly subsampled when < 1).
        """
        if (challenge not in ('singlecoil', 'multicoil')):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')
        self.transform = transform
        self.recons_key = ('reconstruction_esc' if (challenge == 'singlecoil') else 'reconstruction_rss')
        self.examples = []
        files = list(pathlib.Path(root).iterdir())
        if (sample_rate < 1):
            random.shuffle(files)
            num_files = round((len(files) * sample_rate))
            files = files[:num_files]
        for fname in sorted(files):
            # BUG FIX: the original re-raised inside the except branch, making
            # the padding_left/right = None fallback dead code (even a missing
            # optional `ismrmrd` package crashed the constructor); it also
            # never closed the h5py file handle opened here.
            with h5py.File(fname, 'r') as data:
                try:
                    import ismrmrd
                    hdr = ismrmrd.xsd.CreateFromDocument(data['ismrmrd_header'][()])
                    enc = hdr.encoding[0]
                    enc_size = (enc.encodedSpace.matrixSize.x, enc.encodedSpace.matrixSize.y, enc.encodedSpace.matrixSize.z)
                    enc_limits_center = enc.encodingLimits.kspace_encoding_step_1.center
                    enc_limits_max = (enc.encodingLimits.kspace_encoding_step_1.maximum + 1)
                    padding_left = ((enc_size[1] // 2) - enc_limits_center)
                    padding_right = (padding_left + enc_limits_max)
                except Exception:
                    # No parseable ISMRMRD header: fall back to "no padding info".
                    padding_left = None
                    padding_right = None
                num_slices = data['kspace'].shape[0]
            # One example per slice of this file.
            self.examples += [(fname, slice_idx, padding_left, padding_right) for slice_idx in range(num_slices)]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        (fname, slice_idx, padding_left, padding_right) = self.examples[i]
        with h5py.File(fname, 'r') as data:
            kspace = data['kspace'][slice_idx]
            mask = (np.asarray(data['mask']) if ('mask' in data) else None)
            target = (data[self.recons_key][slice_idx] if (self.recons_key in data) else None)
            attrs = dict(data.attrs)
            attrs['padding_left'] = padding_left
            attrs['padding_right'] = padding_right
            return self.transform(kspace, mask, target, attrs, fname.name, slice_idx)
def _test():
    """Smoke-test the ShuffleNetV2 variants: parameter counts and output shape."""
    import torch
    pretrained = False
    # Expected parameter count per model constructor (insertion order
    # matches the original list order).
    expected_weights = {
        shufflenetv2_wd2: 1366792,
        shufflenetv2_w1: 2278604,
        shufflenetv2_w3d2: 4406098,
        shufflenetv2_w2: 7601686,
    }
    for model in expected_weights:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        assert weight_count == expected_weights[model]
        # One forward/backward pass on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
def main(args):
    """CLI entry point: parse `args` and run the conversion."""
    args = get_parser().parse_args(args)
    convert(args.json, args.refs, args.hyps, args.num_spkrs)
def compute_histogram(args, dataloader, model, classifier):
    """Accumulate a K_test x K_test confusion histogram over the dataloader.

    Features are L2-normalized, assigned to classifier prototypes via
    compute_dist, upsampled to the evaluation resolution, and compared
    against ground-truth labels with `scores`.
    """
    histogram = np.zeros((args.K_test, args.K_test))
    model.eval()
    classifier.eval()
    with torch.no_grad():
        for (i, (indice, image, label)) in enumerate(dataloader):
            image = image.cuda(non_blocking=True)
            feats = model(image)
            # Unit-norm features so distances reduce to cosine similarity.
            feats = F.normalize(feats, dim=1, p=2)
            if (i == 0):
                print('Batch image size : {}'.format(image.size()), flush=True)
                print('Batch label size : {}'.format(label.size()), flush=True)
                print('Batch feature size : {}\n'.format(feats.size()), flush=True)
            probs = compute_dist(feats, classifier)
            # Upsample predictions to the label resolution before argmax.
            probs = F.interpolate(probs, args.res1, mode='bilinear', align_corners=False)
            preds = probs.topk(1, dim=1)[1].view(probs.size(0), (- 1)).cpu().numpy()
            label = label.view(probs.size(0), (- 1)).cpu().numpy()
            histogram += scores(label, preds, args.K_test)
    return histogram
class ExplorationPolicy(abc.ABC):
    """Wraps a base policy and adds exploration behavior on top of it.

    Subclasses are expected to override `get_action` / `get_actions`;
    bookkeeping calls are delegated to the wrapped policy.
    """

    def __init__(self, policy):
        self.policy = policy

    def get_action(self, observation):
        """Return an exploratory action for one observation (override me)."""
        # BUG FIX: the original method body was empty, which is a SyntaxError.
        raise NotImplementedError

    def get_actions(self, observations):
        """Return exploratory actions for a batch of observations (override me)."""
        raise NotImplementedError

    def reset(self, dones=None):
        """Reset the wrapped policy's internal state."""
        self.policy.reset(dones)

    def get_param_values(self):
        return self.policy.get_param_values()

    def set_param_values(self, params):
        self.policy.set_param_values(params)
def get_new_network_cell():
    """Decode a searched architecture and persist its components.

    Loads the checkpoint referenced by ``args.resume`` (via Loader), decodes
    the feature/matching network paths and cell genotypes, prints them, and
    saves paths and genotypes as .npy files next to the checkpoint.

    NOTE(review): ``fea_cell_name``/``mat_cell_name`` are computed but never
    used — the corresponding save/plot calls may have been lost; confirm
    against upstream.
    """
    args = obtain_decode_args()
    load_model = Loader(args)
    (fea_net_paths, fea_net_paths_space, mat_net_paths, mat_net_paths_space) = load_model.decode_architecture()
    (fea_genotype, mat_genotype) = load_model.decode_cell()
    print('Feature Net search results:', fea_net_paths)
    print('Matching Net search results:', mat_net_paths)
    print('Feature Net cell structure:', fea_genotype)
    print('Matching Net cell structure:', mat_genotype)
    # All outputs are saved alongside the checkpoint file.
    dir_name = os.path.dirname(args.resume)
    fea_net_path_filename = os.path.join(dir_name, 'feature_network_path')
    fea_genotype_filename = os.path.join(dir_name, 'feature_genotype')
    np.save(fea_net_path_filename, fea_net_paths)
    np.save(fea_genotype_filename, fea_genotype)
    mat_net_path_filename = os.path.join(dir_name, 'matching_network_path')
    mat_genotype_filename = os.path.join(dir_name, 'matching_genotype')
    np.save(mat_net_path_filename, mat_net_paths)
    np.save(mat_genotype_filename, mat_genotype)
    fea_cell_name = os.path.join(dir_name, 'feature_cell_structure')
    mat_cell_name = os.path.join(dir_name, 'matching_cell_structure')
_module()  # NOTE(review): looks like a class-registration decorator for FormatTrimap that lost its "@" and target during extraction — confirm against upstream
class FormatTrimap():
    """Remap a trimap to class indices {0, 1, 2} and format it for the model.

    Pixel values 128 and 255 are mapped to labels 1 and 2 respectively;
    everything else keeps its value (0 for background).  The result is
    either a one-hot (3, H, W) float tensor or a single-channel (1, H, W)
    float tensor, and ``results['meta'].data`` records which format was used.
    """

    def __init__(self, to_onehot=False):
        self.to_onehot = to_onehot

    def __call__(self, results):
        trimap = results['trimap'].squeeze()
        # Collapse the raw grey-level encoding into categorical labels.
        trimap[trimap == 128] = 1
        trimap[trimap == 255] = 2
        if not self.to_onehot:
            # Add a leading channel dimension: (H, W) -> (1, H, W).
            trimap = trimap[None, ...]
        else:
            onehot = F.one_hot(trimap.to(torch.long), num_classes=3)
            trimap = onehot.permute(2, 0, 1)
        results['trimap'] = trimap.float()
        results['meta'].data['to_onehot'] = self.to_onehot
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(to_onehot={self.to_onehot})'
def get_transform_cub(model_type, train, augment_data):
    """Build the torchvision transform pipeline for CUB images.

    Evaluation (or training without augmentation) uses a deterministic
    resize + center-crop; augmented training uses a random resized crop and
    horizontal flips.  Both pipelines normalise with the standard ImageNet
    channel statistics.
    """
    scale = 256.0 / 224.0
    target_resolution = model_attributes[model_type]['target_resolution']
    assert target_resolution is not None
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    if train and augment_data:
        return transforms.Compose([
            transforms.RandomResizedCrop(
                target_resolution,
                scale=(0.7, 1.0),
                ratio=(0.75, 1.),
                interpolation=2,
            ),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    return transforms.Compose([
        transforms.Resize((int(target_resolution[0] * scale), int(target_resolution[1] * scale))),
        transforms.CenterCrop(target_resolution),
        transforms.ToTensor(),
        normalize,
    ])
from dataclasses import dataclass


@dataclass
class FlaxImageClassifierOutputWithNoAttention(ModelOutput):
    """Output of an image-classification model without attention maps.

    NOTE(review): the original decorator line read ``.dataclass`` — its
    target was lost during extraction (transformers uses
    ``@flax.struct.dataclass`` for Flax outputs).  A plain
    ``dataclasses.dataclass`` is used here to keep the module importable;
    confirm against upstream.
    """
    logits: jnp.ndarray = None  # classification scores, (batch, num_labels) — confirm with caller
    hidden_states: Optional[Tuple[jnp.ndarray]] = None  # per-layer hidden states when requested
class PygNodePropPredDataset(InMemoryDataset):
    """PyTorch-Geometric wrapper for OGB node-property-prediction datasets.

    Metadata comes from ``master.csv`` next to this module (or from
    ``meta_dict`` for locally generated datasets).  Raw files are downloaded
    into ``root`` and processed into ``geometric_data_processed.pt``.

    NOTE(review): ``num_classes``, ``raw_file_names`` and
    ``processed_file_names`` appear to have lost their ``@property``
    decorators during extraction (PyG's Dataset accesses the *_file_names
    members as attributes); restored here — confirm against upstream.
    """

    def __init__(self, name, root='dataset', transform=None, pre_transform=None, meta_dict=None):
        self.name = name
        if (meta_dict is None):
            self.dir_name = '_'.join(name.split('-'))
            # Prefer an already-converted "<name>_pyg" directory when present.
            if osp.exists(osp.join(root, (self.dir_name + '_pyg'))):
                self.dir_name = (self.dir_name + '_pyg')
            self.original_root = root
            self.root = osp.join(root, self.dir_name)
            master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col=0)
            if (not (self.name in master)):
                error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
                error_mssg += 'Available datasets are as follows:\n'
                error_mssg += '\n'.join(master.keys())
                raise ValueError(error_mssg)
            self.meta_info = master[self.name]
        else:
            self.dir_name = meta_dict['dir_path']
            self.original_root = ''
            self.root = meta_dict['dir_path']
            self.meta_info = meta_dict
        # If the on-disk copy predates the current release, offer to refresh it.
        if (osp.isdir(self.root) and (not osp.exists(osp.join(self.root, (('RELEASE_v' + str(self.meta_info['version'])) + '.txt'))))):
            print((self.name + ' has been updated.'))
            if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
                shutil.rmtree(self.root)
        self.download_name = self.meta_info['download_name']
        self.num_tasks = int(self.meta_info['num tasks'])
        self.task_type = self.meta_info['task type']
        self.eval_metric = self.meta_info['eval metric']
        self.__num_classes__ = int(self.meta_info['num classes'])
        self.is_hetero = (self.meta_info['is hetero'] == 'True')
        self.binary = (self.meta_info['binary'] == 'True')
        super(PygNodePropPredDataset, self).__init__(self.root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    def get_idx_split(self, split_type=None):
        """Return the train/valid/test node-index split (dict of LongTensors,
        or dict of per-node-type dicts for heterogeneous graphs)."""
        if (split_type is None):
            split_type = self.meta_info['split']
        path = osp.join(self.root, 'split', split_type)
        # A pre-saved split dictionary short-circuits the CSV parsing below.
        if os.path.isfile(os.path.join(path, 'split_dict.pt')):
            return torch.load(os.path.join(path, 'split_dict.pt'))
        if self.is_hetero:
            (train_idx_dict, valid_idx_dict, test_idx_dict) = read_nodesplitidx_split_hetero(path)
            for nodetype in train_idx_dict.keys():
                train_idx_dict[nodetype] = torch.from_numpy(train_idx_dict[nodetype]).to(torch.long)
                valid_idx_dict[nodetype] = torch.from_numpy(valid_idx_dict[nodetype]).to(torch.long)
                test_idx_dict[nodetype] = torch.from_numpy(test_idx_dict[nodetype]).to(torch.long)
            return {'train': train_idx_dict, 'valid': valid_idx_dict, 'test': test_idx_dict}
        else:
            train_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header=None).values.T[0]).to(torch.long)
            valid_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header=None).values.T[0]).to(torch.long)
            test_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header=None).values.T[0]).to(torch.long)
            return {'train': train_idx, 'valid': valid_idx, 'test': test_idx}

    @property
    def num_classes(self):
        """Number of target classes (from the dataset metadata)."""
        return self.__num_classes__

    @property
    def raw_file_names(self):
        """Names of the raw files expected under ``raw_dir``."""
        if self.binary:
            if self.is_hetero:
                return ['edge_index_dict.npz']
            else:
                return ['data.npz']
        elif self.is_hetero:
            return ['num-node-dict.csv.gz', 'triplet-type-list.csv.gz']
        else:
            file_names = ['edge']
            if (self.meta_info['has_node_attr'] == 'True'):
                file_names.append('node-feat')
            if (self.meta_info['has_edge_attr'] == 'True'):
                file_names.append('edge-feat')
            return [(file_name + '.csv.gz') for file_name in file_names]

    @property
    def processed_file_names(self):
        """Name of the processed data file under ``processed_dir``."""
        return osp.join('geometric_data_processed.pt')

    def download(self):
        """Download and unpack the raw dataset, after user confirmation."""
        url = self.meta_info['url']
        if decide_download(url):
            path = download_url(url, self.original_root)
            extract_zip(path, self.original_root)
            os.unlink(path)
            # Replace any stale root with the freshly extracted archive.
            shutil.rmtree(self.root)
            shutil.move(osp.join(self.original_root, self.download_name), self.root)
        else:
            print('Stop downloading.')
            shutil.rmtree(self.root)
            exit((- 1))

    def process(self):
        """Convert the raw files into a single PyG data object and save it."""
        add_inverse_edge = (self.meta_info['add_inverse_edge'] == 'True')
        if (self.meta_info['additional node files'] == 'None'):
            additional_node_files = []
        else:
            additional_node_files = self.meta_info['additional node files'].split(',')
        if (self.meta_info['additional edge files'] == 'None'):
            additional_edge_files = []
        else:
            additional_edge_files = self.meta_info['additional edge files'].split(',')
        if self.is_hetero:
            data = read_heterograph_pyg(self.raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
            if self.binary:
                tmp = np.load(osp.join(self.raw_dir, 'node-label.npz'))
                node_label_dict = {}
                for key in list(tmp.keys()):
                    node_label_dict[key] = tmp[key]
                del tmp
            else:
                node_label_dict = read_node_label_hetero(self.raw_dir)
            data.y_dict = {}
            if ('classification' in self.task_type):
                for (nodetype, node_label) in node_label_dict.items():
                    # Labels with NaNs (missing targets) must stay float.
                    if np.isnan(node_label).any():
                        data.y_dict[nodetype] = torch.from_numpy(node_label).to(torch.float32)
                    else:
                        data.y_dict[nodetype] = torch.from_numpy(node_label).to(torch.long)
            else:
                for (nodetype, node_label) in node_label_dict.items():
                    data.y_dict[nodetype] = torch.from_numpy(node_label).to(torch.float32)
        else:
            data = read_graph_pyg(self.raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
            if self.binary:
                node_label = np.load(osp.join(self.raw_dir, 'node-label.npz'))['node_label']
            else:
                node_label = pd.read_csv(osp.join(self.raw_dir, 'node-label.csv.gz'), compression='gzip', header=None).values
            if ('classification' in self.task_type):
                # Labels with NaNs (missing targets) must stay float.
                if np.isnan(node_label).any():
                    data.y = torch.from_numpy(node_label).to(torch.float32)
                else:
                    data.y = torch.from_numpy(node_label).to(torch.long)
            else:
                data.y = torch.from_numpy(node_label).to(torch.float32)
        data = (data if (self.pre_transform is None) else self.pre_transform(data))
        print('Saving...')
        torch.save(self.collate([data]), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class LeNet(nn.Module):
    """Small 1-D LeNet-style CNN: two conv/pool stages followed by a
    three-layer fully-connected head producing ``out_channel`` logits."""

    def __init__(self, in_channel=1, out_channel=10):
        super(LeNet, self).__init__()
        # Stage 1: conv(k=5) -> ReLU -> max-pool(/2).
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_channel, 6, 5),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2),
        )
        # Stage 2: conv(k=5) -> ReLU -> adaptive pool to length 5, which
        # makes the classifier input size independent of the input length.
        self.conv2 = nn.Sequential(
            nn.Conv1d(6, 16, 5),
            nn.ReLU(),
            nn.AdaptiveMaxPool1d(5),
        )
        self.fc1 = nn.Sequential(nn.Linear(16 * 5, 30), nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(30, 10), nn.ReLU())
        self.fc3 = nn.Linear(10, out_channel)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size()[0], -1)
        return self.fc3(self.fc2(self.fc1(flat)))
def rand_tree(n, reg, n_out=0, n_hyper_in=0, n_hyper_out=0, d_min=2, d_max=3, seed=None, optimize='greedy'):
    """Generate a contraction tree for a random tensor-network equation.

    The equation is drawn via ``rand_equation`` (same parameters: *n*
    tensors, connectivity *reg*, *n_out* open indices, hyper-edge counts,
    dimensions in ``[d_min, d_max]``, optional *seed*), then all leaves are
    contracted using the *optimize* strategy.

    Returns:
        ContractionTree: the fully built tree for the random equation.
    """
    from .core import ContractionTree
    (inputs, output, _, size_dict) = rand_equation(n, reg, n_out=n_out, n_hyper_in=n_hyper_in, n_hyper_out=n_hyper_out, d_min=d_min, d_max=d_max, seed=seed)
    tree = ContractionTree(inputs, output, size_dict)
    tree.contract_nodes(tuple(tree.gen_leaves()), optimize=optimize)
    return tree
class Agent(abc.ABC):
    """Base class for training agents; splits the global batch evenly across
    the local JAX devices.

    NOTE(review): the methods below have bare ``pass`` bodies — they look
    like abstract methods whose ``@abc.abstractmethod`` decorators were
    stripped during extraction; confirm against upstream.
    """

    def __init__(self, total_batch_size: int):
        self.total_batch_size = total_batch_size
        num_devices = jax.local_device_count()
        # Each device must receive an integral share of the global batch.
        assert ((total_batch_size % num_devices) == 0), f'The total batch size must be a multiple of the number of devices, got total_batch_size={total_batch_size} and num_devices={num_devices}.'
        self.batch_size_per_device = (total_batch_size // num_devices)

    def init_params(self, key: chex.PRNGKey) -> Optional[ParamsState]:
        """Initialise and return the agent's parameter state (may be None)."""
        pass

    def run_epoch(self, training_state: TrainingState) -> Tuple[(TrainingState, Dict)]:
        """Run one training epoch; return the new state and a metrics dict."""
        pass

    def make_policy(self, policy_params: Optional[hk.Params], stochastic: bool=True) -> Callable:
        """Build a callable policy from *policy_params*."""
        pass
def load_f0(f0_dir, nshards):
    """Merge the per-shard F0 dictionaries ``f0_<rank>_<nshards>.pt``
    (rank = 1..nshards) found under *f0_dir* into one path -> F0 mapping."""
    merged = {}
    for rank in tqdm(range(1, nshards + 1), desc=f'load f0'):
        shard = torch.load(f'{f0_dir}/f0_{rank}_{nshards}.pt')
        merged.update(shard)
    return merged
def compute_precision_recall(scores, labels, num_gt):
    """Compute precision/recall curves from detection scores.

    Args:
        scores: 1-D float numpy array of detection confidences.
        labels: 1-D bool numpy array; True marks a true positive.
        num_gt: number of ground-truth instances.

    Returns:
        ``(precision, recall)`` arrays ordered by descending score, or
        ``(None, None)`` when ``num_gt`` is 0.

    Raises:
        ValueError: on malformed inputs or when the number of positives
            exceeds *num_gt*.
    """
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` compares
    # equal to the numpy boolean dtype, so use it instead.
    if ((not isinstance(labels, np.ndarray)) or (labels.dtype != bool) or (len(labels.shape) != 1)):
        raise ValueError('labels must be single dimension bool numpy array')
    if ((not isinstance(scores, np.ndarray)) or (len(scores.shape) != 1)):
        raise ValueError('scores must be single dimension numpy array')
    if (num_gt < np.sum(labels)):
        raise ValueError('Number of true positives must be smaller than num_gt.')
    if (len(scores) != len(labels)):
        raise ValueError('scores and labels must be of the same size.')
    if (num_gt == 0):
        return (None, None)
    # Rank detections by descending score, then accumulate TP/FP counts.
    sorted_indices = np.argsort(scores)[::-1]
    true_positive_labels = labels[sorted_indices].astype(int)
    false_positive_labels = 1 - true_positive_labels
    cum_true_positives = np.cumsum(true_positive_labels)
    cum_false_positives = np.cumsum(false_positive_labels)
    precision = cum_true_positives.astype(float) / (cum_true_positives + cum_false_positives)
    recall = cum_true_positives.astype(float) / num_gt
    return (precision, recall)
('NOTICE', colon=False)  # NOTE(review): looks like a stripped "@...('NOTICE', colon=False)" handler-registration decorator for the function below — confirm against upstream
def _handle_notice(irc: miniirc.IRC, hostmask: Tuple[(str, str, str)], args: List[str]) -> None:
    """Handle IRC NOTICE events.

    Watches NickServ notices for nick-registration problems: attempts to
    regain the configured nick when the server reports the current nick is
    unregistered, and queues a non-zero exit code when a regain attempt
    fails.
    """
    log.info('Received NOTICE: hostmask=%s, args=%s', hostmask, args)
    (user, _ident, _hostname) = hostmask
    if ((user.casefold() == 'nickserv') and (len(args) >= 2)):
        (nick, msg) = args[:2]
        if ('not a registered nickname' in msg):
            msg = f'The server sent event NOTICE reporting the current nick {nick} is not registered.'
            # Only regain when the reported nick differs from the configured
            # one (i.e. the bot was bumped to a fallback nick).
            if (nick.casefold() != config.INSTANCE['nick'].casefold()):
                _regain_nick(irc, msg)
            else:
                log.warning(msg)
        elif ('can not regain your nickname' in msg):
            log.error(f'The nick could not be regained due to error: {msg}')
            Bot.EXITCODE_QUEUE.put(1)
def find_tanh(alpha, k, eps=0.0001, positive=True):
    """Locate points on a scaled tanh whose derivative brackets *k*.

    For ``u = sqrt(1 - k/alpha)``, returns x-values just below/above
    ``inverse_tanh(u)`` together with the slopes ``alpha * (1 - tanh(x)^2)``
    there.  Where u is within 0.01 of 1 the curve is near-saturated, so the
    upper x is clamped to 100 and its slope to 0.  With ``positive=False``
    the mirrored (negative-x) segment is returned with endpoints swapped.

    NOTE(review): assumes *alpha* and *k* are tensors with ``k <= alpha``
    elementwise (otherwise the sqrt produces NaNs) — confirm with callers.
    """
    u = torch.sqrt((1 - (k / alpha)))
    # Near-saturation mask: 1 - u <= 0.01.
    extreme = ((1 - u) <= 0.01)
    x1 = inverse_tanh((u - eps))
    x2 = inverse_tanh((u + eps))
    x2[extreme] = 100
    k1 = ((1 - (torch.tanh(x1) ** 2)) * alpha)
    k2 = ((1 - (torch.tanh(x2) ** 2)) * alpha)
    k2[extreme] = 0
    if positive:
        return (x1, x2, k1, k2)
    else:
        return ((- x2), (- x1), k2, k1)
def find_ngrams(token_dict, text, n):
    """Greedily merge runs of *n* consecutive tokens of *text* that appear
    (space-joined) in *token_dict*, recursing with a smaller n over the
    unmatched stretches between merges.  Returns the resulting token list."""
    if n <= 1:
        return text
    merged = []
    pending = []
    window = text[:]
    while len(window) >= n:
        candidate = ' '.join(window[:n])
        if candidate not in token_dict:
            # Shift one token into the pending (so-far unmatched) stretch.
            pending.append(window.pop(0))
            continue
        # Resolve the unmatched stretch at a smaller n, then emit the match.
        merged.extend(find_ngrams(token_dict, pending, min(len(pending), n - 1)))
        pending = []
        merged.append(candidate)
        window = window[n:]
    tail = pending + window
    merged.extend(find_ngrams(token_dict, tail, min(len(tail), n - 1)))
    return merged
def named_penalties(module, reduction='sum', prefix=''):
    """Yield ``(name, penalty)`` for every BaseARD submodule of *module*.

    Args:
        module: root module whose tree is searched via ``named_modules``.
        reduction: None (raw tensor), 'sum' or 'mean'.
        prefix: name prefix passed through to ``named_modules``.

    Raises:
        ValueError: for any other *reduction* value.
    """
    if reduction not in (None, 'mean', 'sum'):
        raise ValueError(f'`reduction` must be either `None`, `sum` or `mean`. Got {reduction}.')
    for name, submodule in module.named_modules(prefix=prefix):
        if not isinstance(submodule, BaseARD):
            continue
        value = submodule.penalty
        if reduction == 'sum':
            value = value.sum()
        elif reduction == 'mean':
            value = value.mean()
        yield (name, value)
def map_to_list_nn(features, nbidx, srcpos, bs, nv, height, width):
    """Thin functional wrapper around the ``MapToListNn`` autograd Function;
    see that class for the semantics of the arguments."""
    return MapToListNn.apply(features, nbidx, srcpos, bs, nv, height, width)
class CallableModule(types.ModuleType):
    """Module subclass that makes the module object itself callable.

    An instance is meant to replace ``sys.modules[__name__]`` so that
    ``import mod; mod(x)`` works — calling the instance forwards to the
    module-level function named ``__call__`` defined elsewhere in this file.
    """
    def __init__(self):
        types.ModuleType.__init__(self, __name__)
        # Copy the real module's namespace so its attributes survive the
        # sys.modules swap.
        self.__dict__.update(sys.modules[__name__].__dict__)
    def __call__(self, x, *args, **kwargs):
        # Resolves to the module-global ``__call__`` (not this method).
        return __call__(x, *args, **kwargs)
def learn_q_model(model_name):
    """Train a tabular Q model over episodes and save it as ``model_name``.

    Reads the reward function, episode counts, step size, etc. from
    ``control.Settings``; logs settings and metrics to TensorBoard under
    *model_name*; evaluates the model every EVALUATION_PERIOD episodes;
    halves the step size every STEP_SIZE_HALF_PER_EPISODES episodes; and
    finally writes the Q table with ``np.save``.

    Raises:
        ValueError: when Settings.REWARD_FUNCTION is not recognised.
    """
    if (control.Settings.REWARD_FUNCTION == 'Continuous'):
        reward_function = continuous_reward
    elif (control.Settings.REWARD_FUNCTION == 'Slotted'):
        reward_function = slotted_reward
    else:
        raise ValueError('Invalid reward function {} specified in settings.'.format(control.Settings.REWARD_FUNCTION))
    writer = SummaryWriter(comment=model_name)
    # Record all settings alongside the run for reproducibility.
    for (key, value) in control.Settings.export_settings().items():
        writer.add_text(key, str(value))
    if control.Settings.INIT_MODEL_NAME:
        Q = load_q_model(control.Settings.INIT_MODEL_NAME)
    else:
        Q = initialize_q()
    # Visit counts use the same table structure as Q.
    visits = initialize_q()
    for i in tqdm(range(control.Settings.NUM_TRAINING_EPISODES)):
        if (((i % control.Settings.EVALUATION_PERIOD) == 0) and (i != 0)):
            logging.info('Evaluating model at training episode {}'.format(i))
            evaluate_q_model_and_log_metrics(Q, i, writer, visits, reward_function=reward_function)
        # epsilon=1.0 — presumably a fully exploratory behaviour policy
        # during training; confirm against get_control's semantics.
        control_function = partial(get_control, Q=Q, visits=visits, epsilon=1.0)
        episode_state = control.run_episode(control_function=control_function, state_function=get_state_tuple, max_episode_length=control.Settings.MAX_EPISODE_LENGTH, wait_before_start=20, limit_metrics=True)
        episode_history = get_history(episode_state, reward_function)
        do_q_update(episode_history, Q, visits, control.Settings.GAMMA, control.Settings.STEP_SIZE)
        if (((i % control.Settings.STEP_SIZE_HALF_PER_EPISODES) == 0) and (i != 0)):
            control.Settings.STEP_SIZE /= 2
    evaluate_q_model_and_log_metrics(Q, control.Settings.NUM_TRAINING_EPISODES, writer, visits, reward_function=reward_function)
    np.save(model_name, Q)
    writer.close()
def CHECKEQ(a, b, s=None):
    """Recursively assert that *a* and *b* are equal.

    Lists are compared element-wise, dicts key-by-key, multi-element
    tensors with ``torch.equal``, and everything else with ``==``.  *s* is
    a context string threaded into the failure messages.
    """
    if s is None:
        s = ''
    if type(a) is list:
        CHECKEQ(len(a), len(b), s)
        for item_a, item_b in zip(a, b):
            CHECKEQ(item_a, item_b, s)
    elif type(a) is dict:
        CHECKEQ(list(a.keys()), list(b.keys()), s)
        for key in a.keys():
            CHECKEQ(a[key], b[key], s + ' keys = {}'.format(key))
    elif torch.is_tensor(a) and torch.is_tensor(b) and a.numel() > 1:
        assert torch.equal(a, b), 'a {} not == b {}; {} '.format(a.shape, b.shape, s)
    else:
        assert a == b, 'get {} {}; {}'.format(a, b, s)
def predictor_typeendgame_get():
    """Return the predictor's current end-game type, read from phcpy as
    continuation parameter #8 (cast to int)."""
    from phcpy.phcpy2c3 import py2c_get_value_of_continuation_parameter as get
    return int(get(8))
def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None, verbose=True):
    """Unicode-path-safe replacement for ``cv2.imread``.

    Reads the raw bytes (via *loader_func* when given, otherwise straight
    from disk) and decodes them with ``cv2.imdecode``, which — unlike
    ``cv2.imread`` — handles non-ASCII file paths.  Returns the decoded
    image, or None on failure (logging the traceback when *verbose*).
    """
    try:
        if loader_func is not None:
            raw = bytearray(loader_func(filename))
        else:
            with open(filename, 'rb') as stream:
                raw = bytearray(stream.read())
        buffer = np.asarray(raw, dtype=np.uint8)
        return cv2.imdecode(buffer, flags)
    # Catch Exception, not everything: a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:
        if verbose:
            io.log_err(f'Exception occured in cv2_imread : {traceback.format_exc()}')
        return None
class myBN(nn.Module):
    """Manual BatchNorm that maintains its own running statistics.

    In training mode per-batch statistics are computed by ``manual_bn`` and
    either blended into the running buffers with *momentum* or, when
    ``accumulate_standing`` is set, summed so that "standing statistics"
    can later be averaged via ``accumulation_counter``.  In eval mode the
    stored statistics are applied via ``fused_bn``.

    Fixed: the original assigned ``self.momentum`` twice in ``__init__``.
    """

    def __init__(self, num_channels, eps=1e-05, momentum=0.1):
        super(myBN, self).__init__()
        self.momentum = momentum
        self.eps = eps
        # Buffers move with .to()/.cuda() and are stored in state_dict
        # without being trained.
        self.register_buffer('stored_mean', torch.zeros(num_channels))
        self.register_buffer('stored_var', torch.ones(num_channels))
        self.register_buffer('accumulation_counter', torch.zeros(1))
        # When True, batch statistics are summed instead of EMA-updated.
        self.accumulate_standing = False

    def reset_stats(self):
        """Zero the running statistics and the accumulation counter."""
        self.stored_mean[:] = 0
        self.stored_var[:] = 0
        self.accumulation_counter[:] = 0

    def forward(self, x, gain, bias):
        if self.training:
            (out, mean, var) = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps)
            if self.accumulate_standing:
                # Accumulate raw sums; eval divides by the counter.
                self.stored_mean[:] = (self.stored_mean + mean.data)
                self.stored_var[:] = (self.stored_var + var.data)
                self.accumulation_counter += 1.0
            else:
                # Standard exponential-moving-average update.
                self.stored_mean[:] = ((self.stored_mean * (1 - self.momentum)) + (mean * self.momentum))
                self.stored_var[:] = ((self.stored_var * (1 - self.momentum)) + (var * self.momentum))
            return out
        else:
            mean = self.stored_mean.view(1, (- 1), 1, 1)
            var = self.stored_var.view(1, (- 1), 1, 1)
            if self.accumulate_standing:
                # Convert accumulated sums into averages.
                mean = (mean / self.accumulation_counter)
                var = (var / self.accumulation_counter)
            return fused_bn(x, mean, var, gain, bias, self.eps)
def CheckFiles(input_data_dir):
    """Verify that *input_data_dir* contains the required Kaldi data files
    (spk2utt, text, utt2spk, feats.scp); raise on the first missing one."""
    required = ['spk2utt', 'text', 'utt2spk', 'feats.scp']
    for base_name in required:
        full_path = '{0}/{1}'.format(input_data_dir, base_name)
        if not os.path.exists(full_path):
            raise Exception('There is no such file {0}'.format(full_path))
def load_files_from_dataset_dir(dataset_dir) -> list:
    """Return the names of all entries directly inside *dataset_dir*.

    Fixed: the original was annotated ``-> dict`` but always returned a
    list, and built an unused ``file_paths`` dict; the dead code is removed
    and the annotation now matches the actual (and kept) return type.
    """
    return os.listdir(dataset_dir)
# Register the loader below for every supported ResNet Caffe2 backbone string.
# NOTE(review): these look like stripped "@_FORMAT_LOADER.register(...)"
# decorator applications for load_resnet_c2_format — confirm against upstream.
_FORMAT_LOADER.register('R-50-C4')
_FORMAT_LOADER.register('R-50-C5')
_FORMAT_LOADER.register('R-101-C4')
_FORMAT_LOADER.register('R-101-C5')
_FORMAT_LOADER.register('R-50-FPN')
_FORMAT_LOADER.register('R-50-FPN-RETINANET')
_FORMAT_LOADER.register('R-101-FPN')
_FORMAT_LOADER.register('R-101-FPN-RETINANET')
_FORMAT_LOADER.register('R-152-FPN')
def load_resnet_c2_format(cfg, f):
    """Load Caffe2-pickled ResNet weights from *f* and rename them for the
    configured backbone; returns ``{'model': renamed_state_dict}``."""
    weights = _load_c2_pickled_weights(f)
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    # Strip the stage / FPN / RetinaNet suffixes to recover the base arch name.
    arch = conv_body.replace('-C4', '').replace('-C5', '').replace('-FPN', '').replace('-RETINANET', '')
    renamed = _rename_weights_for_resnet(weights, _C2_STAGE_NAMES[arch])
    return dict(model=renamed)
class L1Norm(nn.Module):
    """Module computing the sum over the batch of per-row L1 norms."""

    def __init__(self):
        super(L1Norm, self).__init__()

    def forward(self, x):
        # L1 norm along dim 1, then summed over all remaining entries.
        row_norms = torch.norm(x, 1, 1)
        return row_norms.sum()
class FixedLRScheduleConfig(FairseqDataclass):
    """Configuration for the 'fixed' learning-rate schedule.

    NOTE(review): fairseq dataclass configs are normally decorated with
    ``@dataclass``; the decorator appears to have been stripped during
    extraction — confirm against upstream.
    """
    # Epoch at which annealing is forced (None = never force).
    force_anneal: Optional[int] = field(default=None, metadata={'help': 'force annealing at specified epoch'})
    # Multiplicative shrink applied on anneal: lr_new = lr * lr_shrink.
    lr_shrink: float = field(default=0.1, metadata={'help': 'shrink factor for annealing, lr_new = (lr * lr_shrink)'})
    # Linear warmup over the first N updates.
    warmup_updates: int = field(default=0, metadata={'help': 'warmup the learning rate linearly for the first N updates'})
    # Interpolated from the top-level optimization config.
    lr: List[float] = II('optimization.lr')
class CompletionChunk(TypedDict):
    """One streamed chunk of an OpenAI-style text-completion response."""
    id: str  # completion identifier
    object: Literal['text_completion']  # fixed type discriminator
    created: int  # creation timestamp
    model: str  # name of the model that produced this chunk
    choices: List[CompletionChoice]  # completion choices in this chunk
def gptneox_sample_token(ctx: gptneox_context_p, candidates) -> gptneox_token:
    """ctypes wrapper: delegate token sampling from *candidates* to the
    native ``gptneox_sample_token`` in the loaded shared library."""
    return _lib.gptneox_sample_token(ctx, candidates)
def display_first_plan(folder, ruler=None):
    """Render the target, first model and first plan of *folder* into one
    screenshot at ``results/images/firstplan_<folder>.png``; optionally draw
    a ruler between ``ruler[0]`` and ``ruler[1]``."""
    base = 'results/vtk_files/' + folder
    target_file = base + '/Target.vtk'
    model_file = base + '/Descent/Models/Model_1.vtk'
    plan_file = base + '/Descent/Plans/Plan_1.vtk'
    output_file = 'results/images/firstplan_' + folder + '.png'
    view = get_view()
    display_target(target_file, view)
    display_plan(plan_file, view)
    display_model(model_file, view)
    if ruler is not None:
        display_ruler(ruler[0], ruler[1], view)
    screenshot(view, output_file)
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, **kwargs):
    """Build a PyTorch DataLoader for *dataset*.

    In distributed mode each rank gets its own sampler with
    ``samples_per_gpu`` items per batch; otherwise the batch size and worker
    count scale with ``num_gpus``.

    Args:
        dataset: dataset to load from.
        samples_per_gpu: batch size on each GPU.
        workers_per_gpu: dataloader workers per GPU.
        num_gpus: number of GPUs used in non-distributed mode.
        dist: whether running under torch.distributed.
        shuffle: shuffle samples (group samplers are used when True).
        seed: base seed used to seed each worker deterministically.
        **kwargs: forwarded to ``DataLoader``.

    Returns:
        torch.utils.data.DataLoader
    """
    (rank, world_size) = get_dist_info()
    if dist:
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank)
        else:
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = (GroupSampler(dataset, samples_per_gpu) if shuffle else None)
        batch_size = (num_gpus * samples_per_gpu)
        num_workers = (num_gpus * workers_per_gpu)
    # Seed each worker differently but deterministically when a seed is given.
    init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
    return data_loader
def CalculateKeypointCenters(boxes):
    """Return box centers: the element-wise mean of columns [0, 1] and
    columns [2, 3] of *boxes* (presumably [ymin, xmin, ymax, xmax] order —
    confirm with callers)."""
    return tf.divide(tf.add(tf.gather(boxes, [0, 1], axis=1), tf.gather(boxes, [2, 3], axis=1)), 2.0)
def get_model_tester_from_test_class(test_class):
    """Instantiate *test_class*, run its ``setUp`` (when present), and return
    the class of its ``model_tester`` attribute — or None when the attribute
    is absent or unset."""
    instance = test_class()
    if hasattr(instance, 'setUp'):
        instance.setUp()
    tester = getattr(instance, 'model_tester', None)
    if tester is not None:
        return tester.__class__
    return None
class LogWriter(object):
def __init__(self, path, args):
if ('' in args):
del args['']
self.path = path
self.args = args
with open(self.path, 'w') as f:
f.write('Training Log\n')
f.write('Specifications\n')
for argname in self.args:
f.write('{} : {}\n'.format(argname, self.args[argname]))
f.write('Checkpoints:\n')
def checkpoint(self, to_write):
with open(self.path, 'a') as f:
f.write((to_write + '\n'))
def initBest(self):
self.current_best = {'loglik': numpy.finfo(float).min, 'distance': numpy.finfo(float).max, 'loss': numpy.finfo(float).max, 'rmse': numpy.finfo(float).max, 'acc': numpy.finfo(float).min}
self.episode_best = 'NeverUpdated'
def updateBest(self, key, value, episode):
updated = False
if ((key == 'loglik') or (key == 'acc')):
if (value > self.current_best[key]):
updated = True
self.current_best[key] = value
self.episode_best = episode
elif ((key == 'distance') or (key == 'loss') or (key == 'rmse')):
if (value < self.current_best[key]):
updated = True
self.current_best[key] = value
self.episode_best = episode
else:
raise Exception('unknown key {}'.format(key))
return updated |
class SequenceTagger(TextKerasModel):
    """Keras-backed wrapper around nlp-architect's sequence chunker, with
    joint POS and chunk label outputs (per ``num_pos_labels`` /
    ``num_chunk_labels``)."""

    def __init__(self, num_pos_labels, num_chunk_labels, word_vocab_size, char_vocab_size=None, word_length=12, feature_size=100, dropout=0.2, classifier='softmax', optimizer=None):
        classifier = classifier.lower()
        invalidInputError((classifier in ['softmax', 'crf']), 'classifier should be either softmax or crf')
        super(SequenceTagger, self).__init__(chunker.SequenceTagger(use_cudnn=False), vocabulary_size=word_vocab_size, num_pos_labels=num_pos_labels, num_chunk_labels=num_chunk_labels, char_vocab_size=char_vocab_size, max_word_len=word_length, feature_size=feature_size, dropout=dropout, classifier=classifier, optimizer=optimizer)

    @staticmethod
    def load_model(path):
        """Load a saved SequenceTagger from *path*.

        NOTE(review): restored ``@staticmethod`` — the original definition
        took no ``self``/``cls`` yet lived inside the class (decorator
        apparently stripped during extraction); confirm against upstream.
        """
        labor = chunker.SequenceTagger(use_cudnn=False)
        model = TextKerasModel._load_model(labor, path)
        model.__class__ = SequenceTagger
        return model
def sync_record(filename, duration, fs, channels):
    """Record *duration* seconds of audio at sample rate *fs* with the given
    channel count, blocking until done, then write the result to *filename*."""
    print('recording')
    frames = int(duration * fs)
    recording = sd.rec(frames, samplerate=fs, channels=channels)
    sd.wait()  # block until the recording finishes
    sf.write(filename, recording, fs)
    print('done recording')
def batch_to(tensor: Tensor, batch_shape: tuple, num_nonbatch: int):
    """Broadcast *tensor*'s batch dimensions to *batch_shape*, leaving its
    trailing *num_nonbatch* dimensions untouched.

    Implemented by broadcasting against an empty reference tensor of the
    target batch shape via ``batch_broadcast``.
    """
    batch_ref = torch.empty(batch_shape)
    (out_tensor, _) = batch_broadcast((tensor, batch_ref), (num_nonbatch, 0))
    return out_tensor
_module()  # NOTE(review): looks like a class-registration decorator for FPN that lost its "@" and target during extraction — confirm against upstream
class FPN(nn.Module):
    """Feature Pyramid Network neck.

    Builds ``num_outs`` output feature maps from ``len(in_channels)``
    backbone levels using 1x1 lateral convs, a top-down upsample-and-add
    pathway, and 3x3 output convs; optional extra levels come from stride-2
    convs (``add_extra_convs``) or parameter-free max pooling.
    """
    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so later mutation of the caller's dict cannot affect us.
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            # Use every backbone level from start_level upward.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # A fixed end level means no extra levels are allowed on top.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            # String form names the input source for the extra convs.
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # Legacy bool form: translate via deprecated extra_convs_on_inputs.
            if extra_convs_on_inputs:
                warnings.simplefilter('once')
                warnings.warn('"extra_convs_on_inputs" will be deprecated in v2.9.0,Please use "add_extra_convs"', DeprecationWarning)
                self.add_extra_convs = 'on_input'
            else:
                self.add_extra_convs = 'on_output'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral projection (optionally un-normalised) and a 3x3
            # smoothing conv per used backbone level.
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra pyramid levels beyond the backbone, built with stride-2 convs.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    def init_weights(self):
        """Xavier-initialise every Conv2d in the module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
    # NOTE(review): the next line looks like a stripped "@auto_fp16()"
    # decorator for forward() — confirm against upstream.
    _fp16()
    def forward(self, inputs):
        """Compute the FPN outputs; returns a tuple of num_outs tensors."""
        assert (len(inputs) == len(self.in_channels))
        # 1x1 lateral projections of the used backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down pathway: upsample each level and add it into the one below.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] += F.interpolate(laterals[i], **self.upsample_cfg)
            else:
                # Match the lower level's spatial size exactly.
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg)
        # 3x3 smoothing convs on each merged map.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # Parameter-free extra levels via stride-2 max pooling.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # First extra level reads from the configured source.
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                # Remaining extra levels chain off the previous output.
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
def _empty_iterator(tensor) -> bool:
from collections.abc import Iterable
if isinstance(tensor, Iterable):
if (len(tensor) == 0):
return True
return False |
_features_generator('ecfp4')  # NOTE(review): likely a stripped "@...('ecfp4')" feature-generator registration decorator for the function below — confirm against upstream
def ecfp4_features_generator(mol: Molecule) -> np.ndarray:
    """Look up precomputed ECFP4 features for a molecule.

    *mol* may be an RDKit Mol (converted to an isomeric SMILES) or a SMILES
    string; features are read from the pickled smiles->ECFP4 mapping under
    ``PRETRAINED_SMILES_PATH``.  Returns None (after printing a warning)
    when the SMILES is not in the mapping.

    NOTE(review): the pickle is re-loaded on every call, which is costly for
    repeated use — consider caching upstream.
    """
    smiles = (Chem.MolToSmiles(mol, isomericSmiles=True) if (type(mol) != str) else mol)
    mapping_filepath = os.path.join(PRETRAINED_SMILES_PATH, 'smiles2ecfp4.pkl')
    with open(mapping_filepath, 'rb') as reader:
        mapping = pickle.load(reader, encoding='latin-1')
    try:
        features = mapping[smiles]
        return features
    except KeyError:
        print('No ECFP4 features for smiles {}'.format(smiles))
def normalize_digraph(A):
    """Right-normalise an adjacency matrix by in-degree: ``AD = A @ D^-1``.

    Column j of the result is A[:, j] scaled by 1/deg(j), where
    ``deg = A.sum(axis=0)``; zero-degree columns stay zero.

    Vectorised replacement for the original O(n) Python loop; also avoids
    ``int ** -1``, which numpy rejects for integer-typed adjacency matrices.
    """
    degree = np.sum(A, 0)
    inv_degree = np.zeros(degree.shape, dtype=float)
    nonzero = degree > 0
    inv_degree[nonzero] = 1.0 / degree[nonzero]
    # Multiplying by a row vector scales columns: same as A @ diag(inv_degree).
    return A * inv_degree
def extract_threads():
    """Parse the Avocado plain-text email dump into conversation threads.

    For every subject in subjects.json, parses each referenced message file
    (From/To/CC/BCC/Subject/Date headers plus body), orders messages by
    timestamp, de-duplicates, keeps only messages whose participants overlap
    the thread so far, drops degenerate threads, and writes the result to
    Avocado.json.

    NOTE(review): the branch ``elif ('' in line):`` is always true for any
    string, so the final ``else`` that collects body lines is unreachable —
    a character was probably lost during extraction; confirm upstream.
    """
    print('====Extract threads and save to Avocado.json====')
    threads = {}
    lens = []
    with open('subjects.json', 'r') as f:
        subjects = json.load(f)
    datestr = '%d %b %Y %H:%M:%S UTC'
    for subject in tqdm(subjects):
        thread = []
        for file in subjects[subject]:
            # Per-message fields extracted from the raw text file.
            name = None
            email = None
            sub = None
            date = None
            received_time = None
            tos = []
            content = []
            directory = file.split('-')[0]
            with open(f'avocado_text/{directory}/{file}', 'r') as f:
                while True:
                    line = f.readline()
                    if (not line):
                        break
                    line = line.strip()
                    if (not line):
                        continue
                    elif (('-----Original Message-----' in line) or ('-----Ursprungliche Nachricht-----' in line)):
                        # Stop at quoted previous messages.
                        break
                    elif line.startswith('From:'):
                        items = line.replace('From:', '').split('<')
                        if (len(items) < 2):
                            break
                        email = items[1].replace('>', '').strip()
                        name = items[0].replace('"', '').strip()
                    elif (line.startswith('To:') or line.startswith('to:') or line.startswith('TO:')):
                        tos.extend(line.replace('To:', '').replace('to:', '').replace('TO:', '').replace('"', '').replace("'", '').strip().split(','))
                    elif (line.startswith('CC:') or line.startswith('Cc:') or line.startswith('cc:')):
                        tos.extend(line.replace('CC:', '').replace('Cc:', '').replace('cc:', '').replace('"', '').replace("'", '').strip().split(','))
                    elif (line.startswith('Bcc:') or line.startswith('bcc:') or line.startswith('BCC:')):
                        tos.extend(line.replace('Bcc:', '').replace('bcc:', '').replace('BCC:', '').replace('"', '').replace("'", '').strip().split(','))
                    elif line.startswith('Subject:'):
                        sub = line.replace('Subject:', '').strip()
                    elif line.startswith('Date:'):
                        try:
                            date = line.replace('Date:', '').strip()
                            received_time = str(datetime.datetime.timestamp(datetime.datetime.strptime(date, datestr)))
                        except:
                            # Unparseable date: abandon this message.
                            break
                    elif (line.startswith('Message-ID:') or line.startswith('In-Reply-To:') or line.startswith('MIME-Version:') or line.startswith('Reply-To:') or line.startswith('Content-Type:') or line.startswith('Sender:')):
                        continue
                    elif ('' in line):
                        # NOTE(review): always true — see docstring.
                        continue
                    else:
                        content.append(line)
            if ((not email) or (not tos) or (not content) or (not date) or (not received_time)):
                # Skip messages missing any mandatory field.
                continue
            else:
                thread.append({'file': file, 'name': name, 'email': email, 'subject': sub, 'date': date, 'received_time': received_time, 'to': tos, 'content': content})
        if (len(thread) == 0):
            continue
        # Chronological order within the thread.
        thread = sorted(thread, key=(lambda x: float(x['received_time'])))
        new_thread = []
        emails = {}
        persons = set()
        for email in thread:
            my = email['email'].lower()
            tos = [my]
            key = ' '.join((tos + [email['received_time']]))
            if (key in emails):
                # Duplicate sender+timestamp: skip.
                continue
            for to in email['to']:
                if (('<' in to) and ('>' in to)):
                    to = to.split('<')[1].split('>')[0].lower()
                to = re.sub('\\(\\S+\\)', '', to)
                tos.append(to.lower())
            if (persons and (len((set(tos) & persons)) == 0)):
                # No participant overlap with the thread so far: stop here.
                break
            new_thread.append(email)
            persons.update(tos)
            emails[key] = email
        # Drop threads whose messages all share identical body text.
        content = set()
        for email in new_thread:
            content.add(' '.join(email['content']).lower())
        if (len(content) == 1):
            continue
        # Keep only threads of length 3..50.
        if (2 < len(new_thread) <= 50):
            lens.append(len(new_thread))
            threads[subject] = new_thread
    print(f'======{len(lens)} threads in total, the average/max thread lengths are {np.mean(lens)}/{max(lens)}======')
    with open('Avocado.json', 'w') as f:
        json.dump(threads, f)
def test_feature_importances_tabnet():
    """Smoke-test TabNet feature importances.

    After a one-epoch fit, the global importance vector must have one entry
    per training column, and per-sample explanations must match the test
    frame's shape.
    """
    preprocessor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=cont_cols)
    X_train = preprocessor.fit_transform(df_tr).astype(float)
    X_test = preprocessor.transform(df_te).astype(float)
    deeptabular = TabNet(
        column_idx=preprocessor.column_idx,
        cat_embed_input=preprocessor.cat_embed_input,
        continuous_cols=preprocessor.continuous_cols,
        embed_continuous=True,
    )
    wide_deep = WideDeep(deeptabular=deeptabular)
    trainer = Trainer(wide_deep, objective='binary')
    trainer.fit(X_tab=X_train, target=target, n_epochs=1, batch_size=16, feature_importance_sample_size=32)
    global_imps = trainer.feature_importance
    per_sample_imps = trainer.explain(X_test, save_step_masks=False)
    assert (len(global_imps) == df_tr.shape[1]) and (per_sample_imps.shape == df_te.shape)
def test_batchify_fn(data):
error_msg = 'batch must contain tensors, tuples or lists; found {}'
if isinstance(data[0], (str, torch.Tensor)):
return list(data)
elif isinstance(data[0], (tuple, list)):
data = zip(*data)
return [test_batchify_fn(i) for i in data]
raise TypeError(error_msg.format(type(batch[0]))) |
class VideoQABuilder(BaseDatasetBuilder):
    """Dataset builder for video question answering.

    Delegates construction to the base builder, then attaches the
    answer-to-label mapping (from build_info) to every split.
    """

    train_dataset_cls = VideoQADataset
    eval_dataset_cls = VideoQADataset

    def build(self):
        datasets = super().build()
        ans2label_cfg = self.config.build_info.annotations.get('ans2label')
        if ans2label_cfg is None:
            raise ValueError('ans2label is not specified in build_info.')
        ans2label_path = get_cache_path(ans2label_cfg.storage)
        for split_name in datasets:
            datasets[split_name]._build_class_labels(ans2label_path)
        return datasets
def _get_rpn_stage(arch_def, num_blocks):
    """Extract the RPN stage blocks from an architecture definition.

    Args:
        arch_def: architecture definition dict; its 'rpn' entry names the
            stage indices used for the RPN.
        num_blocks: if > 0, keep only the last `num_blocks` blocks of the
            selected stage.

    Returns:
        The list of block definitions under the result's 'stages' key.
    """
    rpn_stage = arch_def.get('rpn')
    ret = mbuilder.get_blocks(arch_def, stage_indices=rpn_stage)
    if (num_blocks > 0):
        # Fix: Logger.warn() is a deprecated alias; warning() is the
        # supported spelling.
        logger.warning('Use last {} blocks in {} as rpn'.format(num_blocks, ret))
        block_count = len(ret['stages'])
        assert (num_blocks <= block_count), 'use block {}, block count {}'.format(num_blocks, block_count)
        blocks = range((block_count - num_blocks), block_count)
        ret = mbuilder.get_blocks(ret, block_indices=blocks)
    return ret['stages']
# NOTE(review): this looks like a decorator that lost its leading '@'
# (i.e. `@_KEYPOINT_PREDICTOR.register('KeypointRCNNPredictor')` above the
# class). As a bare statement it only calls register() with the name string
# and never associates the class — verify intent against the registry API.
_KEYPOINT_PREDICTOR.register('KeypointRCNNPredictor')
class KeypointRCNNPredictor(nn.Module):
    """Keypoint head predictor: one transposed convolution producing a score
    map per keypoint, followed by a further 2x bilinear upsampling."""
    def __init__(self, cfg, in_channels):
        super(KeypointRCNNPredictor, self).__init__()
        input_features = in_channels
        # One output channel per keypoint class.
        num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_CLASSES
        deconv_kernel = 4
        # stride=2 with padding (kernel//2 - 1) doubles spatial resolution.
        self.kps_score_lowres = layers.ConvTranspose2d(input_features, num_keypoints, deconv_kernel, stride=2, padding=((deconv_kernel // 2) - 1))
        nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode='fan_out', nonlinearity='relu')
        nn.init.constant_(self.kps_score_lowres.bias, 0)
        self.up_scale = 2
        self.out_channels = num_keypoints
    def forward(self, x):
        x = self.kps_score_lowres(x)
        # Additional 2x bilinear upsample on top of the deconv's 2x.
        x = layers.interpolate(x, scale_factor=self.up_scale, mode='bilinear', align_corners=False)
        return x
def test_d1_mean(barrel):
m = barrel.first_derivative_mean()
assert isinstance(m, np.ndarray) |
class Request():
    """Simple value object describing a request to be dispatched.

    Attributes:
        method: request method name (presumably HTTP-style — confirm with callers).
        operation: name of the operation to invoke.
        data: payload dictionary for the request.
    """

    def __init__(self, method: str, operation: str, data: dict) -> None:
        self.method: str = method
        self.operation: str = operation
        self.data: dict = data

    def __repr__(self) -> str:
        # Added for debuggability; purely additive, existing callers unaffected.
        return f'{type(self).__name__}(method={self.method!r}, operation={self.operation!r}, data={self.data!r})'
def demo(word):
    """Print the five nearest neighbours of `word` in the kv embedding space."""
    print('[code-vectors] 5 closest words to "{}"'.format(word))
    neighbours = kv.most_similar(word, topn=5)
    for rank, (neighbour, _score) in enumerate(neighbours):
        print('[code-vectors] #{}. {}'.format(rank, neighbour))
    print()
class TimedRule(abstract_rule.AbstractRule):
    """Wraps one or more rules so they are only stepped inside a time window.

    `step_interval` is either a (start, stop) pair or a zero-argument
    callable producing one; the wrapped rules are stepped only while the
    start counter has reached zero and the stop counter is still positive.
    """

    def __init__(self, step_interval, rules):
        # Normalize the interval to a callable so reset() can resample it.
        if callable(step_interval):
            self._step_interval = step_interval
        else:
            self._step_interval = (lambda: step_interval)
        # Normalize a single rule into a sequence.
        if isinstance(rules, (list, tuple)):
            self._rules = rules
        else:
            self._rules = (rules,)

    def reset(self, state, meta_state):
        """Resample the window and reset every wrapped rule."""
        (self._steps_until_start, self._steps_until_stop) = self._step_interval()
        for wrapped in self._rules:
            wrapped.reset(state, meta_state)

    def step(self, state, meta_state):
        """Step the wrapped rules if inside the window, then advance time."""
        inside_window = (self._steps_until_start <= 0) and (self._steps_until_stop > 0)
        if inside_window:
            for wrapped in self._rules:
                wrapped.step(state, meta_state)
        self._steps_until_start -= 1
        self._steps_until_stop -= 1
def evaluate_model(data_loader, model, idx2attr, device, topk=5):
    """Collect top-(2*topk) attribute predictions per item.

    Runs `model` over `data_loader` in eval mode and returns, keyed by each
    item's asin, the predicted attribute names and their scores.
    """
    model.eval()
    test_num = 0
    prediction = {}
    k = topk * 2
    for (images, asins) in tqdm.tqdm(data_loader):
        with torch.no_grad():
            images = images.to(device)
            outs = model(images)
            (top_scores, top_outs) = outs.topk(dim=1, k=k, largest=True)
            for row in range(images.size(0)):
                tags = [idx2attr[top_outs[row, col].item()] for col in range(k)]
                prediction[asins[row]] = {
                    'predict': tags,
                    'pred_score': top_scores[row].cpu().numpy().tolist(),
                }
    return prediction
class SimpleGaussianGRUModel(Model):
    """Minimal Gaussian GRU model stub: every mean/log-std output is filled
    with one trainable scalar (`return_var`), making behaviour trivially
    predictable — presumably a test fixture; confirm with callers."""
    def __init__(self, output_dim, hidden_dim, name='SimpleGaussianGRUModel', *args, **kwargs):
        super().__init__(name)
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
    def network_input_spec(self):
        # Names of the inputs _build() expects, in order.
        return ['full_input', 'step_input', 'step_hidden_input']
    def network_output_spec(self):
        # Names of the tensors _build() returns, in order.
        return ['mean', 'step_mean', 'log_std', 'step_log_std', 'step_hidden', 'init_hidden', 'dist']
    def _build(self, obs_input, step_obs_input, step_hidden, step_cell, name=None):
        # Single trainable scalar used to fill every mean and log-std output.
        return_var = tf.compat.v1.get_variable('return_var', (), initializer=tf.constant_initializer(0.5))
        # Full-sequence outputs, shape (batch, time, output_dim).
        mean = log_std = tf.fill((tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim), return_var)
        # Single-step outputs, shape (batch, output_dim).
        step_mean = step_log_std = tf.fill((tf.shape(step_obs_input)[0], self.output_dim), return_var)
        # Non-trainable zero initial hidden state.
        hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden', shape=(self.hidden_dim,), initializer=tf.zeros_initializer(), trainable=False, dtype=tf.float32)
        dist = DiagonalGaussian(self.output_dim)
        return (mean, step_mean, log_std, step_log_std, step_hidden, hidden_init_var, dist)
def _dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None):
    """Pickle `obj` to the already-open file object `file`."""
    pickler = _Pickler(file, protocol, fix_imports=fix_imports, buffer_callback=buffer_callback)
    pickler.dump(obj)
def CheckCheck(filename, clean_lines, linenum, error):
    """Checks CHECK-style macro invocations for comparisons against constants.

    Flags e.g. CHECK(a == 42) and suggests the two-argument form
    (CHECK_EQ etc., looked up in _CHECK_REPLACEMENT), which prints both
    operand values on failure.

    Args:
        filename: name of the current file.
        clean_lines: CleansedLines instance for the file.
        linenum: line number to check.
        error: callback invoked with any issue found.
    """
    lines = clean_lines.elided
    (check_macro, start_pos) = FindCheckMacro(lines[linenum])
    if (not check_macro):
        return
    # Find the closing parenthesis of the macro's argument list.
    (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos)
    if (end_pos < 0):
        return
    # If the macro call is not followed by a bare semicolon, the caller is
    # probably streaming a custom message; don't suggest replacements.
    if (not Match('\\s*;', last_line[end_pos:])):
        return
    if (linenum == end_line):
        expression = lines[linenum][(start_pos + 1):(end_pos - 1)]
    else:
        # Gather a multi-line argument expression into one string.
        expression = lines[linenum][(start_pos + 1):]
        for i in xrange((linenum + 1), end_line):
            expression += lines[i]
        expression += last_line[0:(end_pos - 1)]
    # Tokenize the expression into lhs, one relational operator, and rhs,
    # accounting for parentheses and non-relational operators.
    lhs = ''
    rhs = ''
    operator = None
    while expression:
        matched = Match('^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||==|!=|>=|>|<=|<|\\()(.*)$', expression)
        if matched:
            token = matched.group(1)
            if (token == '('):
                # Parenthesized operand: consume through the matching ')'.
                expression = matched.group(2)
                (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
                if (end < 0):
                    return  # unmatched parenthesis — give up
                lhs += ('(' + expression[0:end])
                expression = expression[end:]
            elif (token in ('&&', '||')):
                # More than one boolean term — no simple replacement exists.
                return
            elif (token in ('<<', '<<=', '>>', '>>=', '->*', '->')):
                # Shift/member operators are part of the operand itself.
                lhs += token
                expression = matched.group(2)
            else:
                # Relational operator: this is the comparison to report on.
                operator = token
                rhs = matched.group(2)
                break
        else:
            # Plain operand text: consume a long run at once when possible,
            # falling back to a single character.
            matched = Match('^([^-=!<>()&|]+)(.*)$', expression)
            if (not matched):
                matched = Match('^(\\s*\\S)(.*)$', expression)
                if (not matched):
                    break
            lhs += matched.group(1)
            expression = matched.group(2)
    # Need all three parts to make a suggestion.
    if (not (lhs and operator and rhs)):
        return
    # The loop above already rejected && / || on the left; reject them on
    # the right as well (they ended up in rhs when found after the operator).
    if ((rhs.find('&&') > (- 1)) or (rhs.find('||') > (- 1))):
        return
    # Only suggest a replacement when one side is a printable constant
    # (integer literal, string, or char literal).
    lhs = lhs.strip()
    rhs = rhs.strip()
    match_constant = '^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\\\'.*\\\')$'
    if (Match(match_constant, lhs) or Match(match_constant, rhs)):
        error(filename, linenum, 'readability/check', 2, ('Consider using %s instead of %s(a %s b)' % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)))
class testset_pytable_with_soft_label(Dataset):
    """Test-time dataset backed by one or two PyTables HDF5 files.

    Each row holds flattened source/target point clouds (reshaped to
    1024 x 3), a 1024 x 1024 correspondence label matrix, optional pairwise
    scores, and one soft label per keep-ratio ('sl_002' ... 'sl_02').
    When two files are given their rows are concatenated: indices
    [0, len1) map to the first file, [len1, len1 + len2) to the second.
    """

    def __init__(self, test_h5file_name, show=False, outname=None):
        self.outname = outname
        # Keep-ratios used to derive the soft-label column names.
        self.ratio_list = [0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2]
        self.test_h5file_name = test_h5file_name
        self.return_dict = {}
        # Table handles are opened lazily in __getitem__ (self.data); here we
        # only count rows so __len__ works before any worker opens a file.
        if isinstance(test_h5file_name, list) and (len(test_h5file_name) == 2):
            self.data = None
            print('two pytable files for dcp inference')
            with pytables.open_file(test_h5file_name[0], mode='r') as h5file1:
                with pytables.open_file(test_h5file_name[1], mode='r') as h5file2:
                    self.len1 = len(h5file1.get_node('/data'))
                    self.len2 = len(h5file2.get_node('/data'))
                    self.len = (self.len1 + self.len2)
        else:
            self.data = None
            with pytables.open_file(test_h5file_name, mode='r') as h5file:
                self.len = len(h5file.get_node('/data'))

    def _select_output(self, data_dict):
        # Single return path replacing two near-identical hand-written dict
        # literals: 'scores_flat' is included only when the file provided it.
        keys = ['src_flat', 'tgt_flat', 'label_flat',
                'sl_002', 'sl_004', 'sl_006', 'sl_008', 'sl_01',
                'sl_012', 'sl_014', 'sl_016', 'sl_018', 'sl_02']
        out = {k: data_dict[k] for k in keys}
        if 'scores_flat' in data_dict:
            out['scores_flat'] = data_dict['scores_flat']
        return out

    def __getitem__(self, idx):
        if isinstance(self.test_h5file_name, list) and (len(self.test_h5file_name) == 2):
            print('two pytable files for dcp inference')
            # NOTE(review): self.data caches whichever file is opened first,
            # so mixing indices from both halves reuses the first handle —
            # pre-existing behaviour, confirm before relying on random access.
            if idx in range(0, self.len1):
                if self.data is None:
                    self.data = pytables.open_file(self.test_h5file_name[0], mode='r').get_node('/data')
                data_dict = self.digest_data(idx)
            elif idx in range(self.len1, (self.len1 + self.len2)):
                if self.data is None:
                    self.data = pytables.open_file(self.test_h5file_name[1], mode='r').get_node('/data')
                # Bug fix: rows of the second file start at len1, so the
                # local index is idx - len1 (the original subtracted len2).
                data_dict = self.digest_data(idx - self.len1)
            return self._select_output(data_dict)
        else:
            if self.data is None:
                self.data = pytables.open_file(self.test_h5file_name, mode='r').get_node('/data')
            data_dict = self.digest_data(idx)
            return self._select_output(data_dict)

    def __len__(self):
        return self.len

    def digest_data(self, idx):
        """Read row `idx` from the open table and reshape every field.

        Point clouds become (1024, 3); label/score matrices (1024, 1024).
        'scores_flat' is optional and silently skipped when missing.
        """
        assert self.data is not None
        key_set = ['src_flat', 'tgt_flat', 'label_flat', 'scores_flat']
        for each_ratio in self.ratio_list:
            key_set.append('sl_' + str(each_ratio).replace('.', ''))
        # Fresh dict per item: the original reused self.return_dict across
        # calls, which could leak a stale 'scores_flat' from a previous row.
        self.return_dict = {}
        row = self.data[idx]
        for name_ in key_set:
            if name_ in ('src_flat', 'tgt_flat'):
                self.return_dict[name_] = row[name_].reshape([1024, 3])
            elif name_ == 'scores_flat':
                try:
                    self.return_dict[name_] = row[name_].reshape([1024, 1024])
                except Exception:
                    # Best-effort: older files have no per-pair scores.
                    continue
            else:
                self.return_dict[name_] = row[name_].reshape([1024, 1024])
        return self.return_dict

    def corr_to_list(self, corr_matrix):
        """Return the nonzero (row, col) index pairs of `corr_matrix` as tuples."""
        try:
            # `as_tuple` is only accepted by newer torch versions.
            pair = torch.nonzero(corr_matrix.clone().detach(), as_tuple=False)
        except TypeError:
            pair = torch.nonzero(corr_matrix.clone().detach())
        return [tuple(each) for each in pair.tolist()]

    def label_ACC_percentage_for_inference(self, label_in, label_gt, pinput1, input2, name, sample_n_to_visualize=None):
        """Mean per-row hit rate (%) between predicted and ground-truth
        correspondence matrices, averaged over the batch.

        A row "hits" when prediction and ground truth share at least one
        nonzero entry. `pinput1`, `input2`, `name` and
        `sample_n_to_visualize` are kept for interface compatibility but do
        not affect the computation.
        """
        assert label_in.shape == label_gt.shape
        bsize = label_in.shape[0]
        b_acc = []
        for i in range(bsize):
            element_product = torch.mul(label_in[i], label_gt[i])
            N1 = label_in[i].shape[0]
            sum_row = torch.sum(element_product, dim=-1)
            hit = (sum_row != 0).sum()
            acc = hit.float() / torch.tensor(N1).float()
            b_acc.append(acc * 100.0)
        return torch.mean(torch.stack(b_acc))
class SARPN(nn.Module):
    """Structure-Aware Residual Pyramid Network.

    Combines a backbone feature extractor with an Adaptive Dense Feature
    Fusion (ADFF) module and a Residual Pyramid Decoder (RPD); channel
    sizes are looked up from the backbone name.
    """

    @staticmethod
    def _channel_config(backbone):
        """Return (adff_num_features, rpd_num_features, block_channel) for a
        backbone name.

        Raises:
            ValueError: for an unsupported backbone name (previously this
                surfaced later as a NameError on undefined locals).
        """
        if backbone in ('ResNet18', 'ResNet34'):
            return (640, 512, [64, 64, 128, 256, 512])
        if backbone in ('ResNet50', 'ResNet101', 'ResNet152'):
            return (1280, 2048, [64, 256, 512, 1024, 2048])
        if backbone == 'DenseNet121':
            return (640, 1024, [64, 128, 256, 512, 1024])
        if backbone == 'DenseNet161':
            return (1280, 2048, [96, 192, 384, 1056, 2208])
        if backbone == 'DenseNet169':
            return (1280, 2048, [64, 128, 256, 640, 1664])
        if backbone == 'DenseNet201':
            return (1280, 2048, [64, 128, 256, 896, 1920])
        if backbone == 'SENet154':
            return (1280, 2048, [128, 256, 512, 1024, 2048])
        if backbone in ('SE_ResNet50', 'SE_ResNet101', 'SE_ResNet152', 'SE_ResNext50_32x4d', 'SE_ResNext101_32x4d'):
            return (1280, 2048, [64, 256, 512, 1024, 2048])
        raise ValueError('Unsupported backbone: {}'.format(backbone))

    def __init__(self, args):
        super(SARPN, self).__init__()
        print('backbone:', args.backbone)
        self.feature_extraction = get_models(args)
        (adff_num_features, rpd_num_features, block_channel) = self._channel_config(args.backbone)
        # The decoder starts from the deepest backbone stage.
        top_num_features = block_channel[-1]
        self.residual_pyramid_decoder = modules.RPD(rpd_num_features, top_num_features)
        self.adaptive_dense_feature_fusion = modules.ADFF(block_channel, adff_num_features, rpd_num_features)

    def forward(self, x):
        """Predict multi-scale depth maps for input images `x`."""
        feature_pyramid = self.feature_extraction(x)
        fused_feature_pyramid = self.adaptive_dense_feature_fusion(feature_pyramid)
        multiscale_depth = self.residual_pyramid_decoder(feature_pyramid, fused_feature_pyramid)
        return multiscale_depth
class SFUniDADataset(Dataset):
    """Dataset for source-free universal domain adaptation.

    Splits the label space into shared / source-private / target-private
    classes, filters the file list down to the classes visible to this
    domain (`d_type`), and yields (train_view, test_view, label, index)
    tuples. For the target domain, any class outside the source label set
    collapses into a single "unknown" label equal to len(source_classes).
    """

    def __init__(self, args, data_dir, data_list, d_type, preload_flg=True) -> None:
        super(SFUniDADataset, self).__init__()
        self.d_type = d_type
        self.dataset = args.dataset
        self.preload_flg = preload_flg
        self.shared_class_num = args.shared_class_num
        self.source_private_class_num = args.source_private_class_num
        self.target_private_class_num = args.target_private_class_num
        shared = args.shared_class_num
        src_priv = args.source_private_class_num
        tgt_priv = args.target_private_class_num
        self.shared_classes = list(range(shared))
        self.source_private_classes = list(range(shared, shared + src_priv))
        # For Office under OSDA the target-private ids are shifted by an
        # extra 10 — presumably to skip a reserved gap in the label space;
        # confirm against the label files.
        if (args.dataset == 'Office') and (args.target_label_type == 'OSDA'):
            tgt_offset = shared + src_priv + 10
        else:
            tgt_offset = shared + src_priv
        self.target_private_classes = list(range(tgt_offset, tgt_offset + tgt_priv))
        self.source_classes = self.shared_classes + self.source_private_classes
        self.target_classes = self.shared_classes + self.target_private_classes
        self.data_dir = data_dir
        parsed = [item.strip().split() for item in data_list]
        keep = self.source_classes if self.d_type == 'source' else self.target_classes
        self.data_list = [rec for rec in parsed if int(rec[1]) in keep]
        self.pre_loading()
        self.train_transform = train_transform()
        self.test_transform = test_transform()

    def pre_loading(self):
        """For Office-style datasets, cache all images resized to 256x256."""
        if ('Office' in self.dataset) and self.preload_flg:
            self.resize_trans = transforms.Resize((256, 256))
            print('Dataset Pre-Loading Started ....')
            self.img_list = [
                self.resize_trans(Image.open(os.path.join(self.data_dir, rec[0])).convert('RGB'))
                for rec in tqdm(self.data_list, ncols=60)
            ]
            print('Dataset Pre-Loading Done!')

    def load_img(self, img_idx):
        """Return (PIL image, raw label string) for the given index."""
        (img_f, img_label) = self.data_list[img_idx]
        if ('Office' in self.dataset) and self.preload_flg:
            img = self.img_list[img_idx]
        else:
            img = Image.open(os.path.join(self.data_dir, img_f)).convert('RGB')
        return (img, img_label)

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, img_idx):
        (img, raw_label) = self.load_img(img_idx)
        img_label = int(raw_label)
        if self.d_type != 'source' and img_label not in self.source_classes:
            # Target-only classes are mapped to one shared "unknown" id.
            img_label = len(self.source_classes)
        img_train = self.train_transform(img)
        img_test = self.test_transform(img)
        return (img_train, img_test, img_label, img_idx)
# NOTE(review): `_grad()` looks like the remnant of a decorator such as
# `@torch.no_grad()` that lost its leading '@' and module prefix; as written
# it calls an undefined name — verify against the upstream conversion script.
_grad()
def convert_owlvit_checkpoint(pt_backbone, flax_params, attn_params, pytorch_dump_folder_path, config_path=None):
    """Convert an OWL-ViT checkpoint into HuggingFace format and push it.

    Args:
        pt_backbone: original CLIP-style backbone with text/vision towers.
        flax_params: Flax parameter tree holding the detection heads.
        attn_params: attention parameters copied into the HF backbone.
        pytorch_dump_folder_path: local folder name; also the hub repo
            suffix (cloned from f'google/{pytorch_dump_folder_path}').
        config_path: optional path to a pretrained OwlViTConfig; default
            config is used when omitted.
    """
    repo = Repository(pytorch_dump_folder_path, clone_from=f'google/{pytorch_dump_folder_path}')
    repo.git_pull()
    if (config_path is not None):
        config = OwlViTConfig.from_pretrained(config_path)
    else:
        config = OwlViTConfig()
    # Assemble the HF model: backbone weights first, then detection heads.
    hf_backbone = OwlViTModel(config).eval()
    hf_model = OwlViTForObjectDetection(config).eval()
    copy_text_model_and_projection(hf_backbone, pt_backbone)
    copy_vision_model_and_projection(hf_backbone, pt_backbone)
    hf_backbone.logit_scale = pt_backbone.logit_scale
    copy_flax_attn_params(hf_backbone, attn_params)
    hf_model.owlvit = hf_backbone
    copy_class_merge_token(hf_model, flax_params)
    copy_class_box_heads(hf_model, flax_params)
    hf_model.save_pretrained(repo.local_dir)
    # Processor = square feature extractor at the vision input size + CLIP
    # tokenizer padded with '!' and capped at 16 tokens.
    feature_extractor = OwlViTFeatureExtractor(size=config.vision_config.image_size, crop_size=config.vision_config.image_size)
    tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32', pad_token='!', model_max_length=16)
    processor = OwlViTProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    feature_extractor.save_pretrained(repo.local_dir)
    processor.save_pretrained(repo.local_dir)
    repo.git_add()
    repo.git_commit('Upload model and processor')
    repo.git_push()
def RRSE_torch(pred, true, mask_value=None):
    """Root Relative Squared Error between `pred` and `true`.

    If `mask_value` is given, only positions where `true > mask_value`
    contribute.

    NOTE(review): the denominator centres `pred` (not `true`) around
    `true.mean()`; the textbook RRSE uses (true - true.mean()) — confirm
    this variant is intentional before changing it.
    """
    # Fix: compare with None via identity, not `!=` (which tensors overload).
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    return (torch.sqrt(torch.sum(((pred - true) ** 2))) / torch.sqrt(torch.sum(((pred - true.mean()) ** 2))))
class VideoDownloader(VideoCompressor):
    """Downloads videos listed in `self.csv` (columns 'video_path' and
    'feature_path') instead of compressing them."""

    def __getitem__(self, idx):
        """Download item `idx` to its feature_path if not already present."""
        import subprocess  # local import: keeps the block self-contained

        video_path = self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        if not os.path.isfile(output_file):
            os.makedirs(os.path.dirname(output_file), exist_ok=True)
            # Security/robustness fix: the original built a shell string via
            # concatenation and passed it to os.system(), which breaks on (and
            # allows injection through) paths/URLs containing spaces or shell
            # metacharacters. An argv list avoids the shell entirely.
            subprocess.run(['wget', '-O', output_file, video_path])
        return {'video': None, 'input': video_path, 'output': output_file}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.