code stringlengths 101 5.91M |
|---|
def relatively_safe_pickle_dump(obj, path, compression=False):
    """Pickle `obj` to `path` so that a crash mid-write never leaves a corrupt file.

    The data is first written to a sibling temp file (`path + '.relatively_safe'`)
    and then moved into place with a single atomic rename.

    Parameters
    ----------
    obj : any picklable object
    path : str, destination file name
    compression : bool, if True store the pickle as entry 'data' inside a
        DEFLATE-compressed zip archive instead of a raw pickle file.
    """
    temp_storage = path + '.relatively_safe'
    if compression:
        # Pickle into an OS temp file first, then zip that file into place.
        with tempfile.NamedTemporaryFile() as uncompressed_file:
            pickle.dump(obj, uncompressed_file)
            uncompressed_file.file.flush()
            with zipfile.ZipFile(temp_storage, 'w', compression=zipfile.ZIP_DEFLATED) as myzip:
                myzip.write(uncompressed_file.name, 'data')
    else:
        with open(temp_storage, 'wb') as f:
            pickle.dump(obj, f)
    # Bug fix: os.replace (not os.rename) overwrites atomically on every
    # platform; os.rename raises FileExistsError on Windows when `path`
    # already exists, defeating the whole purpose of this helper.
    os.replace(temp_storage, path)
def windows(x, window_size, window_stride=1):
    """Slice dimension 1 of `x` into sliding windows of `window_size`
    taken every `window_stride` steps, then move the window-content axis
    so the result is laid out (dim0, n_windows, window_size, *rest).
    """
    unfolded = x.unfold(1, window_size, window_stride)
    # `unfold` appends the window-content axis last; build a permutation
    # that moves it to position 2 while keeping every other axis in order.
    order = list(range(unfolded.ndim - 1))
    order.insert(2, unfolded.ndim - 1)
    return unfolded.permute(order)
def init_nets(net_configs, dropout_p, n_parties, args):
    """Build one model per federated party and describe the shared layer layout.

    Returns (nets, model_meta_data, layer_type): `nets` maps party index ->
    model, `model_meta_data` lists the shape of every state-dict tensor and
    `layer_type` the matching parameter names (both taken from party 0, since
    all parties share one architecture).
    """
    nets = {net_i: None for net_i in range(n_parties)}
    # Map dataset name -> number of output classes.
    # NOTE(review): a dataset not listed here leaves n_classes undefined and
    # the model constructors below would raise NameError — confirm callers
    # only pass known dataset names.
    if (args.dataset in {'mnist', 'cifar10', 'svhn', 'fmnist'}):
        n_classes = 10
    elif (args.dataset == 'celeba'):
        n_classes = 2
    elif (args.dataset == 'cifar100'):
        n_classes = 100
    elif (args.dataset == 'tinyimagenet'):
        n_classes = 200
    elif (args.dataset == 'femnist'):
        n_classes = 62
    elif (args.dataset == 'emnist'):
        n_classes = 47
    elif (args.dataset in {'a9a', 'covtype', 'rcv1', 'SUSY'}):
        n_classes = 2
    if args.use_projection_head:
        # Projection-head variant takes precedence over args.alg below.
        add = ''
        if (('mnist' in args.dataset) and (args.model == 'simple-cnn')):
            add = '-mnist'  # select the MNIST-sized backbone variant
        for net_i in range(n_parties):
            net = ModelFedCon((args.model + add), args.out_dim, n_classes, net_configs)
            nets[net_i] = net
    elif (args.alg == 'moon'):
        # MOON uses the same backbone naming but without the projection head.
        add = ''
        if (('mnist' in args.dataset) and (args.model == 'simple-cnn')):
            add = '-mnist'
        for net_i in range(n_parties):
            net = ModelFedCon_noheader((args.model + add), args.out_dim, n_classes, net_configs)
            nets[net_i] = net
    else:
        # Plain per-model dispatch; unsupported combinations exit the process.
        for net_i in range(n_parties):
            if (args.dataset == 'generated'):
                net = PerceptronModel()
            elif (args.model == 'mlp'):
                # Per-dataset MLP geometry: input features / classes / hidden widths.
                if (args.dataset == 'covtype'):
                    input_size = 54
                    output_size = 2
                    hidden_sizes = [32, 16, 8]
                elif (args.dataset == 'a9a'):
                    input_size = 123
                    output_size = 2
                    hidden_sizes = [32, 16, 8]
                elif (args.dataset == 'rcv1'):
                    input_size = 47236
                    output_size = 2
                    hidden_sizes = [32, 16, 8]
                elif (args.dataset == 'SUSY'):
                    input_size = 18
                    output_size = 2
                    hidden_sizes = [16, 8]
                net = FcNet(input_size, hidden_sizes, output_size, dropout_p)
            elif (args.model == 'vgg'):
                net = vgg11()
            elif (args.model == 'simple-cnn'):
                # input_dim is the flattened conv output: 16 channels x spatial size.
                if (args.dataset in ('cifar10', 'cinic10', 'svhn')):
                    net = SimpleCNN(input_dim=((16 * 5) * 5), hidden_dims=[120, 84], output_dim=10)
                elif (args.dataset in ('mnist', 'femnist', 'fmnist')):
                    net = SimpleCNNMNIST(input_dim=((16 * 4) * 4), hidden_dims=[120, 84], output_dim=10)
                elif (args.dataset == 'celeba'):
                    net = SimpleCNN(input_dim=((16 * 5) * 5), hidden_dims=[120, 84], output_dim=2)
            elif (args.model == 'vgg-9'):
                if (args.dataset in ('mnist', 'femnist')):
                    net = ModerateCNNMNIST()
                elif (args.dataset in ('cifar10', 'cinic10', 'svhn')):
                    net = ModerateCNN()
                elif (args.dataset == 'celeba'):
                    net = ModerateCNN(output_dim=2)
            elif (args.model == 'resnet'):
                net = ResNet50_cifar10()
            elif (args.model == 'vgg16'):
                net = vgg16()
            else:
                print('not supported yet')
                exit(1)
            nets[net_i] = net
    # Record the layer layout once; all parties share the same architecture.
    model_meta_data = []
    layer_type = []
    for (k, v) in nets[0].state_dict().items():
        model_meta_data.append(v.shape)
        layer_type.append(k)
    return (nets, model_meta_data, layer_type)
def generate_itemset(row_count, max_per_basket, num_freq_sets, item_count, prob_frequent):
    """Write a synthetic transaction file for frequent-itemset mining.

    Items F0..F(max_per_basket-1) form the "frequent" pool from which
    `num_freq_sets` random itemsets are drawn; each of the `row_count`
    baskets embeds one of them with probability `prob_frequent` and is
    topped up with regular items I*.  Output goes to a file named from the
    parameters; one space-separated basket per line.
    """
    pop_frequent = ['F' + str(n) for n in range(0, max_per_basket)]
    pop_regular = ['I' + str(n) for n in range(max_per_basket, item_count)]
    freq_itemsets = []
    filename = f'{prob_frequent}_tsz{max_per_basket}_tct{sizeof_fmt(row_count)}.txt'
    # Pass 1: pre-draw the pool of frequent itemsets.
    for _ in tqdm(range(num_freq_sets), desc=f'(unknown):pass 1/2'):
        size = random.randint(1, max_per_basket)
        freq_itemsets.append(random.sample(pop_frequent, size))
    # Pass 2: emit the baskets.
    with open(filename, 'w') as out:
        for _ in tqdm(range(row_count), desc=f'(unknown):pass 2/2'):
            basket = []
            size = random.randint(1, max_per_basket)
            if random.random() <= prob_frequent:
                chosen = random.randint(0, len(freq_itemsets) - 1)
                for item in freq_itemsets[chosen]:
                    basket.append(item)
            # Fill up to the drawn basket size with regular items.
            needed = max(0, size - len(basket))
            basket = basket + random.sample(pop_regular, needed)
            out.write(' '.join(basket) + '\n')
class CoExNet(BaseModel):
    """Thin named subclass of BaseModel; adds no behaviour of its own.

    NOTE(review): presumably exists so the CoEx architecture can be selected
    by class name in a registry/config — confirm against the model factory.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class ModelOutputTester(unittest.TestCase):
    """Exercises the hybrid dict/tuple behaviour of ModelOutputTest
    (a ModelOutput-style container with optional fields a, b, c).
    """

    def test_get_attributes(self):
        """Unset optional fields read as None; unknown attributes raise."""
        x = ModelOutputTest(a=30)
        self.assertEqual(x.a, 30)
        self.assertIsNone(x.b)
        self.assertIsNone(x.c)
        with self.assertRaises(AttributeError):
            _ = x.d

    def test_index_with_ints_and_slices(self):
        """Integer and slice indexing expose only the *set* fields, in declaration order."""
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))
        # Same positions even when the second set field is c rather than b.
        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))

    def test_index_with_strings(self):
        """String indexing works for set fields; unset fields raise KeyError (not None)."""
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x['a'], 30)
        self.assertEqual(x['b'], 10)
        with self.assertRaises(KeyError):
            _ = x['c']
        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x['a'], 30)
        self.assertEqual(x['c'], 10)
        with self.assertRaises(KeyError):
            _ = x['b']

    def test_dict_like_properties(self):
        """keys/values/items/iteration cover only set fields; all mutation paths raise."""
        x = ModelOutputTest(a=30)
        self.assertEqual(list(x.keys()), ['a'])
        self.assertEqual(list(x.values()), [30])
        self.assertEqual(list(x.items()), [('a', 30)])
        self.assertEqual(list(x), ['a'])
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(list(x.keys()), ['a', 'b'])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [('a', 30), ('b', 10)])
        self.assertEqual(list(x), ['a', 'b'])
        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(list(x.keys()), ['a', 'c'])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [('a', 30), ('c', 10)])
        self.assertEqual(list(x), ['a', 'c'])
        # The container is immutable through every dict mutation API.
        with self.assertRaises(Exception):
            x = x.update({'d': 20})
        with self.assertRaises(Exception):
            del x['a']
        with self.assertRaises(Exception):
            _ = x.pop('a')
        with self.assertRaises(Exception):
            _ = x.setdefault('d', 32)

    def test_set_attributes(self):
        """Attribute assignment is allowed and stays in sync with key access."""
        x = ModelOutputTest(a=30)
        x.a = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x['a'], 10)

    def test_set_keys(self):
        """Key assignment is allowed and stays in sync with attribute access."""
        x = ModelOutputTest(a=30)
        x['a'] = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x['a'], 10)
class TFCvtSelfOutput(tf.keras.layers.Layer):
    """Output projection of a CvT attention block: Dense(embed_dim) + dropout."""

    def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
        super().__init__(**kwargs)
        # Weight init range comes from the model config; layer name fixed to
        # 'dense' so pretrained checkpoints map onto it.
        self.dense = tf.keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = tf.keras.layers.Dropout(drop_rate)

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        """Project and (in training) drop out the attention output."""
        hidden_state = self.dense(inputs=hidden_state)
        hidden_state = self.dropout(inputs=hidden_state, training=training)
        return hidden_state
class TestCityscapesDataset(unittest.TestCase):
    """Checks CityscapesDataset filtering: small/empty images are dropped in
    train mode when filter_cfg is set, and kept in test mode or without it.
    """

    def setUp(self) -> None:
        """Write a fake COCO-style Cityscapes annotation file to disk."""
        image1 = {'file_name': 'munster/munster_000102_000019_leftImg8bit.png', 'height': 1024, 'width': 2048, 'segm_file': 'munster/munster_000102_000019_gtFine_labelIds.png', 'id': 0}
        image2 = {'file_name': 'munster/munster_000157_000019_leftImg8bit.png', 'height': 1024, 'width': 2048, 'segm_file': 'munster/munster_000157_000019_gtFine_labelIds.png', 'id': 1}
        image3 = {'file_name': 'munster/munster_000139_000019_leftImg8bit.png', 'height': 1024, 'width': 2048, 'segm_file': 'munster/munster_000139_000019_gtFine_labelIds.png', 'id': 2}
        # image4 is 31x15, i.e. below the min_size=32 filter used in the tests.
        image4 = {'file_name': 'munster/munster_000034_000019_leftImg8bit.png', 'height': 31, 'width': 15, 'segm_file': 'munster/munster_000034_000019_gtFine_labelIds.png', 'id': 3}
        images = [image1, image2, image3, image4]
        categories = [{'id': 24, 'name': 'person'}, {'id': 25, 'name': 'rider'}, {'id': 26, 'name': 'car'}]
        # Annotations deliberately include degenerate boxes (negative area,
        # negative width/height), an unknown category id and a crowd region.
        annotations = [{'iscrowd': 0, 'category_id': 24, 'bbox': [379.0, 435.0, 52.0, 124.0], 'area': 2595, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 0, 'id': 0}, {'iscrowd': 0, 'category_id': 25, 'bbox': [379.0, 435.0, 52.0, 124.0], 'area': (- 1), 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 0, 'id': 1}, {'iscrowd': 0, 'category_id': 26, 'bbox': [379.0, 435.0, (- 1), 124.0], 'area': 2, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 0, 'id': 2}, {'iscrowd': 0, 'category_id': 24, 'bbox': [379.0, 435.0, 52.0, (- 1)], 'area': 2, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 0, 'id': 3}, {'iscrowd': 0, 'category_id': 1, 'bbox': [379.0, 435.0, 52.0, 124.0], 'area': 2595, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 0, 'id': 4}, {'iscrowd': 1, 'category_id': 26, 'bbox': [379.0, 435.0, 52.0, 124.0], 'area': 2595, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 1, 'id': 5}, {'iscrowd': 0, 'category_id': 26, 'bbox': [379.0, 435.0, 10, 2], 'area': 2595, 'segmentation': {'size': [1024, 2048], 'counts': 'xxx'}, 'image_id': 3, 'id': 6}]
        fake_json = {'images': images, 'annotations': annotations, 'categories': categories}
        self.json_name = 'cityscapes.json'
        dump(fake_json, self.json_name)
        self.metainfo = dict(classes=('person', 'rider', 'car'))

    def tearDown(self):
        """Remove the fake annotation file written by setUp."""
        os.remove(self.json_name)

    def test_cityscapes_dataset(self):
        """With filter_cfg: train mode keeps 1 of 4 images, test mode keeps all 4."""
        dataset = CityscapesDataset(ann_file=self.json_name, data_prefix=dict(img='imgs'), metainfo=self.metainfo, filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()
        self.assertEqual(len(dataset), 1)
        # load_data_list is unfiltered in both modes.
        self.assertEqual(len(dataset.load_data_list()), 4)
        dataset = CityscapesDataset(ann_file=self.json_name, data_prefix=dict(img='imgs'), metainfo=self.metainfo, test_mode=True, filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

    def test_cityscapes_dataset_without_filter_cfg(self):
        """Without filter_cfg all 4 images survive in both train and test mode."""
        dataset = CityscapesDataset(ann_file=self.json_name, data_prefix=dict(img='imgs'), metainfo=self.metainfo, filter_cfg=None, pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)
        dataset = CityscapesDataset(ann_file=self.json_name, data_prefix=dict(img='imgs'), metainfo=self.metainfo, test_mode=True, filter_cfg=None, pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)
def create_valid_dataloader(ly_vocab, re_vocab, pickle_path, batch_size, num_workers):
    """Build a deterministic (no shuffle, no drop_last) DataLoader over the
    validation pickle, batched with `valid_collate_func`.
    """
    valid_set = PickleLoader(pickle_path, ly_vocab, re_vocab, mode='test')
    return torch.utils.data.DataLoader(
        valid_set,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers,
        collate_fn=valid_collate_func,
    )
class NegativeVideoRetrievalDataset(Dataset):
    """Index (prompt, video) records for retrieval from all_data_<split>.json.

    For every prompt in the split file, records each video's id, the path to
    its precomputed CLIP feature tensor and the path to its ASR transcript.
    NOTE(review): only __init__ is visible here — __len__/__getitem__ are
    presumably defined elsewhere; confirm before use.
    """

    def __init__(self, split, args):
        self.args = args
        self.data = []            # one dict per video: id + feature/ASR paths
        self.prompts = []         # all prompt strings, in file order
        self.videos = []          # all video file names (may repeat across prompts)
        self.video_durations = [] # never filled here — presumably populated later
        self.video_feat_dir = args.video_feature_dir
        self.asr_dir = args.asr_dir
        print(f'split: {split}')
        # The split JSON maps prompt -> list of video file names (*.mp4).
        with open(f'{args.data_dir}/all_data_{split}.json', 'r') as f:
            data = json.load(f)
        for prompt in data:
            self.prompts.append(prompt)
            for video in data[prompt]:
                self.videos.append(video)
                # Feature path keeps the .mp4 suffix; the ASR path strips it.
                self.data.append({'video_id': video.replace('.mp4', ''), 'clip_feature': f'{self.video_feat_dir}/{video}.pt', 'asr': f"{self.asr_dir}/{video.replace('.mp4', '')}.srt"})
        print(f'self.videos: {len(self.videos)}')
        print(f'self.prompts: {len(self.prompts)}')
def get_dst_diff(prev_d, crnt_d):
    """Return the entries of `crnt_d` whose values changed versus `prev_d`.

    Both dicts must have the same length and the same keys in the same
    insertion order (enforced by the asserts below); the diff keeps the
    current value for every key whose value differs.
    """
    assert len(prev_d) == len(crnt_d)
    changed = {}
    for (old_key, old_val), (new_key, new_val) in zip(prev_d.items(), crnt_d.items()):
        # Keys are compared positionally, so ordering matters, not just membership.
        assert old_key == new_key
        if old_val != new_val:
            changed[new_key] = new_val
    return changed
class TextualHead(nn.Module):
    """Base module for textual heads; stores the config for subclasses.

    NOTE(review): no forward() is visible here — concrete behaviour is
    presumably provided by subclasses.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
def test_non_local_embedded_gaussian_3d():
    """Smoke-test NonLocal3DEmbeddedGaussian: output shape must equal input shape."""
    N = 10   # batch size
    C = 128  # input channels
    inner_channel = (C // 16)  # bottleneck width inside the non-local block
    data = torch.randn(N, C, 4, 7, 7)  # (N, C, T, H, W)
    model = NonLocal3DEmbeddedGaussian(in_channels=C, inner_channels=inner_channel)
    print(model)
    outputs = model(data)
    print(outputs.shape)
    # The block is residual, so it must preserve the input shape exactly.
    assert (outputs.shape == (N, C, 4, 7, 7))
def getFrameScore(tag_pred_fname, tag_target_fname, intent_pred_fname, intent_target_fname):
    """Compute frame accuracy: the fraction of samples whose slot-tag line
    matches exactly AND whose (order-insensitive) intent set matches.

    Each file has one sample per line; intents within a line are separated
    by ';'.  Returns 0.0 when the files are empty.
    """
    hit = 0.0
    sample_nb = 0.0
    # Bug fix: the files were opened in 'rb' mode, which yields bytes, and
    # bytes.split(';') with a str separator raises TypeError on Python 3.
    # Text mode preserves the original line-by-line comparison semantics.
    with open(tag_pred_fname, 'r') as tag_fpred, open(tag_target_fname, 'r') as tag_ftarget, \
            open(intent_pred_fname, 'r') as intent_fpred, open(intent_target_fname, 'r') as intent_ftarget:
        for (tag_pred, tag_target, intent_pred, intent_target) in zip(tag_fpred, tag_ftarget, intent_fpred, intent_ftarget):
            sample_nb += 1.0
            # Sorted de-duplicated intent lists make the comparison order-insensitive.
            i_pred = sorted(set(intent_pred.split(';')))
            i_target = sorted(set(intent_target.split(';')))
            if (i_pred == i_target) and (tag_pred == tag_target):
                hit += 1.0
    if sample_nb == 0.0:
        return 0.0  # avoid ZeroDivisionError on empty input files
    return hit / sample_nb
class MockFinder():
    """sys.meta_path finder that substitutes a Mock object for every module
    named in MOCK_MODULES (typically used to build docs without heavy deps).

    NOTE(review): this implements the PEP 302 find_module/load_module API,
    deprecated since Python 3.4 and removed from the import system in 3.12 —
    confirm the target Python version before relying on it.
    """
    def find_module(self, fullname, path=None):
        # Claim only the configured names; returning None defers to other finders.
        if (fullname in MOCK_MODULES):
            return self
        return None
    def load_module(self, fullname):
        # Every mocked module is a fresh, freely-attributable Mock instance.
        return Mock()
class FastFormerEncoder(nn.Module):
    """One FastFormer block: additive attention followed by a feed-forward
    network, each wrapped in an Add&Norm (residual + normalization) layer.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, ff_factor: int, share_qv_weights: bool, activation: str):
        super(FastFormerEncoder, self).__init__()
        # share_qv_weights lets AdditiveAttention tie query/value projections.
        self.attn = AdditiveAttention(input_dim, n_heads, use_bias, attn_dropout, share_qv_weights)
        # ff_factor presumably scales the hidden width of the FFN — confirm in FeedForward.
        self.ff = FeedForward(input_dim, ff_dropout, ff_factor, activation)
        self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.ff_addnorm = AddNorm(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        """Apply attention then FFN, each through its Add&Norm wrapper.

        NOTE(review): AddNorm is called with the sublayer module itself as
        the second argument, so it is expected to run the sublayer and add
        the residual internally.
        """
        x = self.attn_addnorm(X, self.attn)
        return self.ff_addnorm(x, self.ff)
_module()
class MeshAdversarialDataset(Dataset):
    """Dataset that pairs each training sample with a randomly drawn sample
    from an adversarial dataset, merged into the same data dict.
    """

    def __init__(self, train_dataset, adversarial_dataset):
        super().__init__()
        self.train_dataset = build_dataset(train_dataset)
        self.adversarial_dataset = build_dataset(adversarial_dataset)
        self.length = len(self.train_dataset)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        data = self.train_dataset[i]
        # Bug fix: `np.int` was removed in NumPy 1.24; the builtin `int` is
        # the documented replacement and is accepted by np.random.randint.
        ind_adv = np.random.randint(low=0, high=len(self.adversarial_dataset), dtype=int)
        # The modulo is redundant (high is exclusive) but kept for safety.
        data.update(self.adversarial_dataset[(ind_adv % len(self.adversarial_dataset))])
        return data
def configure_dims(params):
    """Probe the environment once to discover the observation ('o'),
    action ('u') and goal ('g') vector sizes, returned as a dict.
    """
    env = cached_make_env(params['make_env'])
    env.reset()
    # A single random step yields a representative observation dict.
    obs, _, _, info = env.step(env.action_space.sample())
    return {
        'o': obs['observation'].shape[0],
        'u': env.action_space.shape[0],
        'g': obs['desired_goal'].shape[0],
    }
def calculate_fid(dataset_name, generated_dir, real_dir, target_size=256):
    """Compute the FID between images in `real_dir` and `generated_dir`.

    Bug fix: the score was computed but never returned, making the function
    useless to callers.  `dataset_name` and `target_size` are unused here but
    kept for interface compatibility.

    Uses batch size 128 on CUDA with the standard 2048-dim Inception features.
    """
    fid = fid_score.calculate_fid_given_paths([real_dir, generated_dir], 128, 'cuda', 2048)
    torch.cuda.empty_cache()
    return fid
def deserialize_vocab(src):
    """Rebuild a Vocabulary from the JSON file at `src` (keys: word2idx,
    idx2word, idx)."""
    with open(src) as handle:
        payload = json.load(handle)
    vocab = Vocabulary()
    vocab.word2idx = payload['word2idx']
    vocab.idx2word = payload['idx2word']
    vocab.idx = payload['idx']
    return vocab
class BulletClient(object):
    """Thin wrapper around the pybullet module that pins every API call to
    one physics client, so multiple simulations can coexist in a process.
    """

    def __init__(self, connection_mode=pybullet.DIRECT, options=''):
        # Try to attach to an already-running shared-memory server first;
        # a negative client id means none exists, so fall back to the
        # requested connection mode.
        self._client = pybullet.connect(pybullet.SHARED_MEMORY)
        if (self._client < 0):
            print('options=', options)
            self._client = pybullet.connect(connection_mode, options=options)
        self._shapes = {}

    def __del__(self):
        """Disconnect on garbage collection; ignore already-closed errors."""
        try:
            pybullet.disconnect(physicsClientId=self._client)
        except pybullet.error:
            pass

    def __getattr__(self, name):
        """Forward unknown attributes to the pybullet module, injecting this
        instance's physicsClientId into every built-in API function."""
        attribute = getattr(pybullet, name)
        if inspect.isbuiltin(attribute):
            attribute = functools.partial(attribute, physicsClientId=self._client)
        return attribute
def split_code(cfg, block_list):
    """Concatenate the source of each CFG node group into one code snippet.

    Nodes whose source mentions 'catch' or 'finally' are skipped, unclosed
    braces are balanced with trailing '}', and empty snippets are dropped.
    """
    snippets = []
    for node_ids in block_list:
        snippet = ''.join(
            cfg[node_id]['source_code']
            for node_id in node_ids
            if 'catch' not in cfg[node_id]['source_code']
            and 'finally' not in cfg[node_id]['source_code']
        )
        # Close any braces opened but not closed within this group.
        unclosed = snippet.count('{') - snippet.count('}')
        if unclosed > 0:
            snippet += '}' * unclosed
        if snippet:
            snippets.append(snippet)
    return snippets
def parse_args():
    """Parse the DCShadowNet command-line options and validate them via check_args.

    Bug fix: `--smooth_weight` was declared `type=int` with a float default
    of 0.01, so any value supplied on the command line either failed to parse
    ('0.01' is not a valid int) or was truncated to 0; it is now `type=float`.
    """
    desc = 'Pytorch implementation of DCShadowNet'
    parser = argparse.ArgumentParser(description=desc)
    # Run configuration
    parser.add_argument('--phase', type=str, default='train', help='[train / test]')
    parser.add_argument('--dataset', type=str, default='SRD', help='dataset_name')
    parser.add_argument('--datasetpath', type=str, default='/disk1/yeying/dataset/SRD', help='dataset_path')
    parser.add_argument('--iteration', type=int, default=2000000, help='The number of training iterations')
    parser.add_argument('--batch_size', type=int, default=1, help='The size of batch size')
    parser.add_argument('--print_freq', type=int, default=1000, help='The number of image print freq')
    parser.add_argument('--save_freq', type=int, default=100000, help='The number of model save freq')
    parser.add_argument('--decay_flag', type=str2bool, default=True, help='The decay_flag')
    # Optimization
    parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0001, help='The weight decay')
    # Loss weights
    parser.add_argument('--adv_weight', type=int, default=1, help='Weight for GAN')
    parser.add_argument('--cycle_weight', type=int, default=10, help='Weight for Cycle')
    parser.add_argument('--identity_weight', type=int, default=10, help='Weight for Identity')
    parser.add_argument('--dom_weight', type=int, default=1, help='Weight for domain classification')
    parser.add_argument('--ch_weight', type=int, default=1, help='Weight for shadow-free chromaticity')
    parser.add_argument('--pecp_weight', type=int, default=1, help='Weight for shadow-robust feature')
    parser.add_argument('--smooth_weight', type=float, default=0.01, help='Weight for boundary smoothness')
    parser.add_argument('--use_ch_loss', type=str2bool, default=False, help='use shadow-free chromaticity loss')
    parser.add_argument('--use_pecp_loss', type=str2bool, default=False, help='use shadow-robust feature loss')
    parser.add_argument('--use_smooth_loss', type=str2bool, default=False, help='use boundary smoothness loss')
    # Network architecture
    parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')
    parser.add_argument('--n_res', type=int, default=4, help='The number of resblock')
    parser.add_argument('--n_dis', type=int, default=6, help='The number of discriminator layer')
    # Image geometry
    parser.add_argument('--img_size', type=int, default=256, help='The size of image')
    parser.add_argument('--img_h', type=int, default=480, help='The org size of image')
    parser.add_argument('--img_w', type=int, default=640, help='The org size of image')
    parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
    # Output / runtime
    parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save the results')
    parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda'], help='Set gpu mode; [cpu, cuda]')
    parser.add_argument('--benchmark_flag', type=str2bool, default=False)
    parser.add_argument('--resume', type=str2bool, default=True)
    parser.add_argument('--use_original_name', type=str2bool, default=False, help='use original name the same as the test images')
    parser.add_argument('--im_suf_A', type=str, default='.png', help='The suffix of test images [.png / .jpg]')
    return check_args(parser.parse_args())
class ChainMail(BaseSuit):
    """Iron body armour: armour class 5, weight 300 (units per BaseSuit convention)."""
    def __init__(self):
        super().__init__('chain mail', weight=300, armour_class=5, material=M.Iron)
class PReLUParameter(message.Message):
    """Protobuf message class for PReLU layer parameters, written in the
    style protoc emits for Python 2.

    NOTE(review): `__metaclass__` has no effect on Python 3 — class-body
    metaclass assignment was a Python 2 mechanism; on Python 3 this class
    will not be wired up by the reflection metaclass. Confirm target version.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PRELUPARAMETER
def ConvertImageListToNumpy(data, format='numpy', data_format='NHWC', dtype=np.uint8):
    """Decode a list of JPEG byte strings into image arrays.

    Parameters
    ----------
    data : iterable of encoded JPEG buffers, decoded via JpegToNumpy.
    format : if 'numpy', stack the decoded images into one ndarray of `dtype`;
        otherwise return a plain Python list.  (Name shadows the builtin but
        is kept for interface compatibility.)
    data_format : 'NHWC' leaves images as decoded (H, W, C); 'NCHW' transposes
        each image to (C, H, W).
    dtype : element type of the stacked array.
    """
    # (Removed an unused `length = len(data)` local from the original.)
    images = []
    for raw in data:
        img = JpegToNumpy(raw)
        if (data_format == 'NCHW'):
            # HWC -> CHW
            img = np.transpose(img, [2, 0, 1])
        images.append(img)
    if (format == 'numpy'):
        images = np.array(images, dtype=dtype)
    return images
def write_gt_file(ids, id2gt, file_name):
    """Write one "<id>\\t<ground-truth>" line per id to `file_name`,
    skipping ids listed in the module-level IDS_ERROR blacklist.
    """
    # Context manager guarantees the handle is closed even if an id is
    # missing from id2gt and the lookup raises (the original leaked it).
    with open(file_name, 'w') as fw:
        for sample_id in ids:  # renamed from `id`, which shadowed the builtin
            if (sample_id in IDS_ERROR):
                continue
            fw.write(('%s\t%s\n' % (sample_id, id2gt[sample_id])))
class StageWorld():
    """ROS/Stage environment wrapper for one simulated robot (robot_<index>).

    Subscribes to the robot's ground-truth pose, laser scan, odometry, crash
    flag and the simulation clock; publishes velocity/pose commands; and
    exposes the reward/termination logic used for RL training.

    Bug fixed versus the original: sim_clock_callback divided nanoseconds by
    `.0` (i.e. 0.0), which raised ZeroDivisionError on the very first clock
    message; nanoseconds are now divided by 1e9 as required to convert a ROS
    (secs, nsecs) pair to float seconds.
    """

    def __init__(self, beam_num, index, num_env):
        self.index = index
        self.num_env = num_env
        node_name = 'StageEnv_' + str(index)
        rospy.init_node(node_name, anonymous=None)
        # NOTE: the 'beam_mum' spelling is kept because external code may
        # read this attribute; it holds the number of sparse laser beams.
        self.beam_mum = beam_num
        self.laser_cb_num = 0
        self.scan = None
        self.self_speed = [0.0, 0.0]
        self.step_goal = [0.0, 0.0]
        self.step_r_cnt = 0.0
        self.map_size = np.array([8.0, 8.0], dtype=np.float32)
        self.goal_size = 0.5      # distance threshold for "goal reached"
        self.robot_value = 10.0
        self.goal_value = 0.0
        # Publishers / subscribers on the per-robot topic namespace.
        cmd_vel_topic = 'robot_' + str(index) + '/cmd_vel'
        self.cmd_vel = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
        cmd_pose_topic = 'robot_' + str(index) + '/cmd_pose'
        self.cmd_pose = rospy.Publisher(cmd_pose_topic, Pose, queue_size=10)
        object_state_topic = 'robot_' + str(index) + '/base_pose_ground_truth'
        self.object_state_sub = rospy.Subscriber(object_state_topic, Odometry, self.ground_truth_callback)
        laser_topic = 'robot_' + str(index) + '/base_scan'
        self.laser_sub = rospy.Subscriber(laser_topic, LaserScan, self.laser_scan_callback)
        odom_topic = 'robot_' + str(index) + '/odom'
        self.odom_sub = rospy.Subscriber(odom_topic, Odometry, self.odometry_callback)
        crash_topic = 'robot_' + str(index) + '/is_crashed'
        self.check_crash = rospy.Subscriber(crash_topic, Int8, self.crash_callback)
        self.sim_clock = rospy.Subscriber('clock', Clock, self.sim_clock_callback)
        self.reset_stage = rospy.ServiceProxy('reset_positions', Empty)
        self.speed = None
        self.state = None
        self.speed_GT = None
        self.state_GT = None
        self.is_crashed = None
        # Busy-wait until every subscriber has delivered at least one message.
        while ((self.scan is None) or (self.speed is None) or (self.state is None)
               or (self.speed_GT is None) or (self.state_GT is None) or (self.is_crashed is None)):
            pass
        rospy.sleep(1.0)

    def ground_truth_callback(self, GT_odometry):
        """Cache ground-truth pose [x, y, yaw] and speed [linear, angular]."""
        Quaternious = GT_odometry.pose.pose.orientation
        Euler = tf.transformations.euler_from_quaternion([Quaternious.x, Quaternious.y, Quaternious.z, Quaternious.w])
        self.state_GT = [GT_odometry.pose.pose.position.x, GT_odometry.pose.pose.position.y, Euler[2]]
        v_x = GT_odometry.twist.twist.linear.x
        v_y = GT_odometry.twist.twist.linear.y
        v = np.sqrt(((v_x ** 2) + (v_y ** 2)))
        self.speed_GT = [v, GT_odometry.twist.twist.angular.z]

    def laser_scan_callback(self, scan):
        """Cache the latest raw laser scan and its metadata."""
        self.scan_param = [scan.angle_min, scan.angle_max, scan.angle_increment, scan.time_increment, scan.scan_time, scan.range_min, scan.range_max]
        self.scan = np.array(scan.ranges)
        self.laser_cb_num += 1

    def odometry_callback(self, odometry):
        """Cache odometry pose [x, y, yaw] and speed [linear.x, angular.z]."""
        Quaternions = odometry.pose.pose.orientation
        Euler = tf.transformations.euler_from_quaternion([Quaternions.x, Quaternions.y, Quaternions.z, Quaternions.w])
        self.state = [odometry.pose.pose.position.x, odometry.pose.pose.position.y, Euler[2]]
        self.speed = [odometry.twist.twist.linear.x, odometry.twist.twist.angular.z]

    def sim_clock_callback(self, clock):
        """Cache simulation time in float seconds.

        Bug fix: the original divided nsecs by `.0` (zero), which raised
        ZeroDivisionError; ROS nanoseconds convert to seconds via / 1e9.
        """
        self.sim_time = (clock.clock.secs + (clock.clock.nsecs / 1000000000.0))

    def crash_callback(self, flag):
        self.is_crashed = flag.data

    def get_self_stateGT(self):
        return self.state_GT

    def get_self_speedGT(self):
        return self.speed_GT

    def get_laser_observation(self):
        """Return a sparse, normalized laser scan in [-0.5, 0.5].

        NaN/inf ranges are clamped to the 6 m max; beam_mum beams are sampled
        symmetrically from the left and right halves of the raw scan.
        """
        scan = copy.deepcopy(self.scan)
        scan[np.isnan(scan)] = 6.0
        scan[np.isinf(scan)] = 6.0
        raw_beam_num = len(scan)
        sparse_beam_num = self.beam_mum
        step = float(raw_beam_num) / sparse_beam_num
        sparse_scan_left = []
        index = 0.0
        for x in range(int(sparse_beam_num / 2)):
            sparse_scan_left.append(scan[int(index)])
            index += step
        sparse_scan_right = []
        index = raw_beam_num - 1.0
        for x in range(int(sparse_beam_num / 2)):
            sparse_scan_right.append(scan[int(index)])
            index -= step
        # Right half is reversed so the final array runs left-to-right.
        scan_sparse = np.concatenate((sparse_scan_left, sparse_scan_right[::-1]), axis=0)
        return ((scan_sparse / 6.0) - 0.5)

    def get_self_speed(self):
        return self.speed

    def get_self_state(self):
        return self.state

    def get_crash_state(self):
        return self.is_crashed

    def get_sim_time(self):
        return self.sim_time

    def get_local_goal(self):
        """Return the goal point expressed in the robot's own frame."""
        [x, y, theta] = self.get_self_stateGT()
        [goal_x, goal_y] = self.goal_point
        local_x = (((goal_x - x) * np.cos(theta)) + ((goal_y - y) * np.sin(theta)))
        local_y = (((-(goal_x - x)) * np.sin(theta)) + ((goal_y - y) * np.cos(theta)))
        return [local_x, local_y]

    def reset_world(self):
        """Reset Stage to its initial robot positions and clear step state."""
        self.reset_stage()
        self.self_speed = [0.0, 0.0]
        self.step_goal = [0.0, 0.0]
        self.step_r_cnt = 0.0
        self.start_time = time.time()
        rospy.sleep(0.5)

    def generate_goal_point(self):
        """Pick a goal: random for robots 34..43, fixed per-index otherwise."""
        if ((self.index > 33) and (self.index < 44)):
            self.goal_point = self.generate_random_goal()
        else:
            self.goal_point = get_goal_point(self.index)
        self.pre_distance = 0
        self.distance = copy.deepcopy(self.pre_distance)

    def get_reward_and_terminate(self, t):
        """Return (reward, terminate, result) for step count `t`.

        Reward = 2.5 * progress toward the goal, +15 on reaching it, -15 on
        crash, minus a penalty for spinning faster than 1.05 rad/s; episodes
        also end after 200 steps ('Time out').
        """
        terminate = False
        laser_scan = self.get_laser_observation()
        laser_min = np.amin(laser_scan)
        [x, y, theta] = self.get_self_stateGT()
        [v, w] = self.get_self_speedGT()
        self.pre_distance = copy.deepcopy(self.distance)
        self.distance = np.sqrt((((self.goal_point[0] - x) ** 2) + ((self.goal_point[1] - y) ** 2)))
        reward_g = ((self.pre_distance - self.distance) * 2.5)
        reward_c = 0
        reward_w = 0
        result = 0
        is_crash = self.get_crash_state()
        if (self.distance < self.goal_size):
            terminate = True
            reward_g = 15
            result = 'Reach Goal'
        # Later checks deliberately override earlier results (crash > goal, etc.).
        if (is_crash == 1):
            terminate = True
            reward_c = (-15.0)
            result = 'Crashed'
        if (np.abs(w) > 1.05):
            reward_w = ((-0.1) * np.abs(w))
        if (t > 200):
            terminate = True
            result = 'Time out'
        reward = ((reward_g + reward_c) + reward_w)
        return (reward, terminate, result)

    def reset_pose(self):
        """Teleport the robot to its reset pose and wait until it arrives."""
        if ((self.index > 33) and (self.index < 44)):
            reset_pose = self.generate_random_pose()
        else:
            reset_pose = get_init_pose(self.index)
        rospy.sleep(0.05)
        self.control_pose(reset_pose)
        [x_robot, y_robot, theta] = self.get_self_stateGT()
        # Poll ground truth until the teleport has taken effect (20 cm tolerance).
        while ((np.abs((reset_pose[0] - x_robot)) > 0.2) or (np.abs((reset_pose[1] - y_robot)) > 0.2)):
            [x_robot, y_robot, theta] = self.get_self_stateGT()
            rospy.sleep(0.05)

    def control_vel(self, action):
        """Publish a velocity command: action = [linear.x, angular.z]."""
        move_cmd = Twist()
        move_cmd.linear.x = action[0]
        move_cmd.linear.y = 0.0
        move_cmd.linear.z = 0.0
        move_cmd.angular.x = 0.0
        move_cmd.angular.y = 0.0
        move_cmd.angular.z = action[1]
        self.cmd_vel.publish(move_cmd)

    def control_pose(self, pose):
        """Publish an absolute pose command: pose = [x, y, yaw]."""
        pose_cmd = Pose()
        assert (len(pose) == 3)
        pose_cmd.position.x = pose[0]
        pose_cmd.position.y = pose[1]
        pose_cmd.position.z = 0
        qtn = tf.transformations.quaternion_from_euler(0, 0, pose[2], 'rxyz')
        pose_cmd.orientation.x = qtn[0]
        pose_cmd.orientation.y = qtn[1]
        pose_cmd.orientation.z = qtn[2]
        pose_cmd.orientation.w = qtn[3]
        self.cmd_pose.publish(pose_cmd)

    def generate_random_pose(self):
        """Sample a random pose at least 7 m from the robot, inside the
        two corridor bands y in [-5, -1] / [-19, -13] mapped from U(0, 1)."""
        [x_robot, y_robot, theta] = self.get_self_stateGT()
        x = np.random.uniform(9, 19)
        y = np.random.uniform(0, 1)
        if (y <= 0.4):
            y = (-((y * 10) + 1))
        else:
            y = (-((y * 10) + 9))
        dis_goal = np.sqrt((((x - x_robot) ** 2) + ((y - y_robot) ** 2)))
        while ((dis_goal < 7) and (not rospy.is_shutdown())):
            x = np.random.uniform(9, 19)
            y = np.random.uniform(0, 1)
            if (y <= 0.4):
                y = (-((y * 10) + 1))
            else:
                y = (-((y * 10) + 9))
            dis_goal = np.sqrt((((x - x_robot) ** 2) + ((y - y_robot) ** 2)))
        theta = np.random.uniform(0, (2 * np.pi))
        return [x, y, theta]

    def generate_random_goal(self):
        """Sample a random goal at least 7 m from the robot (same bands as
        generate_random_pose, but without a heading)."""
        [x_robot, y_robot, theta] = self.get_self_stateGT()
        x = np.random.uniform(9, 19)
        y = np.random.uniform(0, 1)
        if (y <= 0.4):
            y = (-((y * 10) + 1))
        else:
            y = (-((y * 10) + 9))
        dis_goal = np.sqrt((((x - x_robot) ** 2) + ((y - y_robot) ** 2)))
        while ((dis_goal < 7) and (not rospy.is_shutdown())):
            x = np.random.uniform(9, 19)
            y = np.random.uniform(0, 1)
            if (y <= 0.4):
                y = (-((y * 10) + 1))
            else:
                y = (-((y * 10) + 9))
            dis_goal = np.sqrt((((x - x_robot) ** 2) + ((y - y_robot) ** 2)))
        return [x, y]
class SklearnDataModule(pl.LightningDataModule):
    """LightningDataModule over in-memory arrays, with automatic shuffling
    and val/test hold-out splitting when explicit sets are not supplied.

    Bug fixed versus the original: `dataset_kwargs` used a mutable default
    argument (`dict={}`), which is shared across every instance of the class.
    """

    name = 'sklearn'

    def __init__(self, X, y, x_val=None, y_val=None, x_test=None, y_test=None,
                 val_split: float = 0.2, test_split: float = 0.1,
                 num_workers: int = 0, seed: int = 123, batch_size: int = 16,
                 dataset_kwargs: Optional[dict] = None, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.num_workers = num_workers
        self.batch_size = batch_size
        # None sentinel instead of a shared mutable `{}` default.
        self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs
        self.seed = seed
        (X, y) = sklearn.utils.shuffle(X, y, random_state=self.seed)
        # An explicitly supplied val/test set disables the matching auto-split.
        val_split = (0 if ((x_val is not None) or (y_val is not None)) else val_split)
        test_split = (0 if ((x_test is not None) or (y_test is not None)) else test_split)
        hold_out_split = (val_split + test_split)
        if (hold_out_split > 0):
            # The first hold_out_size rows of the shuffled data become the
            # hold-out pool; val takes the leading fraction, test the rest.
            val_split = (val_split / hold_out_split)
            hold_out_size = math.floor((len(X) * hold_out_split))
            (x_holdout, y_holdout) = (X[:hold_out_size], y[:hold_out_size])
            test_i_start = int((val_split * hold_out_size))
            (x_val_hold_out, y_val_holdout) = (x_holdout[:test_i_start], y_holdout[:test_i_start])
            (x_test_hold_out, y_test_holdout) = (x_holdout[test_i_start:], y_holdout[test_i_start:])
            (X, y) = (X[hold_out_size:], y[hold_out_size:])
        if ((x_val is None) and (y_val is None) and (val_split > 0)):
            (x_val, y_val) = (x_val_hold_out, y_val_holdout)
        if ((x_test is None) and (y_test is None) and (test_split > 0)):
            (x_test, y_test) = (x_test_hold_out, y_test_holdout)
        self._init_datasets(X, y, x_val, y_val, x_test, y_test)

    def train_dataloader(self, batch_size: Optional[int] = None, **kwargs) -> DataLoader:
        """Shuffling loader over the training split."""
        if (batch_size is None):
            batch_size = self.batch_size
        loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True, **kwargs)
        return loader

    def val_dataloader(self, batch_size: Optional[int] = None, **kwargs) -> DataLoader:
        """Deterministic loader over the validation split."""
        if (batch_size is None):
            batch_size = self.batch_size
        loader = DataLoader(self.val_dataset, batch_size=batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, **kwargs)
        return loader

    def test_dataloader(self, batch_size: Optional[int] = None, **kwargs) -> DataLoader:
        """Deterministic loader over the test split."""
        if (batch_size is None):
            batch_size = self.batch_size
        loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, **kwargs)
        return loader

    def eval_dataloader(self, is_eval_on_test: bool, **kwargs) -> DataLoader:
        """Pick the test or validation loader for evaluation."""
        if is_eval_on_test:
            return self.test_dataloader(**kwargs)
        else:
            return self.val_dataloader(**kwargs)

    def _init_datasets(self, X: np.ndarray, y: np.ndarray, x_val: np.ndarray, y_val: np.ndarray, x_test: np.ndarray, y_test: np.ndarray) -> None:
        """Wrap each split in a SklearnDataset with the shared kwargs."""
        self.train_dataset = SklearnDataset(X, y, **self.dataset_kwargs)
        self.val_dataset = SklearnDataset(x_val, y_val, **self.dataset_kwargs)
        self.test_dataset = SklearnDataset(x_test, y_test, **self.dataset_kwargs)
# NOTE(review): this call sits directly above the function definition and was
# almost certainly a decorator (`@_REGISTRY.register()`) flattened by text
# extraction — confirm against the original source; as written it registers
# nothing.
_REGISTRY.register()
def mobilenet_v2(norm_layer=nn.BatchNorm2d):
    """Factory returning a MobileNetV2 built with the given normalization layer."""
    return MobileNetV2(norm_layer=norm_layer)
def parse_args():
    """CLI options for validating all models listed in model-index.yml."""
    parser = ArgumentParser(description='Valid all models in model-index.yml')
    parser.add_argument('--shape', type=int, nargs='+', default=[1280, 800], help='input image size')
    parser.add_argument('--checkpoint_root', help='Checkpoint file root path. If set, load checkpoint before test.')
    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
    parser.add_argument('--models', nargs='+', help='models name to inference')
    parser.add_argument('--batch-size', type=int, default=1, help='The batch size during the inference.')
    parser.add_argument('--flops', action='store_true', help='Get Flops and Params of models')
    parser.add_argument('--flops-str', action='store_true', help='Output FLOPs and params counts in a string form.')
    # DictAction parses key=value pairs into a dict merged over the config.
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--size_divisor', type=int, default=32, help='Pad the input image, the minimum size that is divisible by size_divisor, -1 means do not pad the image.')
    args = parser.parse_args()
    return args
class DefaultResourceLimits(object):
    """Fallback resource quotas applied when a job specifies none.

    NOTE(review): units are assumed from the values — CPU in cores, memory
    as a Kubernetes-style Mi quantity string, GPU in device count; confirm
    against the scheduler that consumes these constants.
    """
    CPU_LIMIT = 100
    MEMORY_LIMIT = '102400Mi'
    GPU_LIMIT = 4
def send_mail(message, subject, email_id):
    """Best-effort email via the local `mail` command; failures are logged,
    never raised.

    Security fix: the original built a shell pipeline by string interpolation
    (`echo "..." | mail -s "..." ...`, shell=True), so any of the three
    arguments could inject arbitrary shell commands.  Passing an argument
    list with no shell and feeding the body through stdin removes that
    vector while producing the same mail.
    """
    try:
        proc = subprocess.Popen(['mail', '-s', subject, email_id], stdin=subprocess.PIPE)
        # `echo` appended a trailing newline; keep that behaviour.
        proc.communicate((message + '\n').encode())
    except Exception as e:
        logger.info('Unable to send mail due to error:\n {error}'.format(error=str(e)))
def import_conllu(compressed_corpus):
    """Read a .tar.gz corpus of CoNLL-U files into per-split sentence dicts.

    Every archive member whose path contains '.conllu' is scanned. Sentences
    are keyed by their '# sent_id = ...' header and routed to the train/dev/
    test dict according to whether the member path contains 'train', 'dev'
    or 'test'. A sentence is stored only when its terminating blank line is
    seen, so an unterminated final sentence is dropped (original behavior).

    Args:
        compressed_corpus: path to the .tar.gz archive.

    Returns:
        Tuple ``(train_sents, dev_sents, test_sents)`` of dicts mapping
        sent_id -> concatenated token lines.
    """
    train_sents = {}
    dev_sents = {}
    test_sents = {}
    sent = ''
    sent_id = ''
    # `with` guarantees the archive is closed; the original leaked the handle.
    with tarfile.open(compressed_corpus, 'r:gz') as tar:
        for fname in tar.getmembers():
            if ('.conllu' not in fname.path):
                continue
            content = tar.extractfile(fname)
            for line in content:
                line = line.decode('utf8')
                if (line == '\n'):
                    # Blank line terminates the current sentence.
                    if (len(sent) > 0):
                        if ('train' in fname.path):
                            train_sents[sent_id] = sent
                        elif ('dev' in fname.path):
                            dev_sents[sent_id] = sent
                        elif ('test' in fname.path):
                            test_sents[sent_id] = sent
                    sent = ''
                    sent_id = ''
                elif line.startswith('# sent_id ='):
                    sent_id = line.strip().split(' = ')[(- 1)]
                elif line.startswith('#'):
                    pass  # other comment lines are ignored
                else:
                    sent += line
    return (train_sents, dev_sents, test_sents)
def _parse_string_to_float_array(string):
if (not string):
return []
parsed_string = string.strip('[] ')
if (not parsed_string):
return []
return [(float(x) if x.strip() else 'NaN') for x in parsed_string.split(',')] |
def convert_opus_name_to_hf_name(x):
    """Map an OPUS model name onto its Hugging Face equivalent.

    Applies every (substring, group-name) substitution listed in the
    module-level GROUPS table, in order, then replaces '+' separators
    with '_'.
    """
    renamed = x
    for substr, grp_name in GROUPS:
        renamed = renamed.replace(substr, grp_name)
    return renamed.replace('+', '_')
class Decoder(nn.Module):
    """Voice-conversion decoder: upsampling conv blocks + dense blocks + a
    bidirectional GRU, each conditioned on a speaker embedding.

    All of ``pad_layer``, ``pixel_shuffle_1d``, ``upsample``, ``linear``,
    ``RNN`` and ``append_emb`` are module-level helpers defined elsewhere
    in this file — their exact semantics are assumed from their names
    (TODO confirm).

    Args:
        c_in: input channel count.
        c_out: output channel count (e.g. spectrogram bins).
        c_h: hidden channel count.
        c_a: number of speakers (embedding table size).
        emb_size: unused here beyond the signature — NOTE(review) confirm.
        ns: negative slope for leaky ReLU.
    """
    def __init__(self, c_in=512, c_out=513, c_h=512, c_a=8, emb_size=128, ns=0.2):
        super(Decoder, self).__init__()
        self.ns = ns
        # Each conv pair doubles channels then reduces back; the 2*c_h output
        # feeds pixel_shuffle_1d (x2 time upsampling) in conv_block.
        self.conv1 = nn.Conv1d(c_in, (2 * c_h), kernel_size=3)
        self.conv2 = nn.Conv1d(c_h, c_h, kernel_size=3)
        self.conv3 = nn.Conv1d(c_h, (2 * c_h), kernel_size=3)
        self.conv4 = nn.Conv1d(c_h, c_h, kernel_size=3)
        self.conv5 = nn.Conv1d(c_h, (2 * c_h), kernel_size=3)
        self.conv6 = nn.Conv1d(c_h, c_h, kernel_size=3)
        self.dense1 = nn.Linear(c_h, c_h)
        self.dense2 = nn.Linear(c_h, c_h)
        self.dense3 = nn.Linear(c_h, c_h)
        self.dense4 = nn.Linear(c_h, c_h)
        # Bidirectional GRU: c_h//2 per direction so the concat is c_h wide.
        self.RNN = nn.GRU(input_size=c_h, hidden_size=(c_h // 2), num_layers=1, bidirectional=True)
        self.dense5 = nn.Linear(((2 * c_h) + c_h), c_h)
        self.linear = nn.Linear(c_h, c_out)
        self.ins_norm1 = nn.InstanceNorm1d(c_h)
        self.ins_norm2 = nn.InstanceNorm1d(c_h)
        self.ins_norm3 = nn.InstanceNorm1d(c_h)
        self.ins_norm4 = nn.InstanceNorm1d(c_h)
        self.ins_norm5 = nn.InstanceNorm1d(c_h)
        # One speaker-embedding table per conditioning site.
        self.emb1 = nn.Embedding(c_a, c_h)
        self.emb2 = nn.Embedding(c_a, c_h)
        self.emb3 = nn.Embedding(c_a, c_h)
        self.emb4 = nn.Embedding(c_a, c_h)
        self.emb5 = nn.Embedding(c_a, c_h)
    def conv_block(self, x, conv_layers, norm_layer, emb, res=True):
        """One conditioned conv stage: add emb, conv, x2 upsample, conv,
        norm; optional residual from the (upsampled) input."""
        # Broadcast the (B, C) embedding over the time axis.
        x_add = (x + emb.view(emb.size(0), emb.size(1), 1))
        out = pad_layer(x_add, conv_layers[0])
        out = F.leaky_relu(out, negative_slope=self.ns)
        out = pixel_shuffle_1d(out, upscale_factor=2)
        out = (out + emb.view(emb.size(0), emb.size(1), 1))
        out = pad_layer(out, conv_layers[1])
        out = F.leaky_relu(out, negative_slope=self.ns)
        out = norm_layer(out)
        if res:
            # Residual path must be upsampled to match the shuffled output.
            x_up = upsample(x, scale_factor=2)
            out = (out + x_up)
        return out
    def dense_block(self, x, emb, layers, norm_layer, res=True):
        """Conditioned dense stage: (add emb -> linear -> leaky ReLU) per
        layer, then norm; optional residual from the block input."""
        out = x
        for layer in layers:
            out = (out + emb.view(emb.size(0), emb.size(1), 1))
            out = linear(out, layer)
            out = F.leaky_relu(out, negative_slope=self.ns)
        out = norm_layer(out)
        if res:
            out = (out + x)
        return out
    def forward(self, x, c):
        """Decode features `x` conditioned on speaker index tensor `c`."""
        out = self.conv_block(x, [self.conv1, self.conv2], self.ins_norm1, self.emb1(c), res=True)
        out = self.conv_block(out, [self.conv3, self.conv4], self.ins_norm2, self.emb2(c), res=True)
        out = self.conv_block(out, [self.conv5, self.conv6], self.ins_norm3, self.emb3(c), res=True)
        out = self.dense_block(out, self.emb4(c), [self.dense1, self.dense2], self.ins_norm4, res=True)
        # NOTE(review): both dense blocks condition on self.emb4 — confirm the
        # second was not meant to use a separate table.
        out = self.dense_block(out, self.emb4(c), [self.dense3, self.dense4], self.ins_norm5, res=True)
        emb = self.emb5(c)
        out_add = (out + emb.view(emb.size(0), emb.size(1), 1))
        out_rnn = RNN(out_add, self.RNN)
        out = torch.cat([out, out_rnn], dim=1)
        out = append_emb(self.emb5(c), out.size(2), out)
        out = linear(out, self.dense5)
        out = F.leaky_relu(out, negative_slope=self.ns)
        out = linear(out, self.linear)
        return out
def noam_norm(x, epsilon=1.0, scope=None, reuse=None):
    """Noam's normalization: L2-normalize the last axis, then rescale by
    sqrt(depth) so the expected magnitude is preserved.

    Args:
        x: input tensor; the last dimension must be statically known.
        epsilon: numerical floor passed to l2_normalize.
        scope: optional name scope.
        reuse: unused (kept for signature compatibility).
    """
    with tf.name_scope(scope, default_name='noam_norm', values=[x]):
        static_shape = x.get_shape()
        rank = len(static_shape)
        normalized = tf.nn.l2_normalize(x, (rank - 1), epsilon=epsilon)
        return normalized * tf.sqrt(tf.to_float(static_shape[-1]))
class TimeStepBatch(collections.namedtuple('TimeStepBatch', ['env_spec', 'observations', 'actions', 'rewards', 'next_observations', 'terminals', 'env_infos', 'agent_infos'])):
    """A batch of transitions (s, a, r, s', done) plus env/agent info dicts.

    All array fields share a leading batch dimension inferred from
    ``terminals``; ``__new__`` validates shapes/spaces eagerly so malformed
    batches fail at construction time rather than deep inside training code.
    """
    __slots__ = ()
    def __new__(cls, env_spec, observations, actions, rewards, next_observations, terminals, env_infos, agent_infos):
        """Validate every field against env_spec and the batch size, then
        construct the namedtuple.

        Raises:
            ValueError: on any shape/space/dtype mismatch.
        """
        # Batch size is taken from terminals; everything else must match it.
        inferred_batch_size = len(terminals)
        if (inferred_batch_size < 1):
            raise ValueError('Expected batch dimension of terminals to be greater than 1, but got length {} instead.'.format(inferred_batch_size))
        first_observation = observations[0]
        first_action = actions[0]
        # Observations may be flattened: a flat_dim match is accepted even
        # when the space's `contains` check fails.
        if (not env_spec.observation_space.contains(first_observation)):
            if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)):
                if (env_spec.observation_space.flat_dim != np.prod(first_observation.shape)):
                    raise ValueError('observations should have the same dimensionality as the observation_space ({}), but got data with shape {} instead'.format(env_spec.observation_space.flat_dim, first_observation.shape))
            else:
                raise ValueError('observations must conform to observation_space {}, but got data with shape {} instead.'.format(env_spec.observation_space, first_observation.shape))
        if (observations.shape[0] != inferred_batch_size):
            raise ValueError('Expected batch dimension of observations to be length {}, but got length {} instead.'.format(inferred_batch_size, observations.shape[0]))
        # Same flattened-or-conforming check for next_observations.
        if (not env_spec.observation_space.contains(next_observations[0])):
            if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)):
                if (env_spec.observation_space.flat_dim != np.prod(next_observations[0].shape)):
                    raise ValueError('next_observations should have the same dimensionality as the observation_space ({}), but got data with shape {} instead'.format(env_spec.observation_space.flat_dim, next_observations[0].shape))
            else:
                raise ValueError('next_observations must conform to observation_space {}, but got data with shape {} instead.'.format(env_spec.observation_space, next_observations[0].shape[0]))
        if (next_observations.shape[0] != inferred_batch_size):
            raise ValueError('Expected batch dimension of next_observations to be length {}, but got length {} instead.'.format(inferred_batch_size, next_observations[0].shape[0]))
        # Actions: same flattened-or-conforming check against action_space.
        if (not env_spec.action_space.contains(first_action)):
            if isinstance(env_spec.action_space, (akro.Box, akro.Discrete, akro.Dict)):
                if (env_spec.action_space.flat_dim != np.prod(first_action.shape)):
                    raise ValueError('actions should have the same dimensionality as the action_space ({}), but got data with shape {} instead'.format(env_spec.action_space.flat_dim, first_action.shape))
            else:
                raise ValueError('actions must conform to action_space {}, but got data with shape {} instead.'.format(env_spec.action_space, first_action.shape))
        if (actions.shape[0] != inferred_batch_size):
            raise ValueError('Expected batch dimension of actions to be length {}, but got length {} instead.'.format(inferred_batch_size, actions.shape[0]))
        if (rewards.shape[0] != inferred_batch_size):
            raise ValueError('Expected batch dimension of rewards to be length {}, but got length {} instead.'.format(inferred_batch_size, rewards.shape[0]))
        # NOTE(review): np.bool is removed in NumPy >= 1.24; this comparison
        # will raise AttributeError on modern NumPy — confirm the pinned
        # numpy version or migrate to plain `bool`/`np.bool_`.
        if (terminals.dtype != np.bool):
            raise ValueError('terminals tensor must be dtype np.bool, but got tensor of dtype {} instead.'.format(terminals.dtype))
        # Info dicts: each value must be a dict or a batched ndarray.
        for (key, val) in env_infos.items():
            if (not isinstance(val, (dict, np.ndarray))):
                raise ValueError('Each entry in env_infos must be a numpy array or dictionary, but got key {} with value type {} instead.'.format(key, type(val)))
            if (isinstance(val, np.ndarray) and (val.shape[0] != inferred_batch_size)):
                raise ValueError('Each entry in env_infos must have a batch dimension of length {}, but got key {} with batch size {} instead.'.format(inferred_batch_size, key, val.shape[0]))
        for (key, val) in agent_infos.items():
            if (not isinstance(val, (dict, np.ndarray))):
                raise ValueError('Each entry in agent_infos must be a numpy array or dictionary, but got key {} with value type {} instead.instead'.format(key, type(val)))
            if (isinstance(val, np.ndarray) and (val.shape[0] != inferred_batch_size)):
                raise ValueError('Each entry in agent_infos must have a batch dimension of length {}, but got key {} with batch size {} instead.'.format(inferred_batch_size, key, val.shape[0]))
        return super().__new__(TimeStepBatch, env_spec, observations, actions, rewards, next_observations, terminals, env_infos, agent_infos)
    # NOTE(review): takes `cls` but has no @classmethod decorator — likely
    # lost in extraction. As written, `TimeStepBatch.concatenate(a, b)` binds
    # `a` to `cls`, and `cls(...)` then calls a namedtuple instance, which
    # fails. Confirm upstream (garage declares this as a classmethod).
    def concatenate(cls, *batches):
        """Concatenate several TimeStepBatches (sharing one env_spec) into one."""
        if (len(batches) < 1):
            raise ValueError('Please provide at least one TimeStepBatch to concatenate')
        # Info dicts are merged key-wise; the first batch defines the key set.
        env_infos = {k: np.concatenate([b.env_infos[k] for b in batches]) for k in batches[0].env_infos.keys()}
        agent_infos = {k: np.concatenate([b.agent_infos[k] for b in batches]) for k in batches[0].agent_infos.keys()}
        return cls(batches[0].env_spec, np.concatenate([batch.observations for batch in batches]), np.concatenate([batch.actions for batch in batches]), np.concatenate([batch.rewards for batch in batches]), np.concatenate([batch.next_observations for batch in batches]), np.concatenate([batch.terminals for batch in batches]), env_infos, agent_infos)
    def split(self):
        """Split this batch into a list of single-transition TimeStepBatches."""
        time_steps = []
        for i in range(len(self.terminals)):
            time_step = TimeStepBatch(env_spec=self.env_spec, observations=np.asarray([self.observations[i]]), actions=np.asarray([self.actions[i]]), rewards=np.asarray([self.rewards[i]]), next_observations=np.asarray([self.next_observations[i]]), terminals=np.asarray([self.terminals[i]]), env_infos={k: np.asarray([v[i]]) for (k, v) in self.env_infos.items()}, agent_infos={k: np.asarray([v[i]]) for (k, v) in self.agent_infos.items()})
            time_steps.append(time_step)
        return time_steps
    def to_time_step_list(self):
        """Convert to a list of per-transition dicts (one dict per step)."""
        samples = []
        for i in range(len(self.terminals)):
            samples.append({'observations': np.asarray([self.observations[i]]), 'actions': np.asarray([self.actions[i]]), 'rewards': np.asarray([self.rewards[i]]), 'next_observations': np.asarray([self.next_observations[i]]), 'terminals': np.asarray([self.terminals[i]]), 'env_infos': {k: np.asarray([v[i]]) for (k, v) in self.env_infos.items()}, 'agent_infos': {k: np.asarray([v[i]]) for (k, v) in self.agent_infos.items()}})
        return samples
    # NOTE(review): also takes `cls` without @classmethod — same suspected
    # extraction loss as `concatenate` above; confirm upstream.
    def from_time_step_list(cls, env_spec, ts_samples):
        """Build one TimeStepBatch from a list of per-transition dicts."""
        if (len(ts_samples) < 1):
            raise ValueError('Please provide at least one dict')
        ts_batches = [TimeStepBatch(env_spec=env_spec, observations=sample['observations'], actions=sample['actions'], rewards=sample['rewards'], next_observations=sample['next_observations'], terminals=sample['terminals'], env_infos=sample['env_infos'], agent_infos=sample['agent_infos']) for sample in ts_samples]
        return TimeStepBatch.concatenate(*ts_batches)
def entanglement_of_formation(state, d0, d1=None):
    """Entanglement of formation of a bipartite quantum state.

    For a 2-qubit density matrix the closed-form qubit formula is used;
    for a pure state-vector the entropy of the reduced state of the
    smaller subsystem is returned. Any other input is rejected with a
    printed message and ``None``.

    Args:
        state: state-vector (1-D) or density matrix (2-D).
        d0: dimension of the first subsystem.
        d1: dimension of the second subsystem; inferred as len(state)/d0
            when omitted.
    """
    state = np.array(state)
    if d1 is None:
        d1 = int(len(state) / d0)
    is_two_qubit_dm = (state.ndim == 2) and (len(state) == 4) and (d0 == 2) and (d1 == 2)
    if is_two_qubit_dm:
        return __eof_qubit(state)
    if state.ndim == 1:
        # Trace out the larger subsystem, keeping the smaller one.
        traced_out = [1] if d0 < d1 else [0]
        reduced = partial_trace(state, traced_out, dimensions=[d0, d1])
        return entropy(reduced)
    print('Input must be a state-vector or 2-qubit density matrix.')
    return None
# NOTE(review): the bare `_arg_scope` below looks like a mangled decorator —
# presumably `@add_arg_scope` in the original TF-slim source; as written it
# is a lone name expression that would raise NameError at import. Confirm
# against upstream.
_arg_scope
def max_pool2d(inputs, kernel_size, stride=2, padding='VALID', data_format=DATA_FORMAT_NHWC, outputs_collections=None, scope=None):
    """TF-slim style 2-D max pooling wrapper around tf.layers MaxPooling2D.

    Args:
        inputs: 4-D input tensor (NHWC or NCHW per data_format).
        kernel_size: pooling window size.
        stride: pooling stride (default 2).
        padding: 'VALID' or 'SAME'.
        data_format: DATA_FORMAT_NHWC or DATA_FORMAT_NCHW.
        outputs_collections: collections to add the output to.
        scope: optional name scope.

    Raises:
        ValueError: if data_format is not NHWC/NCHW.
    """
    if (data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC)):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # Map the slim-style format string onto the Keras-layer vocabulary.
        df = ('channels_first' if (data_format and data_format.startswith('NC')) else 'channels_last')
        layer = pooling_layers.MaxPooling2D(pool_size=kernel_size, strides=stride, padding=padding, data_format=df, _scope=sc)
        outputs = layer.apply(inputs)
        return utils.collect_named_outputs(outputs_collections, sc, outputs)
class SparseMaxPoolFunction(Function):
    """Autograd wrapper around the sparse max-pool native ops.

    Invoke via ``SparseMaxPoolFunction.apply(features, indice_pairs,
    indice_pair_num, num_activate_out)``. `forward`/`backward` are
    declared as ``@staticmethod`` as required by modern
    ``torch.autograd.Function`` (the legacy non-static style was removed).
    """

    @staticmethod
    def forward(ctx, features, indice_pairs, indice_pair_num, num_activate_out):
        """Run sparse max pooling and stash tensors for the backward pass."""
        out = ops.indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out)
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, out)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        """Propagate gradients to `features`; index args are non-differentiable."""
        (indice_pairs, indice_pair_num, features, out) = ctx.saved_tensors
        input_bp = ops.indice_maxpool_backward(features, out, grad_output, indice_pairs, indice_pair_num)
        # One gradient slot per forward argument.
        return (input_bp, None, None, None)
def get_relative_speed_state(ego_speed, other_speed):
    """Discretize the speed difference (other - ego) into one of 6 buckets.

    Buckets: (-inf,-15) -> 0, [-15,-5) -> 1, [-5,0) -> 2,
    [0,5) -> 3, [5,15) -> 4, [15,inf) -> 5.
    """
    diff = other_speed - ego_speed
    for bucket, upper_bound in enumerate((-15, -5, 0, 5, 15)):
        if diff < upper_bound:
            return bucket
    return 5
def download_and_prepare(name, root):
    """Download the named dataset and export its splits as image folders.

    Supported names: 'cifar10', 'cifar100', 'svhn'. The train and test
    splits are downloaded under *root* and exported to
    ``<root>/<name>/train`` and ``<root>/<name>/test``.

    Raises:
        ValueError: if *name* is not one of the supported datasets.
    """
    print('Dataset: {}'.format(name))
    print('Root: {}'.format(root))
    if name == 'cifar10':
        train = CIFAR10(root, train=True, download=True)
        test = CIFAR10(root, train=False)
    elif name == 'cifar100':
        train = CIFAR100(root, train=True, download=True)
        test = CIFAR100(root, train=False)
    elif name == 'svhn':
        # SVHN uses split= rather than train=, and both splits download.
        train = SVHN(root, split='train', download=True)
        test = SVHN(root, split='test', download=True)
    else:
        raise ValueError
    extract_and_save_image(train, osp.join(root, name, 'train'))
    extract_and_save_image(test, osp.join(root, name, 'test'))
class AverageMeterCollection():
    """Maintains one running AverageMeter per metric plus batch/sample counts."""

    def __init__(self):
        # Number of update() calls and total samples seen so far.
        self._batch_count = 0
        self.n = 0
        self._meters = collections.defaultdict(AverageMeter)

    def update(self, metrics, n=1):
        """Record one batch of metrics (dict of name -> value) with weight n."""
        self._batch_count += 1
        self.n += n
        for name, value in metrics.items():
            self._meters[name].update(value, n=n)

    def summary(self, sync_stats=False, dist_backend=None):
        """Return a stats dict of averages and last values.

        When sync_stats is True, averages and last values are all-reduced
        via dist_backend and divided by the world size.
        """
        stats = {BATCH_COUNT: self._batch_count, NUM_SAMPLES: self.n}
        for name, meter in self._meters.items():
            if not sync_stats:
                avg = meter.avg
                last_val = meter.val
            else:
                world_size = dist_backend.get_world_size()
                avg_tensor = torch.tensor(meter.avg)
                dist_backend.all_reduce(avg_tensor)
                last_tensor = torch.tensor(meter.val)
                dist_backend.all_reduce(last_tensor)
                avg = avg_tensor.item() / world_size
                last_val = last_tensor.item() / world_size
            stats[str(name)] = avg
            stats['last_' + str(name)] = last_val
        return stats
class RegionWeight(torch.nn.Module):
    """Predict a per-region attention score map from a feature map.

    The input is downsampled (stride-2 max pool then smoothing avg pool),
    run through three parallel conv branches with 3x3/5x5/7x7 receptive
    fields, fused, squashed to a single non-negative score channel with
    Softplus, and upsampled back (nearest-neighbor, x2).
    """

    def __init__(self, in_channel):
        """Args:
            in_channel: number of channels of the input feature map.
        """
        super(RegionWeight, self).__init__()
        # Three parallel branches, identical except for kernel size; the
        # padding keeps spatial dims unchanged in each branch.
        self.branch_0 = torch.nn.Conv2d(in_channel, 32, kernel_size=(3, 3), stride=1, padding=1, bias=False)
        self.norm_0 = torch.nn.BatchNorm2d(32, affine=True)
        self.branch_1 = torch.nn.Conv2d(in_channel, 32, kernel_size=(5, 5), stride=1, padding=2, bias=False)
        self.norm_1 = torch.nn.BatchNorm2d(32, affine=True)
        self.branch_2 = torch.nn.Conv2d(in_channel, 32, kernel_size=(7, 7), stride=1, padding=3, bias=False)
        self.norm_2 = torch.nn.BatchNorm2d(32, affine=True)
        self.relu = torch.nn.ReLU(inplace=True)
        self.softplus = torch.nn.Softplus()
        self.attention_score = torch.nn.Conv2d(96, 1, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        """Return a (B, 1, ~H, ~W) non-negative attention score map."""
        # Halve spatial resolution, then smooth.
        x = torch.nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = torch.nn.functional.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_0 = self.relu(self.norm_0(self.branch_0(x)))
        branch_1 = self.relu(self.norm_1(self.branch_1(x)))
        branch_2 = self.relu(self.norm_2(self.branch_2(x)))
        fusion = torch.cat([branch_0, branch_1, branch_2], 1)
        # Softplus keeps the scores strictly positive.
        score = self.softplus(self.attention_score(fusion))
        # `upsample_nearest` is deprecated; interpolate(mode='nearest') is
        # its exact equivalent.
        score = torch.nn.functional.interpolate(score, scale_factor=2, mode='nearest')
        return score
class ThrowerBulletEnv(MJCFBaseBulletEnv):
    """Bullet throwing-arm environment: reward the arm for landing the
    object near the target while penalizing energy use and stuck joints."""
    def __init__(self):
        self.robot = Thrower()
        MJCFBaseBulletEnv.__init__(self, self.robot)
    def create_single_player_scene(self, bullet_client):
        """Zero-gravity empty scene; 2 ms physics step, 5x frame skip."""
        return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.002, frame_skip=5)
    def _step(self, a):
        """Apply action `a`, advance physics, and return (state, reward,
        done, info). `done` is always False here — episode termination is
        presumably handled by a wrapper/time limit (TODO confirm)."""
        self.robot.apply_action(a)
        self.scene.global_step()
        state = self.robot.calc_state()
        potential_old = self.potential
        self.potential = self.robot.calc_potential()
        # NOTE(review): upper_arm_roll_joint appears twice in this list —
        # one entry is likely meant to be a different joint (e.g. forearm
        # roll). Confirm against the robot definition.
        joint_vel = np.array([self.robot.shoulder_pan_joint.get_velocity(), self.robot.shoulder_lift_joint.get_velocity(), self.robot.upper_arm_roll_joint.get_velocity(), self.robot.elbow_flex_joint.get_velocity(), self.robot.upper_arm_roll_joint.get_velocity(), self.robot.wrist_flex_joint.get_velocity(), self.robot.wrist_roll_joint.get_velocity()])
        # Electricity cost: |action| . |joint velocity| plus a small per-torque term.
        action_product = np.matmul(np.abs(a), np.abs(joint_vel))
        action_sum = np.sum(a)
        electricity_cost = (((- 0.1) * action_product) - (0.01 * action_sum))
        # Penalize joints sitting at their limits (|relative position| ~ 1).
        stuck_joint_cost = 0
        for j in self.robot.ordered_joints:
            if ((np.abs(j.current_relative_position()[0]) - 1) < 0.01):
                stuck_joint_cost += (- 0.1)
        object_xy = self.robot.object.pose().xyz()[:2]
        target_xy = self.robot.target.pose().xyz()[:2]
        # Latch the landing spot the first time the object drops below z = -0.25.
        if ((not self.robot._object_hit_ground) and (self.robot.object.pose().xyz()[2] < (- 0.25))):
            self.robot._object_hit_ground = True
            self.robot._object_hit_location = self.robot.object.pose().xyz()
        if self.robot._object_hit_ground:
            object_hit_xy = self.robot._object_hit_location[:2]
            reward_dist = (- np.linalg.norm((object_hit_xy - target_xy)))
        else:
            reward_dist = (- np.linalg.norm((object_xy - target_xy)))
        reward_ctrl = (- np.square(a).sum())
        # Reward terms: potential progress, energy, stuck joints, distance,
        # and a small control-magnitude penalty.
        self.rewards = [float((self.potential - potential_old)), float(electricity_cost), float(stuck_joint_cost), reward_dist, (0.002 * reward_ctrl)]
        self.HUD(state, a, False)
        return (state, sum(self.rewards), False, {})
    def camera_adjust(self):
        """Point the camera at a spot halfway toward the fingertip."""
        (x, y, z) = self.robot.fingertip.pose().xyz()
        x *= 0.5
        y *= 0.5
        self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
def single(sequence: List[E]) -> E:
    """Return the sole element of *sequence*.

    Args:
        sequence: a sequence expected to contain exactly one element.

    Returns:
        The single element.

    Raises:
        AssertionError: if *sequence* does not contain exactly one element.
            (The original indexed before checking, so an empty input raised
            IndexError instead; the length is now validated first.)
    """
    assert len(sequence) == 1, 'expected exactly one element, got {}'.format(len(sequence))
    return sequence[0]
class ResnetBlock(nn.Module):
    """Residual block with time-embedding conditioning (diffusion-model style).

    ``Swish``, ``FeatureWiseAffine``, ``Block`` and ``exists`` are project
    helpers defined elsewhere in this file.
    """
    def __init__(self, dim, dim_out, time_emb_dim=None, dropout=0, norm_groups=32):
        super().__init__()
        # NOTE(review): self.mlp is constructed but never referenced in
        # forward() — conditioning goes through self.noise_func instead.
        # Dead parameters, or forward was meant to use it; confirm upstream.
        self.mlp = (nn.Sequential(Swish(), nn.Linear(time_emb_dim, dim_out)) if exists(time_emb_dim) else None)
        self.noise_func = FeatureWiseAffine(time_emb_dim, dim_out, use_affine_level=False)
        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        # 1x1 conv matches channel counts on the skip path when they differ.
        self.res_conv = (nn.Conv2d(dim, dim_out, 1) if (dim != dim_out) else nn.Identity())
    def forward(self, x, time_emb):
        """Apply block1 -> time-embedding affine -> block2, plus skip path."""
        h = self.block1(x)
        h = self.noise_func(h, time_emb)
        h = self.block2(h)
        return (h + self.res_conv(x))
class MRCNERDataLoader(object):
def __init__(self, config, data_processor, label_list, tokenizer, mode='train', allow_impossible=True, entity_scheme='bes'):
self.data_ratio = config.data_ratio
self.data_dir = config.data_dir
self.data_mode = config.data_mode
self.lang_type = config.lang_type
self.save_cache_path = os.path.join(self.data_dir, ((self.lang_type + '_') + self.data_mode))
if (not os.path.exists(self.save_cache_path)):
os.mkdir(self.save_cache_path)
self.max_seq_length = config.max_seq_length
self.entity_scheme = entity_scheme
self.distributed_data_sampler = ((config.n_gpu > 1) and (config.data_parallel == 'ddp'))
if (mode == 'train'):
self.train_batch_size = config.train_batch_size
self.dev_batch_size = config.dev_batch_size
self.test_batch_size = config.test_batch_size
self.num_train_epochs = config.num_train_epochs
elif (mode == 'test'):
self.test_batch_size = config.test_batch_size
elif (mode == 'transform_binary_files'):
print(('=*=' * 15))
print('Transform pre-processed MRC-NER datasets into binary files. ')
print('max_sequence_length is : ', config.max_seq_length)
print('data_dir is : ', config.data_dir)
print(('=*=' * 15))
else:
raise ValueError('[mode] for MRCNERDataLoader does not exist.')
self.data_processor = data_processor
self.label_list = label_list
self.allow_impossible = allow_impossible
self.tokenizer = tokenizer
self.max_seq_len = config.max_seq_length
self.data_cache = config.data_cache
self.num_train_instances = 0
self.num_dev_instances = 0
self.num_test_instances = 0
def examples_for_diff_data_mode(self, data_sign):
src_qa = 'squad_en'
src_ner_qa = 'conll_mrc_en'
if (self.lang_type == 'esp'):
tgt_qa = 'esp_qa'
src_trans_qa = 'squad_es'
src_trans_ner_qa = 'conll_mrc_es'
pseudo_qa = 'esp_pseudo'
elif (self.lang_type == 'deu1'):
tgt_qa = 'deu_qa_1'
src_trans_qa = 'squad_de'
src_trans_ner_qa = 'conll_mrc_de'
pseudo_qa = 'deu_pseudo'
elif (self.lang_type == 'deu2'):
tgt_qa = 'deu_qa_2'
src_trans_qa = 'squad_de'
src_trans_ner_qa = 'conll_mrc_de'
pseudo_qa = 'deu_pseudo'
elif (self.lang_type == 'ned'):
tgt_qa = None
src_trans_qa = 'squad_nl'
src_trans_ner_qa = 'conll_mrc_nl'
pseudo_qa = 'ned_pseudo'
elif (self.lang_type == 'no'):
tgt_qa = None
src_trans_qa = 'squad_no'
src_trans_ner_qa = 'conll_mrc_no'
else:
print('Language type is not valid!')
if (self.data_mode == 'tgt'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
if (self.data_mode == 'src'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
update_data_path = os.path.join(self.data_dir, src_qa)
if (self.data_mode == 'src+trans'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
limit_len = 10000
sp_examples = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
examples.extend(sp_examples)
if (self.data_mode == 'tgt+src'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
limit_len = 10000
sp_examples = random.sample(src_examples, min(limit_len, len(src_examples)))
examples.extend(sp_examples)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
if (self.data_mode == 'tgt+src+trans'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
if (self.data_mode == 'src+trans+conll+pseudo'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_2 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
sp_examples_3 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
examples.extend(sp_examples_3)
if (self.data_mode == 'src+trans+conll'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_2 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
if (self.data_mode == 'trans+pseudo+conll_trans'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
sp_examples_2 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
if (self.data_mode == 'src+trans+conll+conll_trans'):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_2 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
sp_examples_3 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
examples.extend(sp_examples_3)
if (self.data_mode == 'tgt+src+trans+conll'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_3 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
examples.extend(sp_examples_3)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
if (self.data_mode == 'tgt+src+trans+conll+conll_trans+pseudo'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
pseudo_examples = self.data_processor.get_examples(os.path.join(self.data_dir, pseudo_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_3 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
sp_examples_4 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
sp_examples_5 = random.sample(pseudo_examples, min(limit_len, len(pseudo_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
examples.extend(sp_examples_3)
examples.extend(sp_examples_4)
examples.extend(sp_examples_5)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
if (self.data_mode == 'tgt+src+trans+conll+conll_trans'):
if (tgt_qa is not None):
examples = self.data_processor.get_examples(os.path.join(self.data_dir, tgt_qa), data_sign)
src_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_qa), data_sign)
src_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_qa), data_sign)
src_ner_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_ner_qa), data_sign)
src_ner_trans_examples = self.data_processor.get_examples(os.path.join(self.data_dir, src_trans_ner_qa), data_sign)
limit_len = 10000
sp_examples_1 = random.sample(src_examples, min(limit_len, len(src_examples)))
sp_examples_2 = random.sample(src_trans_examples, min(limit_len, len(src_trans_examples)))
sp_examples_3 = random.sample(src_ner_examples, min(limit_len, len(src_ner_examples)))
sp_examples_4 = random.sample(src_ner_trans_examples, min(limit_len, len(src_ner_trans_examples)))
examples.extend(sp_examples_1)
examples.extend(sp_examples_2)
examples.extend(sp_examples_3)
examples.extend(sp_examples_4)
else:
print('No qa dataset for {}'.format(self.lang_type))
return
sub_examples = random.sample(examples, round((len(examples) * self.data_ratio)))
print('data_size:{}, data_ratio:{}'.format(len(sub_examples), self.data_ratio))
return sub_examples
def convert_examples_to_features(self, data_sign='train', num_data_processor=1, logger=None):
print(f'loading {data_sign} data ... ...')
examples = self.examples_for_diff_data_mode(data_sign)
if (data_sign == 'train'):
self.num_train_instances = len(examples)
elif (data_sign == 'dev'):
self.num_dev_instances = len(examples)
elif (data_sign == 'test'):
self.num_test_instances = len(examples)
else:
raise ValueError('please notice that the data_sign can only be train/dev/test !!')
if (num_data_processor == 1):
cache_path = os.path.join(self.save_cache_path, 'mrc-ner.{}.{}.cache.{}'.format(self.data_ratio, data_sign, str(self.max_seq_len)))
if os.path.exists(cache_path):
features = torch.load(cache_path)
else:
features = convert_examples_to_features(examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
torch.save(features, cache_path)
return features
def export_features_to_cache_file(idx, sliced_features, num_data_processor):
cache_path = os.path.join(self.save_cache_path, 'mrc-ner.{}.{}.cache.{}.{}-{}'.format(self.data_ratio, data_sign, str(self.max_seq_len), str(num_data_processor), str(idx)))
torch.save(sliced_features, cache_path)
features_lst = []
total_examples = len(examples)
size_of_one_process = math.ceil((total_examples / num_data_processor))
path_to_preprocessed_cache = os.path.join(self.save_cache_path, 'mrc-ner.{}.{}.cache.{}.{}-*'.format(self.data_ratio, data_sign, str(self.max_seq_len), str(num_data_processor)))
collection_of_preprocessed_cache = glob(path_to_preprocessed_cache)
if (len(collection_of_preprocessed_cache) == num_data_processor):
print(f'%%%% %%%% Load Saved Cache files in {self.save_cache_path} %%% %%% ')
elif (len(collection_of_preprocessed_cache) != 0):
for item_of_preprocessed_cache in collection_of_preprocessed_cache:
os.remove(item_of_preprocessed_cache)
for idx in range(num_data_processor):
start = (size_of_one_process * idx)
end = (((idx + 1) * size_of_one_process) if (((idx + 1) * size_of_one_process) < total_examples) else total_examples)
sliced_examples = examples[start:end]
sliced_features = convert_examples_to_features(sliced_examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
export_features_to_cache_file(idx, sliced_features, num_data_processor)
del examples
else:
for idx in range(num_data_processor):
start = (size_of_one_process * idx)
end = (((idx + 1) * size_of_one_process) if (((idx + 1) * size_of_one_process) < total_examples) else total_examples)
sliced_examples = examples[start:end]
sliced_features = convert_examples_to_features(sliced_examples, self.tokenizer, self.label_list, self.max_seq_length, allow_impossible=self.allow_impossible, entity_scheme=self.entity_scheme)
export_features_to_cache_file(idx, sliced_features, num_data_processor)
del examples
multi_process_for_data = Pool(num_data_processor)
for idx in range(num_data_processor):
features_lst.append(multi_process_for_data.apply_async(MRCNERDataLoader.read_features_from_cache_file, args=(idx, self.data_ratio, self.save_cache_path, data_sign, self.max_seq_len, num_data_processor, logger)))
multi_process_for_data.close()
multi_process_for_data.join()
features = []
for feature_slice in features_lst:
features.extend(feature_slice.get())
return features
def get_dataloader(self, data_sign='train', num_data_processor=1, logger=None):
    """Build a DataLoader of MRC-NER features for the requested split.

    Args:
        data_sign: one of 'train', 'dev', 'test' selecting the data split.
        num_data_processor: number of worker processes used during the
            example-to-feature conversion step.
        logger: optional logger forwarded to the conversion step.

    Returns:
        torch.utils.data.DataLoader over (input_ids, input_mask, segment_ids,
        start_pos, end_pos, span_pos, span_label_mask, ner_cate) tensors.

    Raises:
        ValueError: if ``data_sign`` is not one of the supported splits
            (previously this fell through to a confusing NameError).
    """
    features = self.convert_examples_to_features(data_sign=data_sign, num_data_processor=num_data_processor, logger=logger)
    # Stack each per-example field into a single LongTensor.
    input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    start_pos = torch.tensor([f.start_position for f in features], dtype=torch.long)
    end_pos = torch.tensor([f.end_position for f in features], dtype=torch.long)
    span_pos = torch.tensor([f.span_position for f in features], dtype=torch.long)
    ner_cate = torch.tensor([f.ner_cate for f in features], dtype=torch.long)
    span_label_mask = torch.tensor([f.span_label_mask for f in features], dtype=torch.long)
    dataset = TensorDataset(input_ids, input_mask, segment_ids, start_pos, end_pos, span_pos, span_label_mask, ner_cate)
    if (data_sign == 'train'):
        # Distributed training needs DistributedSampler so each rank sees a
        # disjoint shard; otherwise shuffle with RandomSampler.
        if self.distributed_data_sampler:
            datasampler = DistributedSampler(dataset)
        else:
            datasampler = RandomSampler(dataset)
        dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.train_batch_size)
    elif (data_sign == 'dev'):
        datasampler = SequentialSampler(dataset)
        dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.dev_batch_size)
    elif (data_sign == 'test'):
        datasampler = SequentialSampler(dataset)
        dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.test_batch_size)
    else:
        # Fail fast instead of returning an undefined `dataloader`.
        raise ValueError(f'unknown data_sign: {data_sign!r}')
    return dataloader
def read_features_from_cache_file(idx, data_ratio, data_dir, data_sign, max_seq_len, num_data_processor, logger):
    """Load one shard of preprocessed features from its torch cache file.

    The cache file name encodes the data ratio, split, max sequence length,
    total shard count and this shard's index.
    """
    cache_file_name = 'mrc-ner.{}.{}.cache.{}.{}-{}'.format(data_ratio, data_sign, str(max_seq_len), str(num_data_processor), str(idx))
    cache_path = os.path.join(data_dir, cache_file_name)
    return torch.load(cache_path)
def get_train_instance(self):
    """Return the number of training instances held by this data loader."""
    instance_count = self.num_train_instances
    return instance_count
def merge_docs(doc_list):
    """Concatenate the custom extension attributes of several docs into one.

    Chains the per-doc Features, Labels, CLPR_Labels and embeddings lists (in
    doc order) onto a single freshly parsed placeholder document and returns it.
    """
    def _concat(attr_getter):
        # Flatten the per-doc lists into one combined list, preserving order.
        return list(itertools.chain.from_iterable(attr_getter(doc) for doc in doc_list))

    final_doc = parse('FinalDocument')
    final_doc._.Features = _concat(lambda d: d._.Features)
    final_doc._.Labels = _concat(lambda d: d._.Labels)
    final_doc._.CLPR_Labels = _concat(lambda d: d._.CLPR_Labels)
    final_doc._.embeddings = _concat(lambda d: d._.embeddings)
    print('Merged Lists')
    return final_doc
class PredefinedNoiseSchedule(torch.nn.Module):
    """Precomputed diffusion noise schedule gamma, indexed by timestep.

    Supports a 'cosine' schedule and 'polynomial_<power>' schedules; gamma is
    stored as a frozen (non-trainable) parameter and looked up in forward().
    """

    def __init__(self, noise_schedule, timesteps, precision):
        super(PredefinedNoiseSchedule, self).__init__()
        self.timesteps = timesteps
        if noise_schedule == 'cosine':
            alphas2 = cosine_beta_schedule(timesteps)
        elif 'polynomial' in noise_schedule:
            # Expected format: 'polynomial_<power>'.
            parts = noise_schedule.split('_')
            assert len(parts) == 2
            alphas2 = polynomial_schedule(timesteps, s=precision, power=float(parts[1]))
        else:
            raise ValueError(noise_schedule)
        sigmas2 = 1 - alphas2
        # gamma = -(log alpha^2 - log sigma^2) = log(sigma^2 / alpha^2)
        gamma_np = -(np.log(alphas2) - np.log(sigmas2))
        self.gamma = torch.nn.Parameter(torch.from_numpy(gamma_np).float(), requires_grad=False)

    def forward(self, t):
        # Map continuous t (expected in [0, 1]) to an integer timestep index.
        t_int = torch.round(t * self.timesteps).long()
        return self.gamma[t_int]
def random_projection_transform(input_shape=None, output_shape=None, translation=False, scale=False, rotation=False, horizontal_flip=False, vertical_flip=False, name=None, seed=None):
    """Compose a random 8-parameter projective transform (TF1 / tf.contrib era).

    Optionally combines a random translation, rotation, scale, and
    horizontal/vertical coin-flip mirrors into one projective transform.

    Args:
        input_shape: tensor-like source image shape (must be >= output_shape).
        output_shape: tensor-like target shape; defaults to input_shape.
        translation: False, True (random crop offset), or an explicit offset.
        rotation: False, True (uniform in [-pi, pi]), or a [min, max] range.
        scale: False, True (range derived from input/crop sizes), or a
            [min, max] range for a uniform random scale factor.
        horizontal_flip / vertical_flip: enable 50/50 mirror transforms.
        name: optional name scope.
        seed: random seed forwarded to all samplers.

    Returns:
        (composed_transforms, features): the composed transform(s) and a dict
        of the random draws used to build them.
    """
    with tf.name_scope(name, 'random_projection_transform', [input_shape, output_shape]) as name:
        input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, name='input_shape')
        if (output_shape is None):
            output_shape = input_shape
        output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32, name='output_shape')
        # Runtime guard: the crop/output must fit inside the input.
        check = control_flow_ops.Assert(math_ops.reduce_all((input_shape >= output_shape)), ['Need input_shape >= output_shape ', input_shape, output_shape], summarize=1000)
        input_shape = control_flow_ops.with_dependencies([check], input_shape)
        features = {}
        transforms = []
        # NOTE(review): despite the names, these come from output_shape —
        # presumably because the later transforms operate in crop space;
        # confirm against callers.
        input_height_f = tf.cast(output_shape[0], tf.float32)
        input_width_f = tf.cast(output_shape[1], tf.float32)
        if ((translation is not None) and (translation is not False)):
            if (isinstance(translation, bool) and translation):
                # translation=True: sample a valid random crop offset.
                offset = random_crop_offset(input_shape, output_shape, seed=seed)
            else:
                # Caller supplied an explicit offset.
                offset = translation
            offset = tf.cast(offset, tf.float32)
        else:
            offset = tf.constant([0.0, 0.0], dtype=tf.float32)
        transforms += [tf.contrib.image.translations_to_projective_transforms(offset)]
        features['random_translation_offset'] = offset
        # Identity transform in 8-parameter projective form (used for no-flip).
        identity = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
        if ((rotation is not None) and (rotation is not False)):
            if (isinstance(rotation, bool) and rotation):
                # rotation=True: sample uniformly over the full circle.
                # NOTE(review): name='input_shape' here looks copy-pasted.
                rotation = ops.convert_to_tensor([(- math.pi), math.pi], dtype=tf.float32, name='input_shape')
            theta = tf.random_uniform([1], minval=rotation[0], maxval=rotation[1], seed=seed, dtype=tf.float32)
            transforms += [tf.contrib.image.angles_to_projective_transforms(theta, input_height_f, input_width_f)]
            features['random_rotation'] = theta
        if ((scale is not None) and (scale is not False)):
            if (isinstance(scale, bool) and scale):
                # scale=True: derive the zoom range from input vs crop size.
                max_input_dim = input_shape[tf.argmax(input_shape)]
                max_crop_dim = output_shape[tf.argmax(output_shape)]
                s0 = (tf.cast(max_input_dim, dtype=tf.float32) / tf.cast(max_crop_dim, dtype=tf.float32))
                s1 = (tf.cast(max_crop_dim, dtype=tf.float32) / tf.cast(max_input_dim, dtype=tf.float32))
                scale = [s0, s1]
            s = tf.random_uniform([1], minval=scale[0], maxval=scale[1], seed=seed, dtype=tf.float32)
            # Diagonal scaling matrix in 8-parameter projective form.
            scale_matrix = [s, array_ops.zeros(1, dtypes.float32), array_ops.zeros(1, dtypes.float32), array_ops.zeros(1, dtypes.float32), s, array_ops.zeros(1, dtypes.float32), array_ops.zeros(1, dtypes.float32), array_ops.zeros(1, dtypes.float32)]
            scale_matrix = tf.stack(scale_matrix, axis=(- 1))
            transforms += [scale_matrix]
            features['random_scale'] = s
        else:
            features['random_scale'] = tf.constant(1.0, dtype=tf.float32)
        batch_size = 1
        if horizontal_flip:
            # 50/50 coin; mirror about the vertical axis when heads.
            coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
            shape = [(- 1.0), 0.0, input_width_f, 0.0, 1.0, 0.0, 0.0, 0.0]
            flip_transform = tf.convert_to_tensor(shape, dtype=tf.float32)
            flip = tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1])
            no_flip = tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])
            random_flip_transform = tf.where(coin, flip, no_flip)
            transforms.append(random_flip_transform)
            features['random_horizontal_flip'] = coin
        if vertical_flip:
            # 50/50 coin; mirror about the horizontal axis when heads.
            coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
            shape = [1.0, 0.0, 0.0, 0.0, (- 1.0), input_height_f, 0.0, 0.0]
            flip_transform = tf.convert_to_tensor(shape, dtype=tf.float32)
            flip = tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1])
            no_flip = tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])
            random_flip_transform = tf.where(coin, flip, no_flip)
            transforms.append(random_flip_transform)
            features['random_vertical_flip'] = coin
        # Collapse all partial transforms into a single projective transform.
        composed_transforms = tf.contrib.image.compose_transforms(*transforms)
        features['random_projection_transform'] = composed_transforms
        return (composed_transforms, features)
class ConvEncoder(Encoder):
    """Fully convolutional encoder (ConvS2S style) with two conv stacks.

    cnn_a produces the attention keys/outputs, cnn_c the attention values;
    both are residual tanh conv stacks over position-augmented embeddings.
    """

    def __init__(self, params, mode, name='conv_encoder'):
        super(ConvEncoder, self).__init__(params, mode, name)
        # Resolve the combiner (e.g. tensorflow.multiply) from its dotted path.
        self._combiner_fn = locate(self.params['position_embeddings.combiner_fn'])

    # NOTE(review): takes no self/cls — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the original file.
    def default_params():
        return {'attention_cnn.units': 512, 'attention_cnn.kernel_size': 3, 'attention_cnn.layers': 15, 'embedding_dropout_keep_prob': 0.8, 'output_cnn.units': 256, 'output_cnn.kernel_size': 3, 'output_cnn.layers': 5, 'position_embeddings.enable': True, 'position_embeddings.combiner_fn': 'tensorflow.multiply', 'position_embeddings.num_positions': 100}

    def encode(self, inputs, sequence_length):
        """Encode embedded inputs; returns an EncoderOutput named tuple."""
        if self.params['position_embeddings.enable']:
            # Combine token embeddings with learned position embeddings.
            positions_embed = _create_position_embedding(embedding_dim=inputs.get_shape().as_list()[(- 1)], num_positions=self.params['position_embeddings.num_positions'], lengths=sequence_length, maxlen=tf.shape(inputs)[1])
            inputs = self._combiner_fn(inputs, positions_embed)
        # Embedding dropout; only active in TRAIN mode.
        inputs = tf.contrib.layers.dropout(inputs=inputs, keep_prob=self.params['embedding_dropout_keep_prob'], is_training=(self.mode == tf.contrib.learn.ModeKeys.TRAIN))
        with tf.variable_scope('cnn_a'):
            cnn_a_output = inputs
            for layer_idx in range(self.params['attention_cnn.layers']):
                next_layer = tf.contrib.layers.conv2d(inputs=cnn_a_output, num_outputs=self.params['attention_cnn.units'], kernel_size=self.params['attention_cnn.kernel_size'], padding='SAME', activation_fn=None)
                # Residual connection from the second layer onwards.
                if (layer_idx > 0):
                    next_layer += cnn_a_output
                cnn_a_output = tf.tanh(next_layer)
        with tf.variable_scope('cnn_c'):
            cnn_c_output = inputs
            for layer_idx in range(self.params['output_cnn.layers']):
                next_layer = tf.contrib.layers.conv2d(inputs=cnn_c_output, num_outputs=self.params['output_cnn.units'], kernel_size=self.params['output_cnn.kernel_size'], padding='SAME', activation_fn=None)
                if (layer_idx > 0):
                    next_layer += cnn_c_output
                cnn_c_output = tf.tanh(next_layer)
        # Mean over time of the value stack serves as the final state.
        final_state = tf.reduce_mean(cnn_c_output, 1)
        return EncoderOutput(outputs=cnn_a_output, final_state=final_state, attention_values=cnn_c_output, attention_values_length=sequence_length)
def add_dict_to_collection(dict_, collection_name):
    """Store a dict in TF graph collections as parallel key/value lists.

    Keys go into '<collection_name>_keys' and values into
    '<collection_name>_values', in matching (insertion) order.
    """
    keys_collection = collection_name + '_keys'
    values_collection = collection_name + '_values'
    for key in dict_:
        tf.add_to_collection(keys_collection, key)
        tf.add_to_collection(values_collection, dict_[key])
_torch
_sentencepiece
_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    """Integration tests for the mBART-50 one-to-many tokenizer (en_XX -> ro_RO).

    NOTE(review): the bare `_torch` statements inside this class look like
    stripped decorators (e.g. @require_torch) from the original test file —
    confirm upstream.
    """

    checkpoint_name = 'facebook/mbart-large-50-one-to-many-mmt'
    # English source sentences and their Romanian references.
    src_text = [' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.']
    tgt_text = ['Seful ONU declara ca nu exista o solutie militara in Siria', 'Secretarul General Ban Ki-moon declara ca raspunsul sau la intensificarea sprijinului militar al Rusiei pentru Siria este ca "nu exista o solutie militara" la conflictul de aproape cinci ani si ca noi arme nu vor face decat sa inrautateasca violentele si mizeria pentru milioane de oameni.']
    # Expected ids for src_text[0]: language code first, EOS (id 2) last.
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    # NOTE(review): presumably decorated with @classmethod originally
    # (receives cls and unittest invokes setUpClass on the class) — confirm.
    def setUpClass(cls):
        # Shared tokenizer for all tests (expensive to download/build once).
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        # Language-code tokens occupy fixed ids beyond the base vocabulary.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        # Decoding with skip_special_tokens must drop the leading RO_CODE.
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = [('this is gunna be a long sentence ' * 20)]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Truncated output still keeps the language-code prefix and EOS suffix.
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[(- 1)], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        # Round-trip save/load must preserve the fairseq special-token mapping.
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    _torch  # NOTE(review): likely a stripped @require_torch decorator

    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, padding=True)
        with self.tokenizer.as_target_tokenizer():
            targets = self.tokenizer(self.tgt_text, padding=True, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id).tolist()
        labels = labels.tolist()
        # Source starts with EN_CODE and ends with EOS; labels mirror with RO_CODE.
        assert (batch.input_ids[1][0] == EN_CODE)
        assert (batch.input_ids[1][(- 1)] == 2)
        assert (labels[1][0] == RO_CODE)
        assert (labels[1][(- 1)] == 2)
        # shift_tokens_right moves EOS to the front of the decoder inputs.
        assert (batch.decoder_input_ids[1][:2] == [2, RO_CODE])

    _torch  # NOTE(review): likely a stripped @require_torch decorator

    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt')
        with self.tokenizer.as_target_tokenizer():
            targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        # shift_tokens_right puts EOS (2) in the first decoder position.
        self.assertEqual(2, batch.decoder_input_ids[(0, 0)])
        # mBART-50 uses a [lang_code] prefix and an [eos] suffix.
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        with self.tokenizer.as_target_tokenizer():
            targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        # Source and target honour their separate max_length limits.
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    _torch  # NOTE(review): likely a stripped @require_torch decorator

    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(nested_simplify(inputs), {'input_ids': [[250004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], 'forced_bos_token_id': 250001})
def regular_equidistant_sphere_points(N, r):
    """Generate approximately N roughly equidistant points on a sphere of radius r.

    Uses the deterministic latitude-band construction (Deserno, "How to
    generate equidistributed points on the surface of a sphere"): the sphere
    is split into latitude bands and each band is filled with points spaced
    to give every point roughly equal surface area.  The actual number of
    points returned is close to, but not exactly, N.

    Bug fix: the per-point area previously included the r**2 factor while the
    band geometry below is computed on the unit sphere, so the point count
    scaled with r (e.g. ~N/4 points for r=2).  The construction is now done
    on the unit sphere and only the coordinates are scaled by r, making the
    count independent of the radius.

    Args:
        N: target number of points.
        r: sphere radius.

    Returns:
        np.ndarray of shape (M, 3), M ~= N, Cartesian coordinates.
    """
    return_points = []
    # Target surface area per point on the UNIT sphere, and its spacing d.
    a = (4 * np.pi) / N
    d = np.sqrt(a)
    M_theta = int(np.round(np.pi / d))
    d_theta = np.pi / M_theta
    d_phi = a / d_theta
    for m in range(M_theta):
        # Polar angle at the middle of the m-th latitude band.
        theta = (np.pi * (m + 0.5)) / M_theta
        # Number of points that fit around this band at spacing d_phi.
        M_phi = int(np.round((2 * np.pi * np.sin(theta)) / d_phi))
        for n in range(M_phi):
            phi = (2 * np.pi * n) / M_phi
            return_points.append([
                np.sin(theta) * np.cos(phi) * r,
                np.sin(theta) * np.sin(phi) * r,
                np.cos(theta) * r,
            ])
    return np.array(return_points)
class LxmertVisualFeatureEncoder():
    """Placeholder class used when the real implementation is unavailable.

    Constructing it calls `requires_pytorch`, which presumably raises a
    helpful error when PyTorch is not installed — confirm against the
    project's dummy-object utilities.
    """

    def __init__(self, *args, **kwargs):
        # Delegate directly to the shared backend guard.
        requires_pytorch(self)
('basic')
class BasicIterator(DataIterator):
    """A very basic iterator: optionally shuffles, then yields fixed-size batches."""

    def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
        for instance_list in self._memory_sized_lists(instances):
            if shuffle:
                random.shuffle(instance_list)
            for raw_batch in lazy_groups_of(iter(instance_list), self._batch_size):
                # A raw batch may exceed the memory limit; split it if needed.
                for trimmed_batch in self._ensure_batch_is_sufficiently_small(raw_batch):
                    yield Batch(trimmed_batch)
class TestTransforms(unittest.TestCase):
    """Unit tests for the functional helpers in the transform module."""

    def testCompose(self):
        # compose applies functions left to right: ((1 + 1) + 2) / 2 == 2.
        composed = transform.compose([(lambda x: (x + 1)), (lambda x: (x + 2)), (lambda x: (x / 2))])
        self.assertEqual(composed(1), 2)

    def testTableMergeKeys(self):
        samples = {'sample1': {'input': 1, 'target': 'a'}, 'sample2': {'input': 2, 'target': 'b', 'flag': 'hard'}}
        merged = transform.tablemergekeys()(samples)
        self.assertEqual(merged['input'], {'sample1': 1, 'sample2': 2})
        self.assertEqual(merged['target'], {'sample1': 'a', 'sample2': 'b'})
        # Keys missing from some samples appear only for samples that have them.
        self.assertEqual(merged['flag'], {'sample2': 'hard'})

    def testTableApply(self):
        result = transform.tableapply((lambda x: (x + 1)))({1: 1, 2: 2})
        self.assertEqual(result, {1: 2, 2: 3})

    def testMakeBatch(self):
        samples = [{'input': torch.randn(4), 'target': 'a'}, {'input': torch.randn(4), 'target': 'b'}]
        batch = transform.makebatch()(samples)
        # Tensor fields are stacked; non-tensor fields are collected in a list.
        self.assertEqual(batch['input'].size(), torch.Size([2, 4]))
        self.assertEqual(batch['target'], ['a', 'b'])
class RoFormerConfig(PretrainedConfig):
    """Configuration for RoFormer models (rotary position embedding variant)."""

    model_type = 'roformer'

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # The embedding size falls back to the hidden size when not given.
        if embedding_size is None:
            embedding_size = hidden_size
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether rotary embeddings are also applied to the value projections.
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class ExpEi(torch.autograd.Function):
    """Autograd wrapper around SciPy's exponential integral E1.

    forward computes E1(x) via scipy.special.exp1 on CPU and moves the result
    back to the input's device; backward uses d/dx E1(x) = -exp(-x) / x.

    NOTE(review): forward/backward are kept decorator-free to match the
    original (legacy) style; modern torch convention is @staticmethod.
    """

    def forward(ctx, input):
        # Save the input for the gradient computation in backward().
        ctx.save_for_backward(input)
        dev = input.device
        with torch.no_grad():
            # SciPy operates on CPU data: detach, evaluate, restore device.
            result = special.exp1(input.detach().cpu()).to(dev)
        # Fix: removed dead `input.to(dev)` — its (new-tensor) result was
        # discarded, so the statement had no effect.
        return result

    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        # d/dx E1(x) = -exp(-x) / x, combined with the upstream gradient.
        grad_input = grad_output * ((- torch.exp(- input)) / input)
        return grad_input
class Model(nn.Module):
    """VQA model: GRU question encoder + dual visual attention + classifier."""

    def __init__(self, opt):
        super(Model, self).__init__()
        self.opt = opt
        num_hid = opt.num_hid
        activation = opt.activation
        norm = opt.norm
        # Word embeddings initialised from pretrained GloVe vectors.
        self.w_emb = WordEmbedding(opt.ntokens, emb_dim=300, dropout=opt.dropW)
        self.w_emb.init_embedding('data/glove6b_init_300d.npy')
        self.q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=opt.dropG, rnn_type='GRU')
        self.q_net = FCNet([self.q_emb.num_hid, num_hid], dropout=opt.dropL, norm=norm, act=activation)
        self.gv_net = FCNet([2048, num_hid], dropout=opt.dropL, norm=norm, act=activation)
        # Two parallel attention heads over the 2048-d visual features.
        self.gv_att_1 = Att_3(v_dim=2048, q_dim=self.q_emb.num_hid, num_hid=num_hid, dropout=opt.dropout, norm=norm, act=activation)
        self.gv_att_2 = Att_3(v_dim=2048, q_dim=self.q_emb.num_hid, num_hid=num_hid, dropout=opt.dropout, norm=norm, act=activation)
        self.classifier = SimpleClassifier(in_dim=num_hid, hid_dim=2 * num_hid, out_dim=3129, dropout=opt.dropC, norm=norm, act=activation)

    def forward(self, q, gv):
        """Return (logits, summed attention map, answer indices by score)."""
        question_hidden = self.q_emb(self.w_emb(q))
        # Sum the two attention heads, then attend over the visual features.
        att_gv = self.gv_att_1(gv, question_hidden) + self.gv_att_2(gv, question_hidden)
        gv_emb = (att_gv * gv).sum(1)
        # Joint representation is the elementwise product of both projections.
        joint_repr = self.q_net(question_hidden) * self.gv_net(gv_emb)
        logits = self.classifier(joint_repr)
        ansidx = torch.argsort(logits, dim=1, descending=True)
        return (logits, att_gv, ansidx)
def model_exists(project, label, outcome, epoch=None, kfold=None):
    """Return True iff a saved model matching the given identifiers is found."""
    try:
        find_model(project, label, outcome, kfold=kfold, epoch=epoch)
    except ModelNotFoundError:
        return False
    else:
        return True
def train(model, train_loader, optimizer, tokenizer, epoch, global_step, device, scheduler, scaler, config):
    """Run one training epoch for a VQA-style model with AMP and grad clipping.

    Tokenizes questions/answers per batch, runs a mixed-precision forward
    pass, then steps scaler/optimizer/scheduler once per batch and logs
    smoothed loss/lr metrics (optionally to wandb on the main process).

    Returns:
        int: the updated global_step counter after this epoch.
    """
    model.train()
    metric_logger = MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', SmoothedValue(window=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', SmoothedValue(window=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    log_freq = config.log_freq
    if config.distributed:
        # Reshuffle shards per epoch under the DistributedSampler.
        train_loader.sampler.set_epoch(epoch)
    iterator = metric_logger.log_every(train_loader, log_freq, header)
    for (i, data) in enumerate(iterator):
        (image, question, answer, weights, n) = data
        image = image.to(device, non_blocking=True)
        weights = weights.to(device, non_blocking=True)
        question_input = tokenizer(question, padding='max_length', truncation=True, max_length=config.max_q_len, return_tensors='pt').to(device)
        answer_input = tokenizer(answer, padding='max_length', truncation=True, max_length=config.max_a_len, return_tensors='pt').to(device)
        # Mixed-precision forward pass (no-op when config.fp16 is False).
        with torch.cuda.amp.autocast(enabled=config.fp16):
            loss = model(image, question_input, answer_input, train=True, k=n, weights=weights)
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        if (config.optimizer.max_grad_norm > 0):
            # Unscale before clipping so the norm is computed on true grads.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.optimizer.max_grad_norm)
        scaler.step(optimizer)
        scaler.update()
        scheduler.step()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        # wandb logging only from the main process, every log_freq steps.
        if (is_main_process() and config.wandb.enable and ((global_step % log_freq) == 0)):
            logs = metric_logger.get_global_avg_dict()
            log_dict_to_wandb(logs, step=global_step, prefix='train/')
        global_step += 1
        if (config.debug and (((i + 1) % 5) == 0)):
            # Debug mode: cut the epoch short after a handful of batches.
            break
    metric_logger.synchronize_between_processes()
    logger.info(f'Averaged train stats: {metric_logger.global_avg()}')
    return global_step
def build_cnn(input_image=None, image_size=32, n_colors=3, activation_function=tf.nn.relu, reuse=None, name='VGG_NET_CNN'):
    """VGG-style CNN: three conv stages (32/64/128 filters), each 2x2 max-pooled.

    Returns the feature map after the final pooling layer.
    """
    with tf.variable_scope(name, reuse=reuse):
        net = tf.reshape(input_image, shape=[(- 1), image_size, image_size, n_colors], name='Reshape_inputs')
        # Stage 1: four 3x3 conv layers with 32 filters.
        for layer_name in ('conv1_1', 'conv1_2', 'conv1_3', 'conv1_4'):
            net = my_conv2d(net, filters=32, kernel_size=(3, 3), activation=activation_function, name=layer_name)
        net = tf.layers.max_pooling2d(net, pool_size=(2, 2), strides=(2, 2), padding='same', name='max_pooling_1')
        # Stage 2: two 3x3 conv layers with 64 filters.
        for layer_name in ('conv2_1', 'conv2_2'):
            net = my_conv2d(net, filters=64, kernel_size=(3, 3), activation=activation_function, name=layer_name)
        net = tf.layers.max_pooling2d(net, pool_size=(2, 2), strides=(2, 2), padding='same', name='max_pooling_2')
        # Stage 3: one 3x3 conv layer with 128 filters.
        net = my_conv2d(net, filters=128, kernel_size=(3, 3), activation=activation_function, name='conv3_1')
        return tf.layers.max_pooling2d(net, pool_size=(2, 2), strides=(2, 2), padding='same', name='max_pooling_3')
def AssertEq(expected, actual):
    """Assert that ``actual`` equals ``expected``, printing both on mismatch.

    Raises:
        AssertionError: if the values differ.  The exception now carries a
            descriptive message (previously a bare ``raise AssertionError``),
            so the mismatch is visible even when stdout is not captured.
    """
    if (expected != actual):
        print(('Expected: %s' % (expected,)))
        print((' Actual: %s' % (actual,)))
        raise AssertionError('expected %r but got %r' % (expected, actual))
def quad_double_t_value(vrblvl=0):
    """Fetch the current continuation parameter t, via the PHCpack C interface.

    Calls the shared PHC function with job code 867 and returns the t value
    read back through the double pointer.  vrblvl > 0 enables diagnostics.

    Args:
        vrblvl: verbosity level; > 0 prints tracing information.

    Returns:
        float: the current t value reported by PHCpack.
    """
    if (vrblvl > 0):
        print('in quad_double_t_value ...')
    phc = get_phcfun()
    # C-call scratch arguments; apar=2 presumably selects quad double
    # precision — confirm against the PHCpack job table.
    apar = pointer(c_int32(2))
    bvrb = pointer(c_int32(0))
    # Output slot: PHC writes the t value here.
    ctval = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> quad_double_t_value calls phc', end='')
    # Job 867: query the t value (see PHCpack's C interface dispatch).
    retval = phc(867, apar, bvrb, ctval, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
        print('the t value :', ctval[0])
    return ctval[0]
def saturate(params):
    """Map every parameter value through softplus so all outputs are positive."""
    transformed = {}
    for param_name, param_value in params.items():
        transformed[param_name] = jax.nn.softplus(param_value)
    return transformed
def build(setup_kwargs):
    """Add the cythonized textnets._ext extension to the setup kwargs (in place)."""
    extension = Extension(name='textnets._ext', sources=['textnets/_ext.pyx'])
    setup_kwargs['ext_modules'] = cythonize(extension)
class PythiaConcatDataset(ConcatDataset):
    """ConcatDataset that proxies unknown attribute access to its children.

    Attribute lookups that miss on the instance fall through to the first
    child dataset; bound methods found there are wrapped so they run on
    every child via _call_all_datasets_func.
    """

    # Names of proxied functions that should run on only one dataset.
    _SINGLE_CALL_FUNCS = []

    def __init__(self, datasets):
        super().__init__(datasets)
        # Snapshot of this instance's own attribute names; consulted first
        # by __getattr__ before falling through to the child datasets.
        self._dir_representation = dir(self)

    def __getattr__(self, name):
        # __getattr__ only fires when normal lookup has already failed, so if
        # `name` is in the snapshot but genuinely missing now, the
        # getattr(self, name) below recurses back into __getattr__.
        # NOTE(review): potential infinite-recursion path — confirm intended.
        if (name in self._dir_representation):
            return getattr(self, name)
        elif hasattr(self.datasets[0], name):
            attr = getattr(self.datasets[0], name)
            if isinstance(attr, types.MethodType):
                # Wrap bound methods so calls fan out to all child datasets.
                attr = functools.partial(self._call_all_datasets_func, name)
            return attr
        else:
            raise AttributeError(name)

    def _get_single_call_funcs(self):
        return PythiaConcatDataset._SINGLE_CALL_FUNCS

    def _call_all_datasets_func(self, name, *args, **kwargs):
        # Invoke `name` on each child in order; return the first non-None
        # result, short-circuiting the remaining datasets.
        for dataset in self.datasets:
            value = getattr(dataset, name)(*args, **kwargs)
            if (value is not None):
                return value
            # Declared single-call functions stop after the first dataset
            # even when they returned None.
            if (hasattr(dataset, 'get_single_call_funcs') and (name in dataset.get_single_call_funcs())):
                return
def get_auto_reg_predictions(model, row, window, teacher_forcing=True, exponentiate=False, predict_deaths=True):
    """Run sliding-window autoregressive predictions over a time series.

    Args:
        model: object exposing a ``predict([history])`` method.
        row: mapping holding a 'deaths' and/or 'cases' time series.
        window: number of past values fed to the model per prediction.
        teacher_forcing: feed ground-truth history each step (the only
            supported mode; anything else raises NotImplementedError).
        exponentiate: exponentiate each prediction (for log-space models).
        predict_deaths: select the 'deaths' series instead of 'cases'.

    Returns:
        list of predictions, padded with ``window`` leading zeros so the
        predictions align index-wise with the input series.
    """
    series_key = 'deaths' if predict_deaths else 'cases'
    series = row[series_key]
    if not teacher_forcing:
        raise NotImplementedError
    # The first `window` positions have no full history; pad with zeros.
    predictions = [0] * window
    for start in range(len(series) - window):
        history = series[start:(start + window)]
        prediction = model.predict([history])
        if exponentiate:
            prediction = np.exp(prediction)
        predictions.append(prediction)
    return predictions
class IntEq():
    """Callable that numerically evaluates a 1-D definite-integral expression.

    Uses the quadrature nodes (quad_s) and weights (quad_w) stored on
    ``binding_node``, rescaled from the reference interval [-1, 1] to
    [lb, ub], where the bounds themselves are computed from the current
    variable values.
    """

    def __init__(self, binding_node, lb_lambda, ub_lambda, out_symbols, free_symbols, eq_lambda, name):
        # Node carrying quadrature data, the integration variable, and
        # auxiliary function definitions (funs).
        self.binding_node = binding_node
        # Lambdas producing the lower/upper integration bounds from the
        # subset of variables named in out_symbols.
        self.lb_lambda = lb_lambda
        self.ub_lambda = ub_lambda
        self.out_symbols = out_symbols
        self.free_symbols = free_symbols
        # The integrand, evaluated at the (rescaled) quadrature points.
        self.eq_lambda = eq_lambda
        self.name = name

    def __call__(self, var: Variables):
        # Work on a shallow copy so the caller's mapping is not mutated.
        var = {k: v for (k, v) in var.items()}
        lb_value = self.lb_lambda(**{k: v for (k, v) in var.items() if (k in self.out_symbols)})
        ub_value = self.ub_lambda(**{k: v for (k, v) in var.items() if (k in self.out_symbols)})
        xx = dict()
        # Broadcast each available free symbol to one entry per quadrature
        # node, shaped as a column vector.
        for syp in self.free_symbols:
            if (syp not in var.keys()):
                continue
            value = var[syp]
            _value = (torch.ones_like(self.binding_node.quad_s) * value)
            _value = _value.reshape((- 1), 1)
            xx.update({syp: _value})
        # Affine change of variables from [-1, 1] to [lb, ub]:
        # weights scale by (ub - lb) / 2 and nodes map correspondingly.
        quad_w = (((ub_value - lb_value) / 2) * self.binding_node.quad_w)
        quad_s = ((((self.binding_node.quad_s + 1) * (ub_value - lb_value)) / 2) + lb_value)
        shape = quad_w.shape
        quad_w = quad_w.reshape((- 1), 1)
        quad_s = quad_s.reshape((- 1), 1)
        new_var = dict()
        # Evaluate each auxiliary function at the quadrature nodes, renaming
        # its inputs/outputs through the stored input/output maps.
        for (_, fun) in self.binding_node.funs.items():
            input_map = fun['input_map']
            output_map = fun['output_map']
            tmp_var = dict()
            for (k, v) in xx.items():
                tmp_var[k] = v
            for (k, v) in input_map.items():
                # NOTE(review): the mapped value v is unused here — every
                # mapped input is bound to the quadrature nodes; confirm.
                tmp_var[k] = quad_s
            res = fun['eval'].evaluate(tmp_var)
            for (k, v) in output_map.items():
                res[v] = res.pop(k)
            new_var.update(res)
            # NOTE(review): updating xx per iteration lets later funs consume
            # earlier outputs — confirm against the original indentation.
            xx.update(new_var)
        # Weighted integrand values; summing over nodes gives the integral.
        values = (quad_w * self.eq_lambda(**dict(**{self.binding_node.var.name: quad_s}, **xx)))
        values = values.reshape(shape)
        return {self.name: values.sum(1, keepdim=True)}
def get_llama2_question_and_answers(pipeline, caption):
    """Generate multiple-choice QA dicts for a caption via a Llama-2 pipeline.

    Items whose element type is not in the known category set are dropped;
    'animal' and 'human' types are merged into a single 'animal/human' bucket.
    """
    response = llama2_completion(pipeline, caption)
    qa_items = []
    for instance in parse_resp(response):
        element_type = instance[4]
        # Skip question types outside the known category set.
        if element_type not in categories:
            continue
        if element_type in ['animal', 'human']:
            element_type = 'animal/human'
        qa_items.append({
            'caption': caption,
            'element': instance[0],
            'question': instance[1],
            'choices': instance[2],
            'answer': instance[3],
            'element_type': element_type,
        })
    return qa_items
_model
def dla102x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-102x (cardinality 32, base width 4) with optional pretrained weights."""
    cfg = default_cfgs['dla102x']
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4, residual_root=True, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
class SampleGeneratorImage(SampleGeneratorBase):
    """Infinite generator of shuffled, processed image-sample batches.

    Loads all samples from ``samples_path`` once; iteration then yields
    batches of ``batch_size`` processed samples forever, reshuffling whenever
    the index pool is exhausted.  Runs in-thread when ``debug`` is set,
    otherwise in a subprocess.
    """

    def __init__(self, samples_path, debug, batch_size, sample_process_options=None, output_sample_types=None, raise_on_no_data=True, **kwargs):
        super().__init__(debug, batch_size)
        self.initialized = False
        # Fix: None sentinels replace the previous mutable/stateful defaults
        # (SampleProcessor.Options() and []) which were evaluated once at
        # definition time and shared across all instances.
        self.sample_process_options = (SampleProcessor.Options() if (sample_process_options is None) else sample_process_options)
        self.output_sample_types = ([] if (output_sample_types is None) else output_sample_types)
        samples = SampleLoader.load(SampleType.IMAGE, samples_path)
        if (len(samples) == 0):
            if raise_on_no_data:
                raise ValueError('No training data provided.')
            # Best-effort mode: leave the generator uninitialized.
            return
        self.generators = ([ThisThreadGenerator(self.batch_func, samples)] if self.debug else [SubprocessGenerator(self.batch_func, samples)])
        self.generator_counter = (- 1)
        self.initialized = True

    def __iter__(self):
        return self

    def __next__(self):
        # Round-robin over the available generators.
        self.generator_counter += 1
        generator = self.generators[(self.generator_counter % len(self.generators))]
        return next(generator)

    def batch_func(self, samples):
        """Yield batches forever, reshuffling the index pool when exhausted."""
        samples_len = len(samples)
        idxs = [*range(samples_len)]
        shuffle_idxs = []
        while True:
            batches = None
            for n_batch in range(self.batch_size):
                if (len(shuffle_idxs) == 0):
                    # Refill and reshuffle the index pool for a new pass.
                    shuffle_idxs = idxs.copy()
                    np.random.shuffle(shuffle_idxs)
                idx = shuffle_idxs.pop()
                sample = samples[idx]
                (x,) = SampleProcessor.process([sample], self.sample_process_options, self.output_sample_types, self.debug)
                if (batches is None):
                    # One accumulator list per output sample type.
                    batches = [[] for _ in range(len(x))]
                for i in range(len(x)):
                    batches[i].append(x[i])
            (yield [np.array(batch) for batch in batches])
class D3DFACS(Instance, ABC):
    """Dataset wrapper for D3DFACS: locates images, FLAME parameters and registrations on disk."""

    def __init__(self):
        super(D3DFACS, self).__init__()
        # Destination (processed output) and source (raw dataset) roots.
        self.dst = '/scratch/NFC/OnFlame/D3DFACS/'
        self.src = '/home/wzielonka/datasets/D3DFACS/'

    def get_images(self):
        """Map actor name -> list of jpg frame paths for that actor."""
        actor_dirs = sorted(glob(self.get_src() + 'processed/images/*'))
        return {Path(d).stem: glob(f'{d}/*.jpg') for d in actor_dirs}

    def get_flame_params(self):
        """Map actor name -> single-element list holding the FLAME npz path."""
        param_files = sorted(glob(self.get_src() + 'processed/FLAME/*.npz'))
        return {Path(f).stem: [f] for f in param_files}

    def get_registrations(self):
        """Map actor name (stem prefix before '_') -> single-element path list."""
        registration_files = sorted(glob(self.get_src() + 'processed/registrations/*'))
        return {Path(f).stem.split('_')[0]: [f] for f in registration_files}
def optimize_simplify(inputs, output, size_dict, use_ssa=False):
    """Simplify a tensor contraction and return its path.

    Returns the SSA-form path when ``use_ssa`` is True, otherwise the
    equivalent linear-form path.
    """
    processor = ContractionProcessor(inputs, output, size_dict)
    processor.simplify()
    path = processor.ssa_path
    if not use_ssa:
        path = ssa_to_linear(path, len(inputs))
    return path
_model
def nfnet_f3s(pretrained=False, **kwargs):
    """NFNet-F3s: thin registry wrapper around `_create_normfreenet` for the 'nfnet_f3s' config."""
    return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs)
def postprocess(data: pd.DataFrame) -> pd.DataFrame:
    """Clean a scraped schedule/game-log table.

    Drops the 'Rk' column, renames awkward scraped headers, converts the
    'Home' marker column (null == home game) to booleans, keeps only rows
    whose 'Game' value is numeric (dropping repeated header rows), and
    coerces columns to numeric dtypes where possible.

    Note: mutates ``data`` in place (drop/rename) in addition to returning
    the cleaned frame, matching the original behavior.

    Args:
        data: raw scraped DataFrame.

    Returns:
        pd.DataFrame: cleaned frame with a fresh RangeIndex.
    """
    data.drop('Rk', axis=1, inplace=True)
    repl_dict = {'Gtm': 'Game', 'Unnamed: 3': 'Home', '#': 'NumPlayers', 'Opp. Starter (GmeSc)': 'OppStart', 'Pitchers Used (Rest-GameScore-Dec)': 'PitchersUsed'}
    data.rename(repl_dict, axis=1, inplace=True)
    # The scraped away indicator is '@'; a null cell means a home game.
    data['Home'] = data['Home'].isnull()
    # Keep only real game rows (repeated header rows have non-numeric 'Game').
    data = data[data['Game'].str.match('\\d+')]

    def _to_numeric_or_keep(col):
        # Fix: equivalent replacement for pd.to_numeric's deprecated
        # errors='ignore' option — convert when possible, else keep as-is.
        try:
            return pd.to_numeric(col)
        except (ValueError, TypeError):
            return col

    data = data.apply(_to_numeric_or_keep)
    data['Game'] = data['Game'].astype(int)
    return data.reset_index(drop=True)
def parse_args():
    """Parse command-line arguments for interactive/eval entailment runs."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
    # Input data: pairs of sentences, or '-' to read from stdin.
    parser.add_argument('--data', default='test.txt',
                        help='Data to eval interactively (pairs of sentences); use - for stdin')
    parser.add_argument('--model', default='models/bowman_snli/6.pth')
    parser.add_argument('--model_type', default='bowman', choices=['bowman', 'snli'])
    parser.add_argument('--eval_data_path', default='data/snli_1.0/')
    # Boolean switches default to False unless passed on the command line.
    for flag in ('--eval', '--cuda'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()
class ResNet50Spg(CoreClassifier):
    """ResNet-50 classifier with SPG (Self-Produced Guidance) auxiliary branches.

    Layer3 is split into SPG_A1/SPG_A2 so the auxiliary heads SPG_B_1a and
    SPG_B_2a can read features at and after that split; both feed the shared
    single-channel head SPG_B_shared. SPG_A4 turns layer4 features into
    per-class maps (pooled into the classification logits) and SPG_C emits a
    single-channel map. forward() returns the logits and stores every branch
    output in self.logits_dict.
    """
    def __init__(self, encoder_weights=constants.IMAGENET, num_classes=1000, large_feature_map=False, scale_in=1.0, in_channels: int=3):
        # encoder_weights: which pretrained weights to load (IMAGENET triggers loading).
        # num_classes: number of classes for SPG_A4 / the final logits.
        # large_feature_map: if True, layer3 keeps stride 1 (larger spatial maps).
        # scale_in: input rescaling factor applied in forward(); must be > 0.
        # in_channels: must be 3 (RGB); asserted below.
        super(ResNet50Spg, self).__init__()
        self.encoder_name = constants.RESNET50
        self.task = constants.STD_CL
        assert (scale_in > 0.0)
        self.scale_in = float(scale_in)
        assert isinstance(in_channels, int)
        assert (in_channels == 3)
        self._in_channels = in_channels
        self.name = self.encoder_name
        self.encoder_weights = encoder_weights
        self.method = constants.METHOD_SPG
        self.arch = constants.SPGARCH
        self.logits_dict = None
        # Standard ResNet-50 configuration: Bottleneck blocks, [3, 4, 6, 3].
        block = Bottleneck
        layers = [3, 4, 6, 3]
        stride_l3 = (1 if large_feature_map else 2)
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block=block, planes=64, blocks=layers[0], stride=1, split=False)
        self.layer2 = self._make_layer(block=block, planes=128, blocks=layers[1], stride=2, split=False)
        # Layer3 is split in two so the SPG_B branches can tap its midpoint.
        (self.SPG_A1, self.SPG_A2) = self._make_layer(block=block, planes=256, blocks=layers[2], stride=stride_l3, split=True)
        self.layer4 = self._make_layer(block=block, planes=512, blocks=layers[3], stride=1, split=False)
        # 1x1 conv producing one spatial map per class.
        self.SPG_A4 = nn.Conv2d((512 * block.expansion), num_classes, kernel_size=1)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Auxiliary branches operating on the 1024-channel layer3 features.
        self.SPG_B_1a = nn.Sequential(nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.ReLU(inplace=False))
        self.SPG_B_2a = nn.Sequential(nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.ReLU(inplace=False))
        self.SPG_B_shared = nn.Sequential(nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.ReLU(inplace=False), nn.Conv2d(1024, 1, kernel_size=1))
        self.SPG_C = nn.Sequential(nn.Conv2d(2048, 512, kernel_size=3, padding=1), nn.ReLU(inplace=False), nn.Conv2d(512, 1, kernel_size=1))
        initialize_weights(self.modules(), init_mode='xavier')
        if (self.encoder_weights == constants.IMAGENET):
            self._init_load_pretrained_w()
    def _make_layer(self, block, planes, blocks, stride, split=None):
        # Build one ResNet stage; when `split` is truthy, return the first
        # (possibly downsampling) block and the remaining blocks separately.
        downsample = get_downsampling_layer(self.inplanes, block, planes, stride)
        first_layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = (planes * block.expansion)
        other_layers = []
        for _ in range(1, blocks):
            other_layers.append(block(self.inplanes, planes))
        if split:
            return (nn.Sequential(*first_layers), nn.Sequential(*other_layers))
        else:
            return nn.Sequential(*(first_layers + other_layers))
    def _layer(self, block, planes, blocks, stride):
        # Return the raw block list for one stage (no Sequential wrapping).
        # NOTE(review): appears unused within this class (_make_layer is used
        # instead) — confirm before removing.
        downsample = get_downsampling_layer(self.inplanes, block, planes, stride)
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return layers
    def forward(self, x, labels=None):
        # x: input batch (N, 3, H, W). labels: optional class indices; when
        # None, the predicted argmax is used to select the CAM class.
        # Returns the classification logits; as side effects stores self.x_in,
        # self.cams and self.logits_dict (all branch outputs).
        x_shape = x.shape
        if (self.scale_in != 1.0):
            (h, w) = (x_shape[2], x_shape[3])
            x = F.interpolate(input=x, size=[int((h * self.scale_in)), int((w * self.scale_in))], mode='bilinear', align_corners=True)
        self.x_in = x
        batch_size = x.shape[0]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.SPG_A1(x)
        # First auxiliary map, taken at the midpoint of (split) layer3.
        logits_b1 = self.SPG_B_1a(x)
        logits_b1 = self.SPG_B_shared(logits_b1)
        x = self.SPG_A2(x)
        # Second auxiliary map, taken at the end of layer3.
        logits_b2 = self.SPG_B_2a(x)
        logits_b2 = self.SPG_B_shared(logits_b2)
        x = self.layer4(x)
        feat_map = self.SPG_A4(x)
        logits_c = self.SPG_C(x)
        # Global-average-pool the per-class maps into (N, num_classes) logits.
        logits = self.avgpool(feat_map)
        logits = logits.view(logits.shape[0:2])
        labels = (logits.argmax(dim=1).long() if (labels is None) else labels)
        (attention, fused_attention) = spg.compute_attention(feat_map=feat_map, labels=labels, logits_b1=logits_b1, logits_b2=logits_b2)
        # NOTE(review): labels was reassigned above and can no longer be None,
        # so this branch always executes — possibly intended to test the
        # original argument instead.
        if (labels is not None):
            feature_map = feat_map.clone().detach()
            self.cams = feature_map[(range(batch_size), labels)].detach()
        self.logits_dict = {'attention': attention, 'fused_attention': fused_attention, 'logits': logits, 'logits_b1': logits_b1, 'logits_b2': logits_b2, 'logits_c': logits_c}
        return logits
    def _init_load_pretrained_w(self):
        # Load ImageNet-pretrained weights for this architecture type.
        load_pretrained_model(model=self, architecture_type=self.arch, path=None, dataset_name='')
def _load(pickle_fp, map_location, picklemoudle, pickle_file='data.pkl', zip_file=None):
    """Lazily deserialize a zip-packaged torch checkpoint.

    Tensors are not materialized up-front: each storage is wrapped in a
    LazyStorage/LazyTensor whose `load` closure reads the exact byte range
    from the zip member on demand.

    Args:
        pickle_fp: file object positioned at the pickle stream (e.g. data.pkl).
        map_location: unused here; kept for torch.load-style signature parity.
        picklemoudle: pickle-compatible module providing `Unpickler` (the
            misspelled name is part of the signature, so left as-is).
        pickle_file: base path of the pickle inside the archive.
        zip_file: opened zipfile.ZipFile containing the tensor data blobs.
    """
    # Older checkpoints reference torch.tensor; remap to the modern module path.
    load_module_mapping: Dict[(str, str)] = {'torch.tensor': 'torch._tensor'}
    class LazyUnpickler(picklemoudle.Unpickler):
        def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
            super().__init__(fp)
            self.data_base_path = data_base_path
            self.zip_file = zip_file
        def persistent_load(self, pid):
            # pid layout as written by torch.save: (tag, StorageType, filename_stem, ...).
            data_type = pid[1].dtype
            filename_stem = pid[2]
            filename = f'{self.data_base_path}/{filename_stem}'
            info = self.zip_file.getinfo(filename)
            def load(offset: int, elm_count: int):
                # Read `elm_count` elements starting at element `offset`
                # directly out of the zip member, without caching the blob.
                dtype = data_type
                fp = self.zip_file.open(info)
                fp.seek((offset * item_size[dtype]))
                size = (elm_count * item_size[dtype])
                data = fp.read(size)
                # bytearray copy makes the buffer writable for frombuffer.
                return torch.frombuffer(bytearray(data), dtype=dtype)
            # NOTE(review): this f-string looks garbled — `{(unknown)}` would
            # raise NameError when evaluated, and `{{...}}` renders as literal
            # braces; it was likely meant to be
            # f'... path-in-zip={filename} path={self.zip_file.filename}'.
            # Confirm against the original source.
            description = f'storage data_type={data_type} path-in-zip={(unknown)} path={{self.zip_file.filename}}'
            return LazyStorage(load=load, kind=pid[1], description=description)
        def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, requires_grad: Any, backward_hooks: Any, metadata: Any=None) -> LazyTensor:
            # Stand-in for torch._utils._rebuild_tensor_v2 that defers the read.
            invalidInputError(isinstance(storage, LazyStorage), f'storage should be an instance of class `LazyStorage`, but get {type(storage)}.')
            def load() -> torch.Tensor:
                # NOTE(review): element count assumes a contiguous layout
                # (stride[0] * size[0]); verify for non-contiguous tensors.
                elm_count = (stride[0] * size[0])
                return storage.load(storage_offset, elm_count).reshape(size)
            description = f'pickled storage_offset={storage_offset} in {storage.description}'
            return LazyTensor(load, list(size), storage.kind.dtype, description)
        def rebuild_from_type_v2(func, new_type, args, state):
            # Drop subclass/state information and rebuild the plain tensor.
            return func(*args)
        # NOTE(review): getattr(..., '__func__') only succeeds on staticmethod
        # objects — these helpers were presumably decorated with @staticmethod
        # in the original source (decorators appear stripped here); confirm.
        CLASSES: dict[(tuple[(str, str)], Any)] = {('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'), ('torch', 'Tensor'): LazyTensor}
        def find_class(self, mod_name, name):
            # Route rebuild helpers to the lazy versions and *Storage names to
            # StorageType; everything else goes through the default lookup.
            if ((mod_name, name) in self.CLASSES):
                return self.CLASSES[(mod_name, name)]
            if ((type(name) is str) and ('Storage' in name)):
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            mod_name = load_module_mapping.get(mod_name, mod_name)
            return super().find_class(mod_name, name)
    unpickler = LazyUnpickler(pickle_fp, data_base_path=pickle_file, zip_file=zip_file)
    result = unpickler.load()
    return result
class Mask2FormerModel(metaclass=DummyObject):
    """Import-time placeholder used when the `torch` backend is unavailable;
    construction defers to requires_backends (which presumably raises an
    informative error when torch is missing — see the backend utilities)."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def collect_results(result_part, size, tmpdir=None):
    """Collect per-rank results during multi-GPU testing via a shared temp dir.

    Each rank dumps its partial results into ``tmpdir``; rank 0 then loads
    them all, re-interleaves them (samples were distributed round-robin),
    truncates to ``size``, removes the directory and returns the ordered
    list. All other ranks return None.

    Args:
        result_part: this rank's list of results.
        size: total number of samples across all ranks.
        tmpdir: shared directory; when None, rank 0 creates one and
            broadcasts its name to the other ranks via a fixed-size tensor.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        MAX_LEN = 512
        # Fill with 32 (ASCII space) so the decoded name can be rstrip()-ed.
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            tmpdir = tempfile.mkdtemp()
            # Fix: torch.Tensor() accepts no dtype/device keywords (it would
            # raise TypeError) — torch.tensor() is the correct factory here.
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # Re-interleave: sample i was processed by rank (i % world_size).
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Clip any padding samples added to even out the last batch.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
def test_mongo_observer_equality(mongo_obs):
    """Equality is based on the runs collection; unrelated types are unequal."""
    twin = MongoObserver(mongo_obs.runs, mock.MagicMock())
    assert (mongo_obs == twin)
    assert (not (mongo_obs != twin))
    # Comparison against a foreign type must be False / != True.
    assert (not (mongo_obs == 'foo'))
    assert (mongo_obs != 'foo')
def visual_representation(ckpt_path=None, use_3d=False):
    """Load the pretrained 3D video autoencoder checkpoint and return either
    the full (DataParallel-wrapped) 3D encoder or only its 2D feature stem.

    Raises:
        FileNotFoundError: if the checkpoint file does not exist.
    """
    if ckpt_path is None:
        ckpt_path = 'checkpoints/videoae_co3d.tar'
    if not os.path.exists(ckpt_path):
        raise FileNotFoundError('Checkpoint path does not exist')
    encoder_3d = nn.DataParallel(Encoder3D())
    state = torch.load(ckpt_path)
    encoder_3d.load_state_dict(state['encoder_3d'])
    print(colored('>>pretrained 3D visual representation is loaded.', 'red'))
    if use_3d:
        return encoder_3d
    # 2D variant: expose only the feature-extraction stem of the module.
    return encoder_3d.module.feature_extraction
# Fix: the decorator line was garbled to a bare `.parametrize(...)` (a syntax
# error); restored to the standard pytest parametrization decorator.
@pytest.mark.parametrize('params', [(torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]), torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]), 0.0, AdapWingLoss()), (torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]), torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), 29.0147, AdapWingLoss()), (torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]), torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]), 41.4496, AdapWingLoss(omega=20)), (torch.tensor([[[[0.5, 0.5], [0.5, 0.5]]]]), torch.tensor([[[[0.5, 1.0], [1.0, 1.0]]]]), 8.2703, AdapWingLoss(epsilon=2))])
def test_adapwingloss(params):
    """AdapWingLoss matches hand-computed values within 1% relative tolerance."""
    (input, target, expected_value, loss_fct) = params
    loss = loss_fct.forward(input, target)
    assert isclose(loss.detach().cpu().numpy(), expected_value, rel_tol=0.01)
def do_train(dataloaders, params: MinkLocParams, debug=False, visualize=False):
    """Train the model described by `params`, log to TensorBoard, save the
    final weights and a stats pickle, then run the final evaluation.

    Args:
        dataloaders: dict with a 'train' (and optionally 'val') loader
            yielding (batch, positives_mask, negatives_mask) triples.
        params: MinkLocParams bundle of model/training hyper-parameters.
        debug: if True, process at most 2 batches per phase per epoch.
        visualize: accepted for API compatibility; currently a no-op.
    """
    # Model name carries the architecture and a timestamp so runs don't collide.
    s = get_datetime()
    model = model_factory(params)
    model_name = ((('model_' + params.model_params.model) + '_') + s)
    print('Model name: {}'.format(model_name))
    weights_path = create_weights_folder()
    model_pathname = os.path.join(weights_path, model_name)
    if hasattr(model, 'print_info'):
        model.print_info()
    else:
        n_params = sum([param.nelement() for param in model.parameters()])
        print('Number of model parameters: {}'.format(n_params))
    if torch.cuda.is_available():
        device = 'cuda'
        model.to(device)
    else:
        device = 'cpu'
    print('Model device: {}'.format(device))
    loss_fn = make_loss(params)
    if ((params.weight_decay is None) or (params.weight_decay == 0)):
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr, weight_decay=params.weight_decay)
    if (params.scheduler is None):
        scheduler = None
    elif (params.scheduler == 'CosineAnnealingLR'):
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(params.epochs + 1), eta_min=params.min_lr)
    elif (params.scheduler == 'MultiStepLR'):
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, params.scheduler_milestones, gamma=0.1)
    else:
        raise NotImplementedError('Unsupported LR scheduler: {}'.format(params.scheduler))
    now = datetime.now()
    logdir = os.path.join('../tf_logs', now.strftime('%Y%m%d-%H%M%S'))
    writer = SummaryWriter(logdir)
    is_validation_set = ('val' in dataloaders)
    if is_validation_set:
        phases = ['train', 'val']
    else:
        phases = ['train']
    stats = {'train': [], 'val': [], 'eval': []}
    for epoch in tqdm.tqdm(range(1, (params.epochs + 1))):
        for phase in phases:
            if (phase == 'train'):
                model.train()
            else:
                model.eval()
            running_stats = []
            count_batches = 0
            for (batch, positives_mask, negatives_mask) in dataloaders[phase]:
                count_batches += 1
                batch_stats = {}
                if (debug and (count_batches > 2)):
                    break
                batch = {e: batch[e].to(device) for e in batch}
                n_positives = torch.sum(positives_mask).item()
                n_negatives = torch.sum(negatives_mask).item()
                if ((n_positives == 0) or (n_negatives == 0)):
                    # A batch without both positives and negatives carries no
                    # training signal for the metric loss.
                    print('WARNING: Skipping batch without positive or negative examples')
                    continue
                optimizer.zero_grad()
                if visualize:
                    pass
                # Gradients only in the train phase; 'val' runs the same loop
                # without autograd bookkeeping.
                with torch.set_grad_enabled((phase == 'train')):
                    embeddings = model(batch)
                    (loss, temp_stats, _) = loss_fn(embeddings, positives_mask, negatives_mask)
                    temp_stats = tensors_to_numbers(temp_stats)
                    batch_stats.update(temp_stats)
                    batch_stats['loss'] = loss.item()
                    if (phase == 'train'):
                        loss.backward()
                        optimizer.step()
                running_stats.append(batch_stats)
                torch.cuda.empty_cache()
            # Epoch statistics are the mean of each per-batch statistic.
            epoch_stats = {}
            for key in running_stats[0].keys():
                temp = [e[key] for e in running_stats]
                epoch_stats[key] = np.mean(temp)
            stats[phase].append(epoch_stats)
            print_stats(epoch_stats, phase)
        if (scheduler is not None):
            scheduler.step()
        loss_metrics = {'train': stats['train'][(- 1)]['loss']}
        if ('val' in phases):
            loss_metrics['val'] = stats['val'][(- 1)]['loss']
        writer.add_scalars('Loss', loss_metrics, epoch)
        if ('num_triplets' in stats['train'][(- 1)]):
            nz_metrics = {'train': stats['train'][(- 1)]['num_non_zero_triplets']}
            if ('val' in phases):
                nz_metrics['val'] = stats['val'][(- 1)]['num_non_zero_triplets']
            writer.add_scalars('Non-zero triplets', nz_metrics, epoch)
        elif ('num_pairs' in stats['train'][(- 1)]):
            nz_metrics = {'train_pos': stats['train'][(- 1)]['pos_pairs_above_threshold'], 'train_neg': stats['train'][(- 1)]['neg_pairs_above_threshold']}
            if ('val' in phases):
                nz_metrics['val_pos'] = stats['val'][(- 1)]['pos_pairs_above_threshold']
                nz_metrics['val_neg'] = stats['val'][(- 1)]['neg_pairs_above_threshold']
            writer.add_scalars('Non-zero pairs', nz_metrics, epoch)
        if (params.batch_expansion_th is not None):
            # Dynamic batch expansion: grow the batch when the fraction of
            # non-zero triplets falls below the configured threshold.
            epoch_train_stats = stats['train'][(- 1)]
            if ('num_non_zero_triplets' not in epoch_train_stats):
                print('WARNING: Batch size expansion is enabled, but the loss function is not supported')
            else:
                rnz = (epoch_train_stats['num_non_zero_triplets'] / epoch_train_stats['num_triplets'])
                if (rnz < params.batch_expansion_th):
                    dataloaders['train'].batch_sampler.expand_batch()
        print('')
    final_model_path = (model_pathname + '_final.pth')
    torch.save(model.state_dict(), final_model_path)
    stats = {'train_stats': stats, 'params': params}
    model.eval()
    final_eval_stats = evaluate(model, device, params)
    print('Final model:')
    print_eval_stats(final_eval_stats)
    stats['eval'] = {'final': final_eval_stats}
    print('')
    pickle_path = (model_pathname + '_stats.pickle')
    # Fix: close the stats file deterministically instead of leaking the
    # handle from pickle.dump(stats, open(pickle_path, 'wb')).
    with open(pickle_path, 'wb') as stats_file:
        pickle.dump(stats, stats_file)
    model_params_name = os.path.split(params.model_params.model_params_path)[1]
    config_name = os.path.split(params.params_path)[1]
    (_, model_name) = os.path.split(model_pathname)
    prefix = '{}, {}, {}'.format(model_params_name, config_name, model_name)
    export_eval_stats('experiment_results.txt', prefix, final_eval_stats)
def reduce_mean(tensor, nprocs):
    """Average `tensor` across all distributed processes.

    Performs a SUM all-reduce on a clone (leaving the input untouched) and
    divides by the process count.
    """
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    return averaged / nprocs
class TFBlenderbotPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is
    unavailable; construction defers to requires_backends (which presumably
    raises an informative error when tf is missing)."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def majority_voting(data_loader, model, mlps_list):
    """Ensemble the ViT head with per-block MLP heads by majority vote.

    For each batch, collects one prediction from the full model plus one per
    (transformer block, MLP head) pair, takes the per-sample majority class,
    and returns the mean per-batch accuracy over the loader.
    """
    accumulated_acc = 0.0
    for (images, labels) in data_loader:
        images = images.cuda()
        # Vote 1: the model's own classification head.
        backbone_logits = model(images)
        votes = [torch.argmax(backbone_logits.detach().cpu(), dim=(- 1)).detach().cpu()]
        # Re-run the embedding stem, then walk the blocks, collecting one
        # vote from each block's MLP head.
        hidden = model.pos_drop(model.patch_embed(images))
        for block_idx, mlp_head in enumerate(mlps_list):
            hidden = model.blocks[block_idx](hidden)
            head_logits = mlp_head(hidden)
            votes.append(torch.argmax(head_logits.detach().cpu(), dim=(- 1)).detach().cpu())
        # One-hot each vote and sum across voters; argmax = majority class.
        stacked_votes = torch.stack(votes, dim=1)
        majority = torch.argmax(torch.nn.functional.one_hot(stacked_votes).sum(dim=1), dim=(- 1))
        accumulated_acc += ((majority == labels).sum().item() / len(labels))
    final_acc = (accumulated_acc / len(data_loader))
    print(f'Final Accuracy From Majority Voting = {(final_acc * 100):.3f}%')
    return final_acc
class Tschebycheff(AggregativeFunction):
    """Tschebycheff (weighted Chebyshev) scalarizing function built around a
    running ideal point."""

    def __init__(self, dimension: int):
        self.ideal_point = IdealPoint(dimension)

    def compute(self, vector: [], weight_vector: []) -> float:
        """Return max_i w_i * |f_i - z*_i|; a zero weight is replaced by the
        small constant 0.0001 so the objective still contributes."""
        best = (- 1e+30)
        for idx in range(len(vector)):
            gap = abs(vector[idx] - self.ideal_point.point[idx])
            weight = weight_vector[idx]
            scaled = (0.0001 * gap) if (weight == 0) else (gap * weight)
            if scaled > best:
                best = scaled
        return best

    def update(self, vector: []) -> None:
        """Fold a new objective vector into the ideal point."""
        self.ideal_point.update(vector)
class ConceptCapLoaderTrain(RNGDataFlow):
    """Tensorpack dataflow over pre-extracted Conceptual Captions training
    features stored in LMDB, serving roughly 1/num_split of the dataset."""

    def __init__(self, num_split):
        lmdb_file = '/srv/share/vgoswami8/conceptual_captions/training_feat_all.lmdb'
        caption_path = '/srv/share/vgoswami8/conceptual_captions/caption_train.json'
        print('Loading from %s' % lmdb_file)
        flow = td.LMDBSerializer.load(lmdb_file, shuffle=False)
        # One shard is len/num_split (+1 so all records are covered).
        self.num_dataset = (int((len(flow) / num_split)) + 1)
        flow = td.PrefetchDataZMQ(flow, nr_proc=1)
        flow = td.FixedSizeData(flow, self.num_dataset, keep_state=True)
        self.ds = flow
        self.ds.reset_state()

    def __iter__(self):
        yield from self.ds.get_data()

    def __len__(self):
        return self.ds.size()
def _cfg(url='', **kwargs):
    """Default pretrained-model config dict for Inception-style (299x299)
    models; any keyword argument overrides the corresponding default."""
    cfg = {'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'Conv2d_1a_3x3', 'classifier': 'fc'}
    cfg.update(kwargs)
    return cfg
def _extend_tensor(input_tensor: torch.Tensor, N: int) -> torch.Tensor:
if (input_tensor.ndim < 2):
raise ValueError('Input tensor must have ndimensions >= 2.')
B = input_tensor.shape[0]
non_batch_dims = tuple(input_tensor.shape[1:])
constant_dims = (((- 1),) * input_tensor.ndim)
return input_tensor.clone()[(None, ...)].expand(N, *constant_dims).transpose(0, 1).reshape((N * B), *non_batch_dims) |
class PreActResNet(nn.Module):
    """Pre-activation ResNet backbone.

    The classifier expects a 2x2 spatial map after the 4x4 average pool,
    hence the `512 * expansion * 4` input features (i.e. 64x64 inputs such
    as Tiny-ImageNet, the default 200-class setting).
    """

    def __init__(self, block, num_blocks, num_classes=200):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # Four stages; widths double while (after stage 1) resolution halves.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion * 4, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        per_block_strides = [stride] + [1] * (num_blocks - 1)
        stage = []
        for s in per_block_strides:
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
class SpotPathHandler():
    """Factory that builds a spot Video object for a path using the
    requested decoder (imports are deferred to keep startup light)."""

    def __init__(self) -> None:
        return

    def video_from_path(self, decoder: DecoderType, video_path: str, num_frames: int, **kwargs) -> Video:
        """Instantiate the video reader matching `decoder`.

        Raises:
            NotImplementedError: for decoder types without a reader here.
        """
        requested = DecoderType(decoder)
        if requested == DecoderType.FRAME:
            from eztorch.datasets.decoders.frame_spot_video import FrameSpotVideo
            return FrameSpotVideo(video_path=video_path, num_frames=num_frames, **kwargs)
        if requested == DecoderType.DUMB:
            from eztorch.datasets.decoders.dumb_spot_video import DumbSpotVideo
            return DumbSpotVideo(video_path=video_path, num_frames=num_frames, **kwargs)
        raise NotImplementedError
def make_parser():
    """Build the argument parser for YOLOX evaluation / tracking runs."""
    p = argparse.ArgumentParser('YOLOX Eval')
    # Experiment / model selection.
    p.add_argument('-expn', '--experiment-name', type=str, default=None)
    p.add_argument('-n', '--name', type=str, default=None, help='model name')
    # Distributed setup.
    p.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
    p.add_argument('--dist-url', default=None, type=str, help='url used to set up distributed training')
    p.add_argument('-b', '--batch-size', type=int, default=64, help='batch size')
    p.add_argument('-d', '--devices', default=None, type=int, help='device for training')
    p.add_argument('--local_rank', default=0, type=int, help='local rank for dist training')
    p.add_argument('--num_machines', default=1, type=int, help='num of node for training')
    p.add_argument('--machine_rank', default=0, type=int, help='node rank for multi-node training')
    # Experiment file, precision and runtime switches.
    p.add_argument('-f', '--exp_file', default=None, type=str, help='pls input your expriment description file')
    p.add_argument('--fp16', dest='fp16', default=False, action='store_true', help='Adopting mix precision evaluating.')
    p.add_argument('--fuse', dest='fuse', default=False, action='store_true', help='Fuse conv and bn for testing.')
    p.add_argument('--trt', dest='trt', default=False, action='store_true', help='Using TensorRT model for testing.')
    p.add_argument('--test', dest='test', default=False, action='store_true', help='Evaluating on test-dev set.')
    p.add_argument('--speed', dest='speed', default=False, action='store_true', help='speed test only.')
    p.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    # Checkpoint and detection thresholds.
    p.add_argument('-c', '--ckpt', default=None, type=str, help='ckpt for eval')
    p.add_argument('--conf', default=0.1, type=float, help='test conf')
    p.add_argument('--nms', default=0.7, type=float, help='test nms threshold')
    p.add_argument('--tsize', default=None, type=int, help='test img size')
    p.add_argument('--seed', default=None, type=int, help='eval seed')
    # Tracking parameters.
    p.add_argument('--track_thresh', type=float, default=0.5, help='tracking confidence threshold')
    p.add_argument('--track_buffer', type=int, default=30, help='the frames for keep lost tracks')
    p.add_argument('--match_thresh', type=float, default=0.9, help='matching threshold for tracking')
    p.add_argument('--min-box-area', type=float, default=100, help='filter out tiny boxes')
    p.add_argument('--model_folder', type=str, default='pretrained/ckpt.t7', help='reid model folder')
    return p
def all_gather(data):
    """Gather an arbitrary picklable object from every distributed process.

    Serializes `data` to a cuda byte tensor, exchanges payload sizes, pads
    every payload to the common maximum, all-gathers, then trims and
    unpickles each peer's bytes. Returns the list of all peers' objects
    (a single-element list when world size is 1).
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # Serialize to a cuda ByteTensor.
    serialized = pickle.dumps(data)
    byte_storage = torch.ByteStorage.from_buffer(serialized)
    payload = torch.ByteTensor(byte_storage).to('cuda')
    # Exchange payload sizes so every rank knows the common maximum.
    my_size = torch.IntTensor([payload.numel()]).to('cuda')
    peer_sizes = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(peer_sizes, my_size)
    peer_sizes = [int(sz.item()) for sz in peer_sizes]
    biggest = max(peer_sizes)
    # All-gather requires equal-sized tensors; pad ours up to the maximum.
    gathered = []
    for _ in peer_sizes:
        gathered.append(torch.ByteTensor(size=(biggest,)).to('cuda'))
    if my_size != biggest:
        padding = torch.ByteTensor(size=(biggest - my_size,)).to('cuda')
        payload = torch.cat((payload, padding), dim=0)
    dist.all_gather(gathered, payload)
    # Trim each peer's buffer back to its true size and unpickle.
    results = []
    for (true_size, received) in zip(peer_sizes, gathered):
        raw = received.cpu().numpy().tobytes()[:true_size]
        results.append(pickle.loads(raw))
    return results
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.