code stringlengths 101 5.91M |
|---|
def _create_linear_initializer(input_size, output_size, dtype=tf.float32):
    """Return default initializers for a linear layer.

    The weight uses an orthogonal initializer and the bias is zero-filled.
    `input_size`/`output_size` are accepted for interface compatibility but
    are not consulted by these initializers.
    """
    initializers = {}
    initializers['w'] = tf.orthogonal_initializer()
    initializers['b'] = tf.zeros_initializer(dtype=dtype)
    return initializers
class Timm_Encoder_toy(nn.Module):
    """ViT-backed image encoder for RL observations.

    Wraps a toy ViT backbone (`vit_toy_patch6_84`) and maps its 192-d latent
    to a 50-d policy feature. Also carries BYOL projection/prediction heads
    used for the auxiliary self-supervised objective.
    """

    def __init__(self, obs_shape, feature_dim):
        super().__init__()
        # Observation channels are stacked frames in groups of 3 (RGB).
        self.num_step = int(obs_shape[0] / 3)
        self.feature_dim = feature_dim
        self.image_encode = vit_toy_patch6_84()
        # Projects the 192-d ViT latent down to the 50-d policy feature.
        self.linear_map = nn.Linear(192, 50)
        # BYOL projector (192 -> 96) and predictor (96 -> 96) heads.
        self.byol_project = nn.Sequential(nn.Linear(192, 384), nn.BatchNorm1d(384), nn.ReLU(), nn.Linear(384, 96), nn.BatchNorm1d(96))
        self.byol_predict = nn.Sequential(nn.Linear(96, 384), nn.BatchNorm1d(384), nn.ReLU(), nn.Linear(384, 96))

    def set_reuse(self):
        """Ask the backbone to copy/share its token (see backbone impl)."""
        self.image_encode.copy_token()

    def _to_policy_feature(self, latent, detach):
        """Map a backbone latent to the policy feature.

        When `detach` is true, the result is detached from the autograd graph
        so policy gradients do not flow back into the encoder.
        """
        policy_feature = self.linear_map(latent)
        if detach:
            policy_feature = policy_feature.detach()
        return policy_feature

    def forward_1(self, img_sequence, detach):
        """Encode via the backbone's `forward_features2` path."""
        return self._to_policy_feature(self.image_encode.forward_features2(img_sequence), detach)

    def forward_2(self, img_sequence, detach):
        """Encode via the backbone's `forward_features3` path."""
        return self._to_policy_feature(self.image_encode.forward_features3(img_sequence), detach)

    def forward_0(self, img_sequence, detach):
        """Encode via the backbone's `forward_features1` path."""
        return self._to_policy_feature(self.image_encode.forward_features1(img_sequence), detach)

    def get_rec(self, input):
        """Decode a latent back to image space.

        NOTE(review): `decoder_input`, `decoder` and `final_layer` are not
        created in `__init__`; they must be attached to this module elsewhere
        before calling this — confirm against the training code.
        """
        result = self.decoder_input(input)
        result = result.view((- 1), 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def forward_rec(self, img_sequence):
        """Run the backbone's reconstruction forward pass."""
        rec = self.image_encode.forward_reconstruction(img_sequence)
        return rec
_model
def res2net101_26w_4s(pretrained=False, **kwargs):
    """Construct a Res2Net-101 model (base width 26, scale 4)."""
    model_args = dict(
        block=Bottle2neck,
        layers=[3, 4, 23, 3],
        base_width=26,
        block_args=dict(scale=4),
        **kwargs,
    )
    return _create_res2net('res2net101_26w_4s', pretrained, **model_args)
class TestSameTransfoms(unittest.TestCase):
    """Cross-framework consistency tests for the preprocess TRANSFORMS registry.

    A single random 10x10x3 image is shared (as numpy / PIL / mxnet / tf
    tensors) and each transform is checked to behave equivalently across the
    tensorflow, pytorch, mxnet and onnxrt backends.
    """

    # BUG FIX: unittest invokes `cls.setUpClass()`; without @classmethod the
    # plain function received no `cls` argument and raised TypeError.
    @classmethod
    def setUpClass(cls):
        """Create the shared test image and per-framework transform registries."""
        if (platform.system().lower() == 'windows'):
            cls.skipTest(cls, 'not support mxnet on windows yet')
        cls.img = (np.random.random_sample([10, 10, 3]) * 255)
        cls.tf_trans = TRANSFORMS('tensorflow', 'preprocess')
        cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')
        cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
        cls.ox_trans = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
        cls.mx_img = mx.nd.array(cls.img.astype(np.uint8))
        cls.pt_img = Image.fromarray(cls.img.astype(np.uint8))
        cls.tf_img = tf.constant(cls.img)
        # Instantiating the postprocess registries just checks they resolve.
        _ = TRANSFORMS('tensorflow', 'postprocess')
        _ = TRANSFORMS('pytorch', 'postprocess')
        _ = TRANSFORMS('mxnet', 'postprocess')
        _ = TRANSFORMS('onnxrt_qlinearops', 'postprocess')
        _ = TRANSFORMS('onnxrt_integerops', 'postprocess')

    def testCast(self):
        """Cast to int64 agrees across tf / mxnet / onnxrt / pytorch."""
        args = {'dtype': 'int64'}
        tf_func = TestSameTransfoms.tf_trans['Cast'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result[0][0][0].dtype, 'int64')
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result[0][0][0].dtype, 'int64')
        mx_func = TestSameTransfoms.mx_trans['Cast'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))
        self.assertEqual(mx_result[0][0][0].dtype, np.int64)
        ox_func = TestSameTransfoms.ox_trans['Cast'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))
        self.assertEqual(ox_result[0][0][0].dtype, 'int64')
        totensor = TestSameTransfoms.pt_trans['ToTensor']()
        cast = TestSameTransfoms.pt_trans['Cast'](**args)
        pt_func = TestSameTransfoms.pt_trans['Compose']([totensor, cast])
        pt_result = pt_func((TestSameTransfoms.pt_img, None))
        self.assertEqual(pt_result[0][0][0].dtype, torch.int64)

    def testCropToBoundingBox(self):
        """CropToBoundingBox yields a 5x5 crop on every backend."""
        args = {'offset_height': 2, 'offset_width': 2, 'target_height': 5, 'target_width': 5}
        pt_func = TestSameTransfoms.pt_trans['CropToBoundingBox'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        self.assertEqual(pt_result.size, (5, 5))
        ox_func = TestSameTransfoms.ox_trans['CropToBoundingBox'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(ox_result.shape, (5, 5, 3))
        mx_func = TestSameTransfoms.mx_trans['CropToBoundingBox'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(mx_result.shape, (5, 5, 3))
        tf_func = TestSameTransfoms.tf_trans['CropToBoundingBox'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5, 5, 3))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (5, 5, 3))

    def testNormalize(self):
        """Default Normalize is a no-op scale; zero std must raise."""
        args = {}
        normalize = TestSameTransfoms.pt_trans['Normalize'](**args)
        totensor = TestSameTransfoms.pt_trans['ToTensor']()
        pt_func = TestSameTransfoms.pt_trans['Compose']([totensor, normalize])
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        self.assertEqual((TestSameTransfoms.img.astype(np.uint8)[0][0][0] / 255.0), pt_result[0][0][0])
        args = {'std': [0.0]}
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['Normalize'](**args)

    def testRescale(self):
        """Rescale divides pixel values by 255."""
        ox_func = TestSameTransfoms.ox_trans['Rescale']()
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertAlmostEqual(ox_result[1][2][0], (TestSameTransfoms.img[1][2][0] / 255.0))

    def testTranspose(self):
        """Transpose with perm [2, 0, 1] permutes HWC to CHW."""
        args = {'perm': [2, 0, 1]}
        tf_func = TestSameTransfoms.tf_trans['Transpose'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['Transpose'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Transpose'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        pt_transpose = TestSameTransfoms.pt_trans['Transpose'](**args)
        pt_totensor = TestSameTransfoms.pt_trans['ToTensor']()
        pt_compose = TestSameTransfoms.pt_trans['Compose']([pt_totensor, pt_transpose])
        pt_result = pt_compose((TestSameTransfoms.pt_img, None))[0]
        self.assertEqual(tf_result.shape, (3, 10, 10))
        self.assertEqual(ox_result.shape, (3, 10, 10))
        self.assertEqual(mx_result.shape, (3, 10, 10))
        # pytorch ToTensor already moves channels first, so perm re-permutes.
        self.assertEqual(pt_result.shape, (10, 3, 10))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (3, 10, 10))

    def testCenterCrop(self):
        """CenterCrop handles list/int/single-element sizes and error cases."""
        args = {'size': [4, 4]}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        self.assertEqual(pt_result.size, (4, 4))
        self.assertEqual(mx_result.shape, (4, 4, 3))
        self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0])
        self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (4, 4, 3))
        # A 4-D (batched) input keeps its batch dimension.
        tf_result = tf_func((tf.constant(TestSameTransfoms.img.reshape((1, 10, 10, 3))), None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (1, 4, 4, 3))
        args = {'size': 4}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['CenterCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CenterCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        self.assertEqual(pt_result.size, (4, 4))
        self.assertEqual(mx_result.shape, (4, 4, 3))
        self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0])
        self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0]))
        args = {'size': [4]}
        tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        # Rank-5 inputs and crops larger than the image must raise.
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((np.array([[TestSameTransfoms.img]]), None))
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((tf.constant(TestSameTransfoms.img.reshape((1, 1, 10, 10, 3))), None))
        args = {'size': [20]}
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((TestSameTransfoms.img, None))
        with self.assertRaises(ValueError):
            tf_func = TestSameTransfoms.tf_trans['CenterCrop'](**args)
            tf_result = tf_func((TestSameTransfoms.tf_img, None))

    def testResizeWithRatio(self):
        """ResizeWithRatio pads to square when `padding` is set."""
        args = {'padding': True}
        label = [[0.1, 0.1, 0.5, 0.5], [], [], []]
        tf_func = TestSameTransfoms.tf_trans['ResizeWithRatio'](**args)
        tf_result = tf_func((TestSameTransfoms.img, label))[0]
        self.assertEqual(tf_result.shape, (1365, 1365, 3))
        args = {'padding': False}
        tf_func = TestSameTransfoms.tf_trans['ResizeWithRatio'](**args)
        tf_result = tf_func((TestSameTransfoms.img, label))[0]
        self.assertTrue(((tf_result.shape[0] == 800) or (tf_result.shape[1] == 1365)))

    def testResize(self):
        """Resize honors [w, h] / int / [s] sizes; bad interpolation raises."""
        tf_func = TestSameTransfoms.tf_trans['Resize'](**{'size': [4, 5]})
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['Resize'](**{'size': [4, 5]})
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Resize'](**{'size': [4, 5]})
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (5, 4, 3))
        self.assertEqual(pt_result.size, (5, 4))
        self.assertEqual(mx_result.shape, (4, 5, 3))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (4, 5, 3))
        args = {'size': 4}
        tf_func = TestSameTransfoms.tf_trans['Resize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['Resize'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Resize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        self.assertEqual(pt_result.size, (4, 4))
        self.assertEqual(mx_result.shape, (4, 4, 3))
        args = {'size': [4]}
        tf_func = TestSameTransfoms.tf_trans['Resize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['Resize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        self.assertEqual(mx_result.shape, (4, 4, 3))
        args = {'size': 4, 'interpolation': 'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['Resize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['Resize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['Resize'](**args)

    def testRandomResizedCrop(self):
        """RandomResizedCrop shape checks plus invalid scale/interpolation."""
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**{'size': [4, 5]})
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['RandomResizedCrop'](**{'size': [4, 5]})
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**{'size': [4, 5]})
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (5, 4, 3))
        self.assertEqual(pt_result.size, (5, 4))
        self.assertEqual(mx_result.shape, (4, 5, 3))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (4, 5, 3))
        args = {'size': [4]}
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(mx_result.shape, (4, 4, 3))
        args = {'size': 4}
        tf_func = TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertEqual(tf_result.shape, (4, 4, 3))
        self.assertEqual(pt_result.size, (4, 4))
        self.assertEqual(mx_result.shape, (4, 4, 3))
        # scale=(0.8, 0.2) is descending and must be rejected.
        args = {'size': 4, 'scale': (0.8, 0.2)}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)
        args = {'size': 4, 'interpolation': 'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['RandomResizedCrop'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['RandomResizedCrop'](**args)

    def testCropResize(self):
        """CropResize with various size forms; bad interpolation raises."""
        args = {'x': 0, 'y': 0, 'width': 10, 'height': 10, 'size': [5, 5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['CropResize'](**args)
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        self.assertEqual(tf_result.shape, (5, 5, 3))
        self.assertEqual(mx_result.shape, (5, 5, 3))
        self.assertEqual(ox_result.shape, (5, 5, 3))
        self.assertEqual(pt_result.size, (5, 5))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertEqual(tf_result.shape, (5, 5, 3))
        args = {'x': 0, 'y': 0, 'width': 10, 'height': 10, 'size': 5}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5, 5, 3))
        self.assertEqual(mx_result.shape, (5, 5, 3))
        self.assertEqual(ox_result.shape, (5, 5, 3))
        args = {'x': 0, 'y': 0, 'width': 10, 'height': 10, 'size': [5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5, 5, 3))
        self.assertEqual(mx_result.shape, (5, 5, 3))
        self.assertEqual(ox_result.shape, (5, 5, 3))
        args = {'x': 0, 'y': 0, 'width': 10, 'height': 10, 'size': [5, 5]}
        tf_func = TestSameTransfoms.tf_trans['CropResize'](**args)
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['CropResize'](**args)
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['CropResize'](**args)
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        self.assertEqual(tf_result.shape, (5, 5, 3))
        self.assertEqual(mx_result.shape, (5, 5, 3))
        self.assertEqual(ox_result.shape, (5, 5, 3))
        args = {'x': 0, 'y': 0, 'width': 10, 'height': 10, 'size': 5, 'interpolation': 'test'}
        with self.assertRaises(ValueError):
            TestSameTransfoms.ox_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.mx_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.tf_trans['CropResize'](**args)
        with self.assertRaises(ValueError):
            TestSameTransfoms.pt_trans['CropResize'](**args)

    def testRandomHorizontalFlip(self):
        """Result is either the identity or an exact left-right flip."""
        tf_func = TestSameTransfoms.tf_trans['RandomHorizontalFlip']()
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['RandomHorizontalFlip']()
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['RandomHorizontalFlip']()
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomHorizontalFlip']()
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertTrue(((np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() or (np.fliplr(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all()))
        self.assertTrue(((TestSameTransfoms.img == tf_result).all() or (np.fliplr(TestSameTransfoms.img) == tf_result).all()))
        self.assertTrue(((TestSameTransfoms.img == ox_result).all() or (np.fliplr(TestSameTransfoms.img) == ox_result).all()))
        self.assertTrue(((TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() or (np.fliplr(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all()))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertTrue(((TestSameTransfoms.img == tf_result).all() or (np.fliplr(TestSameTransfoms.img) == tf_result).all()))

    def testRandomVerticalFlip(self):
        """Result is either the identity or an exact up-down flip."""
        tf_func = TestSameTransfoms.tf_trans['RandomVerticalFlip']()
        tf_result = tf_func((TestSameTransfoms.img, None))[0]
        ox_func = TestSameTransfoms.ox_trans['RandomVerticalFlip']()
        ox_result = ox_func((TestSameTransfoms.img, None))[0]
        pt_func = TestSameTransfoms.pt_trans['RandomVerticalFlip']()
        pt_result = pt_func((TestSameTransfoms.pt_img, None))[0]
        mx_func = TestSameTransfoms.mx_trans['RandomVerticalFlip']()
        mx_result = mx_func((TestSameTransfoms.mx_img, None))[0]
        self.assertTrue(((np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() or (np.flipud(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all()))
        self.assertTrue(((TestSameTransfoms.img == tf_result).all() or (np.flipud(TestSameTransfoms.img) == tf_result).all()))
        self.assertTrue(((TestSameTransfoms.img == ox_result).all() or (np.flipud(TestSameTransfoms.img) == ox_result).all()))
        self.assertTrue(((TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() or (np.flipud(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all()))
        tf_result = tf_func((TestSameTransfoms.tf_img, None))[0]
        tf_result = tf_result.eval(session=tf.compat.v1.Session())
        self.assertTrue(((TestSameTransfoms.img == tf_result).all() or (np.flipud(TestSameTransfoms.img) == tf_result).all()))
class _SimpleSegmentationModel(nn.Module):
    """Backbone -> boundary learner -> classifier, with upsampled outputs."""

    def __init__(self, backbone, classifier, im_num, ex_num):
        super(_SimpleSegmentationModel, self).__init__()
        self.backbone = backbone
        self.classifier = classifier
        self.bat_low = _bound_learner(hidden_features=128, im_num=im_num, ex_num=ex_num)

    def forward(self, x):
        # Remember input spatial size so predictions can be upsampled back.
        spatial_size = x.shape[-2:]
        feats = self.backbone(x)
        feats, point_pre1, point_pre2, point_pre3 = self.bat_low(feats)
        logits = self.classifier(feats)
        if self.training:
            # Training mode: classifier output is iterable; upsample each map.
            logits = [
                F.interpolate(item, size=spatial_size, mode='bilinear', align_corners=False)
                for item in logits
            ]
        else:
            logits = F.interpolate(logits, size=spatial_size, mode='bilinear', align_corners=False)
        return logits, point_pre1, point_pre2, point_pre3
def tokenize_for_mer(text):
    """Tokenize mixed Chinese/English/numeric text for mixed-error-rate scoring.

    Splits `text` into CJK characters, digit runs, and Latin words, then
    spells out pure Arabic-numeral tokens in English words via `inflect`.
    Returns the list of tokens.
    """
    reg_range = "[\\u4e00-\\ufaff]|[0-9]+|[a-zA-Z]+\\'*[a-z]*"
    matches = re.findall(reg_range, text, re.UNICODE)
    p = inflect.engine()
    res = []
    for item in matches:
        try:
            # Only convert tokens that are numeric AND contain no Han chars
            # (str.isnumeric is true for CJK numerals too).
            temp = (p.number_to_words(item) if (item.isnumeric() and (len(regex.findall('\\p{Han}+', item)) == 0)) else item)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; keep the raw token on failure.
            temp = item
        res.append(temp)
    return res
def main():
    """Evaluate SSD out-of-distribution detection for a trained checkpoint.

    Loads a SimCLR/SupCon/SupCE ResNet checkpoint, extracts features for the
    in-distribution train/test sets and for each other dataset as OOD, and
    logs FPR95/AUROC/AUPR per OOD dataset.
    """
    parser = argparse.ArgumentParser(description='SSD evaluation')
    parser.add_argument('--exp-name', type=str, default='temp_eval_ssd')
    parser.add_argument('--training-mode', type=str, choices=('SimCLR', 'SupCon', 'SupCE'))
    parser.add_argument('--results-dir', type=str, default='./eval_results')
    parser.add_argument('--arch', type=str, default='resnet50')
    parser.add_argument('--classes', type=int, default=10)
    parser.add_argument('--clusters', type=int, default=1)
    parser.add_argument('--dataset', type=str, default='cifar10')
    parser.add_argument('--data-dir', type=str, default='/data/data_vvikash/fall20/SSD/datasets/')
    parser.add_argument('--data-mode', type=str, choices=('org', 'base', 'ssl'), default='base')
    parser.add_argument('--normalize', action='store_true', default=False)
    parser.add_argument('--batch-size', type=int, default=256)
    parser.add_argument('--size', type=int, default=32)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--ckpt', type=str, help='checkpoint path')
    parser.add_argument('--seed', type=int, default=12345)
    args = parser.parse_args()
    device = 'cuda:0'
    # BUG FIX: assert message typo ('checkpint' -> 'checkpoint').
    assert args.ckpt, 'Must provide a checkpoint for evaluation'
    # BUG FIX: check-then-mkdir was racy; makedirs(exist_ok=True) is atomic
    # w.r.t. concurrent runs and also creates missing parents.
    os.makedirs(args.results_dir, exist_ok=True)
    results_file = os.path.join(args.results_dir, (args.exp_name + '_ssd.txt'))
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler(results_file, 'a'))
    logger.info(args)
    # Seed CPU and all CUDA devices for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    if (args.training_mode in ['SimCLR', 'SupCon']):
        model = SSLResNet(arch=args.arch).eval()
    elif (args.training_mode == 'SupCE'):
        model = SupResNet(arch=args.arch, num_classes=args.classes).eval()
    else:
        raise ValueError('Provide model class')
    model.encoder = nn.DataParallel(model.encoder).to(device)
    # Checkpoints may nest the weights under 'model' and/or 'state_dict'.
    ckpt_dict = torch.load(args.ckpt, map_location='cpu')
    if ('model' in ckpt_dict.keys()):
        ckpt_dict = ckpt_dict['model']
    if ('state_dict' in ckpt_dict.keys()):
        ckpt_dict = ckpt_dict['state_dict']
    model.load_state_dict(ckpt_dict)
    (train_loader, test_loader, norm_layer) = data.__dict__[args.dataset](args.data_dir, args.batch_size, mode=args.data_mode, normalize=args.normalize, size=args.size)
    (features_train, labels_train) = get_features(model.encoder, train_loader)
    (features_test, _) = get_features(model.encoder, test_loader)
    print('In-distribution features shape: ', features_train.shape, features_test.shape)
    # Every dataset except the in-distribution one serves as OOD.
    ds = ['cifar10', 'cifar100', 'svhn', 'texture', 'blobs']
    ds.remove(args.dataset)
    for d in ds:
        (_, ood_loader, _) = data.__dict__[d](args.data_dir, args.batch_size, mode='base', normalize=args.normalize, norm_layer=norm_layer, size=args.size)
        (features_ood, _) = get_features(model.encoder, ood_loader)
        print('Out-of-distribution features shape: ', features_ood.shape)
        (fpr95, auroc, aupr) = get_eval_results(np.copy(features_train), np.copy(features_test), np.copy(features_ood), np.copy(labels_train), args)
        logger.info(f'In-data = {args.dataset}, OOD = {d}, Clusters = {args.clusters}, FPR95 = {fpr95}, AUROC = {auroc}, AUPR = {aupr}')
def write_version_py(filename='cuhnsw/version.py'):
    """Generate the package version module with version and git revision."""
    cnt = "\nshort_version = '%(version)s'\ngit_revision = '%(git_revision)s'\n"
    revision = git_version()
    content = cnt % {'version': VERSION, 'git_revision': revision}
    with open(filename, 'w') as handle:
        handle.write(content)
_cache()
def create_local_process_group(num_workers_per_machine: int) -> None:
    """Create a per-machine process group and store this rank's group.

    Partitions the global ranks into contiguous machine-sized chunks and
    creates one process group per machine; the group containing the calling
    rank is kept in the module-level ``_LOCAL_PROCESS_GROUP``.

    Args:
        num_workers_per_machine: number of ranks on each machine; must divide
            the world size evenly.
    """
    global _LOCAL_PROCESS_GROUP
    # Must only be called once per process.
    assert (_LOCAL_PROCESS_GROUP is None)
    assert ((get_world_size() % num_workers_per_machine) == 0)
    num_machines = (get_world_size() // num_workers_per_machine)
    machine_rank = (get_rank() // num_workers_per_machine)
    # dist.new_group is a collective: every rank must call it for every
    # group, so all ranks loop over all machines and keep only their own.
    for i in range(num_machines):
        ranks_on_i = list(range((i * num_workers_per_machine), ((i + 1) * num_workers_per_machine)))
        pg = dist.new_group(ranks_on_i)
        if (i == machine_rank):
            _LOCAL_PROCESS_GROUP = pg
def _compile_to_stage_mod(model_pipe, pipe_group, num_stages, device, chunks, pipe_schedule='1F1B', example_inputs=None, checkpoint=True, data_ranks=None, traced_forward_keys=None, amp_config=None, args_chunk_spec=None, kwargs_chunk_spec=None, output_chunk_spec=None, compiler_configs=None):
    """Compile a traced pipeline module into this rank's pipeline stage(s).

    Splits the example inputs into micro-batch chunks, propagates fake
    tensors through the split graph, then builds one schedule-specific stage
    per stage index owned by this pipe rank (interleaved assignment), wrapped
    in a ``StageInterleaver`` inside a ``SafeStage``.

    Returns:
        SafeStage wrapping the interleaved pipeline stages for this rank.
    """
    # BUG FIX: the default was a shared mutable `dict()` evaluated once at
    # definition time; use None + a fresh dict per call instead.
    if compiler_configs is None:
        compiler_configs = {}
    (complete_args, complete_kwargs) = prepare_args_kwargs(example_inputs, traced_forward_keys)
    (complete_args, complete_kwargs) = split_args_kwargs_into_chunks(complete_args, complete_kwargs, chunks, args_chunk_spec, kwargs_chunk_spec)
    # Only the first micro-batch chunk is needed for shape propagation.
    (complete_args, complete_kwargs) = (complete_args[0], complete_kwargs[0])
    propagate_fake_split_gm(list(model_pipe.split_gm.graph.nodes), complete_args, complete_kwargs, compiler_configs=compiler_configs)
    pipe_rank = parallel_rank('pipe')
    pipe_size = parallel_group_size('pipe')
    # Interleaved stage assignment: rank r owns stages r, r+P, r+2P, ...
    stage_index = list(range(pipe_rank, num_stages, pipe_size))
    # Fall back to 1F1B for unknown schedule names.
    if (pipe_schedule not in stage_schedules):
        pipe_schedule = '1F1B'
    def create_pipeline_stage(s_idx):
        # Build one stage for stage index `s_idx` with the chosen schedule.
        pipe_stage = stage_schedules[pipe_schedule](pipe=model_pipe, stage_index=s_idx, nstages=num_stages, chunks=chunks, device=device, checkpoint=checkpoint, group=pipe_group, args_chunk_spec=args_chunk_spec, kwargs_chunk_spec=kwargs_chunk_spec, output_chunk_spec=output_chunk_spec, forward_keys=traced_forward_keys)
        materialize_modules_to_device(pipe_stage.submod, device)
        pipe_stage.submod.to(device)
        if (amp_config is not None):
            # Wrap forward in autocast for mixed-precision execution.
            pipe_stage.submod.forward = autocast(**amp_config)(pipe_stage.submod.forward)
        if (data_ranks is not None):
            # NOTE(review): `dp_pg_cb` is resolved from enclosing scope, not a
            # parameter — confirm it is set before data parallelism is used.
            pipe_stage.submod = DistributedDataParallel(pipe_stage.submod, process_group=dp_pg_cb(pipe_rank))
        return pipe_stage
    pipe_stages = map_aggregate(stage_index, create_pipeline_stage)
    stage_interleaver = StageInterleaver(stages=pipe_stages)
    return SafeStage(stage_interleaver, device=device, amp_config=amp_config)
class AverageMeter(object):
    """Tracks running sums and counts of values keyed by an identifier."""

    def __init__(self):
        # id -> [running_sum, count]
        self.book = dict()

    def reset_all(self):
        """Drop every tracked series."""
        self.book.clear()

    def reset(self, id):
        """Zero out one series if it exists; unknown ids are ignored."""
        entry = self.book.get(id)
        if entry is not None:
            entry[0] = 0
            entry[1] = 0

    def update(self, id, val):
        """Fold a new observation into the series for `id`."""
        entry = self.book.get(id)
        if entry is None:
            self.book[id] = [val, 1]
        else:
            entry[0] += val
            entry[1] += 1

    def get_results(self, id):
        """Return the mean of observations recorded for `id` since reset."""
        entry = self.book.get(id)
        assert entry is not None
        return entry[0] / entry[1]
def test_swin_block():
    """Smoke-test SwinBlock attribute wiring and output shapes."""
    blk = SwinBlock(embed_dims=32, num_heads=4, feedforward_channels=128)
    assert blk.ffn.embed_dims == 32
    assert blk.attn.w_msa.num_heads == 4
    assert blk.ffn.feedforward_channels == 128
    feats = torch.randn(1, 56 * 56, 32)
    out = blk(feats, (56, 56))
    assert out.shape == torch.Size([1, 56 * 56, 32])
    # Same shape check with activation checkpointing enabled.
    blk = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
    assert blk.with_cp
    feats = torch.randn(1, 56 * 56, 64)
    out = blk(feats, (56, 56))
    assert out.shape == torch.Size([1, 56 * 56, 64])
class DistributionParams(Generic[T], nn.Module):
    """Abstract parameter container for a distribution of type ``T``.

    Subclasses hold learnable parameters and build the distribution on
    demand via :meth:`get_distribution`.
    """

    def __init__(self, batch_shape: Size = torch.Size()):
        super().__init__()
        # Normalize to torch.Size so plain tuples are accepted too.
        self.batch_shape = torch.Size(batch_shape)

    def get_distribution(self) -> T:
        """Build and return the parameterized distribution (abstract)."""
        raise NotImplementedError

    def from_distribution(dist: T) -> 'DistributionParams[T]':
        """Construct params from an existing distribution (abstract)."""
        raise NotImplementedError

    def __call__(self, *input, **kwargs) -> Any:
        # Delegate to nn.Module.__call__ so hooks keep running.
        return super().__call__(*input, **kwargs)
class ResNetV1(nn.Module):
    """ResNet backbone for segmentation with configurable output stride.

    Strides/dilations of the last two stages are chosen from
    ``cfg.MODEL.OUTPUT_STRIDE`` (32/16/8); ``cfg.MODEL.BACKBONE_SCALE``
    scales channel widths. ``forward`` returns the four stage feature maps
    (the ``avgpool``/``fc`` classification head exists but is not used by
    ``forward``).
    """

    def __init__(self, block, layers, num_classes=1000, deep_stem=False, zero_init_residual=False, norm_layer=nn.BatchNorm2d):
        # Reads global config before calling super().__init__; plain-value
        # attribute assignment on an uninitialized nn.Module is safe here.
        output_stride = cfg.MODEL.OUTPUT_STRIDE
        scale = cfg.MODEL.BACKBONE_SCALE
        # Map output stride to (dilations, strides) for layer3/layer4.
        if (output_stride == 32):
            dilations = [1, 1]
            strides = [2, 2]
        elif (output_stride == 16):
            dilations = [1, 2]
            strides = [2, 1]
        elif (output_stride == 8):
            dilations = [2, 4]
            strides = [1, 1]
        else:
            raise NotImplementedError
        self.inplanes = int(((128 if deep_stem else 64) * scale))
        super(ResNetV1, self).__init__()
        if deep_stem:
            # Three stacked 3x3 convs replace the single 7x7 stem conv.
            mid_channel = int((64 * scale))
            self.conv1 = nn.Sequential(nn.Conv2d(3, mid_channel, 3, 2, 1, bias=False), norm_layer(mid_channel), nn.ReLU(True), nn.Conv2d(mid_channel, mid_channel, 3, 1, 1, bias=False), norm_layer(mid_channel), nn.ReLU(True), nn.Conv2d(mid_channel, self.inplanes, 3, 1, 1, bias=False))
        else:
            self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(True)
        self.maxpool = nn.MaxPool2d(3, 2, 1)
        self.layer1 = self._make_layer(block, int((64 * scale)), layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, int((128 * scale)), layers[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, int((256 * scale)), layers[2], stride=strides[0], dilation=dilations[0], norm_layer=norm_layer)
        # layer4 optionally uses DANet-style multi-grid dilation rates.
        self.layer4 = self._make_layer(block, int((512 * scale)), layers[3], stride=strides[1], dilation=dilations[1], norm_layer=norm_layer, multi_grid=cfg.MODEL.DANET.MULTI_GRID, multi_dilation=cfg.MODEL.DANET.MULTI_DILATION)
        self.last_inp_channels = int(((512 * block.expansion) * scale))
        # Classification head; not used by forward() below.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(int(((512 * block.expansion) * scale)), num_classes)
        # Standard He init for convs, unit-gain for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero the last BN of each residual block so blocks start as identity.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, BottleneckV1b):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlockV1b):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d, multi_grid=False, multi_dilation=None):
        """Build one residual stage of `blocks` blocks.

        A 1x1 downsample projection is added when the stride or channel
        count changes. With `multi_grid`, per-block dilation rates cycle
        through `multi_dilation`; otherwise dilation 4 is halved for the
        first block (previous_dilation keeps the true rate).
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), 1, stride, bias=False), norm_layer((planes * block.expansion)))
        layers = []
        if (not multi_grid):
            if (dilation in (1, 2)):
                layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
            elif (dilation == 4):
                layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
            else:
                raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
        else:
            layers.append(block(self.inplanes, planes, stride, dilation=multi_dilation[0], downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        self.inplanes = (planes * block.expansion)
        if multi_grid:
            div = len(multi_dilation)
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes, dilation=multi_dilation[(i % div)], previous_dilation=dilation, norm_layer=norm_layer))
        else:
            for _ in range(1, blocks):
                layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the four stage feature maps (c1, c2, c3, c4)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        c1 = self.layer1(x)
        c2 = self.layer2(c1)
        c3 = self.layer3(c2)
        c4 = self.layer4(c3)
        return (c1, c2, c3, c4)
_register()
def Maxout(x, num_unit):
    """Maxout nonlinearity: max over groups of `num_unit` channels.

    Accepts NHWC (rank 4) or NC (rank 2) tensors whose last dimension is
    divisible by `num_unit`; returns a tensor whose last dimension is
    `ch // num_unit`.
    """
    input_shape = x.get_shape().as_list()
    ndim = len(input_shape)
    assert ((ndim == 4) or (ndim == 2))
    ch = input_shape[(- 1)]
    assert ((ch is not None) and ((ch % num_unit) == 0))
    # BUG FIX: `/` is true division in Python 3 and yields a float shape
    # dimension, which tf.reshape rejects; use integer division.
    if (ndim == 4):
        x = tf.reshape(x, [(- 1), input_shape[1], input_shape[2], (ch // num_unit), num_unit])
    else:
        x = tf.reshape(x, [(- 1), (ch // num_unit), num_unit])
    return tf.reduce_max(x, ndim, name='output')
def tuple_to_seq_BIOES(tuples, id_to_tag):
    """Convert (start, end, ..., tag_id) spans into a BIOES label sequence.

    `tuples` holds spans whose first two items are inclusive start/end
    positions and whose last item is a tag id; `id_to_tag` maps tag ids to
    tag names. Positions covered by no span stay None.
    """
    sentlen = max(span[1] for span in tuples) + 1
    seq = [None] * sentlen
    for span in tuples:
        start, end = span[0], span[1]
        tag = id_to_tag[span[-1]]
        if tag == 'O':
            # Outside spans simply mark every covered position.
            for pos in range(start, end + 1):
                seq[pos] = 'O'
        elif end - start == 0:
            # Single-token entity.
            seq[start] = 'S-' + tag
        elif end - start >= 1:
            # Multi-token entity: Begin, interior I's, End.
            seq[start] = 'B-' + tag
            seq[end] = 'E-' + tag
            for pos in range(start + 1, end):
                seq[pos] = 'I-' + tag
    return seq
_cache()
def _get_cpu_extra_compile_args():
base_args = ['-fopenmp', '-ffast-math']
if (sys.platform == 'darwin'):
return (['-Xpreprocessor'] + base_args)
else:
return base_args |
def train(args, model, train_dataloader, test_dataloader, optimizer, epoch_idx=0.0):
    """Run one epoch of pseudo-label self-training and return the mean loss.

    First refreshes the pseudo-label bank and multi-centroid features from
    ``test_dataloader`` (no grad), then optimizes entropy / diversity /
    pseudo-label / dynamic-centroid losses over ``train_dataloader``.
    NOTE(review): loss weighting switches once epoch_idx reaches 1.0 —
    earlier epochs use only diversity + entropy as warm-up.
    """
    loss_stack = []
    # Global iteration counters driving the lr scheduler.
    iter_idx = (epoch_idx * len(train_dataloader))
    iter_max = (args.epochs * len(train_dataloader))
    with torch.no_grad():
        model.eval()
        print('update psd label bank!')
        # Recompute centroids and pseudo labels for the full dataset.
        (glob_multi_feat_cent, all_psd_label) = init_multi_cent_psd_label(args, model, test_dataloader)
        model.train()
    for (imgs_train, imgs_test, imgs_label, imgs_idx) in tqdm(train_dataloader):
        iter_idx += 1
        imgs_train = imgs_train.cuda()
        imgs_idx = imgs_idx.cuda()
        psd_label = all_psd_label[imgs_idx]
        (embed_feat, pred_cls) = model(imgs_train)
        # Pseudo labels arrive as class indices; one-hot them when shapes differ.
        if (pred_cls.shape != psd_label.shape):
            psd_label = torch.zeros_like(pred_cls).scatter(1, psd_label.unsqueeze(1), 1)
        mean_pred_cls = torch.mean(pred_cls, dim=0, keepdim=True)
        # Diversity regularizer: entropy of the batch-mean prediction.
        reg_loss = (- torch.sum((torch.log(mean_pred_cls) * mean_pred_cls)))
        # Per-sample prediction entropy.
        ent_loss = (- torch.sum((torch.log(pred_cls) * pred_cls), dim=1).mean())
        # Cross-entropy against the (one-hot) pseudo labels.
        psd_loss = (- torch.sum((torch.log(pred_cls) * psd_label), dim=1).mean())
        if (epoch_idx >= 1.0):
            loss = (ent_loss + (2.0 * psd_loss))
        else:
            # Warm-up: encourage diversity (maximize reg) + low entropy.
            loss = ((- reg_loss) + ent_loss)
        # Cosine similarity of L2-normalized embeddings to each centroid.
        normed_emd_feat = (embed_feat / torch.norm(embed_feat, p=2, dim=1, keepdim=True))
        dym_feat_simi = torch.einsum('cmd, nd -> ncm', glob_multi_feat_cent, normed_emd_feat)
        (dym_feat_simi, _) = torch.max(dym_feat_simi, dim=2)
        dym_label = torch.softmax(dym_feat_simi, dim=1)
        # Symmetric cross-entropy between predictions and dynamic soft labels.
        dym_psd_loss = ((- torch.sum((torch.log(pred_cls) * dym_label), dim=1).mean()) - torch.sum((torch.log(dym_label) * pred_cls), dim=1).mean())
        if (epoch_idx >= 1.0):
            loss += (0.5 * dym_psd_loss)
        lr_scheduler(optimizer, iter_idx, iter_max)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            loss_stack.append(loss.cpu().item())
            # EMA-update the global centroids with fresh batch features.
            glob_multi_feat_cent = EMA_update_multi_feat_cent_with_feat_simi(args, glob_multi_feat_cent, embed_feat, decay=0.9999)
    train_loss = np.mean(loss_stack)
    return train_loss
def test_imports():
    """Smoke-test program slicing across cells that use an imported module."""
    for cell in ('import numpy as np', 'arr = np.zeros((5,))', 'logging.info(arr * 3)'):
        run_cell(cell)
    deps = set(compute_unparsed_slice(3).keys())
    assert (deps == {1, 2, 3}), ('got %s' % deps)
    slice_size = num_stmts_in_slice(3)
    assert (slice_size == 3), ('got %d' % slice_size)
class MSRVTT_Caption_DataLoader(Dataset):
    """MSRVTT dataset for video captioning.

    Each sample pairs BERT-style text tensors (token ids / mask / segment
    ids plus MLM-masked variants) with per-frame video features and a
    caption decoder stream. 15% of tokens/frames are randomly masked,
    following the BERT MLM recipe.

    Fixes vs. the original:
      * np.long / np.float were removed in NumPy 1.24 — replaced with the
        concrete dtypes np.int64 / np.float64 they aliased.
      * _get_single_text read the nonexistent attribute self.sentences;
        it now samples from self.video_sentences_dict.
      * split membership tests use a set instead of an O(n) list scan.
    """

    def __init__(self, csv_path, json_path, features_path, tokenizer, max_words=30, feature_framerate=1.0, max_frames=100, split_type=''):
        """Load metadata, video features, and build the sample index.

        Args:
            csv_path: CSV with a 'video_id' column.
            json_path: MSRVTT JSON with 'videos' and 'sentences' entries.
            features_path: pickled {video_id: (frames, dim) feature array}.
            tokenizer: BERT-style tokenizer with .vocab / .tokenize /
                .convert_tokens_to_ids.
            split_type: one of 'train', 'val', 'test'.
        """
        self.csv = pd.read_csv(csv_path)
        self.data = json.load(open(json_path, 'r'))
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Per-frame feature dimensionality, inferred from the first video.
        self.feature_size = self.feature_dict[self.csv['video_id'].values[0]].shape[(- 1)]
        assert (split_type in ['train', 'val', 'test'])
        # Standard MSRVTT split: 6513 train / 497 val / remainder test.
        video_ids = [self.data['videos'][idx]['video_id'] for idx in range(len(self.data['videos']))]
        split_dict = {'train': video_ids[:6513], 'val': video_ids[6513:(6513 + 497)], 'test': video_ids[(6513 + 497):]}
        choiced_video_ids = split_dict[split_type]
        choiced_set = set(choiced_video_ids)  # O(1) membership tests
        self.sample_len = 0
        self.sentences_dict = {}
        self.video_sentences_dict = defaultdict(list)
        if (split_type == 'train'):
            # Training: one sample per (video, caption) pair.
            for itm in self.data['sentences']:
                if (itm['video_id'] in choiced_set):
                    self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
                    self.video_sentences_dict[itm['video_id']].append(itm['caption'])
        elif ((split_type == 'val') or (split_type == 'test')):
            # Eval: one sample per video, keyed to its first caption.
            for itm in self.data['sentences']:
                if (itm['video_id'] in choiced_set):
                    self.video_sentences_dict[itm['video_id']].append(itm['caption'])
            for vid in choiced_video_ids:
                self.sentences_dict[len(self.sentences_dict)] = (vid, self.video_sentences_dict[vid][0])
        else:
            raise NotImplementedError
        self.sample_len = len(self.sentences_dict)

    def __len__(self):
        """Number of (video, caption) samples in this split."""
        return self.sample_len

    def _get_text(self, video_id, caption=None):
        """Build all text-side tensors for one video/caption pair.

        Returns (text ids, mask, segment ids, masked ids, MLM labels,
        decoder input ids, decoder mask, decoder output ids, video id list),
        each shaped (1, max_words).
        """
        k = 1
        choice_video_ids = [video_id]
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)
        for (i, video_id) in enumerate(choice_video_ids):
            # NOTE(review): the encoder stream is intentionally caption-free
            # here — `words` starts empty and holds only [CLS]/[SEP].
            words = []
            words = (['[CLS]'] + words)
            total_length_with_CLS = (self.max_words - 1)
            if (len(words) > total_length_with_CLS):
                words = words[:total_length_with_CLS]
            words = (words + ['[SEP]'])
            token_labels = []
            masked_tokens = words.copy()
            for (token_id, token) in enumerate(masked_tokens):
                # Never mask [CLS]/[SEP]; label -1 means "not predicted".
                if ((token_id == 0) or (token_id == (len(masked_tokens) - 1))):
                    token_labels.append((- 1))
                    continue
                prob = random.random()
                if (prob < 0.15):
                    # BERT MLM: 80% [MASK], 10% random token, 10% unchanged.
                    prob /= 0.15
                    if (prob < 0.8):
                        masked_tokens[token_id] = '[MASK]'
                    elif (prob < 0.9):
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        token_labels.append(self.tokenizer.vocab['[UNK]'])
                else:
                    token_labels.append((- 1))
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = ([1] * len(input_ids))
            segment_ids = ([0] * len(input_ids))
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Zero-pad every stream out to max_words.
            while (len(input_ids) < self.max_words):
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append((- 1))
            assert (len(input_ids) == self.max_words)
            assert (len(input_mask) == self.max_words)
            assert (len(segment_ids) == self.max_words)
            assert (len(masked_token_ids) == self.max_words)
            assert (len(token_labels) == self.max_words)
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
            # Decoder stream: teacher-forced input starts with [CLS], the
            # target is shifted by one and ends with [SEP].
            if (caption is not None):
                caption_words = self.tokenizer.tokenize(caption)
            else:
                caption_words = self._get_single_text(video_id)
            if (len(caption_words) > total_length_with_CLS):
                caption_words = caption_words[:total_length_with_CLS]
            input_caption_words = (['[CLS]'] + caption_words)
            output_caption_words = (caption_words + ['[SEP]'])
            input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
            output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
            decoder_mask = ([1] * len(input_caption_ids))
            while (len(input_caption_ids) < self.max_words):
                input_caption_ids.append(0)
                output_caption_ids.append(0)
                decoder_mask.append(0)
            assert (len(input_caption_ids) == self.max_words)
            assert (len(output_caption_ids) == self.max_words)
            assert (len(decoder_mask) == self.max_words)
            pairs_input_caption_ids[i] = np.array(input_caption_ids)
            pairs_output_caption_ids[i] = np.array(output_caption_ids)
            pairs_decoder_mask[i] = np.array(decoder_mask)
        return (pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, choice_video_ids)

    def _get_single_text(self, video_id):
        """Sample one ground-truth caption for `video_id` and tokenize it.

        BUG FIX: the original indexed the never-assigned `self.sentences`;
        the captions live in `self.video_sentences_dict`.
        """
        captions = self.video_sentences_dict[video_id]
        rind = random.randint(0, (len(captions) - 1))
        return self.tokenizer.tokenize(captions[rind])

    def _get_video(self, choice_video_ids):
        """Load, truncate, pad, and randomly mask per-frame video features.

        Returns (video, video_mask, masked_video, video_labels_index) where
        labels hold the masked frame index or -1 for unmasked/padded frames.
        """
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = ([0] * len(choice_video_ids))
        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float64)
        for (i, video_id) in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if (self.max_frames < video_slice.shape[0]):
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = (max_video_length[i] if (max_video_length[i] > slice_shape[0]) else slice_shape[0])
            if (len(video_slice) < 1):
                print('video_id: {}'.format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice
        for (i, v_length) in enumerate(max_video_length):
            video_mask[i][:v_length] = ([1] * v_length)
        video_labels_index = [[] for _ in range(len(choice_video_ids))]
        masked_video = video.copy()
        for (i, video_pair_) in enumerate(masked_video):
            for (j, _) in enumerate(video_pair_):
                if (j < max_video_length[i]):
                    prob = random.random()
                    if (prob < 0.15):
                        # Zero out the frame and remember its index as label.
                        masked_video[i][j] = ([0.0] * video.shape[(- 1)])
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append((- 1))
                else:
                    video_labels_index[i].append((- 1))
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        return (video, video_mask, masked_video, video_labels_index)

    def __getitem__(self, idx):
        """Return the full text + video tensor tuple for sample `idx`."""
        (video_id, caption) = self.sentences_dict[idx]
        (pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, choice_video_ids) = self._get_text(video_id, caption)
        (video, video_mask, masked_video, video_labels_index) = self._get_video(choice_video_ids)
        return (pairs_text, pairs_mask, pairs_segment, video, video_mask, pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids)
def max_prim(this, contrs, this_vals=False, contr_vals=False):
    """For each NLP primitive of `this`, find the most similar primitive
    among all contrast sets.

    Returns:
        (mean of the best similarities, list of best-match texts). Entries
        are None when no contrast primitive has positive similarity.
    """
    this_nlp = get_nlps(this, (lambda x: x), vals=this_vals)
    contrs_nlp = [get_nlps(cs, (lambda x: x), vals=contr_vals) for cs in contrs]
    contrs_nlp = [item for sublist in contrs_nlp for item in sublist]
    this_nlp = filter_oov(this_nlp)
    contrs_nlp = filter_oov(contrs_nlp)
    max_prims = []
    max_prim_sims = []
    for this_prim in this_nlp:
        max_tp = None
        max_tps = 0.0
        for contr_prim in contrs_nlp:
            sim = this_prim.similarity(contr_prim)
            if (sim > max_tps):
                max_tps = sim
                max_tp = contr_prim
        max_prims.append(max_tp)
        max_prim_sims.append(max_tps)
    # BUG FIX: max_tp stays None when contrs_nlp is empty or no similarity
    # exceeds 0.0; the original crashed on `.text` in that case.
    max_prims = [(mp.text if (mp is not None) else None) for mp in max_prims]
    return (np.mean(max_prim_sims), max_prims)
def log_prior(z, prob_type='gaussian'):
    """Dispatch to the log-prior function matching `prob_type`.

    Args:
        z: latent tensor; for 'softmax' its leading static dimension is
           passed on as the category count.
        prob_type: one of 'gaussian', 'bernoulli', 'bernoulli_sym', 'softmax'.

    Raises:
        ValueError: for an unknown prob_type (the original silently
        returned None, hiding typos until much later).
    """
    if (prob_type == 'gaussian'):
        return log_prior_gaussian(z)
    if (prob_type == 'bernoulli'):
        return log_prior_bernoulli(z)
    if (prob_type == 'bernoulli_sym'):
        return log_prior_bernoulli_sym(z)
    if (prob_type == 'softmax'):
        return log_prior_softmax(z, int(z.get_shape()[0]))
    raise ValueError('unknown prob_type: {}'.format(prob_type))
def deduplicate_filename(retrieve_filename, img_dir):
    """Return a filename that does not collide with any file in `img_dir`.

    Appends '-1', '-2', ... before the extension until the name is unique.

    Args:
        retrieve_filename: desired filename (with extension).
        img_dir: directory whose existing entries must be avoided.
    """
    print('Starting deduplicate')
    # Set gives O(1) membership tests instead of scanning a list per probe.
    existing = set(os.listdir(img_dir))
    (name, ext) = os.path.splitext(retrieve_filename)
    test_filename = retrieve_filename
    i = 1
    while (test_filename in existing):
        test_filename = (((name + '-') + str(i)) + ext)
        i += 1
    print('Done deduplicating filename')
    return test_filename
def resnet50_ibn_a(last_stride, pretrained=False, **kwargs):
    """Build a ResNet50-IBN-a backbone; optionally load ImageNet weights."""
    net = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 4, 6, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
class Config(object):
    """Paths and artifacts for one Cornell Movie-Dialogs preprocessing task."""

    def __init__(self, task):
        # BUG FIX: the original line held an unterminated string literal
        # (the URL was truncated) — a syntax error. NOTE(review): this is
        # the canonical Cornell Movie-Dialogs corpus URL; confirm it is the
        # one the project intended.
        self.download_url = 'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip'
        self.raw_data_dir = '../data/cornellmovie/raw_data'
        self.task = task
        # All derived artifacts live under the per-task data directory.
        self.task_data_dir = f'../data/cornellmovie/{task}'
        self.dataset_path = f'{self.task_data_dir}/dataset.txt'
        self.word_count_path = f'{self.task_data_dir}/word_count.txt'
        self.word_embedding_path = f'{self.task_data_dir}/glove_twitter_200.json'
        self.eval_word_embedding_path = f'{self.task_data_dir}/google_news_300.json'
def test_merge_intermediate_variable():
    """Verify merged config values after intermediate-variable resolution."""
    cfg = Config.fromfile(osp.join(data_path, 'config/i_child.py'))
    assert (cfg.item3 is True)
    expected = {
        'item1': [1, 2],
        'item2': dict(a=0),
        'item4': 'test',
        'item_cfg': dict(b=2),
        'item5': dict(cfg=dict(b=1)),
        'item6': dict(cfg=dict(b=2)),
    }
    for (key, value) in expected.items():
        assert (getattr(cfg, key) == value)
class ResNet(nn.Module):
    """Dilated ResNet backbone for dense prediction (output stride 8 or 16).

    forward() returns the layer4 feature map plus an `end_points` dict with
    the layer1-3 activations for use by decoder heads.
    NOTE(review): `root_beta` swaps the 7x7 stem for three stacked 3x3
    convolutions (deep-stem variant) — confirm the naming with callers.
    """

    def __init__(self, block, layers, output_stride=16, zero_init_residual=True, groups=1, width_per_group=64, norm_layer=nn.BatchNorm2d, bn_mom=0.05, root_beta=True):
        super(ResNet, self).__init__()
        self._norm_layer = norm_layer
        # Deep stem ends at 128 channels; the plain 7x7 stem at 64.
        self.inplanes = (128 if root_beta else 64)
        self.dilation = 1
        self.bn_mom = bn_mom
        assert (output_stride in [8, 16])
        # Stride plan per stage: layer3 keeps stride 1 when output_stride == 8.
        self.strides = [1, 2, (2 if (output_stride == 16) else 1), 1]
        self.dilations = [1, 1, 1, 1]
        self.groups = groups
        self.base_width = width_per_group
        if root_beta:
            self.conv1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False), norm_layer(64, momentum=bn_mom), nn.ReLU(inplace=True), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(64, momentum=bn_mom), nn.ReLU(inplace=True), nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False))
        else:
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes, momentum=self.bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=self.strides[0], dilation=self.dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=self.strides[1], dilation=self.dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=self.strides[2], dilation=self.dilations[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=self.strides[3], dilation=self.dilations[3])
        # Kaiming init for convs, unit/zero affine params for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, norm_layer)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero the last BN of each bottleneck so residual branches start as
        # identity (improves early training).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual blocks; a 1x1 downsample projection is
        added when stride or channel count changes."""
        norm_layer = self._norm_layer
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion), momentum=self.bn_mom))
        layers = []
        # First block of a dilated stage uses a smaller dilation to ease the
        # transition (gridding artifact mitigation).
        dilation_first = (1 if (dilation in [1, 2]) else 2)
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, dilation_first, norm_layer, bn_mom=self.bn_mom))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=dilation, norm_layer=norm_layer, bn_mom=self.bn_mom))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (layer4 output, dict of layer1-3 end points)."""
        end_points = {}
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        end_points['layer1'] = x
        x = self.layer2(x)
        end_points['layer2'] = x
        x = self.layer3(x)
        end_points['layer3'] = x
        x = self.layer4(x)
        return (x, end_points)
class ResNet(nn.Module):
    """Torchvision-style ResNet classifier with a configurable base width.

    `feature_size` scales the channel count of every stage (64 in the
    standard model); `replace_stride_with_dilation` trades stride for
    dilation in the last three stages for dense-prediction use.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, feature_size=64):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = feature_size
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per strided stage (layer2-4).
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, feature_size, layers[0])
        self.layer2 = self._make_layer(block, (feature_size * 2), layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, (feature_size * 4), layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, (feature_size * 8), layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(((feature_size * 8) * block.expansion), num_classes)
        # Kaiming init for convs; unit/zero affine params for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero the last BN of each residual branch so blocks start as
        # identity mappings (per "Bag of Tricks", improves convergence).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks; when `dilate` is set the stage's
        stride is converted into an increased dilation rate."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # Stem -> 4 stages -> global average pool -> linear classifier.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        """Return class logits of shape (N, num_classes)."""
        return self._forward_impl(x)
def _split_channels(num_chan, num_groups):
split = [(num_chan // num_groups) for _ in range(num_groups)]
split[0] += (num_chan - sum(split))
return split |
def dataset(tfrecords_path, read_buffer_size=None, map_parallel_calls=None):
    """Open a compressed TFRecord file and decode each record with `_decode`."""
    records = tf.data.TFRecordDataset(tfrecords_path, compression_type=COMPRESSION_TYPE, buffer_size=read_buffer_size)
    return records.map(_decode, num_parallel_calls=map_parallel_calls)
def add_ray_init_args(parser):
    """Register the `ray.init` pass-through options on `parser` and return it."""
    suffix = ' Passed to `ray.init`.'
    options = [
        ('--cpus', int, 'Cpus to allocate to ray process.'),
        ('--gpus', int, 'Gpus to allocate to ray process.'),
        ('--resources', json.loads, 'Resources to allocate to ray process.'),
        ('--temp-dir', str, 'If provided, it will specify the root temporary directory for the Ray process.'),
    ]
    for (flag, converter, text) in options:
        parser.add_argument(flag, type=converter, default=None, help=(text + suffix))
    return parser
def single_gpu_test(model, data_loader, show=False):
    """Run inference over `data_loader` on one GPU and collect raw results.

    When `show` is True, results are rendered (un-rescaled) via the model's
    show_result hook. A progress bar advances once per image.
    """
    model.eval()
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    results = []
    for (i, data) in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=(not show), **data)
        results.append(result)
        if show:
            model.module.show_result(data, result, dataset.img_norm_cfg)
        for _ in range(data['img'][0].size(0)):
            prog_bar.update()
    return results
def _compute_aspect_ratios_custom_dataset(dataset, indices=None):
if (indices is None):
indices = range(len(dataset))
aspect_ratios = []
for i in indices:
(height, width) = dataset.get_height_and_width(i)
aspect_ratio = (float(width) / float(height))
aspect_ratios.append(aspect_ratio)
return aspect_ratios |
def preprocess_function(examples):
    """Tokenize a batch (single- or sentence-pair task) and remap labels.

    Relies on enclosing-scope names: sentence1_key, sentence2_key,
    tokenizer, padding, max_length, label_to_id.
    """
    if (sentence2_key is None):
        args = (examples[sentence1_key],)
    else:
        args = (examples[sentence1_key], examples[sentence2_key])
    result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
    if ((label_to_id is not None) and ('label' in examples)):
        # -1 marks "no label" and passes through unchanged.
        result['label'] = [(label_to_id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
    return result
class PushGinConfigOperator(bpy.types.Operator):
    """Blender operator that writes the in-editor gin config text to disk."""
    bl_idname = 'scene.zpy_push_gin_config'
    bl_label = 'Push gin config to file.'
    bl_description = 'Push gin config to file.'
    bl_category = 'ZPY'
    bl_options = {'REGISTER'}

    def execute(self, context):
        """Dump the default gin-config text block to the configured path."""
        config_text = bpy.data.texts[LoadGinConfigOperator.DEFAULT_TEXT_NAME].as_string()
        target_path = bpy.path.abspath(context.scene.zpy_gin_config_path)
        with open(target_path, 'w') as out_file:
            out_file.write(config_text)
        return {'FINISHED'}
class VOC2012(Dataset):
    """PASCAL VOC 2012 multi-label classification dataset.

    Downloads the devkit if needed, builds (or reuses) a per-phase CSV of
    image labels, and serves {'image', 'name', 'target'} samples.
    """

    def __init__(self, root, phase, transform=None):
        self.root = os.path.abspath(root)
        self.path_devkit = os.path.join(self.root, 'VOCdevkit')
        self.path_images = os.path.join(self.root, 'VOCdevkit', 'VOC2012', 'JPEGImages')
        self.phase = phase
        self.transform = transform
        download_voc2012(self.root)
        csv_dir = os.path.join(self.root, 'files', 'VOC2012')
        csv_file = os.path.join(csv_dir, (('classification_' + phase) + '.csv'))
        if (not os.path.exists(csv_file)):
            # First run for this phase: generate the label CSV once.
            if (not os.path.exists(csv_dir)):
                os.makedirs(csv_dir)
            labeled_data = read_object_labels(self.root, 'VOC2012', self.phase)
            write_object_labels_csv(csv_file, labeled_data)
        self.classes = object_categories
        self.images = read_object_labels_csv(csv_file)
        print('[dataset] VOC 2012 classification phase={} number of classes={} number of images={}'.format(phase, len(self.classes), len(self.images)))

    def __getitem__(self, index):
        (filename, target) = self.images[index]
        image = Image.open(os.path.join(self.path_images, (filename + '.jpg'))).convert('RGB')
        if (self.transform is not None):
            image = self.transform(image)
        return {'image': image, 'name': filename, 'target': target}

    def __len__(self):
        return len(self.images)

    def get_number_classes(self):
        return len(self.classes)
def resnet44_cifar(**kwargs):
    """ResNet-44 for CIFAR: three stages of 7 BasicBlocks each."""
    return ResNet_Cifar(BasicBlock, [7, 7, 7], **kwargs)
def recall(predictions, gold):
    """Set-based recall of `predictions` against `gold`.

    An empty gold set scores 1.0 when predictions is also empty, else 0.0;
    duplicates in either argument are ignored.
    """
    if (len(gold) == 0):
        return (1.0 if (len(predictions) == 0) else 0.0)
    if (len(predictions) == 0):
        return 0.0
    gold_set = set(gold)
    hits = len(gold_set & set(predictions))
    return (float(hits) / float(len(gold_set)))
def intersection(a, b):
    """Area of overlap between boxes `a` and `b`, each (top, left, bottom, right).

    Returns 0 when the boxes are disjoint.
    """
    h = max((min(a[2], b[2]) - max(a[0], b[0])), 0)
    w = max((min(a[3], b[3]) - max(a[1], b[1])), 0)
    return (h * w)
def main():
    """Entry point: resolve paths, clear the old log, pick a launcher for
    the requested mode, and run the benchmark.

    Raises:
        ValueError: for an unrecognized --mode (the original left
        `launcher` as None and crashed with AttributeError on .launch()).
    """
    print('------')
    args = parse_args()
    script_path = Path(os.path.abspath(os.getcwd()))
    # Project root is four levels above the script directory.
    project_path = script_path.parent.parent.parent.parent.absolute()
    dump_log_path = '{}/{}'.format(script_path, args.output_file)
    if os.path.exists(dump_log_path):
        os.remove(dump_log_path)
    memory_prefix_list = get_memory_settings(project_path, args)
    if (args.mode == 'min_latency'):
        launcher = OneInstanceLauncher()
    elif (args.mode in ('default_latency', 'max_throughput', 'default_throughput')):
        launcher = MultiInstanceLauncher()
    else:
        raise ValueError('unsupported mode: {}'.format(args.mode))
    launcher.launch(args, memory_prefix_list)
class BaseTextControl(wx.stc.StyledTextCtrl):
    """Read-only styled text control with a dark theme and line numbers.

    Configures fonts, key bindings, margins, caret/selection colors, and
    indicators in one place so derived editors share the same look.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.SetEditable(False)
        # NOTE(review): 89/90 are the key codes for 'Y'/'Z' — this unmaps
        # Ctrl+Y and maps Ctrl+Shift+Z to redo; confirm intended bindings.
        self.CmdKeyClear(89, wx.stc.STC_SCMOD_CTRL)
        self.CmdKeyAssign(90, (wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL), wx.stc.STC_CMD_REDO)
        # Monospace base font; the face/size string is reused for style specs.
        self.text_font = wx.Font(14, wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, faceName=u'Monaco')
        self.SetFont(self.text_font)
        self.fonts = 'face:{},size:{}'.format(self.text_font.GetFaceName(), self.text_font.GetPointSize())
        self.IndicatorSetStyle(2, wx.stc.STC_INDIC_PLAIN)
        self.IndicatorSetForeground(0, wx.RED)
        self.SetProperty('styling.within.preprocessor', '1')
        self.SetProperty('fold.comment', '1')
        # NOTE(review): margin 1 is set to STC_MARGIN_NUMBER again below —
        # one of the two calls is redundant.
        self.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
        self.SetWindowStyle((self.GetWindowStyle() | wx.DOUBLE_BORDER))
        # Fall back to the configured font when no preferred face is found.
        fontname = getBestFont()
        if (fontname is None):
            fontname = self.text_font.GetFaceName()
        self.StyleSetSpec(wx.stc.STC_STYLE_DEFAULT, f'size:15,face:{fontname}')
        self.SetWrapMode(wx.stc.STC_WRAP_WORD)
        self.SetMarginLeft(0)
        self.MarkerSetBackgroundSelected(1, wx.Colour())
        self.MarkerEnableHighlight(True)
        self.SetMarginCursor(0, wx.stc.STC_CURSORNORMAL)
        self.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
        self.SetMarginSensitive(0, True)
        self.SetMarginWidth(1, 24)
        # Dark theme: caret-line highlight, selection, and gutter colors.
        self.SetCaretForeground(wx.Colour())
        self.SetCaretLineVisible(True)
        self.SetCaretLineBackground(wx.Colour(3289650))
        self.SetSelBackground(True, wx.Colour(4409162))
        self.StyleSetBackground(wx.stc.STC_STYLE_DEFAULT, BACKGROUND_COLOR)
        self.StyleSetBackground(wx.stc.STC_STYLE_LINENUMBER, '#313335')
        self.StyleSetForeground(wx.stc.STC_STYLE_LINENUMBER, '#606366')
class parameter(Structure):
    """ctypes mirror of liblinear's `struct parameter`, plus command-line
    style option parsing (mirrors liblinear's `train` options).
    """
    _names = ['solver_type', 'eps', 'C', 'nr_weight', 'weight_label', 'weight', 'p', 'init_sol']
    _types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double, POINTER(c_double)]
    _fields_ = genFields(_names, _types)

    def __init__(self, options=None):
        """Initialize from an option string/list (e.g. '-s 0 -c 1')."""
        if (options == None):
            options = ''
        self.parse_options(options)

    def __str__(self):
        """Render every C field and Python-side attribute as 'name: value'."""
        s = ''
        attrs = (parameter._names + list(self.__dict__.keys()))
        values = map((lambda attr: getattr(self, attr)), attrs)
        for (attr, val) in zip(attrs, values):
            s += (' %s: %s\n' % (attr, val))
        s = s.strip()
        return s

    def set_to_default_values(self):
        """Reset all fields to liblinear's defaults.

        eps starts at +inf as a sentinel meaning "not user-specified";
        parse_options replaces it with a solver-dependent default.
        """
        self.solver_type = L2R_L2LOSS_SVC_DUAL
        self.eps = float('inf')
        self.C = 1
        self.p = 0.1
        self.nr_weight = 0
        self.weight_label = None
        self.weight = None
        self.init_sol = None
        self.bias = (- 1)
        self.flag_cross_validation = False
        self.flag_C_specified = False
        self.flag_solver_specified = False
        self.flag_find_C = False
        self.nr_fold = 0
        self.print_func = cast(None, PRINT_STRING_FUN)

    def parse_options(self, options):
        """Parse liblinear 'train'-style options from a str or list.

        Raises TypeError for other argument types and ValueError for
        unknown flags or an invalid -v fold count.
        """
        if isinstance(options, list):
            argv = options
        elif isinstance(options, str):
            argv = options.split()
        else:
            raise TypeError('arg 1 should be a list or a str.')
        self.set_to_default_values()
        self.print_func = cast(None, PRINT_STRING_FUN)
        weight_label = []
        weight = []
        i = 0
        while (i < len(argv)):
            if (argv[i] == '-s'):
                i = (i + 1)
                self.solver_type = int(argv[i])
                self.flag_solver_specified = True
            elif (argv[i] == '-c'):
                i = (i + 1)
                self.C = float(argv[i])
                self.flag_C_specified = True
            elif (argv[i] == '-p'):
                i = (i + 1)
                self.p = float(argv[i])
            elif (argv[i] == '-e'):
                i = (i + 1)
                self.eps = float(argv[i])
            elif (argv[i] == '-B'):
                i = (i + 1)
                self.bias = float(argv[i])
            elif (argv[i] == '-v'):
                i = (i + 1)
                self.flag_cross_validation = 1
                self.nr_fold = int(argv[i])
                if (self.nr_fold < 2):
                    raise ValueError('n-fold cross validation: n must >= 2')
            elif argv[i].startswith('-w'):
                # -wN <value>: per-class weight; N is parsed from the flag.
                i = (i + 1)
                self.nr_weight += 1
                weight_label += [int(argv[(i - 1)][2:])]
                weight += [float(argv[i])]
            elif (argv[i] == '-q'):
                # Quiet mode: route liblinear's output to a null printer.
                self.print_func = PRINT_STRING_FUN(print_null)
            elif (argv[i] == '-C'):
                self.flag_find_C = True
            else:
                raise ValueError('Wrong options')
            i += 1
        liblinear.set_print_string_function(self.print_func)
        # Copy collected class weights into C arrays.
        self.weight_label = (c_int * self.nr_weight)()
        self.weight = (c_double * self.nr_weight)()
        for i in range(self.nr_weight):
            self.weight[i] = weight[i]
            self.weight_label[i] = weight_label[i]
        # -C (parameter search) implies cross-validation and is only valid
        # for the L2R_LR / L2R_L2LOSS_SVC solvers.
        if self.flag_find_C:
            if (not self.flag_cross_validation):
                self.nr_fold = 5
            if (not self.flag_solver_specified):
                self.solver_type = L2R_L2LOSS_SVC
                self.flag_solver_specified = True
            elif (self.solver_type not in [L2R_LR, L2R_L2LOSS_SVC]):
                raise ValueError('Warm-start parameter search only available for -s 0 and -s 2')
        # eps left at the +inf sentinel: pick the solver-specific default.
        if (self.eps == float('inf')):
            if (self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]):
                self.eps = 0.01
            elif (self.solver_type in [L2R_L2LOSS_SVR]):
                self.eps = 0.001
            elif (self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L2R_LR_DUAL]):
                self.eps = 0.1
            elif (self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]):
                self.eps = 0.01
            elif (self.solver_type in [L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]):
                self.eps = 0.1
class TFDataDataset(TFDataset):
    def get_num_partitions(self):
        """Number of RDD partitions: one per core across the cluster."""
        return self.total_core_num
    def _assert_not_batched(dataset):
        """Recursively verify that `dataset` contains no batch operation.

        NOTE(review): implicitly static — always invoked through the class
        as TFDataDataset._assert_not_batched(...).
        """
        from tensorflow.python.data.ops import dataset_ops
        if isinstance(dataset, dataset_ops.DatasetV1Adapter):
            # Unwrap the V1 adapter and check the wrapped dataset.
            TFDataDataset._assert_not_batched(dataset._dataset)
        elif isinstance(dataset, dataset_ops.BatchDataset):
            invalidInputError(False, 'Dataset should not be batched,please use a dataset without the batch operation')
        else:
            # Walk upstream input datasets.
            for dt in dataset._inputs():
                TFDataDataset._assert_not_batched(dt)
def check_rules(dataset, rules, is_training):
from tensorflow.python.data.ops import dataset_ops
if isinstance(dataset, dataset_ops.DatasetV1Adapter):
TFDataDataset.check_rules(dataset._dataset, rules, is_training)
else:
for (rule, message) in rules:
invalidInputError((not rule(dataset, is_training)), message)
else:
for dt in dataset._inputs():
TFDataDataset.check_rules(dt, rules, is_training)
    def __init__(self, tf_data_dataset, batch_size, batch_per_thread, hard_code_batch_size=False, validation_dataset=None, sequential_order=False, shuffle=True, remove_checking=False, batch_outside=False, inter_threads=None, intra_threads=None, auto_shard_files=False):
        """Wrap a tf.data.Dataset for distributed training/evaluation.

        Validates the dataset (no repeat; no batch unless batch_outside),
        rejects py_func-style ops, re-batches per partition, shards both
        train and validation datasets by a placeholder shard index, and
        serializes the graph plus the iterator/initializer op names for the
        JVM side to replay.
        """
        self.auto_shard_files = auto_shard_files
        from tensorflow.python.data.ops import dataset_ops
        import tensorflow as tf
        # When batching happens here (not outside), the input must be unbatched.
        if (not batch_outside):
            rules = [((lambda dataset, is_training: isinstance(dataset, dataset_ops.BatchDataset)), 'Dataset should not be batched, please use a dataset without the batch operation')]
        else:
            rules = []
        rules += [((lambda dataset, is_training: isinstance(dataset, dataset_ops.RepeatDataset)), 'Dataset should not be repeated, please use a dataset without the repeat operation')]
        if (not remove_checking):
            TFDataDataset.check_rules(tf_data_dataset, rules, True)
            if (validation_dataset is not None):
                TFDataDataset.check_rules(validation_dataset, rules, False)
        # Python-callback ops cannot be serialized/replayed on the JVM side.
        py_func_ops = {'PyFunc', 'PyFuncStateless', 'EagerPyFunc'}
        for node in tf.get_default_graph().as_graph_def().node:
            op_type = node.op
            if (op_type in py_func_ops):
                invalidInputError(False, ('tf.py_func, tf.py_function, tf.numpy_function and' + ' Dataset.from_generators are not supported in TFPark'))
        if shuffle:
            from tensorflow.python.keras.engine import training_utils
            training_utils.verify_dataset_shuffled(tf_data_dataset)
        # Derive per-element tensor metadata (dropping the batch dim when
        # the dataset is already batched outside).
        flatten_shapes = nest.flatten(_tf_get_shapes(tf_data_dataset))
        if batch_outside:
            flatten_shapes = [shape[1:] for shape in flatten_shapes]
        flatten_types = nest.flatten(_tf_get_types(tf_data_dataset))
        flatten_tensor_structure = [TensorMeta(dtype=flatten_types[i], shape=list(flatten_shapes[i]), name='zoo_input_{}'.format(i)) for i in range(len(flatten_shapes))]
        structure = _tf_get_types(tf_data_dataset)
        if isinstance(structure, tf.DType):
            structure = (structure,)
        tensor_structure = nest.pack_sequence_as(structure, flatten_tensor_structure)
        super(TFDataDataset, self).__init__(tensor_structure, batch_size, batch_per_thread, hard_code_batch_size)
        self.intra_threads = intra_threads
        self.inter_threads = inter_threads
        if (intra_threads is None):
            self.intra_threads = self.core_num
        if (inter_threads is None):
            self.inter_threads = 1
        # Training path: split the global batch across nodes; otherwise
        # (inference/eval) use batch_per_thread per core.
        if ((self.batch_size > 0) and self.has_batch):
            self._per_partition_batch_size = (self.batch_size // self.node_num)
            self._shard_num = self.node_num
            self.drop_remainder = True
        else:
            self._per_partition_batch_size = self.batch_per_thread
            self._shard_num = self.total_core_num
            if hard_code_batch_size:
                self.drop_remainder = True
                logging.warning('hard_code_batch_size is set to true, so we must drop remainder elements in the dataset to avoid outputting small batches, the dropped elements will not get processed. You can pad your dataset so that the total number of elements is divisible by the total batch size to avoid this.')
            else:
                self.drop_remainder = False
        if self.hard_code_batch_size:
            self.drop_remainder = True
        if (not batch_outside):
            tf_data_dataset = tf_data_dataset.batch(self._per_partition_batch_size, drop_remainder=self.drop_remainder)
        if ((validation_dataset is not None) and (not batch_outside)):
            drop_remainder = self.hard_code_batch_size
            validation_dataset = validation_dataset.batch(self._per_partition_batch_size, drop_remainder=drop_remainder)
        # Shard index is fed at runtime so each worker reads its own slice.
        shard_index = tf.placeholder(dtype=tf.int64, shape=())
        from tensorflow.python.distribute.input_ops import auto_shard_dataset
        if self.auto_shard_files:
            tf_data_dataset = auto_shard_dataset(tf_data_dataset, self._shard_num, shard_index)
        else:
            tf_data_dataset = tf_data_dataset.shard(self._shard_num, shard_index)
        if (validation_dataset is not None):
            if self.auto_shard_files:
                validation_dataset = auto_shard_dataset(validation_dataset, self._shard_num, shard_index)
            else:
                validation_dataset = validation_dataset.shard(self._shard_num, shard_index)
        self.shard_index = shard_index
        self.train_dataset = tf_data_dataset
        self.train_iterator = _tf_make_iterator(self.train_dataset)
        self.train_next_ops = nest.flatten(self.train_iterator.get_next())
        self.output_types = [t.as_datatype_enum for t in nest.flatten(_tf_get_types(self.train_dataset))]
        self.validation_dataset = validation_dataset
        self.validation_iterator = None
        self.validation_next_ops = None
        # Record op names so the serialized graph can be driven externally.
        self._train_init_op_name = self.train_iterator.initializer.name
        self._train_output_names = [op.name for op in self.train_next_ops]
        if (validation_dataset is not None):
            self.validation_iterator = _tf_make_iterator(self.validation_dataset)
            self.validation_next_ops = nest.flatten(self.validation_iterator.get_next())
            self._val_init_op_name = self.validation_iterator.initializer.name
            self._val_output_names = [op.name for op in self.validation_next_ops]
        self.table_init_name = tf.tables_initializer().name
        self.sequential_order = sequential_order
        self.shuffle = shuffle
        # Serialize the graph for the JVM side to deserialize and execute.
        self.graph = self.train_next_ops[0].graph
        self.graph_def = bytearray(self.graph.as_graph_def().SerializeToString())
def _get_prediction_data(self):
    # Prediction over a TFDataDataset is unsupported; always raises via invalidInputError.
    invalidInputError(False, 'TFDataDataset cannot be used for prediction')
def _get_evaluation_data(self):
    """Build a JVM-side RDD of mini-batches for evaluation.

    Re-uses the serialized graph, iterator init op and output tensor names
    captured in __init__; each shard re-creates the iterator on the JVM side.
    """
    jvalue = callZooFunc('float', 'createMiniBatchRDDFromTFDatasetEval', self.graph_def, self._train_init_op_name, self.table_init_name, self._train_output_names, self.output_types, self.shard_index.name)
    rdd = jvalue.value().toJavaRDD()
    return rdd
def _get_training_data(self):
    """Create a FeatureSet backed by the training tf.data graph.

    inter/intra thread counts configure TF session parallelism on the JVM side.
    """
    jvalue = callZooFunc('float', 'createTFDataFeatureSet', self.graph_def, self._train_init_op_name, self.table_init_name, self._train_output_names, self.output_types, self.shard_index.name, self.inter_threads, self.intra_threads)
    return FeatureSet(jvalue=jvalue)
def _get_validation_data(self):
    """Create a FeatureSet for the validation dataset, or None when absent."""
    if (self.validation_dataset is not None):
        jvalue = callZooFunc('float', 'createTFDataFeatureSet', self.graph_def, self._val_init_op_name, self.table_init_name, self._val_output_names, self.output_types, self.shard_index.name, self.inter_threads, self.intra_threads)
        return FeatureSet(jvalue=jvalue)
    return None
class S2Image():
    """Describes one Sentinel-2 scene and where its band JP2 files live locally."""

    def __init__(self, name, yyyymmdd, cloudy_pct, coverage, aws_path, local_path, data_collection):
        self.name = name
        self.yyyymmdd = yyyymmdd
        self.cloudy_pct = cloudy_pct
        self.coverage = coverage
        self.aws_path = aws_path
        self.local_path = local_path
        names_10m = ['B02', 'B03', 'B04', 'B08']
        names_20m = ['B05', 'B06', 'B07', 'B8A', 'B11', 'B12']
        names_60m = ['B01', 'B09', 'B10']
        if data_collection == 'l1c':
            # L1C layout: every band file sits directly under the product root,
            # and no cloud mask is shipped.
            self.bands_10m = [local_path / '{}.jp2'.format(b) for b in names_10m]
            self.bands_20m = [local_path / '{}.jp2'.format(b) for b in names_20m]
            self.bands_60m = [local_path / '{}.jp2'.format(b) for b in names_60m]
            self.cloud_mask = None
        else:
            # Otherwise (L2A layout): bands are grouped by resolution subfolder
            # and a GML cloud mask is available under qi/.
            self.bands_10m = [local_path / 'R10m' / '{}.jp2'.format(b) for b in names_10m]
            self.bands_20m = [local_path / 'R20m' / '{}.jp2'.format(b) for b in names_20m]
            self.bands_60m = [local_path / 'R60m' / '{}.jp2'.format(b) for b in names_60m]
            self.cloud_mask = local_path / 'qi' / 'MSK_CLOUDS_B00.gml'

    def get_date(self):
        """Parse the acquisition date string (YYYYMMDD) into a datetime."""
        return datetime.strptime(self.yyyymmdd, '%Y%m%d')

    def __repr__(self):
        return f'S2Image(tile={self.name}, date={self.yyyymmdd}, clouds={self.cloudy_pct}, coverage={self.coverage})'
def _clean_sexp(sexp):
    """Recursively turn a parsed s-expression into plain strings and nested tuples."""
    if isinstance(sexp, sexpdata.Symbol):
        # Symbols unwrap to their textual value.
        return sexp.value()
    # Anything else is a sequence: clean each child and freeze as a tuple.
    cleaned_children = [_clean_sexp(child) for child in sexp]
    return tuple(cleaned_children)
def syuv_to_rgb(yuv):
    """Map scaled-YUV values back to RGB, undoing the volume-preserving scale."""
    yuv = torch.as_tensor(yuv)
    # Mixing matrix applied along the last axis (rows: Y, U, V contributions);
    # moved to the input's dtype/device via .to(yuv).
    kernel = torch.tensor([[1, 1, 1], [0, (- 0.), 2.], [1., (- 0.), 0]]).to(yuv)
    flat = torch.reshape(yuv, [-1, 3])
    rgb = torch.reshape(torch.matmul(flat, kernel), yuv.shape)
    # Undo the scaling applied by the forward volume-preserving YUV transform.
    return rgb / _VOLUME_PRESERVING_YUV_SCALE
def gather_tensor(tensor, args):
    """All-gather `tensor` from every rank and concatenate along dim 0.

    Requires an initialized torch.distributed process group of size
    `args.world_size`.
    """
    gathered = [tensor.clone() for _ in range(args.world_size)]
    torch.distributed.all_gather(gathered, tensor)
    return torch.cat(gathered, dim=0)
class EvalCOCO(data.Dataset):
    """COCO evaluation dataset: resized, center-cropped images with coarse labels.

    Image ids come from a curated split file; fine COCO category ids are
    remapped to coarse ids through a pickled lookup table.
    """

    def __init__(self, root, split, mode, res=128, transform_list=[], label=True, stuff=True, thing=False):
        # NOTE(review): mutable default `transform_list=[]` is shared across
        # calls; harmless here because it is never mutated.
        self.root = root
        self.split = split
        self.mode = mode  # only 'test' is implemented in _image_transform
        self.res = res  # square output resolution after resize + center crop
        self.imdb = self.load_imdb()
        self.stuff = stuff  # keep stuff classes in labels
        self.thing = thing  # keep thing classes in labels
        self.label = label  # whether __getitem__ also returns labels
        self.view = (- 1)
        self.fine_to_coarse = self._get_fine_to_coarse()
        self.transform_list = transform_list

    def load_imdb(self):
        """Read the curated list of image ids for this split."""
        imdb = os.path.join(self.root, 'curated', '{}2017'.format(self.split), 'Coco164kFull_Stuff_Coarse_7.txt')
        imdb = tuple(open(imdb, 'r'))
        imdb = [id_.rstrip() for id_ in imdb]
        return imdb

    def __getitem__(self, index):
        # Returns (index, image, label) — or (index, image, None) when
        # labels are disabled; see transform_data.
        image_id = self.imdb[index]
        (img, lbl) = self.load_data(image_id)
        return ((index,) + self.transform_data(img, lbl, index))

    def load_data(self, image_id):
        """Open the RGB image and its annotation PNG for `image_id`."""
        N = len(self.imdb)  # NOTE(review): unused local
        image_path = os.path.join(self.root, 'images', '{}2017'.format(self.split), '{}.jpg'.format(image_id))
        label_path = os.path.join(self.root, 'annotations', '{}2017'.format(self.split), '{}.png'.format(image_id))
        image = Image.open(image_path).convert('RGB')
        label = Image.open(label_path)
        return (image, label)

    def transform_data(self, image, label, index, raw_image=False):
        """Resize (short side to res), center-crop to res x res, then transform."""
        image = TF.resize(image, self.res, Image.BILINEAR)
        label = TF.resize(label, self.res, Image.NEAREST)
        (w, h) = image.size
        # Center-crop offsets.
        left = int(round(((w - self.res) / 2.0)))
        top = int(round(((h - self.res) / 2.0)))
        image = TF.crop(image, top, left, self.res, self.res)
        label = TF.crop(label, top, left, self.res, self.res)
        if raw_image:
            return image
        image = self._image_transform(image, self.mode)
        if (not self.label):
            return (image, None)
        label = self._label_transform(label)
        return (image, label)

    def _get_fine_to_coarse(self):
        """Build a vectorized fine->coarse category-id mapping (255 maps to -1)."""
        with open(os.path.join(self.root, FINE_TO_COARSE_PATH), 'rb') as dict_f:
            d = pickle.load(dict_f)
        fine_to_coarse_dict = d['fine_index_to_coarse_index']
        fine_to_coarse_dict[255] = (- 1)
        fine_to_coarse_map = np.vectorize((lambda x: fine_to_coarse_dict[x]))
        return fine_to_coarse_map

    def _label_transform(self, label):
        """Remap fine labels to coarse ids and select the stuff/thing subset."""
        label = np.array(label)
        label = self.fine_to_coarse(label)
        mask = (label >= 255)
        # NOTE(review): 255 was already mapped to -1 by fine_to_coarse, so this
        # mask looks always-empty and the `-= 12` below a no-op — confirm the
        # intended behavior against the original training pipeline.
        if (self.stuff and (not self.thing)):
            label[mask] -= 12
        elif (self.thing and (not self.stuff)):
            # Keep only ids <= 11; everything else becomes the ignore label -1.
            mask = (label > 11)
            label[mask] = (- 1)
        label = torch.LongTensor(label)
        return label

    def _image_transform(self, image, mode):
        if (self.mode == 'test'):
            transform = self._get_data_transformation()
            return transform(image)
        else:
            # Only evaluation-time transforms are supported by this dataset.
            raise NotImplementedError()

    def _get_data_transformation(self):
        """Compose optional augmentations plus ImageNet normalization."""
        trans_list = []
        if ('jitter' in self.transform_list):
            trans_list.append(transforms.RandomApply([transforms.ColorJitter(0.3, 0.3, 0.3, 0.1)], p=0.8))
        if ('grey' in self.transform_list):
            trans_list.append(transforms.RandomGrayscale(p=0.2))
        if ('blur' in self.transform_list):
            trans_list.append(transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5))
        # Always finish with tensor conversion and ImageNet statistics.
        trans_list += [transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
        return transforms.Compose(trans_list)

    def __len__(self):
        return len(self.imdb)
class domainTextIterator():
    """Iterates three parallel domain corpora, yielding shuffled labelled batches.

    Each call to next() reads lines in lock-step from the source-, target- and
    general-domain files, maps words to ids with the target dictionary, and
    emits up to batch_size (sentence, one-hot label) pairs, where the labels
    [1,0,0] / [0,1,0] / [0,0,1] encode the domain of origin.
    """

    def __init__(self, s_domain_data, t_domain_data, g_domain_data, dic, batch=1, maxlen=50, n_words_target=(- 1)):
        self.s_domain_data = fopen(s_domain_data, 'r')
        self.t_domain_data = fopen(t_domain_data, 'r')
        self.g_domain_data = fopen(g_domain_data, 'r')
        # NOTE(review): pickle is loaded from a text-mode handle; this only
        # works on Python 2 (the `next()` method below also suggests Py2).
        with open(dic) as f_trg:
            self.dic_target = pkl.load(f_trg)
        self.batch_size = batch
        # Batches mix examples from the domains, so an even size is required.
        assert ((self.batch_size % 2) == 0)
        self.maxlen = maxlen  # sentences longer than this are skipped
        self.n_words_trg = n_words_target  # ids >= cutoff become 1 (UNK); -1 disables
        self.end_of_data = False

    def __iter__(self):
        return self

    def reset(self):
        """Rewind all three corpus files to the beginning."""
        self.s_domain_data.seek(0)
        self.t_domain_data.seek(0)
        self.g_domain_data.seek(0)

    def next(self):
        """Return the next (sentences, labels) batch (Python-2 iterator protocol)."""
        if self.end_of_data:
            # The previous call hit EOF: rewind once and signal exhaustion.
            self.end_of_data = False
            self.reset()
            raise StopIteration
        x = []
        y = []
        try:
            while True:
                # Source-domain line -> label [1, 0, 0].
                ss = self.s_domain_data.readline()
                if (ss == ''):
                    raise IOError
                ss = ss.strip().split()
                # Map words to ids; unknown words become 1 (UNK).
                ss = [(self.dic_target[w] if (w in self.dic_target) else 1) for w in ss]
                if (self.n_words_trg > 0):
                    ss = [(w if (w < self.n_words_trg) else 1) for w in ss]
                # Target-domain line -> label [0, 1, 0].
                tt = self.t_domain_data.readline()
                if (tt == ''):
                    raise IOError
                tt = tt.strip().split()
                tt = [(self.dic_target[w] if (w in self.dic_target) else 1) for w in tt]
                if (self.n_words_trg > 0):
                    tt = [(w if (w < self.n_words_trg) else 1) for w in tt]
                # General-domain line -> label [0, 0, 1].
                gg = self.g_domain_data.readline()
                if (gg == ''):
                    raise IOError
                gg = gg.strip().split()
                gg = [(self.dic_target[w] if (w in self.dic_target) else 1) for w in gg]
                if (self.n_words_trg > 0):
                    gg = [(w if (w < self.n_words_trg) else 1) for w in gg]
                # Skip the whole triple when any sentence exceeds maxlen.
                if ((len(ss) > self.maxlen) or (len(tt) > self.maxlen) or (len(gg) > self.maxlen)):
                    continue
                x.append(ss)
                y.append([1, 0, 0])
                x.append(tt)
                y.append([0, 1, 0])
                x.append(gg)
                y.append([0, 0, 1])
                if ((len(x) >= self.batch_size) and (len(y) >= self.batch_size)):
                    # Shuffle examples so domains are interleaved within a batch.
                    shuffle_indices = numpy.random.permutation(numpy.arange(len(x)))
                    x_np = numpy.array(x)
                    y_np = numpy.array(y)
                    x_np_shuffled = x_np[shuffle_indices]
                    y_np_shuffled = y_np[shuffle_indices]
                    x_shuffled = x_np_shuffled.tolist()
                    y_shuffled = y_np_shuffled.tolist()
                    break
        except IOError:
            # Any of the files reached EOF mid-batch.
            self.end_of_data = True
        if ((len(x) <= 0) or (len(y) <= 0)):
            # Nothing collected: rewind and stop immediately.
            self.end_of_data = False
            self.reset()
            raise StopIteration
        if (len(x) >= self.batch_size):
            return (x_shuffled[:self.batch_size], y_shuffled[:self.batch_size])
        else:
            # Short final batch at EOF is returned unshuffled.
            return (x, y)
def get_model(point_cloud, is_training, num_classes, bn_decay=None):
    """PointNet-style classifier: per-point conv MLP, global max-pool, FC head.

    Args:
        point_cloud: (batch, num_point, 3) input points — static shape required.
        is_training: bool tensor controlling batch-norm / dropout behavior.
        num_classes: number of output logits.
        bn_decay: batch-norm decay passed through to the tf_util layers.

    Returns:
        (logits, end_points) where end_points is currently left empty.
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    # Treat the cloud as a (batch, num_point, 3, 1) image so conv2d with a
    # [1, 3] kernel acts as a weight-shared per-point MLP.
    input_image = tf.expand_dims(point_cloud, (- 1))
    net = tf_util.conv2d(input_image, 64, [1, 3], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    # Symmetric max-pool over all points gives an order-invariant global feature.
    net = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
    net = tf.reshape(net, [batch_size, (- 1)])
    # Classification head with dropout before the final logits layer.
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc3')
    return (net, end_points)
class SwishMe(nn.Module):
    """Swish activation module backed by the scripted autograd function."""

    def __init__(self, inplace: bool=False):
        # `inplace` is accepted for interface parity with other activation
        # modules but has no effect on the autograd-function implementation.
        super(SwishMe, self).__init__()

    def forward(self, x):
        return SwishJitAutoFn.apply(x)
class VWEvent():
    """One recorded event: kind/params, actions, grid snapshot, camera and position."""

    @staticmethod
    def _as_array(value, default):
        # Normalize None / list / tuple input to a numpy array; anything else
        # must already be an ndarray.
        if value is None:
            value = default
        if isinstance(value, (list, tuple)):
            value = np.array(value)
        assert isinstance(value, np.ndarray)
        return value

    def __init__(self, kind=None, params=None, actions=None, grid=None, camera=None, position=None, step=None, turn=None):
        self.kind = kind
        self.params = params
        actions = [] if actions is None else actions
        assert isinstance(actions, (list, tuple))
        # Defensive copy so callers cannot mutate our state through their list.
        self.actions = list(actions)
        grid = [] if grid is None else grid
        assert isinstance(grid, (list, tuple))
        self.grid = list(grid)
        # Camera defaults to the 2-D origin, position to the 3-D origin.
        self.camera = self._as_array(camera, np.array([0.0, 0.0]))
        self.position = self._as_array(position, np.array([0.0, 0.0, 0.0]))
        self.step = step
        self.turn = turn
class Item():
    """A normalized attribute/value pair; hashable, so usable as a dict/set key."""

    def __init__(self, attribute, value):
        # Non-string inputs are coerced to their repr so both fields are
        # always strings (note: exact `type(...)` check, as in the original).
        self.attribute = attribute if type(attribute) == str else repr(attribute)
        self.value = value if type(value) == str else repr(value)

    def _pair(self):
        # Canonical tuple form used by indexing, hashing and repr.
        return (self.attribute, self.value)

    def __getitem__(self, idx):
        return self._pair()[idx]

    def __hash__(self):
        return hash(self._pair())

    def __eq__(self, other):
        # Equality delegates to the hash of the (attribute, value) tuple.
        return hash(self) == hash(other)

    def __repr__(self):
        return 'Item{{{}}}'.format(self._pair())

    def string(self):
        """Render as 'attribute=value'."""
        return '{}={}'.format(*self)
def customized_export_ply(outfile_name, v, f=None, v_n=None, v_c=None, f_c=None, e=None):
    """Write an ASCII PLY file with optional normals, colors, faces and edges.

    Args:
        outfile_name: output path of the .ply file.
        v: (N_v, 3) vertex positions (numpy array).
        f: optional (N_f, 3) triangle vertex indices.
        v_n: optional (N_v, 3) per-vertex normals; a torch tensor is converted.
        v_c: optional (N_v, 3) or (N_v, 4) per-vertex RGB(A) colors (uchar range);
            RGB input is padded with an opaque alpha channel.
        f_c: optional (N_f, 3) or (N_f, 4) per-face RGB(A) colors (uchar range).
        e: optional (N_e, 2) edge vertex-index pairs.
    """
    v_n_flag = v_n is not None
    v_c_flag = v_c is not None
    f_c_flag = False
    N_v = v.shape[0]
    assert v.shape[1] == 3
    if v_n_flag:
        assert v_n.shape[0] == N_v
        # BUGFIX: the original compared `type(v_n) == 'torch.Tensor'` — a type
        # against a string — which is always False, so torch tensors were
        # never converted.  Duck-type on `.detach` to avoid requiring torch.
        if hasattr(v_n, 'detach'):
            v_n = v_n.detach().cpu().numpy()
    if v_c_flag:
        assert v_c.shape[0] == N_v
        if v_c.shape[1] == 3:
            # Pad RGB to RGBA with a fully opaque alpha channel.
            alpha_channel = np.zeros((N_v, 1), dtype=np.ubyte) + 255
            v_c = np.hstack((v_c, alpha_channel))
    N_f = 0
    if f is not None:
        N_f = f.shape[0]
        assert f.shape[1] == 3
        if f_c is not None:
            assert f_c.shape[0] == f.shape[0]
            f_c_flag = True
            if f_c.shape[1] == 3:
                alpha_channel = np.zeros((N_f, 1), dtype=np.ubyte) + 255
                f_c = np.hstack((f_c, alpha_channel))
    N_e = 0
    if e is not None:
        N_e = e.shape[0]
    with open(outfile_name, 'w') as file:
        # --- header: declare elements and their properties ---
        file.write('ply\n')
        file.write('format ascii 1.0\n')
        file.write('element vertex %d\n' % N_v)
        file.write('property float x\n')
        file.write('property float y\n')
        file.write('property float z\n')
        if v_n_flag:
            file.write('property float nx\n')
            file.write('property float ny\n')
            file.write('property float nz\n')
        if v_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        file.write('element face %d\n' % N_f)
        file.write('property list uchar int vertex_indices\n')
        if f_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        if not (N_e == 0):
            file.write('element edge %d\n' % N_e)
            file.write('property int vertex1\n')
            file.write('property int vertex2\n')
        file.write('end_header\n')
        # --- vertex records (layout depends on which attributes are present) ---
        if v_n_flag and v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f %d %d %d %d\n' % (v[(i, 0)], v[(i, 1)], v[(i, 2)], v_n[(i, 0)], v_n[(i, 1)], v_n[(i, 2)], v_c[(i, 0)], v_c[(i, 1)], v_c[(i, 2)], v_c[(i, 3)]))
        elif v_n_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f\n' % (v[(i, 0)], v[(i, 1)], v[(i, 2)], v_n[(i, 0)], v_n[(i, 1)], v_n[(i, 2)]))
        elif v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %d %d %d %d\n' % (v[(i, 0)], v[(i, 1)], v[(i, 2)], v_c[(i, 0)], v_c[(i, 1)], v_c[(i, 2)], v_c[(i, 3)]))
        else:
            for i in range(0, N_v):
                file.write('%f %f %f\n' % (v[(i, 0)], v[(i, 1)], v[(i, 2)]))
        # --- face records ---
        if f_c_flag:
            for i in range(0, N_f):
                file.write('3 %d %d %d %d %d %d %d\n' % (f[(i, 0)], f[(i, 1)], f[(i, 2)], f_c[(i, 0)], f_c[(i, 1)], f_c[(i, 2)], f_c[(i, 3)]))
        else:
            for i in range(0, N_f):
                file.write('3 %d %d %d\n' % (f[(i, 0)], f[(i, 1)], f[(i, 2)]))
        # --- edge records ---
        if not (N_e == 0):
            for i in range(0, N_e):
                file.write('%d %d\n' % (e[(i, 0)], e[(i, 1)]))
class _EmptyMapDataset(torch.utils.data.Dataset):
    """Wraps a map-style dataset, accessing items for their side effects only.

    Every __getitem__ touches the wrapped dataset's item and then yields the
    constant dummy sample [0].
    """

    def __init__(self, dataset):
        self.ds = dataset

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, idx):
        # Access the underlying item purely for its side effects; discard it.
        self.ds[idx]
        return [0]
# NOTE(review): this bare string expression looks like the remnant of a
# registration decorator (e.g. @Model.register('single_seq_model')) lost in
# transit — confirm against the original source.
('single_seq_model')
class SingleSeqModel(Model):
    """Compiled tf.keras model: embed -> encode -> optional feedforward -> per-target decoders."""

    # NOTE(review): takes `cls` but carries no @classmethod decorator here;
    # presumably one existed (or was stripped) in the original file.
    def from_params(cls, params):
        """Build and compile the keras model described by the `params` dict."""
        params = deepcopy(params)
        input_names = params['input_names']
        target_names = params['target_names']
        embedder_config = params['embedder']
        encoder_config = params['encoder']
        decoder_config = params['decoder']
        loss_config = params['loss']
        optimizer_config = params['optimizer']
        metrics_config = params.get('metrics', {})
        target_stats = params.get('target_stats', None)
        msas_dim = params.get('msas_dim', 21)
        window_size = params.get('window_size', None)
        # Mask input is threaded through the encoder and the loss/metric closures.
        mask = tf.keras.Input(shape=(window_size,), name='mask', dtype=tf.int64)
        inputs = []
        for n in input_names:
            # 'msas' inputs are dense float features; all others are id sequences.
            if (n != 'msas'):
                inputs += [tf.keras.Input(shape=(window_size,), name=n, dtype=tf.int64)]
            else:
                inputs += [tf.keras.Input(shape=(window_size, msas_dim), name=n, dtype=tf.float32)]
        if ('position' in embedder_config['embedders']):
            # Positional embeddings get a dedicated input, prepended to the list.
            input_names = (['position'] + input_names)
            position_emb_dim = embedder_config['embedders']['position']['input_dim']
            inputs = ([tf.keras.Input(shape=(window_size, position_emb_dim), name='position_emb', dtype=tf.float32)] + inputs)
        embedder_config['input_names'] = input_names
        (embedder, output_dim) = ProteinEmbedder.from_params(embedder_config)
        # Each stage's output_dim feeds the next stage's input_dim.
        encoder_config['input_dim'] = output_dim
        (encoder, output_dim) = Encoder.by_name(encoder_config['type']).from_params(encoder_config)
        if ('feedforward' in params):
            feedforward_config = params['feedforward']
            feedforward_config['input_dim'] = output_dim
            (feedforward, output_dim) = FeedForward.from_params(feedforward_config)
        else:
            # Identity passthrough when no feedforward stage is configured.
            feedforward = (lambda inp: inp)
        (decoders, losses, metrics) = ([], [], [])
        for n in target_names:
            decoder_config[n]['input_dim'] = output_dim
            decoders += [Decoder.by_name(decoder_config[n]['type']).from_params(decoder_config[n])]
            losses += [Loss.by_name(loss_config[n]['type']).from_params(loss_config[n])]
            if (n in metrics_config):
                if (target_stats is not None):
                    metrics_config[n]['target_stats'] = target_stats[n]
                metrics += [Loss.by_name(metrics_config[n]['type']).from_params(metrics_config[n])]
        # Wire the functional graph: embed, encode (with mask), transform, decode.
        x = embedder(inputs)
        x = encoder(x, mask)
        x = feedforward(x)
        x = [decoder_model(x) for decoder_model in decoders]
        model = tf.keras.Model(inputs=([mask] + inputs), outputs=x)
        optimizer_type = optimizer_config.pop('type')
        # SECURITY: eval() constructs the optimizer class from configuration
        # text; safe only when the config file is trusted.
        optimizer = eval('tf.keras.optimizers.{}'.format(optimizer_type))(**optimizer_config)
        model.compile(optimizer, loss={n: loss(mask=mask) for (n, loss) in zip(target_names, losses)}, metrics={n: metric(mask=mask) for (n, metric) in zip(target_names, metrics)})
        return model
def format_text(text, **format):
    """Normalize a tweet-like string according to the flags in `format`.

    Expected boolean keys: 'remove_mentions', 'unidecode', 'hashtag_split',
    'lower'.  Returns the cleaned text.
    """
    # NOTE(review): the original pattern here was corrupted by an encoding
    # round-trip (`re.sub(' '', text, ...)` is not even valid Python).  It most
    # plausibly normalized the Unicode right single quote to an ASCII
    # apostrophe, which the split on "[' ]" below relies on — confirm against
    # the project's history.
    text = re.sub('\u2019', "'", text, flags=re.MULTILINE)
    if format['remove_mentions']:
        # NOTE(review): reconstructed as @-mention removal; the bare '\\S+'
        # left in the corrupted source would have deleted every token.
        text = re.sub('@\\S+', '', text, flags=re.MULTILINE)
    if format['unidecode']:
        # Transliterate non-ASCII characters to their closest ASCII equivalent.
        text = unidecode(text)
    new_text = []
    for word in re.split("[' ]", text):
        # Drop long all-digit tokens (ids, phone numbers); keep everything else.
        if ((len(word) < 5) or (not word.isdigit())):
            if (word.startswith('#') and format['hashtag_split']):
                # Split CamelCase hashtags into separate words.
                new_text.append(camel_case_split(word[1:]))
            else:
                new_text.append(word)
    text = remove_repeted_characters(' '.join(new_text))
    if format['lower']:
        text = text.lower()
    return text
def pal2al(_annolist):
    """Convert a protobuf annotation list into an AnnotationLib.AnnoList.

    Copies the attribute descriptors (with their value/id mappings), then each
    annotation's rectangles including optional id/track_id/score fields and
    typed attribute values.
    """
    annotations = AnnotationLib.AnnoList()
    # Copy attribute descriptors and their string-value/id mappings.
    for adesc in _annolist.attribute_desc:
        annotations.attribute_desc[adesc.name] = adesc
        print('attribute: ', adesc.name, adesc.id)
        for valdesc in adesc.val_to_str:
            annotations.add_attribute_val(adesc.name, valdesc.s, valdesc.id)
    # NOTE(review): dict.iteritems() is Python-2-only; this function would need
    # .items() on Python 3.
    attribute_name_from_id = {adesc.id: aname for (aname, adesc) in annotations.attribute_desc.iteritems()}
    attribute_dtype_from_id = {adesc.id: adesc.dtype for (aname, adesc) in annotations.attribute_desc.iteritems()}
    for _a in _annolist.annotation:
        anno = AnnotationLib.Annotation()
        anno.imageName = _a.imageName
        anno.rects = []
        for _r in _a.rect:
            rect = AnnotationLib.AnnoRect()
            rect.x1 = _r.x1
            rect.x2 = _r.x2
            rect.y1 = _r.y1
            rect.y2 = _r.y2
            # Optional protobuf fields are copied only when present.
            if _r.HasField('id'):
                rect.id = _r.id
            if _r.HasField('track_id'):
                rect.track_id = _r.track_id
            if _r.HasField('score'):
                rect.score = _r.score
            for _at in _r.attribute:
                try:
                    cur_aname = attribute_name_from_id[_at.id]
                    cur_dtype = attribute_dtype_from_id[_at.id]
                except KeyError as e:
                    # Unknown attribute id: print context, then fail hard.
                    print('attribute: ', _at.id)
                    print(e)
                    assert False
                # Pick the protobuf value field matching the declared dtype.
                if (cur_dtype == AnnotationLib.AnnoList.TYPE_INT32):
                    rect.at[cur_aname] = _at.val
                elif (cur_dtype == AnnotationLib.AnnoList.TYPE_FLOAT):
                    rect.at[cur_aname] = _at.fval
                elif (cur_dtype == AnnotationLib.AnnoList.TYPE_STRING):
                    rect.at[cur_aname] = _at.strval
                else:
                    assert False
            anno.rects.append(rect)
        annotations.append(anno)
    return annotations
def prc_auc(targets, preds):
    """Area under the precision-recall curve for binary `targets` scored by `preds`."""
    prec, rec, _thresholds = precision_recall_curve(targets, preds)
    return auc(rec, prec)
class Token():
    """A tokenized span of text; identity (equality/hash) is the token id `tid`."""

    def __init__(self, tid: int, index: int, span_start: int, span_end: int, phrase: str):
        self._tid = tid  # unique token id, sole basis of equality and hashing
        self._index = index  # position of the token in its sequence
        self._span_start = span_start  # start character offset
        self._span_end = span_end  # end character offset
        self._phrase = phrase  # surface text of the token

    def index(self):
        return self._index

    def span_start(self):
        return self._span_start

    def span_end(self):
        return self._span_end

    def span(self):
        """(start, end) character offsets as a tuple."""
        return (self._span_start, self._span_end)

    def phrase(self):
        return self._phrase

    def __eq__(self, other):
        # Two tokens are equal iff they are both Tokens with the same id.
        return isinstance(other, Token) and self._tid == other._tid

    def __hash__(self):
        return hash(self._tid)

    def __str__(self):
        return self._phrase

    # repr mirrors str: just the surface text.
    __repr__ = __str__
class TestParser(QiskitTestCase):
    """Tests for the OpenQASM parser using the bundled example QASM files."""

    def setUp(self):
        # Resolve the example QASM files shipped with the test resources.
        self.qasm_file_path = self._get_resource_path('example.qasm', Path.QASMS)
        self.qasm_file_path_fail = self._get_resource_path('example_fail.qasm', Path.QASMS)
        self.qasm_file_path_if = self._get_resource_path('example_if.qasm', Path.QASMS)

    def test_parser(self):
        """Parsing a valid file yields the expected unrolled text."""
        res = parse(self.qasm_file_path)
        self.log.info(res)
        # Spot-check total length plus the beginning, a gate definition,
        # and the final measure statement of the emitted program.
        self.assertEqual(len(res), 1563)
        self.assertEqual(res[:12], 'OPENQASM 2.0')
        self.assertEqual(res[14:41], 'gate u3(theta,phi,lambda) q')
        self.assertEqual(res[1547:1562], 'measure r -> d;')

    def test_parser_fail(self):
        """A malformed file raises QasmError with a helpful message."""
        self.assertRaisesRegex(QasmError, 'Perhaps there is a missing', parse, file_path=self.qasm_file_path_fail)

    def test_all_valid_nodes(self):
        """Every node in the parsed AST (including under `if`) is a Node instance."""
        def inspect(node):
            # Recursively assert the whole tree is made of Node children.
            for child in node.children:
                self.assertTrue(isinstance(child, Node))
                inspect(child)
        qasm = Qasm(self.qasm_file_path)
        res = qasm.parse()
        inspect(res)
        qasm_if = Qasm(self.qasm_file_path_if)
        res_if = qasm_if.parse()
        inspect(res_if)

    def test_get_tokens(self):
        """The lexer produces only ply LexToken objects."""
        qasm = Qasm(self.qasm_file_path)
        for token in qasm.get_tokens():
            self.assertTrue(isinstance(token, ply.lex.LexToken))
def _experiments_to_circuits(qobj):
    """Reconstruct QuantumCircuit objects from a qobj's experiments.

    Returns a list of circuits, or None when the qobj has no experiments.
    """
    if qobj.experiments:
        circuits = []
        for x in qobj.experiments:
            # Rebuild the quantum/classical registers recorded in the header.
            quantum_registers = [QuantumRegister(i[1], name=i[0]) for i in x.header.qreg_sizes]
            classical_registers = [ClassicalRegister(i[1], name=i[0]) for i in x.header.creg_sizes]
            circuit = QuantumCircuit(*quantum_registers, *classical_registers, name=x.header.name)
            qreg_dict = {}
            creg_dict = {}
            for reg in quantum_registers:
                qreg_dict[reg.name] = reg
            for reg in classical_registers:
                creg_dict[reg.name] = reg
            for i in x.instructions:
                name = i.name
                # The identity gate's circuit method is named 'iden'.
                if (i.name == 'id'):
                    name = 'iden'
                qubits = []
                params = getattr(i, 'params', [])
                try:
                    # Translate flat qubit indices back to (register, offset) bits.
                    for qubit in i.qubits:
                        qubit_label = x.header.qubit_labels[qubit]
                        qubits.append(qreg_dict[qubit_label[0]][qubit_label[1]])
                except Exception:
                    # Instruction addresses no qubits (or labels are absent).
                    pass
                clbits = []
                try:
                    for clbit in i.memory:
                        clbit_label = x.header.clbit_labels[clbit]
                        clbits.append(creg_dict[clbit_label[0]][clbit_label[1]])
                except Exception:
                    pass
                if hasattr(circuit, name):
                    instr_method = getattr(circuit, name)
                    # Snapshot and initialize have non-standard call signatures.
                    if (i.name in ['snapshot']):
                        instr_method(i.label, snapshot_type=i.snapshot_type, qubits=qubits, params=params)
                    elif (i.name == 'initialize'):
                        instr_method(params, qubits)
                    else:
                        instr_method(*params, *qubits, *clbits)
                else:
                    # Unknown gate name: append it as an opaque instruction.
                    temp_opaque_instruction = Instruction(name=name, num_qubits=len(qubits), num_clbits=len(clbits), params=params)
                    circuit.append(temp_opaque_instruction, qubits, clbits)
            circuits.append(circuit)
        return circuits
    return None
def conv_relu(input, size, depth, in_depth=None):
    """A VALID-padded, stride-1, `size`x`size` convolution with bias and ReLU.

    Must be called inside a variable scope: variables are created with the
    fixed names 'weights' and 'bias'.

    Args:
        input: NHWC input tensor.
        size: square spatial kernel size.
        depth: number of output channels.
        in_depth: number of input channels; must be supplied (no inference).

    Returns:
        The ReLU-activated convolution output.
    """
    # BUGFIX/cleanup: removed the unused (and typo-named) local
    # `sqared = math.sqrt(size * size)` from the original.
    # Xavier-initialized kernel, zero-initialized bias.
    weights = tf.get_variable('weights', (size, size, in_depth, depth), initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.get_variable('bias', [depth], initializer=tf.constant_initializer(value=0.0))
    conv = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID', use_cudnn_on_gpu=True)
    return tf.nn.relu(tf.nn.bias_add(conv, bias))
def accuracy(output: torch.tensor, target: torch.tensor, topk=(1,)) -> List[torch.tensor]:
    """Top-k accuracy (in percent) of `output` scores against `target` labels.

    Returns one single-element tensor per entry in `topk`.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        # hits[k, b] is True when the (k+1)-th ranked prediction matches target b.
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        results = []
        for k in topk:
            n_correct = hits[:k].contiguous().view(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / n))
        return results
def main():
    """CLI entry point: parse arguments, build data loaders, then train or evaluate."""
    parser = ArgumentParser(description='Train or evaluate NeurWP models for LAM')
    # --- data / run setup ---
    parser.add_argument('--dataset', type=str, default='meps_example', help='Dataset, corresponding to name in data directory (default: meps_example)')
    parser.add_argument('--model', type=str, default='graph_lam', help='Model architecture to train/evaluate (default: graph_lam)')
    parser.add_argument('--subset_ds', type=int, default=0, help='Use only a small subset of the dataset, for debugging (default: 0=false)')
    parser.add_argument('--seed', type=int, default=42, help='random seed (default: 42)')
    parser.add_argument('--n_workers', type=int, default=4, help='Number of workers in data loader (default: 4)')
    parser.add_argument('--epochs', type=int, default=200, help='upper epoch limit (default: 200)')
    parser.add_argument('--batch_size', type=int, default=4, help='batch size (default: 4)')
    parser.add_argument('--load', type=str, help='Path to load model parameters from (default: None)')
    # BUGFIX: corrected help-text typo 'shoudl' -> 'should'.
    parser.add_argument('--restore_opt', type=int, default=0, help='If optimizer state should be restored with model (default: 0 (false))')
    # NOTE(review): type=str but default is the int 32, so args.precision is an
    # int when defaulted and a str when passed on the CLI; kept as-is since
    # pl.Trainer accepts both, but consider default='32' for consistency.
    parser.add_argument('--precision', type=str, default=32, help='Numerical precision to use for model (32/16/bf16) (default: 32)')
    # --- model hyperparameters ---
    parser.add_argument('--graph', type=str, default='multiscale', help='Graph to load and use in graph-based model (default: multiscale)')
    parser.add_argument('--hidden_dim', type=int, default=64, help='Dimensionality of all hidden representations (default: 64)')
    parser.add_argument('--hidden_layers', type=int, default=1, help='Number of hidden layers in all MLPs (default: 1)')
    parser.add_argument('--processor_layers', type=int, default=4, help='Number of GNN layers in processor GNN (default: 4)')
    parser.add_argument('--mesh_aggr', type=str, default='sum', help='Aggregation to use for m2m processor GNN layers (sum/mean) (default: sum)')
    # --- training options ---
    parser.add_argument('--ar_steps', type=int, default=1, help='Number of steps to unroll prediction for in loss (1-19) (default: 1)')
    parser.add_argument('--control_only', type=int, default=0, help='Train only on control member of ensemble data (default: 0 (False))')
    parser.add_argument('--loss', type=str, default='mse', help='Loss function to use (default: mse)')
    parser.add_argument('--step_length', type=int, default=3, help='Step length in hours to consider single time step 1-3 (default: 3)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)')
    parser.add_argument('--val_interval', type=int, default=1, help='Number of epochs training between each validation run (default: 1)')
    # --- evaluation options ---
    parser.add_argument('--eval', type=str, help='Eval model on given data split (val/test) (default: None (train model))')
    parser.add_argument('--n_example_pred', type=int, default=1, help='Number of example predictions to plot during evaluation (default: 1)')
    args = parser.parse_args()
    # Validate arguments early, before any expensive data loading.
    assert (args.model in MODELS), f'Unknown model: {args.model}'
    assert (args.step_length <= 3), 'Too high step length'
    assert (args.eval in (None, 'val', 'test')), f'Unknown eval setting: {args.eval}'
    # Random id distinguishes runs started within the same second.
    random_run_id = random.randint(0, 9999)
    seed.seed_everything(args.seed)
    train_loader = torch.utils.data.DataLoader(WeatherDataset(args.dataset, pred_length=args.ar_steps, split='train', subsample_step=args.step_length, subset=bool(args.subset_ds), control_only=args.control_only), args.batch_size, shuffle=True, num_workers=args.n_workers)
    # Longest prediction horizon that fits the 65-step files at this step length.
    max_pred_length = ((65 // args.step_length) - 2)
    val_loader = torch.utils.data.DataLoader(WeatherDataset(args.dataset, pred_length=max_pred_length, split='val', subsample_step=args.step_length, subset=bool(args.subset_ds), control_only=args.control_only), args.batch_size, shuffle=False, num_workers=args.n_workers)
    if torch.cuda.is_available():
        device_name = 'cuda'
        # Allow faster reduced-precision float32 matmuls on supported GPUs.
        torch.set_float32_matmul_precision('high')
    else:
        device_name = 'cpu'
    model_class = MODELS[args.model]
    if args.load:
        model = model_class.load_from_checkpoint(args.load, args=args)
        if args.restore_opt:
            # Stash the checkpoint's optimizer state on the model so it can be
            # restored when optimizers are (re)configured.
            model.opt_state = torch.load(args.load)['optimizer_states'][0]
    else:
        model = model_class(args)
    # Run name encodes setup so different runs are distinguishable in logs.
    prefix = ('subset-' if args.subset_ds else '')
    if args.eval:
        prefix = (prefix + f'eval-{args.eval}-')
    run_name = f"{prefix}{args.model}-{args.processor_layers}x{args.hidden_dim}-{time.strftime('%m_%d_%H')}-{random_run_id:04d}"
    checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=f'saved_models/{run_name}', filename='min_val_loss', monitor='val_mean_loss', mode='min', save_last=True)
    logger = pl.loggers.WandbLogger(project=constants.wandb_project, name=run_name, config=args)
    trainer = pl.Trainer(max_epochs=args.epochs, deterministic=True, strategy='ddp', accelerator=device_name, logger=logger, log_every_n_steps=1, callbacks=[checkpoint_callback], check_val_every_n_epoch=args.val_interval, precision=args.precision)
    if (trainer.global_rank == 0):
        # Only rank 0 sets up the wandb metric definitions.
        utils.init_wandb_metrics(logger)
    if args.eval:
        if (args.eval == 'val'):
            eval_loader = val_loader
        else:
            eval_loader = torch.utils.data.DataLoader(WeatherDataset(args.dataset, pred_length=max_pred_length, split='test', subsample_step=args.step_length, subset=bool(args.subset_ds)), args.batch_size, shuffle=False, num_workers=args.n_workers)
        print(f'Running evaluation on {args.eval}')
        trainer.test(model=model, dataloaders=eval_loader)
    else:
        trainer.fit(model=model, train_dataloaders=train_loader, val_dataloaders=val_loader)
def clear_vocabs():
    """Reset the module-level vocabulary caches to their initial empty state."""
    global _COG_LIST, _VOCABS
    _COG_LIST = None
    _VOCABS = {}
class InputDataFields(object):
    """Canonical string keys for tensors in detection input dictionaries.

    Using these constants instead of raw strings keeps producers and consumers
    of input dicts in sync.
    """
    # Image-level fields.
    image = 'image'
    original_image = 'original_image'
    key = 'key'
    source_id = 'source_id'
    filename = 'filename'
    # Ground-truth annotation fields.
    groundtruth_image_classes = 'groundtruth_image_classes'
    groundtruth_boxes = 'groundtruth_boxes'
    groundtruth_classes = 'groundtruth_classes'
    groundtruth_label_types = 'groundtruth_label_types'
    groundtruth_is_crowd = 'groundtruth_is_crowd'
    groundtruth_area = 'groundtruth_area'
    groundtruth_difficult = 'groundtruth_difficult'
    groundtruth_group_of = 'groundtruth_group_of'
    # Region-proposal fields.
    proposal_boxes = 'proposal_boxes'
    proposal_objectness = 'proposal_objectness'
    # Instance-level ground truth (masks, boundaries, keypoints).
    groundtruth_instance_masks = 'groundtruth_instance_masks'
    groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
    groundtruth_instance_classes = 'groundtruth_instance_classes'
    groundtruth_keypoints = 'groundtruth_keypoints'
    groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
    groundtruth_label_scores = 'groundtruth_label_scores'
    groundtruth_weights = 'groundtruth_weights'
    # Shape/count metadata.
    num_groundtruth_boxes = 'num_groundtruth_boxes'
    true_image_shape = 'true_image_shape'
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable gain and bias."""

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        # a_2 is the per-feature gain, b_2 the per-feature bias.
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps  # numerical guard against division by zero

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
def print_filtered_stacktrace():
    """Print the active exception's traceback, hiding Sacred-internal frames.

    If the innermost frame belongs to Sacred itself (its globals contain
    '__sacred__'), the full traceback is printed; otherwise only the frames
    outside Sacred are shown, followed by the exception message.
    """
    (exc_type, exc_value, exc_traceback) = sys.exc_info()
    # Walk to the innermost frame to see where the exception originated.
    current_tb = exc_traceback
    while (current_tb.tb_next is not None):
        current_tb = current_tb.tb_next
    if ('__sacred__' in current_tb.tb_frame.f_globals):
        print('Exception originated from within Sacred.\nTraceback (most recent calls):', file=sys.stderr)
        tb.print_tb(exc_traceback)
        tb.print_exception(exc_type, exc_value, None)
    else:
        print('Traceback (most recent calls WITHOUT Sacred internals):', file=sys.stderr)
        current_tb = exc_traceback
        # Re-walk from the top, printing one frame at a time and skipping
        # frames whose module globals are marked with '__sacred__'.
        while (current_tb is not None):
            if ('__sacred__' not in current_tb.tb_frame.f_globals):
                tb.print_tb(current_tb, 1)
            current_tb = current_tb.tb_next
        print('\n'.join(tb.format_exception_only(exc_type, exc_value)).strip(), file=sys.stderr)
class StatusData(genpy.Message):
    """Auto-generated (genpy-style) serializer for quadrotor_msgs/StatusData.

    Fields: header (std_msgs/Header), loop_rate (uint16), voltage (float64),
    seq (uint8). The serialize/deserialize methods implement the ROS wire
    format by hand with precompiled struct objects.
    """
    # ROS message metadata used by the middleware.
    _md5sum = 'c70a4ecae176ad30f89553'
    _type = 'quadrotor_msgs/StatusData'
    _has_header = True
    _full_text = "Header header\nuint16 loop_rate\nfloat64 voltage\nuint8 seq\n\n\nMSG: std_msgs/Header\n# Standard metadata for higher-level stamped data types.\n# This is generally used to communicate timestamped data \n# in a particular coordinate frame.\n# \n# sequence ID: consecutively increasing ID \nuint32 seq\n#Two-integer timestamp that is expressed as:\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n# time-handling sugar is provided by the client library\ntime stamp\n#Frame this data is associated with\n# 0: no frame\n# 1: global frame\nstring frame_id\n\n"
    __slots__ = ['header', 'loop_rate', 'voltage', 'seq']
    _slot_types = ['std_msgs/Header', 'uint16', 'float64', 'uint8']
    def __init__(self, *args, **kwds):
        """Initialize the message; unset fields receive their defaults."""
        if (args or kwds):
            super(StatusData, self).__init__(*args, **kwds)
            # Fill in any fields the caller did not provide.
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            if (self.loop_rate is None):
                self.loop_rate = 0
            if (self.voltage is None):
                self.voltage = 0.0
            if (self.seq is None):
                self.seq = 0
        else:
            # No arguments: default-initialize every field.
            self.header = std_msgs.msg.Header()
            self.loop_rate = 0
            self.voltage = 0.0
            self.seq = 0
    def _get_types(self):
        # Used by genpy's type-checking machinery.
        return self._slot_types
    def serialize(self, buff):
        """Serialize this message into the buffer ``buff``."""
        try:
            _x = self
            # Header: seq, stamp.secs, stamp.nsecs as three uint32s.
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            # Strings go on the wire as uint32 length + UTF-8 bytes.
            if (python3 or (type(_x) == unicode)):
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                # Python 3 branch packs the encoded bytes one-by-one ('B').
                buff.write(struct.pack(('<I%sB' % length), length, *_x))
            else:
                buff.write(struct.pack(('<I%ss' % length), length, _x))
            _x = self
            # Fixed-size tail: uint16 loop_rate, float64 voltage, uint8 seq.
            buff.write(_struct_HdB.pack(_x.loop_rate, _x.voltage, _x.seq))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))
    def deserialize(self, str):
        """Unpack this message from the serialized byte string ``str``.

        NOTE: the parameter name shadows the built-in ``str`` (generated-code
        convention); do not rename without regenerating callers.
        """
        try:
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            # Header: three uint32s = 12 bytes.
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
            # frame_id: uint32 length followed by that many bytes.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            # Fixed-size tail: uint16 + float64 + uint8 = 11 bytes.
            start = end
            end += 11
            (_x.loop_rate, _x.voltage, _x.seq) = _struct_HdB.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)
    def serialize_numpy(self, buff, numpy):
        """Serialize with numpy support; identical wire format to ``serialize``."""
        try:
            _x = self
            # Header: seq, stamp.secs, stamp.nsecs as three uint32s.
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            # Strings go on the wire as uint32 length + UTF-8 bytes.
            if (python3 or (type(_x) == unicode)):
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack(('<I%sB' % length), length, *_x))
            else:
                buff.write(struct.pack(('<I%ss' % length), length, _x))
            _x = self
            # Fixed-size tail: uint16 loop_rate, float64 voltage, uint8 seq.
            buff.write(_struct_HdB.pack(_x.loop_rate, _x.voltage, _x.seq))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))
    def deserialize_numpy(self, str, numpy):
        """Deserialize with numpy support; identical wire format to ``deserialize``."""
        try:
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            # Header: three uint32s = 12 bytes.
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
            # frame_id: uint32 length followed by that many bytes.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            # Fixed-size tail: uint16 + float64 + uint8 = 11 bytes.
            start = end
            end += 11
            (_x.loop_rate, _x.voltage, _x.seq) = _struct_HdB.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)
def divide_cls(image_path_lst, train_set_cls, train_set_lst, valid_set_lst, ratio=0.8):
    """Split one class's image paths into train/valid subsets and record them.

    The first ``ratio`` fraction of ``image_path_lst`` becomes the training
    subset and the remainder the validation subset. The training subset is
    written, one path per line, to ``train_set_cls``, and both subsets are
    appended in place to ``train_set_lst`` / ``valid_set_lst``.

    Args:
        image_path_lst: Ordered list of image paths for one class.
        train_set_cls: Path of the text file receiving the training paths.
        train_set_lst: Accumulator list extended with the training paths.
        valid_set_lst: Accumulator list extended with the validation paths.
        ratio: Fraction of paths assigned to training (default 0.8, the
            previously hard-coded value).

    Raises:
        AssertionError: If any validation path also appears in the training
            subset (i.e. ``image_path_lst`` contained duplicates across the
            split boundary).
    """
    split_idx = int(ratio * len(image_path_lst))
    train_set = image_path_lst[:split_idx]
    valid_set = image_path_lst[split_idx:]
    with open(train_set_cls, 'w') as f:
        f.write('\n'.join(train_set))
    train_set_lst += train_set
    valid_set_lst += valid_set
    # Sanity check for leakage; set membership makes this O(n) instead of
    # the original O(n^2) list scan per validation path.
    train_lookup = set(train_set)
    for valid_path in valid_set:
        assert valid_path not in train_lookup
# NOTE(review): the line below looks like a truncated decorator — presumably
# '@pytest.mark.skipif(...)' lost its '@pytest.mark' prefix during extraction.
# As written it is not valid standalone Python; confirm against the original.
.skipif((not torch.cuda.is_available()), reason='requires CUDA to run')
def test_flash_standard_shapes():
    # Flash attention should produce the same output shape as standard
    # attention for the shared test input X (defined elsewhere in this file).
    assert (standard_attn(X).shape == flash_attn(X).shape)
class FlaxWav2Vec2ForCTC(metaclass=DummyObject):
    """Import-time placeholder for FlaxWav2Vec2ForCTC.

    Stands in when the optional Flax backend is unavailable; instantiation
    raises via ``requires_backends`` instead of failing with an ImportError
    at import time.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def reorient_image(arr: np.ndarray, slice_axis: int, nib_ref: nib, nib_ref_canonical: nib) -> np.ndarray:
    """Reorient an array from RAS order back to a reference image's orientation.

    Fixes the original return annotation, which referenced the undefined name
    ``nd`` (a typo for ``np``).

    Args:
        arr: Image data to reorient.
        slice_axis: Axis along which slices were taken; forwarded to
            ``orient_img_ras``.
        nib_ref: Reference nibabel image providing the target orientation.
        nib_ref_canonical: Canonical (RAS) version of the reference image.

    Returns:
        The array transformed from RAS orientation into the reference image's
        native orientation.
    """
    # Put the data into RAS order first, then compute the transform that maps
    # RAS back onto the reference image's orientation.
    arr_ras = orient_img_ras(arr, slice_axis)
    ref_orientation = nib.orientations.io_orientation(nib_ref.affine)
    ras_orientation = nib.orientations.io_orientation(nib_ref_canonical.affine)
    trans_orient = nib.orientations.ornt_transform(ras_orientation, ref_orientation)
    return nib.orientations.apply_orientation(arr_ras, trans_orient)
class HfArgumentParser(ArgumentParser):
    """ArgumentParser whose arguments are generated from dataclass fields.

    Each dataclass contributes one CLI argument per init-able field; parsed
    values are re-assembled into instances of those dataclasses.
    """
    # Dataclass types whose fields define the CLI arguments.
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[(DataClassType, Iterable[DataClassType])], **kwargs):
        """Create the parser and register arguments for every dataclass."""
        # Default to showing field defaults in --help output.
        if ('formatter_class' not in kwargs):
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        # Accept a single dataclass as well as an iterable of them.
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = dataclass_types
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """Register one argparse argument per init-able field of ``dtype``."""
        # Optionally group arguments under a named --help section.
        if hasattr(dtype, '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        for field in dataclasses.fields(dtype):
            if (not field.init):
                continue
            field_name = f'--{field.name}'
            kwargs = field.metadata.copy()
            # A string annotation means PEP 563 postponed evaluation is
            # active, which this implementation cannot resolve.
            if isinstance(field.type, str):
                raise ImportError('This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563), which can be opted in from Python 3.7 with `from __future__ import annotations`. We will add compatibility when Python 3.9 is released.')
            # Unwrap Optional[...] around primitives and List[...] of
            # primitives into their plain types.
            typestring = str(field.type)
            for prim_type in (int, float, str):
                for collection in (List,):
                    if ((typestring == f'typing.Union[{collection[prim_type]}, NoneType]') or (typestring == f'typing.Optional[{collection[prim_type]}]')):
                        field.type = collection[prim_type]
                    if ((typestring == f'typing.Union[{prim_type.__name__}, NoneType]') or (typestring == f'typing.Optional[{prim_type.__name__}]')):
                        field.type = prim_type
            bool_kwargs = {}
            if (isinstance(field.type, type) and issubclass(field.type, Enum)):
                # Enum fields become constrained choices typed like their values.
                kwargs['choices'] = [x.value for x in field.type]
                kwargs['type'] = type(kwargs['choices'][0])
                if (field.default is not dataclasses.MISSING):
                    kwargs['default'] = field.default
                else:
                    kwargs['required'] = True
            elif ((field.type is bool) or (field.type == Optional[bool])):
                # Keep a copy for the complementary --no_* flag added below.
                bool_kwargs = copy(kwargs)
                kwargs['type'] = string_to_bool
                if ((field.type is bool) or ((field.default is not None) and (field.default is not dataclasses.MISSING))):
                    # Allow `--flag` with no value (const=True) plus a default.
                    default = (False if (field.default is dataclasses.MISSING) else field.default)
                    kwargs['default'] = default
                    kwargs['nargs'] = '?'
                    kwargs['const'] = True
            elif (hasattr(field.type, '__origin__') and (re.search('^typing\\.List\\[(.*)\\]$', str(field.type)) is not None)):
                # Homogeneous List[...] fields accept one-or-more values.
                kwargs['nargs'] = '+'
                kwargs['type'] = field.type.__args__[0]
                if (not all(((x == kwargs['type']) for x in field.type.__args__))):
                    raise ValueError(f'{field.name} cannot be a List of mixed types')
                if (field.default_factory is not dataclasses.MISSING):
                    kwargs['default'] = field.default_factory()
                elif (field.default is dataclasses.MISSING):
                    kwargs['required'] = True
            else:
                kwargs['type'] = field.type
                if (field.default is not dataclasses.MISSING):
                    kwargs['default'] = field.default
                elif (field.default_factory is not dataclasses.MISSING):
                    kwargs['default'] = field.default_factory()
                else:
                    kwargs['required'] = True
            parser.add_argument(field_name, **kwargs)
            # Booleans defaulting to True get a --no_<name> negation flag.
            if ((field.default is True) and ((field.type is bool) or (field.type == Optional[bool]))):
                bool_kwargs['default'] = False
                parser.add_argument(f'--no_{field.name}', action='store_false', dest=field.name, **bool_kwargs)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None) -> Tuple[(DataClass, ...)]:
        """Parse CLI args (optionally prepended from a .args file) into dataclasses.

        Returns one instance per registered dataclass, plus the leftover
        namespace and/or remaining strings depending on the flags.
        """
        if (args_filename or (look_for_args_file and len(sys.argv))):
            if args_filename:
                args_file = Path(args_filename)
            else:
                # Look for "<script>.args" next to the entry point.
                args_file = Path(sys.argv[0]).with_suffix('.args')
            if args_file.exists():
                fargs = args_file.read_text().split()
                # File arguments come first so explicit CLI args win.
                args = ((fargs + args) if (args is not None) else (fargs + sys.argv[1:]))
        (namespace, remaining_args) = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for (k, v) in vars(namespace).items() if (k in keys)}
            # Remove consumed keys so leftovers can be detected below.
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if (len(namespace.__dict__) > 0):
            # Arguments belonging to no dataclass are returned as a namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
            return (*outputs,)
    def parse_json_file(self, json_file: str) -> Tuple[(DataClass, ...)]:
        """Instantiate the dataclasses from a JSON file instead of the CLI."""
        data = json.loads(Path(json_file).read_text())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for (k, v) in data.items() if (k in keys)}
            obj = dtype(**inputs)
            outputs.append(obj)
        return (*outputs,)
    def parse_dict(self, args: dict) -> Tuple[(DataClass, ...)]:
        """Instantiate the dataclasses from an already-parsed dict."""
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for (k, v) in args.items() if (k in keys)}
            obj = dtype(**inputs)
            outputs.append(obj)
        return (*outputs,)
def recall(rank, ground_truth, N):
    """Recall@N: fraction of ground-truth items found in the top-N ranking."""
    top_n = set(rank[:N])
    relevant = set(ground_truth)
    hits = top_n & relevant
    return len(hits) / float(len(relevant))
def resnext20_32x2d_cifar10(num_classes=10, **kwargs):
    """ResNeXt-20 (32x2d) configured for CIFAR-10.

    Extra keyword arguments are forwarded to ``get_resnext_cifar``.
    """
    return get_resnext_cifar(
        num_classes=num_classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name='resnext20_32x2d_cifar10',
        **kwargs)
def get_micro_f1(guess_entities, gold_entities, mode='strong'):
    """Micro-averaged F1 of guessed vs. gold entities.

    Returns 0 when precision and recall are both zero (avoiding a division
    by zero).
    """
    precision = get_micro_precision(guess_entities, gold_entities, mode)
    recall = get_micro_recall(guess_entities, gold_entities, mode)
    if not (precision + recall):
        return 0
    # Harmonic mean of precision and recall.
    return 2 * precision * recall / (precision + recall)
def setup_logger(output=None):
    """Redirect stdout to a Logger that tees output into a log file.

    ``output`` may be a file path (ending in .txt/.log) or a directory, in
    which case 'log.txt' is created inside it. If the target file already
    exists, a timestamp suffix is appended to avoid clobbering it. Does
    nothing when ``output`` is None.
    """
    if output is None:
        return
    # Treat output as a file path only when it carries a log-like extension.
    if output.endswith(('.txt', '.log')):
        fpath = output
    else:
        fpath = osp.join(output, 'log.txt')
    # Avoid overwriting an existing log by appending a timestamp.
    if osp.exists(fpath):
        fpath += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(fpath)
class Adam(torch.optim.Optimizer):
    """Adam optimizer (with optional AMSGrad) computing updates in float32.

    Gradients and parameters are cast to float32 for the update and copied
    back afterwards, which keeps the math stable when the model weights are
    stored in a lower precision.
    """
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)
    def supports_memory_efficient_fp16(self):
        # NOTE(review): similar optimizers expose this as a @property; here it
        # is a plain method, so callers must invoke it. Confirm intent.
        return True
    def supports_flat_params(self):
        return True
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: Optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss from ``closure`` (or None when no closure is given).
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                # Work in float32 regardless of the parameter dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy per-parameter state initialization on first step.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    # Keep state tensors in the float32 working dtype.
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Exponential moving averages of the gradient and its square.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                if amsgrad:
                    # AMSGrad: normalize by the running max of the 2nd moment.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                # Bias corrections folded into a single step size.
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                if (group['weight_decay'] != 0):
                    # L2 decay applied directly to the parameters rather than
                    # folded into the gradient.
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                p_data_fp32.addcdiv_(exp_avg, denom, value=(- step_size))
                # Copy back only if we actually worked on a converted copy.
                if (p.data_ptr() != p_data_fp32.data_ptr()):
                    p.data.copy_(p_data_fp32)
        return loss
def main():
    """Train a WaveNet model.

    Builds the audio input pipeline, the model, and the optimizer from CLI
    arguments plus a JSON parameter file, then runs the training loop with
    periodic checkpointing, summaries, and optional run-metadata traces.
    """
    args = get_arguments()
    try:
        directories = validate_directories(args)
    except ValueError as e:
        print('Some arguments are wrong:')
        print(str(e))
        return
    logdir = directories['logdir']
    restore_from = directories['restore_from']
    # Logging to a different directory than we restore from means any
    # checkpoint writes will not clobber the restored model.
    is_overwritten_training = (logdir != restore_from)
    with open(args.wavenet_params, 'r') as f:
        wavenet_params = json.load(f)
    coord = tf.train.Coordinator()
    with tf.name_scope('create_inputs'):
        # Disable silence trimming for (near-)zero thresholds.
        silence_threshold = (args.silence_threshold if (args.silence_threshold > EPSILON) else None)
        gc_enabled = (args.gc_channels is not None)
        reader = AudioReader(args.data_dir, coord, sample_rate=wavenet_params['sample_rate'], gc_enabled=gc_enabled, receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params['filter_width'], wavenet_params['dilations'], wavenet_params['scalar_input'], wavenet_params['initial_filter_width']), sample_size=args.sample_size, silence_threshold=silence_threshold)
        audio_batch = reader.dequeue(args.batch_size)
        if gc_enabled:
            # Global-condition (e.g. speaker) ids accompany each batch.
            gc_id_batch = reader.dequeue_gc(args.batch_size)
        else:
            gc_id_batch = None
    net = WaveNetModel(batch_size=args.batch_size, dilations=wavenet_params['dilations'], filter_width=wavenet_params['filter_width'], residual_channels=wavenet_params['residual_channels'], dilation_channels=wavenet_params['dilation_channels'], skip_channels=wavenet_params['skip_channels'], quantization_channels=wavenet_params['quantization_channels'], use_biases=wavenet_params['use_biases'], scalar_input=wavenet_params['scalar_input'], initial_filter_width=wavenet_params['initial_filter_width'], histograms=args.histograms, global_condition_channels=args.gc_channels, global_condition_cardinality=reader.gc_category_cardinality)
    # A strength of exactly 0 disables L2 regularization entirely.
    if (args.l2_regularization_strength == 0):
        args.l2_regularization_strength = None
    loss = net.loss(input_batch=audio_batch, global_condition_batch=gc_id_batch, l2_regularization_strength=args.l2_regularization_strength)
    optimizer = optimizer_factory[args.optimizer](learning_rate=args.learning_rate, momentum=args.momentum)
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)
    # TensorBoard summaries and graph.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
    try:
        saved_global_step = load(saver, sess, restore_from)
        if (is_overwritten_training or (saved_global_step is None)):
            # Start counting steps from scratch.
            saved_global_step = (- 1)
    except:
        # Bare except on purpose: log a hint, then re-raise unchanged.
        print('Something went wrong while restoring checkpoint. We will terminate training to avoid accidentally overwriting the previous model.')
        raise
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)
    step = None
    last_saved_step = saved_global_step
    try:
        for step in range((saved_global_step + 1), args.num_steps):
            start_time = time.time()
            if (args.store_metadata and ((step % 50) == 0)):
                # Every 50 steps, also capture a full execution trace for
                # TensorBoard / chrome://tracing.
                print('Storing metadata')
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                (summary, loss_value, _) = sess.run([summaries, loss, optim], options=run_options, run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata, 'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                (summary, loss_value, _) = sess.run([summaries, loss, optim])
                writer.add_summary(summary, step)
            duration = (time.time() - start_time)
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
            if ((step % args.checkpoint_every) == 0):
                save(saver, sess, logdir, step)
                last_saved_step = step
    except KeyboardInterrupt:
        # Ctrl+C: fall through to the finally block for a clean shutdown.
        print()
    finally:
        # Persist progress since the last checkpoint, then stop the readers.
        if (step > last_saved_step):
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
# NOTE(review): the call below looks like a truncated registration decorator
# (e.g. '@register_dataset_obj(...)') that lost its prefix during extraction;
# as written it is a bare call. Confirm against the original file.
_dataset_obj('mnist')
class MNIST(datasets.MNIST):
    """Thin wrapper around torchvision's MNIST dataset."""
    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        # Forward all arguments unchanged to torchvision.datasets.MNIST.
        super(MNIST, self).__init__(root, train=train, transform=transform, target_transform=target_transform, download=download)
def test_rank1_symmetric_convex_solver():
    """The solver must recover a missing entry of a symmetric rank-1 matrix."""
    XYXY_rank1, XYXY_missing_rank1 = create_rank1_data(symmetric=True)
    solver = NuclearNormMinimization(require_symmetric_solution=True)
    completed = solver.fit_transform(XYXY_missing_rank1)
    # Compare the reconstructed entry against the ground truth.
    expected = XYXY_rank1[(1, 2)]
    actual = completed[(1, 2)]
    assert abs(actual - expected) < 0.01, ('Expected %0.4f but got %0.4f' % (expected, actual))
def get_latest_version(folder):
    """Return the highest N among ``folder/version_N/`` subdirectories.

    Args:
        folder: Directory containing ``version_<N>`` subdirectories (the
            layout used by PyTorch-Lightning-style loggers).

    Returns:
        The largest integer suffix found, or None when no version directory
        exists.
    """
    # The trailing slash in the glob pattern restricts matches to directories.
    versions = [int(pathlib.PurePath(path).name.split('_')[-1]) for path in glob(f'{folder}/version_*/')]
    # max(..., default=None) replaces the original sort-then-index idiom and
    # handles the empty case in one expression.
    return max(versions, default=None)
def prompt_to_chatml(prompt: str, start_token: str='<|im_start|>', end_token: str='<|im_end|>'):
    """Convert a token-delimited prompt into a ChatML-style list of messages.

    Args:
        prompt: Text of the form ``<start>role\\ncontent<end>`` repeated.
        start_token: Delimiter opening each message (default '<|im_start|>').
        end_token: Delimiter closing each message (default '<|im_end|>').

    Returns:
        A list of dicts with at least ``role`` and ``content`` keys; roles
        like ``system name=example_user`` contribute extra keys parsed via
        ``string_to_dict``.
    """
    prompt = prompt.strip()
    assert prompt.startswith(start_token)
    assert prompt.endswith(end_token)
    message = []
    # BUG FIX: split on the configurable start_token instead of the
    # hard-coded '<|im_start|>' literal, so custom delimiters work.
    for p in prompt.split(start_token)[1:]:
        newline_splitted = p.split('\n', 1)
        role = newline_splitted[0].strip()
        content = newline_splitted[1].split(end_token, 1)[0].strip()
        # Roles such as 'system name=example_user' carry extra parameters.
        if role.startswith('system') and (role != 'system'):
            other_params = string_to_dict(role.split('system', 1)[-1])
            role = 'system'
        else:
            other_params = dict()
        message.append(dict(content=content, role=role, **other_params))
    return message
class CascadingBanditEpsilonGreedy(Agent):
    """Epsilon-greedy agent for the cascading bandit problem.

    Maintains Beta(prior_success, prior_failure) counts per item; with
    probability epsilon it explores by recommending random items, otherwise
    it recommends the items with the highest posterior mean.
    """
    def __init__(self, num_items, num_positions, a0=1, b0=1, epsilon=0.0, optimism=1.0):
        self.num_items = num_items
        self.num_positions = num_positions
        self.a0 = a0
        self.b0 = b0
        # One Beta prior per item, initialized to Beta(a0, b0).
        self.prior_success = np.array([a0 for _ in range(num_items)])
        self.prior_failure = np.array([b0 for _ in range(num_items)])
        self.epsilon = epsilon
        self.timestep = 1
        self.optimism = optimism
    def set_prior(self, prior_success, prior_failure):
        """Overwrite the per-item Beta prior counts."""
        self.prior_success = np.array(prior_success)
        self.prior_failure = np.array(prior_failure)
    def get_posterior_mean(self):
        """Posterior mean click probability for every item."""
        totals = self.prior_success + self.prior_failure
        return self.prior_success / totals
    def get_posterior_sample(self):
        """Draw one sample per item from its Beta posterior."""
        return np.random.beta(self.prior_success, self.prior_failure)
    def update_observation(self, observation, action, reward):
        """Update the Beta counts from the round's successes and failures."""
        for item in observation['round_failure']:
            self.prior_failure[item] += 1
        for item in observation['round_success']:
            self.prior_success[item] += 1
        self.timestep += 1
    def pick_action(self, observation):
        """Return the item indices to display for this round."""
        if np.random.rand() < self.epsilon:
            # Explore: uniform random items (with replacement).
            return np.random.randint(low=0, high=self.num_items, size=self.num_positions)
        # Exploit: items with the largest posterior means, best first.
        posterior_means = self.get_posterior_mean()
        return posterior_means.argsort()[::-1][:self.num_positions]
def get_well_conditioned_gaussian_datasets(dim, std, oos_std):
    """Build train/valid/test Gaussian datasets.

    The test list contains an in-distribution set (std) and an
    out-of-scale set (oos_std).
    """
    make = get_gaussian_dataset
    train_dset = make(role='train', size=50000, dim=dim, std=std)
    valid_dset = make(role='valid', size=5000, dim=dim, std=std)
    in_dist_test = make(role='test', size=10000, dim=dim, std=std)
    oos_test = make(role='test', size=10000, dim=dim, std=oos_std)
    return (train_dset, valid_dset, [in_dist_test, oos_test])
class GRU(Model):
    """GRU baseline over a stream of signature features.

    The final hidden states of all layers are flattened and passed through a
    linear head to produce the output.
    """
    _compatible_windows = (window_module.Global, window_module.Sliding, window_module.Expanding, window_module.Dyadic)
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, bias=True, dropout=0):
        super(GRU, self).__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.bias = bias
        self.dropout = dropout
        # batch_first=True: inputs arrive as (batch, seq, channels).
        self.gru = nn.GRU(input_size=in_channels, hidden_size=hidden_channels, num_layers=num_layers, bias=bias, dropout=dropout, batch_first=True)
        # All layers' final hidden states are concatenated for the head.
        self.total_hidden_size = num_layers * hidden_channels
        self.linear = nn.Linear(self.total_hidden_size, out_channels)
    def forward(self, signatures):
        # Exactly one signature stream is expected.
        assert len(signatures) == 1
        stream = signatures[0]
        inputs = torch.stack(stream, dim=1)
        # Second output of nn.GRU is the final hidden state per layer.
        final_hidden = self.gru(inputs)[1]
        # (layers, batch, hidden) -> (batch, layers, hidden) -> flat.
        final_hidden = final_hidden.transpose(0, 1)
        flat = final_hidden.reshape(final_hidden.size(0), self.total_hidden_size)
        return self.linear(flat)
def toVerticalPotential(Pot, R, phi=None, t0=0.0):
    """Convert a 3D potential (or list of potentials) to a 1D vertical potential at radius R.

    Args:
        Pot: Potential instance, linearPotential instance, or a list of such.
        R: Cylindrical radius at which the vertical potential is evaluated.
        phi: Azimuth at which to evaluate (optional).
        t0: Time at which to evaluate.

    Returns:
        A linearPotential (or a list of them) describing the vertical dynamics.

    Raises:
        NotImplementedError: For dissipative forces.
        PotentialError: For planarPotential input or otherwise invalid input.
    """
    Pot = flatten(Pot)
    # Dissipative forces cannot be reduced to a 1D vertical potential.
    if _isDissipative(Pot):
        raise NotImplementedError('Converting dissipative forces to 1D vertical potentials is currently not supported')
    # Validate that the input carries consistent physical units.
    try:
        conversion.get_physical(Pot)
    except:
        raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances")
    # Convert possible Quantity inputs into internal units.
    R = conversion.parse_length(R, **conversion.get_physical(Pot))
    phi = conversion.parse_angle(phi)
    t0 = conversion.parse_time(t0, **conversion.get_physical(Pot))
    if isinstance(Pot, list):
        out = []
        for pot in Pot:
            if isinstance(pot, linearPotential):
                # Already one-dimensional: pass through unchanged.
                out.append(pot)
            elif isinstance(pot, Potential):
                out.append(verticalPotential(pot, R, phi=phi, t0=t0))
            elif isinstance(pot, planarPotential):
                raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
            else:
                raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances")
        return out
    elif isinstance(Pot, Potential):
        return verticalPotential(Pot, R, phi=phi, t0=t0)
    elif isinstance(Pot, linearPotential):
        # Already one-dimensional: pass through unchanged.
        return Pot
    elif isinstance(Pot, planarPotential):
        raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
    else:
        raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances")
def modify_densenets(model):
    """Adapt a torchvision DenseNet to the pretrainedmodels-style API.

    Renames the instance's ``classifier`` to ``last_linear`` and installs
    ``logits``/``forward`` methods on the model's class so the forward pass
    runs features -> relu -> avg-pool -> flatten -> last_linear.
    """
    # Expose the classification layer under the pretrainedmodels name.
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1)
        out = out.view(out.size(0), -1)
        return self.last_linear(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Patch at class level so normal bound-method lookup applies.
    setattr(model.__class__, 'logits', logits)
    setattr(model.__class__, 'forward', forward)
    return model
class Metric(ABC):
    """Abstract metric that can materialize a backend-specific implementation.

    Subclasses override the ``get_*_metric`` hooks for the backends they
    support; ``get_metric`` dispatches on the backend name.
    """
    def get_metric(self, backend: str='bigdl'):
        """Return this metric's implementation for ``backend``."""
        if (backend == 'bigdl'):
            metric_impl = self.get_bigdl_metric()
        elif (backend == 'pytorch'):
            metric_impl = self.get_pytorch_metric()
        elif (backend == 'tf'):
            metric_impl = self.get_tf_metric()
        elif (backend == 'mxnet'):
            metric_impl = self.get_mxnet_metric()
        else:
            valid_backends = {'bigdl', 'pytorch', 'tf', 'mxnet'}
            invalidInputError(False, f'backend should be one of {valid_backends}, but got {backend}')
        return metric_impl
    def get_bigdl_metric(self):
        # Default hooks signal "unsupported backend"; subclasses override.
        invalidInputError(False, 'not implemented')
    def get_tf_metric(self):
        invalidInputError(False, 'not implemented')
    def get_pytorch_metric(self):
        invalidInputError(False, 'not implemented')
    def get_mxnet_metric(self):
        invalidInputError(False, 'not implemented')
    def get_name(self) -> str:
        """Return the display name for this metric (overridden by subclasses)."""
        pass
    def convert_metrics_list(metrics, backend: str='bigdl'):
        # NOTE(review): takes no `self`/`cls` yet sits at method indentation —
        # presumably intended as a module-level helper or @staticmethod; confirm.
        """Convert metric objects/functions into a list of backend implementations."""
        if (metrics is None):
            return None
        if (not isinstance(metrics, list)):
            metrics = [metrics]
        metric_impls = []
        for m in metrics:
            if isinstance(m, Metric):
                metric_impls.append(m.get_metric(backend))
            elif isinstance(m, types.FunctionType):
                # Plain functions are wrapped as customized metrics.
                customized_metric = CustomizedMetric(m)
                metric_impls.append(customized_metric.get_metric(backend))
            else:
                invalidInputError(False, ('Only orca metrics and customized functions are supported, but get ' + m.__class__.__name__))
        return metric_impls
    def convert_metrics_dict(metrics, backend: str='bigdl'):
        # NOTE(review): same missing-self concern as convert_metrics_list above.
        """Convert metric objects/functions into a name -> implementation dict."""
        if (metrics is None):
            return {}
        if (not isinstance(metrics, list)):
            metrics = [metrics]
        metric_impls = {}
        for m in metrics:
            if isinstance(m, Metric):
                metric_impls[m.get_name()] = m.get_metric(backend)
            elif isinstance(m, types.FunctionType):
                # Plain functions are wrapped as customized metrics.
                my_metric = CustomizedMetric(m)
                metric_impls[my_metric.get_name()] = my_metric.get_metric(backend)
            else:
                invalidInputError(False, ('Only orca metrics and customized functions are supported, but get ' + m.__class__.__name__))
        return metric_impls
class GraphVisualization():
    """Render a task-dependency environment as a networkx digraph.

    Node/edge colors encode each task's state (down/running plus whether it
    is the currently selected action); optional per-node probabilities are
    shown as labels and greyscale fills.
    """
    def __init__(self, env):
        # Transpose so rows become (u, v) edge pairs.
        # NOTE(review): assumes env.connections is shaped (2, E) — confirm.
        self.connections = env.connections.T
        self.G = nx.DiGraph()
        self.G.add_edges_from(self.connections)
        # Fixed layout so successive frames are visually comparable.
        self.pos = nx.kamada_kawai_layout(self.G)
        # Indexed by state code: down, running, selected-down, selected-running.
        self.colors = [COLOR_DOWN, COLOR_RUNNING, COLOR_SELECTED_D, COLOR_SELECTED_R]
        self.update_state(env)
    def update_state(self, env, a=None, probs=None):
        """Refresh node/edge colors (and labels) from the env's current state."""
        states = env.running.copy()
        if (a is not None):
            # Offset the chosen action's state into the "selected" color range.
            states[a] += 2
        self.edge_colors = np.array([self.colors[int(x)] for x in states])
        # Reorder to match networkx's node iteration order.
        self.edge_colors = self.edge_colors[self.G.nodes]
        if (probs is not None):
            # Show probabilities as compact labels like '.7', greyscale fills.
            self.node_labels = {i: f'{probs[i]:.1f}'.lstrip('0') for i in self.G.nodes}
            self.node_colors = np.array([((1 - x), (1 - x), (1 - x)) for x in probs])
            self.node_colors = self.node_colors[self.G.nodes]
        else:
            self.node_labels = None
            self.node_colors = (['w'] * len(states))
    def plot(self):
        """Draw the current graph into the active matplotlib figure."""
        plt.clf()
        nx.draw_networkx(self.G, pos=self.pos, labels=self.node_labels, node_color=self.node_colors, edgecolors=self.edge_colors, linewidths=3.0, arrows=True)
        return plt
class Blip2CaptionProcessor():
    """Caption normalizer for BLIP-2 style training.

    Lower-cases the caption, replaces selected punctuation with spaces,
    collapses whitespace, truncates to ``max_words`` words, and prepends
    ``prompt``.
    """
    def __init__(self, prompt='', max_words=50):
        self.prompt = prompt
        self.max_words = max_words
    def __call__(self, caption):
        return self.prompt + self.pre_caption(caption)
    def pre_caption(self, caption):
        # Strip selected punctuation, then collapse runs of whitespace.
        text = re.sub('([.!\\"()*#:;~])', ' ', caption.lower())
        text = re.sub('\\s{2,}', ' ', text)
        text = text.rstrip('\n').strip(' ')
        # Enforce the word budget.
        words = text.split(' ')
        if len(words) > self.max_words:
            text = ' '.join(words[:self.max_words])
        return text
# NOTE(review): the eight lines below look like truncated decorators — most
# likely '@patch(...)' calls that lost their '@patch' prefix during
# extraction; as written they are bare tuples. Confirm against the original.
('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
# NOTE(review): likely '@pytest.mark.parametrize' with its prefix stripped.
.parametrize('dataset', ['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
    """A custom `classes` argument must override the dataset's default CLASSES."""
    dataset_class = DATASETS.get(dataset)
    if (dataset in ['CocoDataset', 'CityscapesDataset']):
        # COCO-style datasets touch their annotation backend; stub it out.
        dataset_class.coco = MagicMock()
        dataset_class.cat_ids = MagicMock()
    original_classes = dataset_class.CLASSES
    # Case 1: classes given as a tuple of names.
    custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=('bus', 'car'), test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
    assert (custom_dataset.CLASSES != original_classes)
    assert (custom_dataset.CLASSES == ('bus', 'car'))
    print(custom_dataset)
    # Case 2: classes given as a list of names.
    custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=['bus', 'car'], test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
    assert (custom_dataset.CLASSES != original_classes)
    assert (custom_dataset.CLASSES == ['bus', 'car'])
    print(custom_dataset)
    # Case 3: a name absent from the defaults still overrides.
    custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=['foo'], test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
    assert (custom_dataset.CLASSES != original_classes)
    assert (custom_dataset.CLASSES == ['foo'])
    print(custom_dataset)
    # Case 4: classes=None keeps the dataset's default CLASSES.
    custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=None, test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
    assert (custom_dataset.CLASSES == original_classes)
    print(custom_dataset)
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        # NOTE(review): 'tmpdir + "classes.txt"' has no path separator, so the
        # file is created NEXT TO the temp dir (e.g. '/tmp/xyzclasses.txt')
        # rather than inside it — probably meant os.path.join; confirm upstream.
        path = (tmpdir + 'classes.txt')
        with open(path, 'w') as f:
            f.write('bus\ncar\n')
        # Case 5: classes may be a path to a newline-separated file of names.
        custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=path, test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
        assert (custom_dataset.CLASSES != original_classes)
        assert (custom_dataset.CLASSES == ['bus', 'car'])
        print(custom_dataset)
def row_csv2dict(csv_file):
    """Read a CSV file and map each row's (col0, col1) pair to its col2 value."""
    mapping = {}
    with open(csv_file) as handle:
        for row in csv.reader(handle, delimiter=','):
            key = (row[0], row[1])
            mapping[key] = row[2]
    return mapping
class SetDataset():
    """Few-shot episode dataset over the 47 DTD texture classes.

    Builds one sub-dataset (and shuffled DataLoader) per class so that
    indexing by class id yields a random batch of images from that class.
    """
    def __init__(self, batch_size, transform):
        self.sub_meta = {}
        # DTD has 47 texture categories.
        self.cl_list = range(47)
        for cl in self.cl_list:
            self.sub_meta[cl] = []
        # Bucket every image by its class label.
        # NOTE(review): this iterates the whole ImageFolder eagerly, loading
        # every image at construction time.
        d = ImageFolder(DTD_path)
        for (i, (data, label)) in enumerate(d):
            self.sub_meta[label].append(data)
        self.sub_dataloader = []
        # Each class gets its own shuffled, main-process DataLoader.
        sub_data_loader_params = dict(batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=False)
        for cl in self.cl_list:
            sub_dataset = SubDataset(self.sub_meta[cl], cl, transform=transform)
            self.sub_dataloader.append(torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params))
    def __getitem__(self, i):
        # One random (shuffled) batch from class i.
        return next(iter(self.sub_dataloader[i]))
    def __len__(self):
        return len(self.sub_dataloader)
def test():
    """Smoke-test PNASNetB: build the network and run one random CIFAR-sized input."""
    net = PNASNetB()
    print(net)
    # Single 3x32x32 input (CIFAR-10 image shape).
    inp = Variable(torch.randn(1, 3, 32, 32))
    out = net(inp)
    print(out)
def generate_labels(img_id, detail, out_dir):
    """Convert one image's raw segmentation mask into an indexed PNG label map.

    Remaps raw mask values through the module-level ``_mapping``/``_key``
    tables, saves the result under ``out_dir`` with the image's name
    ('jpg' replaced by 'png'), and returns the basename without extension.
    """
    def _class_to_index(mask, _mapping, _key):
        # Every value present in the mask must be a known class id.
        values = np.unique(mask)
        for value in values:
            assert value in _mapping
        # Map each raw value to its position in _mapping, then through _key.
        index = np.digitize(mask.ravel(), _mapping, right=True)
        return _key[index].reshape(mask.shape)

    remapped = _class_to_index(detail.getMask(img_id), _mapping=_mapping, _key=_key)
    mask = Image.fromarray(remapped)
    filename = img_id['file_name']
    mask.save(osp.join(out_dir, filename.replace('jpg', 'png')))
    return osp.splitext(osp.basename(filename))[0]
def _makedir_quiet(path):
    """Create *path* if missing, silently ignoring failures (best-effort,
    tolerates concurrent creation by parallel runs)."""
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception:
            pass


def load_args():
    """Parse command-line arguments for the Transformer baseline.

    When --outdir is given, also builds the nested output directory
    (outdir/transformer/<dataset>[/zero_diag]/<gckn-tag>/<hparam-tag>),
    sets args.save_logs accordingly, and rewrites args.outdir to the leaf.
    """
    parser = argparse.ArgumentParser(description='Transformer baseline', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='ZINC', help='name of dataset')
    parser.add_argument('--nb-heads', type=int, default=8)
    parser.add_argument('--nb-layers', type=int, default=10)
    parser.add_argument('--dim-hidden', type=int, default=64)
    parser.add_argument('--pos-enc', choices=[None, 'diffusion', 'pstep', 'adj'], default=None)
    parser.add_argument('--gckn-dim', type=int, default=32, help='dimension for laplacian PE')
    parser.add_argument('--gckn-path', type=int, default=8, help='path size for gckn')
    parser.add_argument('--gckn-sigma', type=float, default=0.6)
    parser.add_argument('--gckn-pooling', default='sum', choices=['mean', 'sum'])
    parser.add_argument('--gckn-agg', action='store_false', help='do not use aggregated GCKN features')
    parser.add_argument('--gckn-normalize', action='store_false', help='do not normalize gckn features')
    parser.add_argument('--p', type=int, default=1, help='p step random walk kernel')
    parser.add_argument('--beta', type=float, default=1.0, help='bandwidth for the diffusion kernel')
    parser.add_argument('--normalization', choices=[None, 'sym', 'rw'], default='sym', help='normalization for Laplacian')
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--epochs', type=int, default=500, help='number of epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--outdir', type=str, default='', help='output path')
    parser.add_argument('--warmup', type=int, default=2000)
    parser.add_argument('--layer-norm', action='store_true', help='use layer norm instead of batch norm')
    parser.add_argument('--zero-diag', action='store_true', help='zero diagonal for PE matrix')
    args = parser.parse_args()
    args.use_cuda = torch.cuda.is_available()
    # Batch norm and layer norm are mutually exclusive; --layer-norm flips to LN.
    args.batch_norm = not args.layer_norm
    args.save_logs = False
    if args.outdir != '':
        args.save_logs = True
        outdir = args.outdir
        _makedir_quiet(outdir)
        outdir = outdir + '/transformer'
        _makedir_quiet(outdir)
        outdir = outdir + '/{}'.format(args.dataset)
        _makedir_quiet(outdir)
        if args.zero_diag:
            outdir = outdir + '/zero_diag'
            _makedir_quiet(outdir)
        lapdir = 'gckn_{}_{}_{}_{}_{}_{}'.format(args.gckn_path, args.gckn_dim, args.gckn_sigma, args.gckn_pooling, args.gckn_agg, args.gckn_normalize)
        outdir = outdir + '/{}'.format(lapdir)
        _makedir_quiet(outdir)
        bn = 'BN' if args.batch_norm else 'LN'
        outdir = outdir + '/{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(args.lr, args.nb_layers, args.nb_heads, args.dim_hidden, bn, args.pos_enc, args.normalization, args.p, args.beta)
        _makedir_quiet(outdir)
        args.outdir = outdir
    return args
class XceptionBlock(nn.Module):
    """Three stacked separable convolutions with a configurable skip path.

    channel_list is (in, mid1, mid2, out). skip_connection_type selects the
    residual branch: 'conv' (1x1 projection + norm), 'sum' (identity), or
    'none'. With low_feat=True, forward also returns the second conv's
    activation as a low-level feature map.
    """

    def __init__(self, channel_list, stride=1, dilation=1, skip_connection_type='conv', relu_first=True, low_feat=False, norm_layer=nn.BatchNorm2d):
        super().__init__()
        assert len(channel_list) == 4
        self.skip_connection_type = skip_connection_type
        self.relu_first = relu_first
        self.low_feat = low_feat
        in_ch, mid1, mid2, out_ch = channel_list
        if skip_connection_type == 'conv':
            # 1x1 projection so the shortcut matches the residual's shape.
            self.conv = nn.Conv2d(in_ch, out_ch, 1, stride=stride, bias=False)
            self.bn = norm_layer(out_ch)
        self.sep_conv1 = SeparableConv2d(in_ch, mid1, dilation=dilation, relu_first=relu_first, norm_layer=norm_layer)
        self.sep_conv2 = SeparableConv2d(mid1, mid2, dilation=dilation, relu_first=relu_first, norm_layer=norm_layer)
        # Only the last conv carries the block's stride.
        self.sep_conv3 = SeparableConv2d(mid2, out_ch, dilation=dilation, relu_first=relu_first, stride=stride, norm_layer=norm_layer)
        self.last_inp_channels = out_ch

    def forward(self, inputs):
        sc1 = self.sep_conv1(inputs)
        sc2 = self.sep_conv2(sc1)
        residual = self.sep_conv3(sc2)
        skip = self.skip_connection_type
        if skip == 'conv':
            outputs = residual + self.bn(self.conv(inputs))
        elif skip == 'sum':
            outputs = residual + inputs
        elif skip == 'none':
            outputs = residual
        else:
            raise ValueError('Unsupported skip connection type.')
        return (outputs, sc2) if self.low_feat else outputs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.