repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
sharpDARTS | sharpDARTS-master/cnn/genotypes.py | from collections import namedtuple
# Record describing a discovered architecture: the (op, input) pairs of the
# normal and reduction cells, which node outputs each cell concatenates, and
# a 'layout' tag (every constant in this file uses layout='cell').
Genotype = namedtuple(
    'Genotype',
    ['normal', 'normal_concat', 'reduce', 'reduce_concat', 'layout'],
)
# Primitives for the dilation, sep_conv, flood, and choke 3x3 only search space
# Candidate ops for the dilation, sep_conv, flood, and choke 3x3-only search
# space.  List order is significant: index i presumably selects column i of
# the 9-column alphas tensors logged later in this file -- confirm against
# the search code that consumes this list before reordering.
# Disabled candidates from earlier experiments: avg_pool_3x3, sep_conv_5x5,
# dil_conv_5x5, nor_conv_3x3/5x5/7x7.
PRIMITIVES = [
    'none',                 # zero op (no connection)
    'max_pool_3x3',
    'skip_connect',         # identity
    'sep_conv_3x3',
    'dil_conv_3x3',
    'flood_conv_3x3',
    'dil_flood_conv_3x3',
    'choke_conv_3x3',
    'dil_choke_conv_3x3',
]
# Larger "sharper" search space.  Order is significant for the same reason as
# PRIMITIVES above (assumed to index alphas columns -- confirm in search code).
# Disabled candidates: nor_conv_3x3/5x5/7x7, choke_conv_3x3, dil_choke_conv_3x3.
# TODO(ahundt) sharpsepconv doesn't correctly support dil_flood_conv_5x5: its
# padding is not sufficient, e.g. a [16, 16, 32, 32] input at stride 1 came
# back as [16, 16, 28, 28], so that op stays disabled here.
SHARPER_PRIMITIVES = [
    'none',                 # zero op (no connection)
    'max_pool_3x3',
    'avg_pool_3x3',
    'skip_connect',         # identity
    'sep_conv_3x3',
    'sep_conv_5x5',
    'sep_conv_7x7',
    'dil_conv_3x3',
    'dil_conv_5x5',
    'flood_conv_3x3',
    'flood_conv_5x5',
    'dil_flood_conv_3x3',
]
# Primitives for the original darts search space
# Candidate ops for the original DARTS search space (Liu et al.).
# Order is significant: index i presumably selects column i of the 8-column
# alphas tensors logged later in this file -- confirm before reordering.
DARTS_PRIMITIVES = (
    'none max_pool_3x3 avg_pool_3x3 skip_connect '
    'sep_conv_3x3 sep_conv_5x5 dil_conv_3x3 dil_conv_5x5'
).split()
# Primitives for the multichannelnet search space
# Candidate ops for the multichannelnet search space; just two block types
# (presumably class names resolved in operations.py -- confirm in the loader).
MULTICHANNELNET_PRIMITIVES = [
    'ResizablePool',
    'SharpSepConv',
]
# NOTE(review): presumably the NASNet-A cell (Zoph et al.) hand-transcribed
# as a Genotype -- verify the (op, input_index) pairs against the paper or
# the upstream DARTS repo before relying on them.  Ten pairs feeding a
# five-node concat suggests two pairs per intermediate node; confirm in the
# model-building code that consumes Genotype.
NASNet = Genotype(
normal = [
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat = [2, 3, 4, 5, 6],
reduce = [
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat = [4, 5, 6],
layout='cell',
)
# NOTE(review): presumably the AmoebaNet-A cell (Real et al.) hand-transcribed
# as a Genotype -- verify the (op, input_index) pairs against the paper or the
# upstream DARTS repo before relying on them.
AmoebaNet = Genotype(
normal = [
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat = [4, 5, 6],
reduce = [
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat = [3, 4, 6],
layout='cell',
)
'''
2019_01_13_23_24_09 args = Namespace(arch_learning_rate=0.0003, arch_weight_decay=0.001, batch_size=32, cutout=False, cutout_length=16, data='../data', drop_path_prob=0.3, epochs=60, gpu=0, grad_clip=5, init_channels=16, layers=8, learning_rate=0.025, learning_rate_min=0.001, model_path='saved_models', momentum=0.9, random_eraser=False, report_freq=50, save='search-choke_flood_45b2033_branch_merge_mixed_aux-20190113-232409', seed=2, train_portion=0.5, unrolled=False, weight_decay=0.0003)
2019_01_13_23_24_13 param size = 5.867642MB
2019_01_15_18_59_41 epoch, 56, train_acc, 99.852000, valid_acc, 91.428000, train_loss, 0.013050, valid_loss, 0.289964, lr, 1.262229e-03, best_epoch, 56, best_valid_acc, 91.428000
2019_01_15_18_59_41 genotype = Genotype(normal=[('choke_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 1), ('skip_connect', 0)
, ('skip_connect', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect
', 2), ('flood_conv_3x3', 0)], reduce_concat=range(2, 6))
2019_01_15_18_59_41 alphas_normal = tensor([[0.1227, 0.0541, 0.1552, 0.1318, 0.0737, 0.1543, 0.0597, 0.1619, 0.0866],
[0.5278, 0.0389, 0.0549, 0.0694, 0.0384, 0.0499, 0.0367, 0.1205, 0.0634],
[0.3518, 0.0780, 0.2044, 0.0623, 0.0452, 0.0943, 0.0437, 0.0722, 0.0482],
[0.7436, 0.0338, 0.0540, 0.0221, 0.0204, 0.0241, 0.0178, 0.0555, 0.0289],
[0.8558, 0.0148, 0.0292, 0.0198, 0.0113, 0.0152, 0.0116, 0.0251, 0.0172],
[0.6509, 0.0425, 0.0983, 0.0329, 0.0262, 0.0426, 0.0209, 0.0483, 0.0376],
[0.8584, 0.0184, 0.0246, 0.0181, 0.0129, 0.0195, 0.0110, 0.0209, 0.0161],
[0.8986, 0.0105, 0.0198, 0.0105, 0.0103, 0.0117, 0.0098, 0.0172, 0.0116],
[0.9225, 0.0084, 0.0135, 0.0082, 0.0078, 0.0088, 0.0069, 0.0134, 0.0105],
[0.6484, 0.0409, 0.1115, 0.0283, 0.0309, 0.0314, 0.0253, 0.0414, 0.0421],
[0.8666, 0.0169, 0.0248, 0.0194, 0.0132, 0.0142, 0.0112, 0.0217, 0.0121],
[0.9063, 0.0106, 0.0195, 0.0108, 0.0088, 0.0098, 0.0083, 0.0152, 0.0108],
[0.9359, 0.0070, 0.0109, 0.0083, 0.0070, 0.0072, 0.0068, 0.0089, 0.0080],
[0.9238, 0.0069, 0.0132, 0.0127, 0.0069, 0.0087, 0.0072, 0.0122, 0.0084]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_18_59_41 alphas_reduce = tensor([[0.0785, 0.1742, 0.1547, 0.0916, 0.0640, 0.1167, 0.0835, 0.1516, 0.0852],
[0.1119, 0.1278, 0.1660, 0.0930, 0.0867, 0.1177, 0.0962, 0.1181, 0.0825],
[0.0724, 0.2795, 0.0808, 0.0926, 0.0741, 0.1522, 0.0712, 0.0876, 0.0895],
[0.1023, 0.1814, 0.1192, 0.0859, 0.0959, 0.1292, 0.0849, 0.1003, 0.1009],
[0.1553, 0.1172, 0.2175, 0.0948, 0.0801, 0.0805, 0.0803, 0.0903, 0.0841],
[0.0763, 0.2053, 0.1521, 0.0969, 0.0904, 0.1321, 0.0729, 0.1067, 0.0674],
[0.1005, 0.1378, 0.1372, 0.0933, 0.0854, 0.1270, 0.1148, 0.1048, 0.0992],
[0.1385, 0.1063, 0.1778, 0.1054, 0.1055, 0.0974, 0.1002, 0.0795, 0.0894],
[0.1626, 0.0849, 0.1274, 0.1046, 0.0919, 0.1014, 0.1248, 0.1103, 0.0921],
[0.0761, 0.1416, 0.1477, 0.1104, 0.0809, 0.1483, 0.1008, 0.1024, 0.0917],
[0.1175, 0.1253, 0.1357, 0.1014, 0.0903, 0.1062, 0.1185, 0.1115, 0.0935],
[0.1564, 0.1003, 0.1710, 0.1140, 0.0702, 0.1052, 0.0877, 0.0980, 0.0972],
[0.2252, 0.0806, 0.1401, 0.1061, 0.0844, 0.0950, 0.0902, 0.1027, 0.0756],
[0.2697, 0.0890, 0.1412, 0.0795, 0.0863, 0.0738, 0.0654, 0.1088, 0.0863]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_21_19_12 epoch, 59, train_acc, 99.900000, valid_acc, 91.232000, train_loss, 0.013466, valid_loss, 0.282533, lr, 1.016446e-03, best_epoch, 56, best_valid_acc, 91.428000
100%|| 60/60 [45:54:58<00:00, 2783.23s/it]
2019_01_15_21_19_12 genotype = Genotype(normal=[('choke_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 1), ('skip_connect', 0), ('skip_connect', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('flood_conv_3x3', 0)], reduce_concat=range(2, 6))
2019_01_15_21_19_12 Search for Model Complete! Save dir: search-choke_flood_45b2033_branch_merge_mixed_aux-20190113-232409
2019_01_21_23_21_59 gpu device = 0
2019_01_21_23_21_59 args = Namespace(arch='DARTS', autoaugment=True, auxiliary=True, auxiliary_weight=0.4, batch_size=64, cutout=True, cutout_length=16, data='../data', dataset='cifar10', drop_path
_prob=0.2, epochs=1000, gpu=0, grad_clip=5, init_channels=36, layers=20, learning_rate=0.025, learning_rate_min=1e-07, mixed_auxiliary=False, model_path='saved_models', momentum=0.9, ops='OPS', opt
imizer='sgd', partial=0.125, primitives='PRIMITIVES', random_eraser=False, report_freq=50, save='eval-20190121-232159-AUTOAUGMENT_V2_KEY_PADDING_d5dda02_BUGFIX-cifar10-DARTS', seed=4, warm_restarts
=20, weight_decay=0.0003)
2019_01_21_23_22_02 param size = 3.529270MB
2019_01_25_20_26_22 best_epoch, 988, best_train_acc, 95.852000, best_valid_acc, 97.890000, best_train_loss, 0.196667, best_valid_loss, 0.076396, lr, 8.881592e-06, best_epoch, 988, best_valid_acc, 97.890000 cifar10.1_valid_acc, 93.750000, cifar10.1_valid_loss, 0.218554
2019_01_25_20_26_22 Training of Final Model Complete! Save dir: eval-20190121-232159-AUTOAUGMENT_V2_KEY_PADDING_d5dda02_BUGFIX-cifar10-DARTS
'''
# Genotype from the choke/flood search run logged in the docstring above
# (search-choke_flood_45b2033..., best_valid_acc 91.428 at search epoch 56);
# the tuple data matches the 2019_01_15_21_19_12 log line verbatim.
CHOKE_FLOOD_DIL_IS_SEP_CONV = Genotype(normal=[('choke_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 1), ('skip_connect', 0), ('skip_connect', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('flood_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
# Public alias for the architecture above.
SHARP_DARTS = CHOKE_FLOOD_DIL_IS_SEP_CONV
# NOTE(review): presumably the v1/v2 genotypes from the original DARTS release
# (Liu et al.) -- verify against the upstream repo before editing the tuples.
DARTS_V1 = Genotype(normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5], layout='cell')
DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5], layout='cell')
# Default DARTS architecture (the name resolved by `--arch DARTS`, per the
# training logs in this file).
DARTS = DARTS_V2
"""
Save dir: search-SEARCH_REPRODUCTION_ATTEMPT_KEY_PADDING_56b8fe9_BUGFIX_Dil_is_SepConv-20190113-231854
2019_01_15_02_35_16 epoch, 50, train_acc, 99.592000, valid_acc, 90.892000, train_loss, 0.019530, valid_loss, 0.330776, lr, 2.607695e-03, best_epoch, 50, best_valid_acc, 90.892000
2019_01_15_02_35_16 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 2), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('se
p_conv_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 2), ('avg_pool_3x3', 0), ('skip_connect', 2), ('avg_pool_3x3', 0), ('skip_connect', 2),
('skip_connect', 3)], reduce_concat=range(2, 6))
2019_01_15_02_35_16 alphas_normal = tensor([[0.1797, 0.0438, 0.0386, 0.0838, 0.3695, 0.1124, 0.1174, 0.0549],
[0.4426, 0.0311, 0.0249, 0.0465, 0.1600, 0.1146, 0.1076, 0.0728],
[0.5580, 0.0543, 0.0353, 0.0886, 0.1094, 0.0682, 0.0497, 0.0365],
[0.6387, 0.0380, 0.0251, 0.0598, 0.0863, 0.0605, 0.0545, 0.0372],
[0.7500, 0.0194, 0.0159, 0.0430, 0.0884, 0.0320, 0.0283, 0.0231],
[0.6430, 0.0518, 0.0424, 0.0960, 0.0508, 0.0493, 0.0349, 0.0317],
[0.7367, 0.0274, 0.0203, 0.0383, 0.0779, 0.0358, 0.0367, 0.0268],
[0.8526, 0.0150, 0.0127, 0.0276, 0.0364, 0.0201, 0.0166, 0.0190],
[0.9045, 0.0083, 0.0082, 0.0125, 0.0259, 0.0170, 0.0121, 0.0114],
[0.7205, 0.0538, 0.0398, 0.0811, 0.0352, 0.0233, 0.0218, 0.0244],
[0.6869, 0.0426, 0.0296, 0.0604, 0.0792, 0.0390, 0.0337, 0.0285],
[0.9148, 0.0113, 0.0098, 0.0176, 0.0143, 0.0106, 0.0108, 0.0109],
[0.9354, 0.0068, 0.0069, 0.0102, 0.0117, 0.0100, 0.0098, 0.0090],
[0.9294, 0.0068, 0.0073, 0.0112, 0.0125, 0.0120, 0.0106, 0.0101]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_02_35_16 alphas_reduce = tensor([[0.0757, 0.2186, 0.2142, 0.1140, 0.1221, 0.1052, 0.0843, 0.0659],
[0.1503, 0.1184, 0.1191, 0.1474, 0.1392, 0.1070, 0.1051, 0.1135],
[0.0742, 0.2551, 0.2631, 0.0914, 0.0927, 0.0778, 0.0800, 0.0656],
[0.1337, 0.1702, 0.1886, 0.1128, 0.0885, 0.1077, 0.1070, 0.0915],
[0.1277, 0.0884, 0.1102, 0.3171, 0.1124, 0.0774, 0.0781, 0.0886],
[0.0838, 0.1910, 0.2192, 0.1022, 0.0987, 0.1073, 0.1029, 0.0949],
[0.1147, 0.1692, 0.2006, 0.1156, 0.1039, 0.1055, 0.1026, 0.0879],
[0.1195, 0.0778, 0.1036, 0.2572, 0.1311, 0.0998, 0.0992, 0.1117],
[0.2289, 0.0652, 0.0827, 0.2181, 0.1189, 0.0921, 0.0883, 0.1059],
[0.0807, 0.1987, 0.2584, 0.1142, 0.1083, 0.0770, 0.0872, 0.0754],
[0.1019, 0.1489, 0.1791, 0.2150, 0.0844, 0.0986, 0.0964, 0.0757],
[0.1322, 0.0671, 0.1000, 0.3702, 0.0891, 0.0758, 0.0937, 0.0718],
[0.2760, 0.0609, 0.0833, 0.2941, 0.0900, 0.0576, 0.0620, 0.0761],
[0.3477, 0.0537, 0.0752, 0.1774, 0.1061, 0.0828, 0.0791, 0.0781]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_06_53_06 alphas_normal = tensor([[0.2425, 0.0422, 0.0396, 0.0830, 0.3289, 0.1043, 0.1095, 0.0501],
[0.5780, 0.0277, 0.0235, 0.0404, 0.1082, 0.0867, 0.0774, 0.0581],
[0.6560, 0.0415, 0.0281, 0.0678, 0.0829, 0.0519, 0.0427, 0.0292],
[0.7764, 0.0247, 0.0177, 0.0391, 0.0483, 0.0376, 0.0328, 0.0235],
[0.8400, 0.0133, 0.0113, 0.0259, 0.0547, 0.0203, 0.0193, 0.0153],
[0.7062, 0.0414, 0.0372, 0.0807, 0.0400, 0.0407, 0.0283, 0.0255],
[0.8420, 0.0182, 0.0142, 0.0242, 0.0447, 0.0211, 0.0202, 0.0154],
[0.8965, 0.0113, 0.0101, 0.0180, 0.0245, 0.0140, 0.0120, 0.0136],
[0.9272, 0.0072, 0.0072, 0.0121, 0.0173, 0.0115, 0.0092, 0.0083],
[0.7692, 0.0434, 0.0354, 0.0685, 0.0301, 0.0175, 0.0168, 0.0192],
[0.7816, 0.0323, 0.0235, 0.0458, 0.0513, 0.0253, 0.0220, 0.0183],
[0.9317, 0.0093, 0.0083, 0.0133, 0.0112, 0.0086, 0.0087, 0.0088],
[0.9445, 0.0063, 0.0066, 0.0103, 0.0095, 0.0078, 0.0082, 0.0068],
[0.9430, 0.0064, 0.0069, 0.0115, 0.0084, 0.0087, 0.0076, 0.0076]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_06_53_06 alphas_reduce = tensor([[0.0770, 0.2144, 0.2315, 0.1136, 0.1204, 0.0999, 0.0813, 0.0619],
[0.1485, 0.1163, 0.1218, 0.1499, 0.1427, 0.1029, 0.1034, 0.1145],
[0.0747, 0.2417, 0.2707, 0.0923, 0.0982, 0.0748, 0.0807, 0.0669],
[0.1287, 0.1730, 0.2038, 0.1092, 0.0911, 0.0996, 0.1101, 0.0845],
[0.1265, 0.0857, 0.1139, 0.3273, 0.1120, 0.0762, 0.0742, 0.0841],
[0.0849, 0.1803, 0.2166, 0.1040, 0.0994, 0.1073, 0.1037, 0.1039],
[0.1143, 0.1651, 0.2031, 0.1176, 0.1033, 0.1082, 0.1031, 0.0854],
[0.1198, 0.0768, 0.1047, 0.2503, 0.1362, 0.1031, 0.1022, 0.1070],
[0.2233, 0.0662, 0.0858, 0.2163, 0.1215, 0.0968, 0.0879, 0.1023],
[0.0837, 0.1847, 0.2635, 0.1174, 0.1096, 0.0781, 0.0877, 0.0753],
[0.1021, 0.1434, 0.1805, 0.2217, 0.0802, 0.0952, 0.1003, 0.0767],
[0.1314, 0.0662, 0.0994, 0.3936, 0.0815, 0.0713, 0.0887, 0.0680],
[0.2715, 0.0612, 0.0849, 0.3124, 0.0838, 0.0562, 0.0598, 0.0703],
[0.3690, 0.0554, 0.0788, 0.1813, 0.0963, 0.0740, 0.0747, 0.0705]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_01_15_07_25_08 epoch, 59, train_acc, 99.752000, valid_acc, 90.732000, train_loss, 0.018408, valid_loss, 0.312794, lr, 1.016446e-03, best_epoch, 50, best_valid_acc, 90.892000
100%|| 60/60 [32:06:09<00:00, 1929.41s/it]
2019_01_15_07_25_08 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 2), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1)], normal_concat=range(2, 6), reduce=[('avg_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 2), ('avg_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 3), ('skip_connect', 2), ('skip_connect', 3)], reduce_concat=range(2, 6))
2019_01_15_07_25_08 Search for Model Complete! Save dir: search-SEARCH_REPRODUCTION_ATTEMPT_KEY_PADDING_56b8fe9_BUGFIX_Dil_is_SepConv-20190113-231854
± export CUDA_VISIBLE_DEVICES="1" && python3 train.py --autoaugment --auxiliary --cutout --batch_size 48 --epochs 1000 --save REPRODUCTION_ATTEMPT_KEY_PADDING_`git rev-parse --short HEAD`_AUTOAUGME
NT --arch DARTS_PRIMITIVES_DIL_IS_SEPCONV
Experiment dir : eval-20190121-225901-REPRODUCTION_ATTEMPT_KEY_PADDING_2a88102_AUTOAUGMENT-cifar10-DARTS_PRIMITIVES_DIL_IS_SEPCONV
2019_01_21_22_59_01 gpu device = 0
2019_01_21_22_59_01 args = Namespace(arch='DARTS_PRIMITIVES_DIL_IS_SEPCONV', autoaugment=True, auxiliary=True, auxiliary_weight=0.4, batch_size=48, cutout=True, cutout_length=16, data='../data', da
taset='cifar10', drop_path_prob=0.2, epochs=1000, gpu=0, grad_clip=5, init_channels=36, layers=20, learning_rate=0.025, learning_rate_min=1e-07, mixed_auxiliary=False, model_path='saved_models', mo
mentum=0.9, ops='OPS', optimizer='sgd', partial=0.125, primitives='PRIMITIVES', random_eraser=False, report_freq=50, save='eval-20190121-225901-REPRODUCTION_ATTEMPT_KEY_PADDING_2a88102_AUTOAUGMENT-
cifar10-DARTS_PRIMITIVES_DIL_IS_SEPCONV', seed=0, warm_restarts=20, weight_decay=0.0003)
loading op dict: operations.OPS
loading primitives: genotypes.PRIMITIVES
Validation step: 41, loss: 0.24891, top 1: 93.40 top 5: 99.85 progress: 100%|| 42/42 [00:03<00:00, 12.56it/s]
2019_01_27_09_14_32 best_epoch, 940, best_train_acc, 94.665997, best_valid_acc, 97.519998, best_train_loss, 0.235156, best_valid_loss, 0.087909, lr, 2.214094e-04, best_epoch, 940, best_valid_acc, 97.519998 cifar10.1_valid_acc, 93.399997, cifar10.1_valid_loss, 0.248915
2019_01_27_09_14_32 Training of Final Model Complete! Save dir: eval-20190121-225901-REPRODUCTION_ATTEMPT_KEY_PADDING_2a88102_AUTOAUGMENT-cifar10-DARTS_PRIMITIVES_DIL_IS_SEPCONV
"""
# Genotype from the DARTS-primitives reproduction search logged in the
# docstring above.  The tuple data matches the epoch-50 best genotype
# (2019_01_15_02_35_16, best_valid_acc 90.892), not the final epoch-59 one.
DARTS_PRIMITIVES_DIL_IS_SEPCONV = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 2), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 2), ('avg_pool_3x3', 0), ('skip_connect', 2), ('avg_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 3)], reduce_concat=range(2, 6), layout='cell')
# Public alias for the architecture above.
SHARPSEPCONV_DARTS = DARTS_PRIMITIVES_DIL_IS_SEPCONV
"""
2019_02_20_03_15_15 epoch, 120, train_acc, 92.372000, valid_acc, 85.140000, train_loss, 0.221692, valid_loss, 0.451555, lr, 1.000000e-04, best_epoch, 106, best_valid_acc, 85.488000
Overview ***** best_epoch: 106 best_valid_acc: 85.49 ***** Progress: 100%|| 120/120 [32:46:14<00:00, 1050.83s/it]
2019_02_20_03_15_16 genotype =
2019_02_20_03_15_16 Search for Model Complete! Save dir: search-20190218-182855-MULTI_CHANNEL_SEARCH_2cdd546_search_weighting_max_w-cifar10-PRIMITIVES-OPS-0
"""
"""
export CUDA_VISIBLE_DEVICES="1" && python3 train_search.py --dataset cifar10 --batch_size 48 --save MULTI_CHANNEL_SEARCH_`git rev-parse --short HEAD`_search_weighting_generate_genotype --init_channels 36 --epochs 120 --cutout --autoaugment --seed 30 --weighting_algorithm max_w --multi_channel --load_genotype MULTI_CHANNEL_MAX_W
"""
# MULTI_CHANNEL_MAX_W: genotype saved from a multi-channel search run with
# --weighting_algorithm max_w (see the command in the docstring above).
# layout='raw_weights' means normal/reduce store the raw architecture weight
# values rather than the discrete (op_name, input_index) pairs used by the
# layout='cell' genotypes in this file; the *_concat lists are empty here.
MULTI_CHANNEL_MAX_W = Genotype(normal=[[[[0.031705040484666824, 0.03185523673892021], [0.03177893906831741, 0.03134448081254959], [0.03166716545820236, 0.031712375581264496], [0.029017647728323936, 0.03185588866472244]], [[0.029935121536254883, 0.03062211535871029], [0.030779147520661354, 0.03137855976819992], [0.029251324012875557, 0.0314946286380291], [0.03025045432150364, 0.03166542947292328]], [[0.03056972473859787, 0.03078850358724594], [0.03152700886130333, 0.03200467303395271], [0.03034617006778717, 0.030499886721372604], [0.03244936838746071, 0.03154132887721062]], [[0.03180636093020439, 0.03089098632335663], [0.031697362661361694, 0.03184359520673752], [0.03322812542319298, 0.03393062949180603], [0.030843427404761314, 0.029719246551394463]]], [[[0.03128419816493988, 0.031194495037198067], [0.03058856725692749, 0.03147657588124275], [0.031216900795698166, 0.03170192241668701], [0.031173493713140488, 0.03137543052434921]], [[0.031009048223495483, 0.031437475234270096], [0.03093050792813301, 0.032270822674036026], [0.0310247540473938, 0.031824786216020584], [0.03070482611656189, 0.031772714108228683]], [[0.03092092275619507, 0.03189568594098091], [0.031120117753744125, 0.03147495165467262], [0.03019261360168457, 0.03115837462246418], [0.030209895223379135, 0.032053761184215546]], [[0.03122881054878235, 0.032255493104457855], [0.03103666380047798, 0.03217817842960358], [0.03211415931582451, 0.03217983618378639], [0.031776174902915955, 0.027217810973525047]]], [[[0.031139764934778214, 0.031343117356300354], [0.031348712742328644, 0.0311704333871603], [0.031114375218749046, 0.031393397599458694], [0.03135799989104271, 0.031376857310533524]], [[0.031363870948553085, 0.03133983165025711], [0.03131990507245064, 0.031386278569698334], [0.03129395470023155, 0.03141247481107712], [0.03139813244342804, 0.03134961426258087]], [[0.03136737272143364, 0.03135362267494202], [0.031361229717731476, 0.03139583021402359], [0.03121591918170452, 0.03143315017223358], 
[0.03138110414147377, 0.031383417546749115]], [[0.03139226883649826, 0.03140714764595032], [0.030216185376048088, 0.031235458329319954], [0.0313195176422596, 0.03124529868364334], [0.031390056014060974, 0.02979360893368721]]], [[[0.031185977160930634, 0.0347476452589035], [0.031276751309633255, 0.03181716054677963], [0.03155045956373215, 0.03125309571623802], [0.03140901401638985, 0.030443238094449043]], [[0.03139765188097954, 0.031110990792512894], [0.031140387058258057, 0.0347706563770771], [0.031181395053863525, 0.031136374920606613], [0.03144082427024841, 0.030691733583807945]], [[0.031139248982071877, 0.030506059527397156], [0.031309712678194046, 0.030356379225850105], [0.031275711953639984, 0.03479333594441414], [0.031114540994167328, 0.028683168813586235]], [[0.031051797792315483, 0.029256442561745644], [0.03109460510313511, 0.02885911427438259], [0.03137160465121269, 0.026584701612591743], [0.03130620718002319, 0.0347440205514431]]]], normal_concat=[], reduce=[[[[0.03184084966778755, 0.03022356890141964], [0.031105801463127136, 0.031179415062069893], [0.030835246667265892, 0.03152412548661232], [0.030335750430822372, 0.03146739676594734]], [[0.0312722884118557, 0.030680980533361435], [0.03111124038696289, 0.03419611230492592], [0.030018579214811325, 0.0313275121152401], [0.030679989606142044, 0.030776049941778183]], [[0.03137199953198433, 0.03137969598174095], [0.03083980642259121, 0.03173601254820824], [0.0307187270373106, 0.03199375793337822], [0.03031729720532894, 0.03262593224644661]], [[0.03139664605259895, 0.03198783099651337], [0.03134854882955551, 0.032299160957336426], [0.031395602971315384, 0.033140938729047775], [0.02979094348847866, 0.02908225916326046]]], [[[0.03108000010251999, 0.03168174996972084], [0.031100235879421234, 0.03115084394812584], [0.0312507227063179, 0.031540852040052414], [0.03107316978275776, 0.031749702990055084]], [[0.03112880326807499, 0.03155761584639549], [0.030918046832084656, 0.031701233237981796], [0.030843490734696388, 
0.03167719021439552], [0.030324121937155724, 0.0316300094127655]], [[0.031587373465299606, 0.03129081800580025], [0.031133320182561874, 0.03152334317564964], [0.03168809786438942, 0.03165533021092415], [0.03135747089982033, 0.03168513998389244]], [[0.031229818239808083, 0.03168436884880066], [0.031002137809991837, 0.031722329556941986], [0.03130635246634483, 0.031684760004282], [0.031463395804166794, 0.02757817879319191]]], [[[0.03080432116985321, 0.03146462142467499], [0.031355828046798706, 0.03139540180563927], [0.031218519434332848, 0.0312686488032341], [0.03146140277385712, 0.031395960599184036]], [[0.031333137303590775, 0.031254902482032776], [0.031280551105737686, 0.031448788940906525], [0.0313376784324646, 0.0313015952706337], [0.03138606250286102, 0.03128112107515335]], [[0.031144285574555397, 0.03141070157289505], [0.030991872772574425, 0.031274985522031784], [0.030961886048316956, 0.03148787468671799], [0.03144081309437752, 0.031142987310886383]], [[0.031290002167224884, 0.03111068159341812], [0.03080933541059494, 0.030690569430589676], [0.03141043335199356, 0.031086774542927742], [0.03126226365566254, 0.031495995819568634]]], [[[0.030837170779705048, 0.036189883947372437], [0.030746731907129288, 0.03082641214132309], [0.03060324676334858, 0.031165866181254387], [0.030544260516762733, 0.029803266748785973]], [[0.03094310127198696, 0.03066748008131981], [0.0308150053024292, 0.036842137575149536], [0.03100808709859848, 0.030431579798460007], [0.030625727027654648, 0.030208293348550797]], [[0.0308038592338562, 0.029830224812030792], [0.03025251068174839, 0.029800914227962494], [0.030794011428952217, 0.037474796175956726], [0.029639746993780136, 0.027296157553792]], [[0.03045455925166607, 0.02877660281956196], [0.030689792707562447, 0.028666401281952858], [0.030625801533460617, 0.027602000162005424], [0.030382489785552025, 0.04465189948678017]]]], reduce_concat=[], layout='raw_weights')
# Layer-name sequence for the max_w multi-channel network, from the input
# node 'Source' through per-layer ops to the final 'Linear' classifier.
MULTI_CHANNEL_MAX_W_PATH = ['Source', 'Conv3x3_3', 'BatchNorm_3', 'layer_0_stride_1_c_in_256_c_out_128_op_type_ResizablePool', 'layer_0_add_c_out_128_stride_1', 'layer_0_stride_2_c_in_128_c_out_256_op_type_ResizablePool', 'layer_0_add_c_out_256_stride_2', 'layer_1_stride_1_c_in_256_c_out_32_op_type_ResizablePool', 'layer_1_add_c_out_32_stride_1', 'layer_1_stride_2_c_in_32_c_out_256_op_type_ResizablePool', 'layer_1_add_c_out_256_stride_2', 'layer_2_stride_1_c_in_256_c_out_256_op_type_SharpSepConv', 'layer_2_add_c_out_256_stride_1', 'layer_2_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_2', 'layer_3_stride_1_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_1', 'layer_3_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_2', 'SharpSepConv256', 'add-SharpSep', 'global_pooling', 'Linear']
"""
± export CUDA_VISIBLE_DEVICES="0" && python3 train_search.py --dataset cifar10 --batch_size 48 --save MULTI_CHANNEL_SEARCH_`git rev-parse --short HEAD`_search_weighting_original_darts --init_channels 36 --epochs 120 --cutout --autoaugment --seed 30 --weighting_algorithm scalar --multi_channel
2019_02_24_04_38_04 Search for Model Complete! Save dir: search-20190223-003007-MULTI_CHANNEL_SEARCH_938225a_search_weighting_original_darts-cifar10-PRIMITIVES-OPS-0
2019_02_24_03_56_17 epoch, 117, train_acc, 89.999998, valid_acc, 85.579998, train_loss, 0.294225, valid_loss, 0.424499, lr, 1.289815e-04, best_epoch, 117, best_valid_acc, 85.579998
2019_02_24_03_56_17 genotype =
"""
# MULTI_CHANNEL_SCALAR: genotype saved from a multi-channel search run with
# --weighting_algorithm scalar (the "original darts" weighting; see the
# command in the docstring above). layout='raw_weights' means normal/reduce
# store raw architecture weight values, not (op_name, input_index) pairs,
# and the *_concat lists are empty.
MULTI_CHANNEL_SCALAR = Genotype(normal=[[[[0.08428546786308289, 0.019649818539619446], [0.029455358162522316, 0.026891281828284264], [0.027873601764440536, 0.026621485128998756], [0.027566319331526756, 0.027208974584937096]], [[0.02464308589696884, 0.02305542305111885], [0.03325537592172623, 0.02161835879087448], [0.036233533173799515, 0.023530790582299232], [0.04525946453213692, 0.02315031923353672]], [[0.05040294677019119, 0.02056518942117691], [0.030842378735542297, 0.024993691593408585], [0.04064971208572388, 0.014905016869306564], [0.07537227869033813, 0.0182951632887125]], [[0.02582853101193905, 0.020147651433944702], [0.027859963476657867, 0.021051088348031044], [0.06657709181308746, 0.017026707530021667], [0.03375856950879097, 0.011425447650253773]]], [[[0.03005879931151867, 0.035663411021232605], [0.025182029232382774, 0.04607615992426872], [0.038091227412223816, 0.038178831338882446], [0.024393698200583458, 0.029284125193953514]], [[0.025640638545155525, 0.03322795405983925], [0.02253263257443905, 0.037514571100473404], [0.0183589868247509, 0.033491283655166626], [0.015858527272939682, 0.04169301316142082]], [[0.02627536468207836, 0.030852915719151497], [0.027742547914385796, 0.05268079787492752], [0.05241338908672333, 0.03677228465676308], [0.03717765957117081, 0.04306963086128235]], [[0.023739686235785484, 0.02377346344292164], [0.018741942942142487, 0.030636483803391457], [0.016585536301136017, 0.033416468650102615], [0.016086628660559654, 0.0347893163561821]]], [[[0.03236594796180725, 0.0732831209897995], [0.05554317310452461, 0.03167788311839104], [0.015518044121563435, 0.03799673169851303], [0.021326560527086258, 0.08206182718276978]], [[0.03787698224186897, 0.026206783950328827], [0.01189108844846487, 0.06706617027521133], [0.010926412418484688, 0.04731278121471405], [0.010900018736720085, 0.0639415979385376]], [[0.028068145737051964, 0.02306116558611393], [0.009883608669042587, 0.022720031440258026], [0.010248321108520031, 0.07221531122922897], 
[0.011321073397994041, 0.028630713000893593]], [[0.01708007976412773, 0.013192839920520782], [0.008508067578077316, 0.02285042405128479], [0.01724369265139103, 0.02031189575791359], [0.012589368969202042, 0.056180018931627274]]], [[[0.04276231303811073, 0.21309605240821838], [0.022647246718406677, 0.01787652261555195], [0.0067022559233009815, 0.010822849348187447], [0.0047399611212313175, 0.016015738248825073]], [[0.028339920565485954, 0.00891443807631731], [0.004901951644569635, 0.2276517152786255], [0.01379478070884943, 0.007287896703928709], [0.0033110049553215504, 0.011687851510941982]], [[0.018152590841054916, 0.006466378923505545], [0.022874118760228157, 0.011997316963970661], [0.013169433921575546, 0.10174134373664856], [0.006852362770587206, 0.009393713437020779]], [[0.005789881572127342, 0.0029638162814080715], [0.0041101728565990925, 0.0031539236661046743], [0.0014213536633178592, 0.002380270743742585], [0.0011982121504843235, 0.1477825939655304]]]], normal_concat=[], reduce=[[[[0.028326164931058884, 0.032454293221235275], [0.02686143107712269, 0.029518621042370796], [0.03313954547047615, 0.04891181364655495], [0.025960393249988556, 0.042642317712306976]], [[0.018554436042904854, 0.0273482296615839], [0.02059074118733406, 0.03055424988269806], [0.023065906018018723, 0.026168793439865112], [0.017394432798027992, 0.029353225603699684]], [[0.029137184843420982, 0.04715714231133461], [0.02788364700973034, 0.03222033008933067], [0.05128740146756172, 0.02894807606935501], [0.027765892446041107, 0.04117625951766968]], [[0.020344581454992294, 0.05894557386636734], [0.021750222891569138, 0.027226511389017105], [0.022402610629796982, 0.04984541982412338], [0.025405606254935265, 0.027658965438604355]]], [[[0.025873463600873947, 0.05752668157219887], [0.018333958461880684, 0.049362268298864365], [0.015626557171344757, 0.03343683108687401], [0.015159770846366882, 0.037002503871917725]], [[0.028882542625069618, 0.05437920615077019], [0.024060335010290146, 
0.044413063675165176], [0.026867222040891647, 0.035240404307842255], [0.023217199370265007, 0.045194290578365326]], [[0.02042427659034729, 0.05703655630350113], [0.021850237622857094, 0.03791709989309311], [0.015018402598798275, 0.04467809945344925], [0.02511998824775219, 0.05425426736474037]], [[0.016492484137415886, 0.03154151141643524], [0.017121894285082817, 0.01935427449643612], [0.014856543391942978, 0.02986033819615841], [0.014744272455573082, 0.04515323415398598]]], [[[0.0597408190369606, 0.09758277982473373], [0.010528073646128178, 0.02998235821723938], [0.010792501270771027, 0.05588904768228531], [0.007236948702484369, 0.03156544268131256]], [[0.017808040603995323, 0.018275177106261253], [0.005358295980840921, 0.20611651241779327], [0.009535307064652443, 0.03866533190011978], [0.0031887770164757967, 0.01161785889416933]], [[0.006274309009313583, 0.01825445331633091], [0.013663525693118572, 0.016340306028723717], [0.007381833158433437, 0.17026008665561676], [0.0030986962374299765, 0.014190435409545898]], [[0.009256887249648571, 0.02237699367105961], [0.00712384469807148, 0.007060194853693247], [0.009489973075687885, 0.018868396058678627], [0.0026137656532227993, 0.05986303836107254]]], [[[0.0009627753752283752, 0.005614960100501776], [0.0009754917700774968, 0.0017817521002143621], [0.0009748890879563987, 0.0016846026992425323], [0.00192651420366019, 0.002251992467790842]], [[0.0012877885019406676, 0.0009805896552279592], [0.0016325361793860793, 0.052085988223552704], [0.0021779246162623167, 0.0018919521244242787], [0.000974901660811156, 0.0026733994018286467]], [[0.002875175792723894, 0.0009731581667438149], [0.0009763489360921085, 0.0015420051058754325], [0.0009765044087544084, 0.21534165740013123], [0.0009751239558681846, 0.002350056543946266]], [[0.015049988403916359, 0.000973944494035095], [0.002387009793892503, 0.0010801200987771153], [0.0009752536425366998, 0.0010066847316920757], [0.0009852066868916154, 0.671653687953949]]]], reduce_concat=[], 
layout='raw_weights')
# Each *_PATH constant below is a linear layer-name sequence for a
# multi-channel network, running from the input node 'Source' through four
# searched layers (each a stride-1 then stride-2 op plus an add node) to
# 'global_pooling' and the final 'Linear' classifier. Names encode the
# choices made: input/output channel counts (c_in/c_out), stride, and op
# type (SharpSepConv or ResizablePool).
# Path from the scalar-weighted multi-channel search (MULTI_CHANNEL_SCALAR).
MULTI_CHANNEL_SCALAR_PATH = ['Source', 'Conv3x3_0', 'BatchNorm_0', 'layer_0_stride_1_c_in_32_c_out_32_op_type_SharpSepConv', 'layer_0_add_c_out_32_stride_1','layer_0_stride_2_c_in_32_c_out_128_op_type_ResizablePool', 'layer_0_add_c_out_128_stride_2', 'layer_1_stride_1_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_1_add_c_out_128_stride_1', 'layer_1_stride_2_c_in_128_c_out_32_op_type_ResizablePool', 'layer_1_add_c_out_32_stride_2', 'layer_2_stride_1_c_in_32_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_1', 'layer_2_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_2', 'layer_3_stride_1_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_1', 'layer_3_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_2', 'SharpSepConv256', 'add-SharpSep', 'global_pooling', 'Linear']
# Manually designed baseline path (not produced by search, per the name).
MULTI_CHANNEL_HANDMADE_PATH = ['Source', 'Conv3x3_0', 'BatchNorm_0', 'layer_0_stride_1_c_in_32_c_out_32_op_type_SharpSepConv', 'layer_0_add_c_out_32_stride_1', 'layer_0_stride_2_c_in_32_c_out_64_op_type_ResizablePool', 'layer_0_add_c_out_64_stride_2', 'layer_1_stride_1_c_in_64_c_out_64_op_type_SharpSepConv', 'layer_1_add_c_out_64_stride_1', 'layer_1_stride_2_c_in_64_c_out_128_op_type_ResizablePool', 'layer_1_add_c_out_128_stride_2', 'layer_2_stride_1_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_2_add_c_out_128_stride_1', 'layer_2_stride_2_c_in_128_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_2', 'layer_3_stride_1_c_in_256_c_out_256_op_type_SharpSepConv', 'layer_3_add_c_out_256_stride_1', 'layer_3_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_2', 'SharpSepConv256', 'add-SharpSep', 'global_pooling', 'Linear']
# Presumably a greedy per-layer extraction from the scalar-weighted search,
# traversing the network top-down — TODO confirm against the extraction code.
MULTI_CHANNEL_GREEDY_SCALAR_TOP_DOWN = ['Source', 'Conv3x3_0', 'BatchNorm_0', 'layer_0_stride_1_c_in_32_c_out_64_op_type_ResizablePool', 'layer_0_add_c_out_64_stride_1', 'layer_0_stride_2_c_in_64_c_out_64_op_type_ResizablePool', 'layer_0_add_c_out_64_stride_2', 'layer_1_stride_1_c_in_64_c_out_256_op_type_ResizablePool', 'layer_1_add_c_out_256_stride_1', 'layer_1_stride_2_c_in_256_c_out_32_op_type_SharpSepConv', 'layer_1_add_c_out_32_stride_2', 'layer_2_stride_1_c_in_32_c_out_256_op_type_SharpSepConv', 'layer_2_add_c_out_256_stride_1', 'layer_2_stride_2_c_in_256_c_out_32_op_type_SharpSepConv', 'layer_2_add_c_out_32_stride_2', 'layer_3_stride_1_c_in_32_c_out_32_op_type_ResizablePool', 'layer_3_add_c_out_32_stride_1', 'layer_3_stride_2_c_in_32_c_out_64_op_type_ResizablePool', 'layer_3_add_c_out_64_stride_2', 'SharpSepConv64', 'add-SharpSep', 'global_pooling', 'Linear']
# Same greedy scalar extraction, traversed bottom-up (per the name).
MULTI_CHANNEL_GREEDY_SCALAR_BOTTOM_UP = ['Source', 'Conv3x3_2', 'BatchNorm_2', 'layer_0_stride_1_c_in_128_c_out_256_op_type_SharpSepConv', 'layer_0_add_c_out_256_stride_1', 'layer_0_stride_2_c_in_256_c_out_32_op_type_ResizablePool', 'layer_0_add_c_out_32_stride_2', 'layer_1_stride_1_c_in_32_c_out_32_op_type_ResizablePool', 'layer_1_add_c_out_32_stride_1', 'layer_1_stride_2_c_in_32_c_out_32_op_type_ResizablePool', 'layer_1_add_c_out_32_stride_2', 'layer_2_stride_1_c_in_32_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_1', 'layer_2_stride_2_c_in_256_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_2', 'layer_3_stride_1_c_in_256_c_out_256_op_type_ResizablePool', 'layer_3_add_c_out_256_stride_1', 'layer_3_stride_2_c_in_256_c_out_32_op_type_SharpSepConv', 'layer_3_add_c_out_32_stride_2', 'SharpSepConv32', 'add-SharpSep', 'global_pooling', 'Linear']
# Greedy extraction from the max_w-weighted search, top-down (per the name).
MULTI_CHANNEL_GREEDY_MAX_W_TOP_DOWN = ['Source', 'Conv3x3_0', 'BatchNorm_0', 'layer_0_stride_1_c_in_32_c_out_32_op_type_ResizablePool', 'layer_0_add_c_out_32_stride_1', 'layer_0_stride_2_c_in_32_c_out_128_op_type_SharpSepConv', 'layer_0_add_c_out_128_stride_2', 'layer_1_stride_1_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_1_add_c_out_128_stride_1', 'layer_1_stride_2_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_1_add_c_out_128_stride_2', 'layer_2_stride_1_c_in_128_c_out_64_op_type_SharpSepConv', 'layer_2_add_c_out_64_stride_1', 'layer_2_stride_2_c_in_64_c_out_64_op_type_SharpSepConv', 'layer_2_add_c_out_64_stride_2', 'layer_3_stride_1_c_in_64_c_out_128_op_type_SharpSepConv', 'layer_3_add_c_out_128_stride_1', 'layer_3_stride_2_c_in_128_c_out_128_op_type_ResizablePool', 'layer_3_add_c_out_128_stride_2', 'SharpSepConv128', 'add-SharpSep', 'global_pooling', 'Linear']
# Greedy extraction from the max_w-weighted search, bottom-up (per the name).
MULTI_CHANNEL_GREEDY_MAX_W_BOTTOM_UP = ['Source', 'Conv3x3_3', 'BatchNorm_3', 'layer_0_stride_1_c_in_256_c_out_128_op_type_ResizablePool', 'layer_0_add_c_out_128_stride_1', 'layer_0_stride_2_c_in_128_c_out_256_op_type_ResizablePool', 'layer_0_add_c_out_256_stride_2', 'layer_1_stride_1_c_in_256_c_out_128_op_type_ResizablePool', 'layer_1_add_c_out_128_stride_1', 'layer_1_stride_2_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_1_add_c_out_128_stride_2', 'layer_2_stride_1_c_in_128_c_out_64_op_type_ResizablePool', 'layer_2_add_c_out_64_stride_1', 'layer_2_stride_2_c_in_64_c_out_64_op_type_ResizablePool', 'layer_2_add_c_out_64_stride_2', 'layer_3_stride_1_c_in_64_c_out_64_op_type_ResizablePool', 'layer_3_add_c_out_64_stride_1', 'layer_3_stride_2_c_in_64_c_out_64_op_type_ResizablePool', 'layer_3_add_c_out_64_stride_2', 'SharpSepConv64', 'add-SharpSep', 'global_pooling', 'Linear']
# Randomly sampled path, used as a baseline (per the name).
MULTI_CHANNEL_RANDOM_PATH = ['Source', 'Conv3x3_1', 'BatchNorm_1', 'layer_0_stride_1_c_in_64_c_out_128_op_type_ResizablePool', 'layer_0_add_c_out_128_stride_1', 'layer_0_stride_2_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_0_add_c_out_128_stride_2', 'layer_1_stride_1_c_in_128_c_out_256_op_type_ResizablePool', 'layer_1_add_c_out_256_stride_1', 'layer_1_stride_2_c_in_256_c_out_256_op_type_SharpSepConv', 'layer_1_add_c_out_256_stride_2', 'layer_2_stride_1_c_in_256_c_out_128_op_type_SharpSepConv', 'layer_2_add_c_out_128_stride_1', 'layer_2_stride_2_c_in_128_c_out_32_op_type_SharpSepConv', 'layer_2_add_c_out_32_stride_2', 'layer_3_stride_1_c_in_32_c_out_64_op_type_ResizablePool', 'layer_3_add_c_out_64_stride_1', 'layer_3_stride_2_c_in_64_c_out_32_op_type_SharpSepConv', 'layer_3_add_c_out_32_stride_2', 'SharpSepConv32', 'add-SharpSep', 'global_pooling', 'Linear']
# Another randomly drawn path baseline (per the name) — semantics of
# "OPTIMAL" vs RANDOM_PATH not evident here; TODO confirm in calling code.
MULTI_CHANNEL_RANDOM_OPTIMAL = ['Source', 'Conv3x3_1', 'BatchNorm_1', 'layer_0_stride_1_c_in_64_c_out_64_op_type_SharpSepConv', 'layer_0_add_c_out_64_stride_1', 'layer_0_stride_2_c_in_64_c_out_128_op_type_ResizablePool', 'layer_0_add_c_out_128_stride_2', 'layer_1_stride_1_c_in_128_c_out_32_op_type_ResizablePool', 'layer_1_add_c_out_32_stride_1', 'layer_1_stride_2_c_in_32_c_out_256_op_type_SharpSepConv', 'layer_1_add_c_out_256_stride_2', 'layer_2_stride_1_c_in_256_c_out_32_op_type_ResizablePool', 'layer_2_add_c_out_32_stride_1', 'layer_2_stride_2_c_in_32_c_out_256_op_type_ResizablePool', 'layer_2_add_c_out_256_stride_2', 'layer_3_stride_1_c_in_256_c_out_128_op_type_SharpSepConv', 'layer_3_add_c_out_128_stride_1', 'layer_3_stride_2_c_in_128_c_out_128_op_type_SharpSepConv', 'layer_3_add_c_out_128_stride_2', 'SharpSepConv128', 'add-SharpSep', 'global_pooling', 'Linear']
'''
costar@ubuntu|~/src/sharpDARTS/cnn on multi_channel_search!?
± export CUDA_VISIBLE_DEVICES="0" && python3 train_search.py --dataset cifar10 --batch_size 48 --layers_of_cells 8 --layers_in_cells 4 --save max_w_SharpSepConvDARTS_SEARCH_`git rev-parse --short HEAD` --init_channels
16 --epochs 120 --cutout --autoaugment --seed 22 --weighting_algorithm max_w --primitives DARTS_PRIMITIVES
Tensorflow is not installed. Skipping tf related imports
Experiment dir : search-20190321-024555-max_w_SharpSepConvDARTS_SEARCH_5e49783-cifar10-DARTS_PRIMITIVES-OPS-0
2019_03_21_19_11_44 epoch, 47, train_acc, 76.215997, valid_acc, 74.855997, train_loss, 0.687799, valid_loss, 0.734363, lr, 1.580315e-02, best_epoch, 47, best_valid_acc, 74.855997
2019_03_21_19_11_44 genotype =
2019_03_21_19_11_44 alphas_normal = tensor([[0.1134, 0.0945, 0.0894, 0.1007, 0.1466, 0.1334, 0.1964, 0.1256],
[0.1214, 0.0983, 0.1000, 0.1072, 0.1675, 0.1383, 0.1167, 0.1507],
[0.1251, 0.1066, 0.1043, 0.1674, 0.1295, 0.1237, 0.1209, 0.1225],
[0.1238, 0.1066, 0.1054, 0.1108, 0.1331, 0.1418, 0.1145, 0.1641],
[0.1009, 0.0843, 0.0801, 0.0802, 0.2970, 0.1168, 0.1329, 0.1078],
[0.1257, 0.1115, 0.1087, 0.1158, 0.1641, 0.1312, 0.1305, 0.1125],
[0.1662, 0.1154, 0.1152, 0.1194, 0.1234, 0.1248, 0.1222, 0.1134],
[0.1177, 0.0943, 0.1892, 0.0836, 0.1285, 0.1317, 0.1286, 0.1263],
[0.3851, 0.0835, 0.0718, 0.0554, 0.1031, 0.1047, 0.1011, 0.0953],
[0.1249, 0.1119, 0.1096, 0.1156, 0.1284, 0.1177, 0.1157, 0.1762],
[0.1249, 0.1186, 0.1197, 0.1244, 0.1254, 0.1319, 0.1238, 0.1312],
[0.1126, 0.0932, 0.2448, 0.0838, 0.1132, 0.1141, 0.1263, 0.1120],
[0.0791, 0.4590, 0.0665, 0.0518, 0.0843, 0.0804, 0.0846, 0.0944],
[0.0715, 0.0665, 0.4912, 0.0401, 0.0822, 0.0816, 0.0888, 0.0780]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
SHARPSEPCONV_DARTS_MAX_W = Genotype(normal=[('dil_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 2), ('skip_connect', 0), ('avg_pool_3x3', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 4), ('max_pool_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('dil_conv_5x5', 2), ('sep_conv_5x5', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 2), ('dil_conv_3x3', 0), ('dil_conv_5x5', 3)], reduce_concat=range(2, 6), layout='cell')
2019_03_22_20_40_54 alphas_normal = tensor([[0.0232, 0.0151, 0.0171, 0.0194, 0.0332, 0.0312, 0.8298, 0.0309],
[0.0286, 0.0168, 0.0197, 0.0224, 0.0504, 0.0883, 0.0418, 0.7321],
[0.0877, 0.0575, 0.0670, 0.3865, 0.0951, 0.0988, 0.1257, 0.0818],
[0.0540, 0.0393, 0.0405, 0.0442, 0.0624, 0.0779, 0.0628, 0.6189],
[0.0489, 0.0368, 0.0358, 0.0387, 0.6566, 0.0585, 0.0651, 0.0596],
[0.0855, 0.0645, 0.0748, 0.0851, 0.4185, 0.0954, 0.1091, 0.0671],
[0.5734, 0.0475, 0.0503, 0.0550, 0.0668, 0.0779, 0.0643, 0.0649],
[0.0789, 0.0542, 0.4509, 0.0610, 0.0802, 0.0793, 0.0861, 0.1094],
[0.8564, 0.0183, 0.0183, 0.0173, 0.0212, 0.0231, 0.0226, 0.0227],
[0.0453, 0.0343, 0.0378, 0.0412, 0.0481, 0.0476, 0.0459, 0.6999],
[0.1087, 0.0705, 0.0891, 0.1052, 0.1184, 0.1329, 0.1179, 0.2574],
[0.0525, 0.0375, 0.6463, 0.0456, 0.0520, 0.0581, 0.0550, 0.0530],
[0.0195, 0.8692, 0.0158, 0.0147, 0.0208, 0.0193, 0.0199, 0.0207],
[0.0090, 0.0080, 0.9410, 0.0070, 0.0084, 0.0087, 0.0087, 0.0092]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_03_22_20_40_54 alphas_reduce = tensor([[0.0583, 0.6199, 0.0478, 0.0560, 0.0569, 0.0585, 0.0557, 0.0469],
[0.1099, 0.1485, 0.1486, 0.1488, 0.1488, 0.0791, 0.1487, 0.0677],
[0.1059, 0.0870, 0.0895, 0.1150, 0.1011, 0.2977, 0.1077, 0.0960],
[0.1216, 0.1476, 0.1479, 0.1478, 0.1141, 0.1016, 0.1196, 0.0997],
[0.1176, 0.1342, 0.1314, 0.1998, 0.0986, 0.1033, 0.1004, 0.1146],
[0.1195, 0.1052, 0.1077, 0.1202, 0.1218, 0.1956, 0.1175, 0.1124],
[0.1238, 0.1394, 0.1396, 0.1395, 0.1117, 0.1157, 0.1115, 0.1189],
[0.1234, 0.1338, 0.1321, 0.1505, 0.1169, 0.1140, 0.1139, 0.1152],
[0.1222, 0.1158, 0.1136, 0.1532, 0.1059, 0.1151, 0.1133, 0.1609],
[0.1238, 0.1120, 0.1141, 0.1222, 0.1256, 0.1337, 0.1503, 0.1184],
[0.1248, 0.1335, 0.1334, 0.1336, 0.1166, 0.1221, 0.1178, 0.1184],
[0.1242, 0.1317, 0.1300, 0.1353, 0.1307, 0.1120, 0.1128, 0.1233],
[0.1238, 0.1194, 0.1174, 0.1649, 0.1167, 0.1173, 0.1207, 0.1199],
[0.1192, 0.1132, 0.1116, 0.1128, 0.1130, 0.2097, 0.1126, 0.1079]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_03_22_21_01_53 epoch, 120, train_acc, 89.499997, valid_acc, 83.815997, train_loss, 0.302826, valid_loss, 0.483055, lr, 1.000000e-04, best_epoch, 119, best_valid_acc, 84.159997
Overview ***** best_epoch: 119 best_valid_acc: 84.16 ***** Progress: 100%|| 120/120 [42:15:54<00:00, 1265.52s/it]
2019_03_22_21_01_54 genotype =
2019_03_22_21_01_54 Search for Model Complete! Save dir: search-20190321-024555-max_w_SharpSepConvDARTS_SEARCH_5e49783-cifar10-DARTS_PRIMITIVES-OPS-0
export CUDA_VISIBLE_DEVICES="1" && python3 main_fp16_optimizer.py --autoaugment --auxiliary --cutout --batch_size 128 --epochs 2000 --save flops_SHARPSEPCONV_DARTS_MAX_W_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARPSEPCONV_DARTS_MAX_W --flops
2019_03_27_13_51_30 param size = 2.477062MB
2019_03_27_13_51_30 flops_shape = [1, 3, 32, 32]
2019_03_27_13_51_30 flops = 405.84MMac
Full training run command:
costar@ubuntu|~/src/sharpDARTS/cnn on multi_channel_search!?
± for i in {1..8}; do export CUDA_VISIBLE_DEVICES="0" && python3 train.py --autoaugment --auxiliary --cutout --batch_size 64 --epochs 2000 --save SHARPSEPCONV_DARTS_MAX_W_2k_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARPSEPCONV_DARTS_MAX_W ; done;
Experiment dir : eval-20190327-140904-SHARPSEPCONV_DARTS_MAX_W_2k_c1059c7_cospower_min_1e-8-cifar10-SHARPSEPCONV_DARTS_MAX_W-0
'''
SHARPSEPCONV_DARTS_MAX_W = Genotype(normal=[('dil_conv_3x3', 0), ('dil_conv_5x5', 1), ('sep_conv_3x3', 2), ('dil_conv_5x5', 1), ('avg_pool_3x3', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 4), ('max_pool_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('sep_conv_5x5', 0), ('skip_connect', 2), ('sep_conv_5x5', 0), ('dil_conv_5x5', 3), ('sep_conv_5x5', 4), ('skip_connect', 3)], reduce_concat=range(2, 6), layout='cell')
"""
± export CUDA_VISIBLE_DEVICES="0" && python3 train_search.py --dataset cifar10 --batch_size 24 --layers_of_cells 8 --layers_in_cells 4 --save max_w_SHARP_DARTS_SEARCH_`git rev-parse --short HEAD` --i
nit_channels 16 --epochs 120 --cutout --autoaugment --seed 23 --weighting_algorithm max_w
Tensorflow is not installed. Skipping tf related imports
Experiment dir : search-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0
2019_03_23_00_22_42 gpu device = 0
2019_03_23_00_22_42 args = Namespace(arch='PRIMITIVES-OPS', arch_learning_rate=0.0003, arch_weight_decay=0.001, autoaugment=True, batch_size=24, cutout=True, cutout_length=16, data='../data', dataset
='cifar10', drop_path_prob=0.3, epoch_stats_file='search-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0/eval-epoch-stats-20190323-002241.json', epochs=120, evaluate='', fin
al_path=None, gpu=0, grad_clip=5, init_channels=16, layers_in_cells=4, layers_of_cells=8, learning_rate=0.025, learning_rate_min=0.0001, load='', load_args='', load_genotype=None, log_file_path='sear
ch-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0/log.txt', lr_power_annealing_exponent_order=2, mid_channels=32, model_path='saved_models', momentum=0.9, multi_channel=Fal
se, no_architect=False, ops='OPS', primitives='PRIMITIVES', random_eraser=False, report_freq=50, save='search-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0', seed=23, star
t_epoch=1, stats_file='search-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0/eval-stats-20190323-002241.json', train_portion=0.5, unrolled=False, warmup_epochs=5, weight_de
cay=0.0003, weighting_algorithm='max_w')
2019_03_26_22_47_44 epoch, 116, train_acc, 89.159997, valid_acc, 84.891997, train_loss, 0.309159, valid_loss, 0.457367, lr, 1.515235e-04, best_epoch, 116, best_valid_acc, 84.891997
2019_03_26_22_47_45 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 2), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 0), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('dil_choke_conv_3x3', 0), ('dil_flood_conv_3x3', 2), ('dil_conv_3x3', 0), ('skip_connect', 3), ('flood_conv_3x3', 0), ('skip_connect', 3)], reduce_concat=range(2, 6), layout='cell')
2019_03_26_22_47_45 alphas_normal = tensor([[0.0143, 0.0093, 0.0108, 0.8871, 0.0147, 0.0156, 0.0162, 0.0170, 0.0150],
[0.0372, 0.0194, 0.0285, 0.0438, 0.0300, 0.0522, 0.0522, 0.6729, 0.0637],
[0.0251, 0.0151, 0.8027, 0.0271, 0.0251, 0.0254, 0.0278, 0.0270, 0.0247],
[0.0345, 0.0207, 0.0290, 0.0423, 0.0373, 0.7211, 0.0346, 0.0405, 0.0400],
[0.0286, 0.0187, 0.0208, 0.0393, 0.7678, 0.0325, 0.0281, 0.0344, 0.0297],
[0.0093, 0.0072, 0.0078, 0.0091, 0.0092, 0.9293, 0.0093, 0.0094, 0.0094],
[0.0316, 0.0187, 0.0277, 0.7467, 0.0331, 0.0378, 0.0360, 0.0359, 0.0325],
[0.0174, 0.0128, 0.0138, 0.0182, 0.8653, 0.0169, 0.0178, 0.0196, 0.0182],
[0.0056, 0.0047, 0.0046, 0.0056, 0.0055, 0.0057, 0.9572, 0.0057, 0.0055],
[0.0655, 0.0446, 0.0509, 0.0755, 0.0643, 0.0739, 0.4925, 0.0671, 0.0657],
[0.0944, 0.0634, 0.0887, 0.0924, 0.0902, 0.0897, 0.0884, 0.1377, 0.2551],
[0.0962, 0.0696, 0.0673, 0.1256, 0.2202, 0.1166, 0.0974, 0.1173, 0.0898],
[0.0873, 0.0595, 0.0547, 0.0967, 0.1199, 0.2716, 0.1071, 0.1155, 0.0877],
[0.0894, 0.0562, 0.0553, 0.1606, 0.1609, 0.1461, 0.1476, 0.0995, 0.0843]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_03_26_22_47_45 alphas_reduce = tensor([[0.0459, 0.6524, 0.0408, 0.0427, 0.0427, 0.0430, 0.0429, 0.0456, 0.0440],
[0.1075, 0.1086, 0.1400, 0.1091, 0.0917, 0.1211, 0.0922, 0.1397, 0.0902],
[0.0716, 0.0625, 0.0676, 0.0689, 0.0687, 0.0656, 0.0670, 0.0699, 0.4582],
[0.1106, 0.1075, 0.1119, 0.1118, 0.1119, 0.1115, 0.1112, 0.1119, 0.1117],
[0.0874, 0.0745, 0.0705, 0.0915, 0.0785, 0.0846, 0.3297, 0.1040, 0.0791],
[0.1047, 0.0953, 0.1007, 0.1024, 0.1937, 0.1101, 0.0961, 0.1001, 0.0968],
[0.1106, 0.1100, 0.1123, 0.1068, 0.1120, 0.1121, 0.1120, 0.1121, 0.1121],
[0.1099, 0.1003, 0.1048, 0.1257, 0.1073, 0.1093, 0.1094, 0.1255, 0.1078],
[0.1100, 0.0988, 0.1329, 0.1059, 0.1087, 0.1060, 0.0981, 0.1306, 0.1090],
[0.1104, 0.1038, 0.1071, 0.1142, 0.1058, 0.1321, 0.1037, 0.1131, 0.1099],
[0.1107, 0.1110, 0.1120, 0.1117, 0.1087, 0.1117, 0.1108, 0.1117, 0.1117],
[0.1101, 0.1040, 0.1121, 0.1239, 0.1100, 0.1121, 0.1047, 0.1171, 0.1062],
[0.1104, 0.1030, 0.1313, 0.1152, 0.1055, 0.1081, 0.1073, 0.1093, 0.1099],
[0.1105, 0.1001, 0.1220, 0.1197, 0.1094, 0.1140, 0.0999, 0.1195, 0.1049]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_03_27_02_04_35 epoch, 120, train_acc, 89.451997, valid_acc, 84.519997, train_loss, 0.308387, valid_loss, 0.461957, lr, 1.000000e-04, best_epoch, 116, best_valid_acc, 84.891997
Overview ***** best_epoch: 116 best_valid_acc: 84.89 ***** Progress: 100%|| 120/120 [97:41:46<00:00, 2952.55s/it]
2019_03_27_02_04_37 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 2), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 0), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('dil_choke_conv_3x3', 0), ('dil_flood_conv_3x3', 2), ('dil_conv_3x3', 0), ('skip_connect', 3), ('flood_conv_3x3', 0), ('skip_connect', 3)], reduce_concat=range(2, 6), layout='cell')
2019_03_27_02_04_37 Search for Model Complete! Save dir: search-20190323-002241-max_w_SHARP_DARTS_SEARCH_e79c097-cifar10-PRIMITIVES-OPS-0
export CUDA_VISIBLE_DEVICES="1" && python3 main_fp16_optimizer.py --autoaugment --auxiliary --cutout --batch_size 2 --epochs 2000 --save flops_SHARP_DARTS_MAX_W_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARP_DARTS_MAX_W --flops
2019_03_27_13_48_41 param size = 5.902558MB
2019_03_27_13_48_41 flops_shape = [1, 3, 32, 32]
2019_03_27_13_48_41 flops = 935.22MMac
Full training run command:
costar@ubuntu|~/src/sharpDARTS/cnn on multi_channel_search!?
± for i in {1..8}; do export CUDA_VISIBLE_DEVICES="1" && python3 train.py --autoaugment --auxiliary --cutout --batch_size 32 --epochs 2000 --save SHARP_DARTS_MAX_W_2k_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARP_DARTS_MAX_W ; done;
Experiment dir : eval-20190327-141933-SHARP_DARTS_MAX_W_2k_c1059c7_cospower_min_1e-8-cifar10-SHARP_DARTS_MAX_W-0
"""
# Genotype discovered by the max_w-weighted sharpDARTS search logged above
# (search-20190323-002241..., best_valid_acc 84.89 at epoch 116 of 120).
SHARP_DARTS_MAX_W = Genotype(normal=[('sep_conv_3x3', 0), ('choke_conv_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 2), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 0), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('dil_choke_conv_3x3', 0), ('dil_flood_conv_3x3', 2), ('dil_conv_3x3', 0), ('skip_connect', 3), ('flood_conv_3x3', 0), ('skip_connect', 3)], reduce_concat=range(2, 6), layout='cell')
"""
ahundt@femur|~/src/darts/cnn on sharper?
± export CUDA_VISIBLE_DEVICES="2" && python3 train_search.py --dataset cifar10 --batch_size 16 --layers_of_cells 8 --layers_in_cells 4 --save SHARPER_SEARCH_`git rev-parse --short HEAD` --init_channels 16 --epochs 120 --cutout --autoaugment --seed 22 --primitives SHARPER_PRIMITIVES
2019_04_09_18_33_45 gpu device = 0
2019_04_09_18_33_45 args = Namespace(arch='SHARPER_PRIMITIVES-OPS', arch_learning_rate=0.0003, arch_weight_decay=0.001, autoaugment=True, batch_size=16, cutout=True, cutout_length=16, data='../data', dataset='cifar10', drop_path_prob=0.3, epoch_stats_file='search-20190409-183345-SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/eval-epoch-stats-20190409-183345.json', epochs=120, evaluate='', final_path=None, gpu=0, grad_clip=5, init_channels=16, layers_in_cells=4, layers_of_cells=8, learning_rate=0.025, learning_rate_min=0.0001, load='', load_args='', load_genotype=None, log_file_path='search-20190409-183345-SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/log.txt', lr_power_annealing_exponent_order=2, mid_channels=32, model_path='saved_models', momentum=0.9, multi_channel=False, no_architect=False, ops='OPS', primitives='SHARPER_PRIMITIVES', random_eraser=False, report_freq=50, save='search-20190409-183345-SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0', seed=22, start_epoch=1, stats_file='search-20190409-183345-SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/eval-stats-20190409-183345.json', train_portion=0.5, unrolled=False, warmup_epochs=5, weight_decay=0.0003, weighting_algorithm='scalar')
2019_04_09_18_33_45 loading op dict: operations.OPS
2019_04_09_18_33_45 loading primitives:genotypes.SHARPER_PRIMITIVES
2019_04_09_18_33_45 primitives: ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5', 'flood_conv_3x3', 'flood_conv_5x5', 'dil_flood_conv_3x3']
2019_04_09_18_33_49 param size = 9.707002MB
2019_04_19_14_33_25 alphas_normal = tensor([[0.1108, 0.0556, 0.0575, 0.2509, 0.1022, 0.0461, 0.0347, 0.0274, 0.0378, 0.2145, 0.0266, 0.0359],
[0.3534, 0.0249, 0.0230, 0.0314, 0.1636, 0.0603, 0.0392, 0.0490, 0.0627, 0.1044, 0.0464, 0.0417],
[0.5115, 0.0438, 0.0384, 0.0831, 0.0495, 0.0549, 0.0467, 0.0462, 0.0292, 0.0390, 0.0229, 0.0348],
[0.6162, 0.0238, 0.0217, 0.0320, 0.0882, 0.0679, 0.0205, 0.0213, 0.0237, 0.0291, 0.0289, 0.0267],
[0.7525, 0.0170, 0.0157, 0.0279, 0.0271, 0.0264, 0.0367, 0.0240, 0.0161, 0.0198, 0.0203, 0.0165],
[0.3173, 0.0881, 0.0614, 0.1120, 0.0474, 0.0473, 0.0461, 0.0410, 0.0378, 0.0895, 0.0414, 0.0707],
[0.3855, 0.0335, 0.0304, 0.0456, 0.0678, 0.0496, 0.0579, 0.0441, 0.0467, 0.1161, 0.0841, 0.0389],
[0.5562, 0.0272, 0.0226, 0.0429, 0.0706, 0.0511, 0.0392, 0.0321, 0.0275, 0.0596, 0.0366, 0.0344],
[0.1158, 0.0256, 0.0253, 0.0423, 0.1826, 0.0349, 0.0435, 0.0868, 0.0274, 0.0752, 0.1449, 0.1957],
[0.2988, 0.0673, 0.0460, 0.0676, 0.0678, 0.0567, 0.0483, 0.0704, 0.0604, 0.1230, 0.0485, 0.0452],
[0.3221, 0.0363, 0.0330, 0.0455, 0.0809, 0.0457, 0.0519, 0.0636, 0.0689, 0.1469, 0.0629, 0.0421],
[0.4835, 0.0269, 0.0227, 0.0398, 0.0528, 0.0671, 0.0407, 0.0762, 0.0554, 0.0495, 0.0554, 0.0300],
[0.0593, 0.0200, 0.0193, 0.0318, 0.0606, 0.0445, 0.0292, 0.0412, 0.0520, 0.1620, 0.0341, 0.4460],
[0.0821, 0.0228, 0.0230, 0.0340, 0.1011, 0.0903, 0.0396, 0.1702, 0.0370, 0.1469, 0.0921, 0.1609]], device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_04_19_14_33_25 alphas_reduce = tensor([[0.0628, 0.2237, 0.1198, 0.0752, 0.0712, 0.0598, 0.0775, 0.0537, 0.0651, 0.0707, 0.0613, 0.0594],
[0.0812, 0.1069, 0.1021, 0.1551, 0.0841, 0.0636, 0.0473, 0.0726, 0.0584, 0.0799, 0.0786, 0.0701],
[0.0625, 0.1197, 0.1379, 0.0985, 0.1186, 0.0799, 0.0425, 0.0679, 0.0458, 0.0692, 0.0832, 0.0743],
[0.0708, 0.0994, 0.1058, 0.1389, 0.0632, 0.0556, 0.0569, 0.0937, 0.0654, 0.1025, 0.0836, 0.0641],
[0.0874, 0.0765, 0.0767, 0.1159, 0.0823, 0.1001, 0.0772, 0.0783, 0.0534, 0.1009, 0.0804, 0.0708],
[0.0731, 0.0977, 0.1059, 0.1180, 0.0564, 0.1049, 0.0580, 0.0632, 0.0664, 0.0704, 0.0640, 0.1219],
[0.0816, 0.1009, 0.1261, 0.0929, 0.0817, 0.0604, 0.0824, 0.0925, 0.0606, 0.0622, 0.0848, 0.0740],
[0.0923, 0.0670, 0.0673, 0.0952, 0.1105, 0.0709, 0.0742, 0.0857, 0.1044, 0.0679, 0.0793, 0.0852],
[0.0977, 0.0673, 0.0777, 0.1163, 0.0792, 0.0727, 0.0850, 0.0836, 0.1078, 0.0856, 0.0502, 0.0769],
[0.0722, 0.1031, 0.1275, 0.0822, 0.0937, 0.0941, 0.0848, 0.0808, 0.0673, 0.0681, 0.0698, 0.0565],
[0.0762, 0.1123, 0.1090, 0.0942, 0.0699, 0.0770, 0.0775, 0.0765, 0.0812, 0.0897, 0.0716, 0.0650],
[0.0903, 0.0703, 0.0717, 0.1145, 0.0846, 0.0823, 0.0826, 0.0938, 0.0651, 0.0900, 0.0846, 0.0701],
[0.0935, 0.0614, 0.0651, 0.1099, 0.1085, 0.0799, 0.0833, 0.0786, 0.0622, 0.1417, 0.0515, 0.0644],
[0.1492, 0.0676, 0.0754, 0.1314, 0.0717, 0.1051, 0.0829, 0.0670, 0.0863, 0.0683, 0.0518, 0.0432]], device='cuda:0', grad_fn=<SoftmaxBackward>)
Overview ***** best_epoch: 113 best_valid_acc: 86.48 ***** Progress: 99%|| 119/120 [235:59:34<1:59:19, 7159.36s/itTraceback (most recent call last):91.89, top 5: 98.82 progress: 5%|| 74/1563 [05:08<1:43:31, 4.17s/it]
2019_04_19_16_32_32 epoch, 120, train_acc, 91.084000, valid_acc, 86.732000, train_loss, 0.260623, valid_loss, 0.394877, lr, 1.000000e-04, best_epoch, 120, best_valid_acc, 86.732000
Overview ***** best_epoch: 120 best_valid_acc: 86.73 ***** Progress: 100%|| 120/120 [237:58:43<00:00, 7156.24s/it]
2019_04_19_16_32_34 genotype = Genotype(normal=[('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 1), ('dil_flood_conv_3x3', 3), ('dil_conv_3x3', 4)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 1), ('avg_pool_3x3', 0), ('avg_pool_3x3', 1), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3), ('skip_connect', 4)], reduce_concat=range(2, 6), layout='cell')
2019_04_19_16_32_34 Search for Model Complete! Save dir: search-20190409-183345-SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0
2019_04_20_12_52_14 param size = 7.109470MB
2019_04_20_12_52_14 flops_shape = [1, 3, 32, 32]
2019_04_20_12_52_14 flops = 1.1GMac
for i in {1..8}; do export CUDA_VISIBLE_DEVICES="0" && python3 train.py --b 48 --save SHARPER_SCALAR_2k_`git rev-parse --short HEAD` --arch SHARPER_SCALAR --epochs 2000 --cutout --autoaugment --auxiliary ; done;
"""
# Genotype from the scalar-weighted SHARPER search logged above
# (search-20190409-183345..., best_valid_acc 86.73 at epoch 120 of 120).
SHARPER_SCALAR = Genotype(normal=[('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 1), ('dil_flood_conv_3x3', 3), ('dil_conv_3x3', 4)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 1), ('avg_pool_3x3', 0), ('avg_pool_3x3', 1), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3), ('skip_connect', 4)], reduce_concat=range(2, 6), layout='cell')
# Raw softmax(alpha) architecture weights saved at the end of the SHARPER
# scalar search (they match the alphas_normal / alphas_reduce tensors logged
# above: one row per edge, one column per op in SHARPER_PRIMITIVES).
# layout='raw_weights' marks this as extractor input, not a runnable cell;
# presumably consumed by genotype_extractor.py — confirm against that script.
SHARPER_SCALAR_WEIGHTS = Genotype(normal=[[0.1108, 0.0556, 0.0575, 0.2509, 0.1022, 0.0461, 0.0347, 0.0274, 0.0378, 0.2145, 0.0266, 0.0359],
[0.3534, 0.0249, 0.0230, 0.0314, 0.1636, 0.0603, 0.0392, 0.0490, 0.0627, 0.1044, 0.0464, 0.0417],
[0.5115, 0.0438, 0.0384, 0.0831, 0.0495, 0.0549, 0.0467, 0.0462, 0.0292, 0.0390, 0.0229, 0.0348],
[0.6162, 0.0238, 0.0217, 0.0320, 0.0882, 0.0679, 0.0205, 0.0213, 0.0237, 0.0291, 0.0289, 0.0267],
[0.7525, 0.0170, 0.0157, 0.0279, 0.0271, 0.0264, 0.0367, 0.0240, 0.0161, 0.0198, 0.0203, 0.0165],
[0.3173, 0.0881, 0.0614, 0.1120, 0.0474, 0.0473, 0.0461, 0.0410, 0.0378, 0.0895, 0.0414, 0.0707],
[0.3855, 0.0335, 0.0304, 0.0456, 0.0678, 0.0496, 0.0579, 0.0441, 0.0467, 0.1161, 0.0841, 0.0389],
[0.5562, 0.0272, 0.0226, 0.0429, 0.0706, 0.0511, 0.0392, 0.0321, 0.0275, 0.0596, 0.0366, 0.0344],
[0.1158, 0.0256, 0.0253, 0.0423, 0.1826, 0.0349, 0.0435, 0.0868, 0.0274, 0.0752, 0.1449, 0.1957],
[0.2988, 0.0673, 0.0460, 0.0676, 0.0678, 0.0567, 0.0483, 0.0704, 0.0604, 0.1230, 0.0485, 0.0452],
[0.3221, 0.0363, 0.0330, 0.0455, 0.0809, 0.0457, 0.0519, 0.0636, 0.0689, 0.1469, 0.0629, 0.0421],
[0.4835, 0.0269, 0.0227, 0.0398, 0.0528, 0.0671, 0.0407, 0.0762, 0.0554, 0.0495, 0.0554, 0.0300],
[0.0593, 0.0200, 0.0193, 0.0318, 0.0606, 0.0445, 0.0292, 0.0412, 0.0520, 0.1620, 0.0341, 0.4460],
[0.0821, 0.0228, 0.0230, 0.0340, 0.1011, 0.0903, 0.0396, 0.1702, 0.0370, 0.1469, 0.0921, 0.1609]],
reduce=[[0.0628, 0.2237, 0.1198, 0.0752, 0.0712, 0.0598, 0.0775, 0.0537, 0.0651, 0.0707, 0.0613, 0.0594],
[0.0812, 0.1069, 0.1021, 0.1551, 0.0841, 0.0636, 0.0473, 0.0726, 0.0584, 0.0799, 0.0786, 0.0701],
[0.0625, 0.1197, 0.1379, 0.0985, 0.1186, 0.0799, 0.0425, 0.0679, 0.0458, 0.0692, 0.0832, 0.0743],
[0.0708, 0.0994, 0.1058, 0.1389, 0.0632, 0.0556, 0.0569, 0.0937, 0.0654, 0.1025, 0.0836, 0.0641],
[0.0874, 0.0765, 0.0767, 0.1159, 0.0823, 0.1001, 0.0772, 0.0783, 0.0534, 0.1009, 0.0804, 0.0708],
[0.0731, 0.0977, 0.1059, 0.1180, 0.0564, 0.1049, 0.0580, 0.0632, 0.0664, 0.0704, 0.0640, 0.1219],
[0.0816, 0.1009, 0.1261, 0.0929, 0.0817, 0.0604, 0.0824, 0.0925, 0.0606, 0.0622, 0.0848, 0.0740],
[0.0923, 0.0670, 0.0673, 0.0952, 0.1105, 0.0709, 0.0742, 0.0857, 0.1044, 0.0679, 0.0793, 0.0852],
[0.0977, 0.0673, 0.0777, 0.1163, 0.0792, 0.0727, 0.0850, 0.0836, 0.1078, 0.0856, 0.0502, 0.0769],
[0.0722, 0.1031, 0.1275, 0.0822, 0.0937, 0.0941, 0.0848, 0.0808, 0.0673, 0.0681, 0.0698, 0.0565],
[0.0762, 0.1123, 0.1090, 0.0942, 0.0699, 0.0770, 0.0775, 0.0765, 0.0812, 0.0897, 0.0716, 0.0650],
[0.0903, 0.0703, 0.0717, 0.1145, 0.0846, 0.0823, 0.0826, 0.0938, 0.0651, 0.0900, 0.0846, 0.0701],
[0.0935, 0.0614, 0.0651, 0.1099, 0.1085, 0.0799, 0.0833, 0.0786, 0.0622, 0.1417, 0.0515, 0.0644],
[0.1492, 0.0676, 0.0754, 0.1314, 0.0717, 0.1051, 0.0829, 0.0670, 0.0863, 0.0683, 0.0518, 0.0432]], normal_concat=[], reduce_concat=[], layout='raw_weights')
# Retrieved from SHARPER_SCALAR_WEIGHTS by running genotype_extractor.py
# NOTE(review): this is identical to SHARPER_SCALAR above — presumably the
# extractor's default skip/none filtering reproduces the search-time genotype;
# confirm with genotype_extractor.py.
SHARPER_SCALAR_genotype_skip_none = Genotype(normal=[('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('dil_flood_conv_3x3', 3), ('flood_conv_3x3', 1), ('dil_flood_conv_3x3', 3), ('dil_conv_3x3', 4)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 1), ('avg_pool_3x3', 0), ('avg_pool_3x3', 1), ('dil_flood_conv_3x3', 0), ('flood_conv_3x3', 3), ('skip_connect', 4)], reduce_concat=range(2, 6), layout='cell')
"""
costar@ubuntu|/media/costar/7d094c19-d61f-48fe-93cb-0f7287e05292/datasets/sharpDARTS/cnn on sharper!?
± for i in {1..8}; do export CUDA_VISIBLE_DEVICES="0" && python3 train.py --autoaugment --auxiliary --cutout --batch_size 64 --epochs 2000 --save SHARPER_SCALAR_genotype_no_hack_2k_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARPER_SCALAR_genotype_no_hack --primitives SHARPER_PRIMITIVES ; done;
Tensorflow is not installed. Skipping tf related imports
Experiment dir : eval-20190515-140449-SHARPER_SCALAR_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_SCALAR_genotype_no_hack-0
2019_05_15_14_04_49 gpu device = 0
2019_05_15_14_04_49 args = Namespace(arch='SHARPER_SCALAR_genotype_no_hack', autoaugment=True, auxiliary=True, auxiliary_weight=0.4, batch_size=64, cutout=True, cutout_length=16, data='../data', dataset='cifar10', drop_path_prob=0.2, epoch_stats_file='eval-20190515-140449-SHARPER_SCALAR_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_SCALAR_genotype_no_hack-0/eval-epoch-stats-20190515-140449.json', epochs=2000, evaluate='', flops=False, gpu=0, grad_clip=5, init_channels=36, layers=20, layers_in_cells=4, layers_of_cells=8, learning_rate=0.025, learning_rate_min=1e-08, load='', load_args='', load_genotype=None, log_file_path='eval-20190515-140449-SHARPER_SCALAR_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_SCALAR_genotype_no_hack-0/log.txt', lr_power_annealing_exponent_order=2, mid_channels=32, mixed_auxiliary=False, model_path='saved_models', momentum=0.9, multi_channel=False, ops='OPS', optimizer='sgd', partial=0.125, primitives='SHARPER_PRIMITIVES', random_eraser=False, report_freq=50, save='eval-20190515-140449-SHARPER_SCALAR_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_SCALAR_genotype_no_hack-0', seed=0, start_epoch=1, stats_file='eval-20190515-140449-SHARPER_SCALAR_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_SCALAR_genotype_no_hack-0/eval-stats-20190515-140449.json', warm_restarts=20, warmup_epochs=5, weight_decay=0.0003, weighting_algorithm='scalar')
2019_05_15_14_04_49 output channels: 10
2019_05_15_14_04_49 loading op dict: operations.OPS
2019_05_15_14_04_49 loading primitives:genotypes.SHARPER_PRIMITIVES
2019_05_15_14_04_49 primitives: ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5', 'flood_conv_3x3', 'flood_conv_5x5', 'dil_flood_conv_3x3']
2019_05_15_14_04_51 param size = 3.250846MB
"""
# Genotype extracted from the scalar-search weights without excluding the
# 'none' primitive ("no hack"): most normal-cell edges then select 'none',
# which is why the training log above reports only 3.25MB of parameters.
SHARPER_SCALAR_genotype_no_hack = Genotype(normal=[('none', 1), ('skip_connect', 0), ('none', 2), ('none', 1), ('none', 2), ('none', 1), ('none', 2), ('dil_flood_conv_3x3', 3)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('skip_connect', 1), ('skip_connect', 1), ('avg_pool_3x3', 0), ('avg_pool_3x3', 1), ('dil_flood_conv_3x3', 0), ('none', 4), ('flood_conv_3x3', 3)], reduce_concat=range(2, 6), layout='cell')
"""
ahundt@femur|~/src/darts/cnn on sharper?
± export CUDA_VISIBLE_DEVICES="0" && python3 train_search.py --dataset cifar10 --batch_size 16 --layers_of_cells 8 --layers_in_cells 4 --save max_w_SHARPER_SEARCH_`git rev-parse --short HEAD` --init_channels 16 --epochs 120 --cutout --autoaugment --seed 22 --weighting_algorithm max_w --primitives SHARPER_PRIMITIVES
2019_04_09_18_04_03 gpu device = 0
2019_04_09_18_04_03 args = Namespace(arch='SHARPER_PRIMITIVES-OPS', arch_learning_rate=0.0003, arch_weight_decay=0.001, autoaugment=True, batch_size=16, cutout=True, cutout_length=16, data='../data', dataset='cifar10', drop_path_prob=0.3, epoch_stats_file='search-20190409-180403-max_w_SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/eval-epoch-stats-20190409-180403.json', epochs=120, evaluate='', final_path=None, gpu=0, grad_clip=5, init_channels=16, layers_in_cells=4, layers_of_cells=8, learning_rate=0.025, learning_rate_min=0.0001, load='', load_args='', load_genotype=None, log_file_path='search-20190409-180403-max_w_SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/log.txt', lr_power_annealing_exponent_order=2, mid_channels=32, model_path='saved_models', momentum=0.9, multi_channel=False, no_architect=False, ops='OPS', primitives='SHARPER_PRIMITIVES', random_eraser=False, report_freq=50, save='search-20190409-180403-max_w_SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0', seed=22, start_epoch=1, stats_file='search-20190409-180403-max_w_SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0/eval-stats-20190409-180403.json', train_portion=0.5, unrolled=False, warmup_epochs=5, weight_decay=0.0003, weighting_algorithm='max_w')
2019_04_09_18_04_03 loading op dict: operations.OPS
2019_04_09_18_04_03 loading primitives:genotypes.SHARPER_PRIMITIVES
2019_04_09_18_04_03 primitives: ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5', 'flood_conv_3x3', 'flood_conv_5x5', 'dil_flood_conv_3x3']
2019_04_09_18_04_07 param size = 9.707002MB
2019_04_19_20_07_23 epoch, 119, train_acc, 88.696000, valid_acc, 84.868000, train_loss, 0.327119, valid_loss, 0.456210, lr, 1.032201e-04, best_epoch, 119, best_valid_acc, 84.868000
2019_04_19_20_07_25 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('dil_conv_5x5', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('avg_pool_3x3', 3), ('flood_conv_3x3', 0), ('flood_conv_3x3', 1), ('skip_connect', 2)], normal_concat=range(2, 6), reduce=[('flood_conv_5x5', 0), ('dil_flood_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2), ('sep_conv_3x3', 0), ('skip_connect', 2), ('dil_conv_5x5', 4), ('sep_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
2019_04_19_20_07_25 alphas_normal = tensor([[0.0064, 0.0045, 0.0048, 0.0053, 0.9306, 0.0070, 0.0069, 0.0070, 0.0064, 0.0066, 0.0069, 0.0076],
[0.0527, 0.0296, 0.0307, 0.0398, 0.0742, 0.0773, 0.0469, 0.1153, 0.2443, 0.1075, 0.0677, 0.1140],
[0.0392, 0.5563, 0.0226, 0.0266, 0.0424, 0.0451, 0.0452, 0.0414, 0.0485, 0.0419, 0.0433, 0.0476],
[0.0692, 0.0517, 0.0510, 0.0607, 0.0794, 0.2549, 0.0680, 0.0732, 0.0677, 0.0699, 0.0744, 0.0799],
[0.0454, 0.0421, 0.0343, 0.0414, 0.0426, 0.0447, 0.5136, 0.0453, 0.0458, 0.0522, 0.0464, 0.0460],
[0.0111, 0.0090, 0.0093, 0.0099, 0.0111, 0.0114, 0.0113, 0.0108, 0.0111, 0.8829, 0.0108, 0.0113],
[0.0610, 0.0434, 0.0440, 0.0507, 0.0652, 0.0654, 0.0673, 0.0664, 0.0790, 0.0715, 0.3231, 0.0629],
[0.0512, 0.0389, 0.0340, 0.4399, 0.0558, 0.0542, 0.0536, 0.0563, 0.0582, 0.0515, 0.0535, 0.0529],
[0.0081, 0.0071, 0.9128, 0.0058, 0.0081, 0.0083, 0.0082, 0.0083, 0.0082, 0.0085, 0.0083, 0.0082],
[0.0772, 0.0519, 0.0568, 0.0651, 0.0911, 0.1073, 0.1002, 0.1074, 0.0702, 0.0717, 0.0935, 0.1075],
[0.0779, 0.0605, 0.0629, 0.0704, 0.0858, 0.0788, 0.0745, 0.0790, 0.0771, 0.1685, 0.0820, 0.0824],
[0.0795, 0.0674, 0.0584, 0.1416, 0.0774, 0.0742, 0.0791, 0.0843, 0.0848, 0.0799, 0.0824, 0.0909],
[0.5488, 0.0359, 0.0321, 0.0295, 0.0464, 0.0443, 0.0413, 0.0444, 0.0459, 0.0438, 0.0430, 0.0445],
[0.6040, 0.0317, 0.0297, 0.0233, 0.0423, 0.0379, 0.0389, 0.0354, 0.0369, 0.0379, 0.0445, 0.0376]], device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_04_19_20_07_25 alphas_reduce = tensor([[0.0370, 0.0326, 0.0338, 0.0345, 0.0368, 0.0377, 0.0339, 0.0378, 0.0322, 0.0370, 0.6131, 0.0336],
[0.0800, 0.0891, 0.0890, 0.0891, 0.0888, 0.0890, 0.0672, 0.0886, 0.0890, 0.0880, 0.0530, 0.0892],
[0.0461, 0.0431, 0.0443, 0.0448, 0.5159, 0.0455, 0.0426, 0.0427, 0.0423, 0.0434, 0.0458, 0.0435],
[0.0825, 0.0883, 0.0882, 0.0886, 0.0820, 0.0883, 0.0698, 0.0882, 0.0715, 0.0776, 0.0869, 0.0882],
[0.0795, 0.0849, 0.0849, 0.1471, 0.0828, 0.0768, 0.0816, 0.0687, 0.0692, 0.0897, 0.0749, 0.0598],
[0.0694, 0.0658, 0.0676, 0.0679, 0.2674, 0.0675, 0.0653, 0.0684, 0.0669, 0.0629, 0.0648, 0.0661],
[0.0831, 0.0887, 0.0884, 0.0885, 0.0827, 0.0841, 0.0801, 0.0793, 0.0780, 0.0852, 0.0821, 0.0799],
[0.0795, 0.0818, 0.0828, 0.1563, 0.0836, 0.0785, 0.0753, 0.0717, 0.0760, 0.0711, 0.0712, 0.0721],
[0.0812, 0.0808, 0.0787, 0.1316, 0.0800, 0.0816, 0.0862, 0.0779, 0.0833, 0.0724, 0.0753, 0.0709],
[0.0801, 0.0770, 0.0786, 0.0791, 0.1442, 0.0784, 0.0740, 0.0758, 0.0773, 0.0812, 0.0776, 0.0767],
[0.0832, 0.0874, 0.0869, 0.0881, 0.0839, 0.0808, 0.0828, 0.0830, 0.0818, 0.0830, 0.0753, 0.0838],
[0.0823, 0.0844, 0.0854, 0.1030, 0.0826, 0.0882, 0.0793, 0.0819, 0.0845, 0.0774, 0.0792, 0.0719],
[0.0827, 0.0825, 0.0813, 0.1029, 0.0854, 0.0818, 0.0835, 0.0824, 0.0828, 0.0799, 0.0774, 0.0774],
[0.0799, 0.0759, 0.0747, 0.0774, 0.0808, 0.0749, 0.0827, 0.0802, 0.1554, 0.0747, 0.0713, 0.0722]], device='cuda:0', grad_fn=<SoftmaxBackward>)
2019_04_19_22_09_50 epoch, 120, train_acc, 89.072000, valid_acc, 84.676000, train_loss, 0.310882, valid_loss, 0.459557, lr, 1.000000e-04, best_epoch, 119, best_valid_acc, 84.868000
Overview ***** best_epoch: 119 best_valid_acc: 84.87 ***** Progress: 100%|| 120/120 [244:05:44<00:00, 7367.80s/it]
2019_04_19_22_09_52 genotype = Genotype(normal=[('sep_conv_3x3', 0), ('dil_conv_5x5', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('avg_pool_3x3', 3), ('flood_conv_3x3', 0), ('flood_conv_3x3', 1), ('skip_connect', 2)], normal_concat=range(2, 6), reduce=[('flood_conv_5x5', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2), ('sep_conv_3x3', 0), ('skip_connect', 2), ('dil_conv_5x5', 4), ('sep_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
2019_04_19_22_09_52 Search for Model Complete! Save dir: search-20190409-180403-max_w_SHARPER_SEARCH_efa1168-cifar10-SHARPER_PRIMITIVES-OPS-0
2019_04_20_12_51_18 param size = 6.087142MB
2019_04_20_12_51_18 flops_shape = [1, 3, 32, 32]
2019_04_20_12_51_18 flops = 950.22MMac
for i in {1..8}; do export CUDA_VISIBLE_DEVICES="2" && python3 train.py --b 64 --save SHARPER_MAX_W_2k_`git rev-parse --short HEAD` --arch SHARPER_MAX_W --epochs 2000 --cutout --autoaugment --auxiliary ; done;
"""
# Genotype from the max_w-weighted SHARPER search logged above
# (search-20190409-180403..., best_valid_acc 84.87 at epoch 119 of 120).
SHARPER_MAX_W = Genotype(normal=[('sep_conv_3x3', 0), ('dil_conv_5x5', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('avg_pool_3x3', 3), ('flood_conv_3x3', 0), ('flood_conv_3x3', 1), ('skip_connect', 2)], normal_concat=range(2, 6), reduce=[('flood_conv_5x5', 0), ('dil_flood_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2), ('sep_conv_3x3', 0), ('skip_connect', 2), ('dil_conv_5x5', 4), ('sep_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
# Raw softmax(alpha) architecture weights saved at the end of the SHARPER
# max_w search (they match the alphas_normal / alphas_reduce tensors logged
# above: one row per edge, one column per op in SHARPER_PRIMITIVES).
# layout='raw_weights' marks this as extractor input, not a runnable cell.
SHARPER_MAX_W_WEIGHTS = Genotype(normal=[[0.0064, 0.0045, 0.0048, 0.0053, 0.9306, 0.0070, 0.0069, 0.0070, 0.0064, 0.0066, 0.0069, 0.0076],
[0.0527, 0.0296, 0.0307, 0.0398, 0.0742, 0.0773, 0.0469, 0.1153, 0.2443, 0.1075, 0.0677, 0.1140],
[0.0392, 0.5563, 0.0226, 0.0266, 0.0424, 0.0451, 0.0452, 0.0414, 0.0485, 0.0419, 0.0433, 0.0476],
[0.0692, 0.0517, 0.0510, 0.0607, 0.0794, 0.2549, 0.0680, 0.0732, 0.0677, 0.0699, 0.0744, 0.0799],
[0.0454, 0.0421, 0.0343, 0.0414, 0.0426, 0.0447, 0.5136, 0.0453, 0.0458, 0.0522, 0.0464, 0.0460],
[0.0111, 0.0090, 0.0093, 0.0099, 0.0111, 0.0114, 0.0113, 0.0108, 0.0111, 0.8829, 0.0108, 0.0113],
[0.0610, 0.0434, 0.0440, 0.0507, 0.0652, 0.0654, 0.0673, 0.0664, 0.0790, 0.0715, 0.3231, 0.0629],
[0.0512, 0.0389, 0.0340, 0.4399, 0.0558, 0.0542, 0.0536, 0.0563, 0.0582, 0.0515, 0.0535, 0.0529],
[0.0081, 0.0071, 0.9128, 0.0058, 0.0081, 0.0083, 0.0082, 0.0083, 0.0082, 0.0085, 0.0083, 0.0082],
[0.0772, 0.0519, 0.0568, 0.0651, 0.0911, 0.1073, 0.1002, 0.1074, 0.0702, 0.0717, 0.0935, 0.1075],
[0.0779, 0.0605, 0.0629, 0.0704, 0.0858, 0.0788, 0.0745, 0.0790, 0.0771, 0.1685, 0.0820, 0.0824],
[0.0795, 0.0674, 0.0584, 0.1416, 0.0774, 0.0742, 0.0791, 0.0843, 0.0848, 0.0799, 0.0824, 0.0909],
[0.5488, 0.0359, 0.0321, 0.0295, 0.0464, 0.0443, 0.0413, 0.0444, 0.0459, 0.0438, 0.0430, 0.0445],
[0.6040, 0.0317, 0.0297, 0.0233, 0.0423, 0.0379, 0.0389, 0.0354, 0.0369, 0.0379, 0.0445, 0.0376]],
reduce=[[0.0370, 0.0326, 0.0338, 0.0345, 0.0368, 0.0377, 0.0339, 0.0378, 0.0322, 0.0370, 0.6131, 0.0336],
[0.0800, 0.0891, 0.0890, 0.0891, 0.0888, 0.0890, 0.0672, 0.0886, 0.0890, 0.0880, 0.0530, 0.0892],
[0.0461, 0.0431, 0.0443, 0.0448, 0.5159, 0.0455, 0.0426, 0.0427, 0.0423, 0.0434, 0.0458, 0.0435],
[0.0825, 0.0883, 0.0882, 0.0886, 0.0820, 0.0883, 0.0698, 0.0882, 0.0715, 0.0776, 0.0869, 0.0882],
[0.0795, 0.0849, 0.0849, 0.1471, 0.0828, 0.0768, 0.0816, 0.0687, 0.0692, 0.0897, 0.0749, 0.0598],
[0.0694, 0.0658, 0.0676, 0.0679, 0.2674, 0.0675, 0.0653, 0.0684, 0.0669, 0.0629, 0.0648, 0.0661],
[0.0831, 0.0887, 0.0884, 0.0885, 0.0827, 0.0841, 0.0801, 0.0793, 0.0780, 0.0852, 0.0821, 0.0799],
[0.0795, 0.0818, 0.0828, 0.1563, 0.0836, 0.0785, 0.0753, 0.0717, 0.0760, 0.0711, 0.0712, 0.0721],
[0.0812, 0.0808, 0.0787, 0.1316, 0.0800, 0.0816, 0.0862, 0.0779, 0.0833, 0.0724, 0.0753, 0.0709],
[0.0801, 0.0770, 0.0786, 0.0791, 0.1442, 0.0784, 0.0740, 0.0758, 0.0773, 0.0812, 0.0776, 0.0767],
[0.0832, 0.0874, 0.0869, 0.0881, 0.0839, 0.0808, 0.0828, 0.0830, 0.0818, 0.0830, 0.0753, 0.0838],
[0.0823, 0.0844, 0.0854, 0.1030, 0.0826, 0.0882, 0.0793, 0.0819, 0.0845, 0.0774, 0.0792, 0.0719],
[0.0827, 0.0825, 0.0813, 0.1029, 0.0854, 0.0818, 0.0835, 0.0824, 0.0828, 0.0799, 0.0774, 0.0774],
[0.0799, 0.0759, 0.0747, 0.0774, 0.0808, 0.0749, 0.0827, 0.0802, 0.1554, 0.0747, 0.0713, 0.0722]], normal_concat=[], reduce_concat=[], layout='raw_weights')
# Retrieved from SHARPER_MAX_W_WEIGHTS by running genotype_extractor.py
# NOTE(review): identical to SHARPER_MAX_W above — presumably the extractor's
# default skip/none filtering reproduces the search-time genotype; confirm
# with genotype_extractor.py.
SHARPER_MAX_W_genotype_skip_none = Genotype(normal=[('sep_conv_3x3', 0), ('dil_conv_5x5', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('avg_pool_3x3', 3), ('flood_conv_3x3', 0), ('flood_conv_3x3', 1), ('skip_connect', 2)], normal_concat=range(2, 6), reduce=[('flood_conv_5x5', 0), ('dil_flood_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2), ('sep_conv_3x3', 0), ('skip_connect', 2), ('dil_conv_5x5', 4), ('sep_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
"""
costar@ubuntu|/media/costar/7d094c19-d61f-48fe-93cb-0f7287e05292/datasets/sharpDARTS/cnn on sharper!?
± for i in {1..8}; do export CUDA_VISIBLE_DEVICES="1" && python3 train.py --autoaugment --auxiliary --cutout --batch_size 48 --epochs 2000 --save SHARPER_MAX_W_genotype_no_hack_2k_`git rev-parse --short HEAD`_cospower_min_1e-8 --learning_rate 0.025 --learning_rate_min 1e-8 --cutout_length 16 --init_channels 36 --dataset cifar10 --arch SHARPER_MAX_W_genotype_no_hack --primitives SHARPER_PRIMITIVES ; done;
Tensorflow is not installed. Skipping tf related imports
Experiment dir : eval-20190515-142614-SHARPER_MAX_W_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_MAX_W_genotype_no_hack-0
2019_05_15_14_26_14 gpu device = 0
2019_05_15_14_26_14 args = Namespace(arch='SHARPER_MAX_W_genotype_no_hack', autoaugment=True, auxiliary=True, auxiliary_weight=0.4, batch_size=48, cutout=True, cutout_length=16, data='../data', dataset='cifar10', drop_path_prob=0.2, epoch_stats_file='eval-20190515-142614-SHARPER_MAX_W_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_MAX_W_genotype_no_hack-0/eval-epoch-stats-20190515-142614.json', epochs=2000, evaluate='', flops=False, gpu=0, grad_clip=5, init_channels=36, layers=20, layers_in_cells=4, layers_of_cells=8, learning_rate=0.025, learning_rate_min=1e-08, load='', load_args='', load_genotype=None, log_file_path='eval-20190515-142614-SHARPER_MAX_W_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_MAX_W_genotype_no_hack-0/log.txt', lr_power_annealing_exponent_order=2, mid_channels=32, mixed_auxiliary=False, model_path='saved_models', momentum=0.9, multi_channel=False, ops='OPS', optimizer='sgd', partial=0.125, primitives='SHARPER_PRIMITIVES', random_eraser=False, report_freq=50, save='eval-20190515-142614-SHARPER_MAX_W_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_MAX_W_genotype_no_hack-0', seed=0, start_epoch=1, stats_file='eval-20190515-142614-SHARPER_MAX_W_genotype_no_hack_2k_b374f37_cospower_min_1e-8-cifar10-SHARPER_MAX_W_genotype_no_hack-0/eval-stats-20190515-142614.json', warm_restarts=20, warmup_epochs=5, weight_decay=0.0003, weighting_algorithm='scalar')
2019_05_15_14_26_14 output channels: 10
2019_05_15_14_26_14 loading op dict: operations.OPS
2019_05_15_14_26_14 loading primitives:genotypes.SHARPER_PRIMITIVES
2019_05_15_14_26_14 primitives: ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5', 'flood_conv_3x3', 'flood_conv_5x5', 'dil_flood_conv_3x3']
2019_05_15_14_26_17 param size = 4.697614MB
"""
# Genotype extracted from the max_w-search weights without excluding the
# 'none' primitive ("no hack"); two normal-cell edges select 'none', matching
# the smaller 4.70MB param size in the training log above.
# Fix: removed a trailing extraction artifact (" | 81,512 | 126.165367 | ...")
# that had been appended to this line and made it a syntax error.
SHARPER_MAX_W_genotype_no_hack = Genotype(normal=[('sep_conv_3x3', 0), ('dil_conv_5x5', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('avg_pool_3x3', 3), ('flood_conv_3x3', 0), ('none', 4), ('none', 3)], normal_concat=range(2, 6), reduce=[('flood_conv_5x5', 0), ('dil_flood_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2), ('sep_conv_3x3', 0), ('skip_connect', 2), ('dil_conv_5x5', 4), ('sep_conv_3x3', 0)], reduce_concat=range(2, 6), layout='cell')
# sharpDARTS-master/cnn/operations.py
import torch
import torch.nn as nn
# Simplified new version based on actual results, partially adapted from PNASNet https://github.com/chenxi116/PNASNet.pytorch
OPS = {
  # Each entry maps (C_in, C_out, stride, affine, C_mid) -> an nn.Module.
  # C_mid is only consumed by the 'choke' ops (fixed middle-channel
  # bottleneck); the 'flood' ops instead widen the middle with C_mid_mult=4.
  'none': lambda C_in, C_out, stride, affine, C_mid=None: Zero(stride),
  'avg_pool_3x3': lambda C_in, C_out, stride, affine, C_mid=None: ResizablePool(C_in, C_out, 3, stride, padding=1, affine=affine, pool_type=nn.AvgPool2d),
  'max_pool_3x3': lambda C_in, C_out, stride, affine, C_mid=None: ResizablePool(C_in, C_out, 3, stride, padding=1, affine=affine),
  'skip_connect': lambda C_in, C_out, stride, affine, C_mid=None: Identity() if stride == 1 else FactorizedReduce(C_in, C_out, 1, stride, 0, affine=affine),
  'sep_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 3, stride, padding=1, affine=affine),
  'sep_conv_5x5': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 5, stride, padding=2, affine=affine),
  # BUG FIX: 'flood_conv_3x3' used to appear twice in this dict literal, first
  # with padding=2 and later with padding=1; Python keeps the first insertion
  # position but the LAST value, so only the padding=1 definition (which
  # preserves spatial size for k=3, dilation=1) was ever used. The dead
  # duplicate has been removed; effective mapping and iteration order are
  # unchanged.
  'flood_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 3, stride, padding=1, affine=affine, C_mid_mult=4),
  'flood_conv_5x5': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 5, stride, padding=2, affine=affine, C_mid_mult=4),
  'sep_conv_7x7': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 7, stride, padding=3, affine=affine),
  'dil_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 3, stride, padding=2, dilation=2, affine=affine),
  'dil_conv_5x5': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 5, stride, padding=4, dilation=2, affine=affine),
  'conv_7x1_1x7': lambda C_in, C_out, stride, affine, C_mid=None: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C_in, C_in, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
    nn.Conv2d(C_in, C_out, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
    nn.BatchNorm2d(C_out, eps=1e-3, affine=affine)
    ),
  'dil_flood_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 3, stride, padding=2, dilation=2, affine=affine, C_mid_mult=4),
  # NOTE(review): with k=5, dilation=2 the effective kernel is 9, so padding=2
  # shrinks the output spatially; genotypes.py documents dil_flood_conv_5x5 as
  # not correctly supported by SharpSepConv for exactly this reason. Left
  # unchanged to preserve behavior of any existing genotypes.
  'dil_flood_conv_5x5': lambda C_in, C_out, stride, affine, C_mid=None: SharpSepConv(C_in, C_out, 5, stride, padding=2, dilation=2, affine=affine, C_mid_mult=4),
  'choke_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=32: SharpSepConv(C_in, C_out, 3, stride, padding=1, affine=affine, C_mid=C_mid),
  'dil_choke_conv_3x3': lambda C_in, C_out, stride, affine, C_mid=32: SharpSepConv(C_in, C_out, 3, stride, padding=2, dilation=2, affine=affine, C_mid=C_mid),
}
# Old Version from original DARTS paper
# Legacy op dict from the original DARTS paper's search space. The lambdas
# keep the newer (C, C_out, stride, affine, C_mid) signature for call-site
# compatibility, but every op is constructed with C channels in and out:
# C_out and C_mid are accepted and ignored.
DARTS_OPS = {
  'none': lambda C, C_out, stride, affine, C_mid=None: Zero(stride),
  'avg_pool_3x3': lambda C, C_out, stride, affine, C_mid=None: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
  'max_pool_3x3': lambda C, C_out, stride, affine, C_mid=None: nn.MaxPool2d(3, stride=stride, padding=1),
  # NOTE(review): FactorizedReduce is called here with 2 positional args but
  # with 5 in OPS above — confirm FactorizedReduce supports both signatures.
  'skip_connect': lambda C, C_out, stride, affine, C_mid=None: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
  # NOTE(review): these use SharpSepConv rather than the original DARTS
  # SepConv, so "old version" here means the old search space, not old ops.
  'sep_conv_3x3': lambda C, C_out, stride, affine, C_mid=None: SharpSepConv(C, C, 3, stride, 1, affine=affine),
  'sep_conv_5x5': lambda C, C_out, stride, affine, C_mid=None: SharpSepConv(C, C, 5, stride, 2, affine=affine),
  'sep_conv_7x7': lambda C, C_out, stride, affine, C_mid=None: SharpSepConv(C, C, 7, stride, 3, affine=affine),
  'dil_conv_3x3': lambda C, C_out, stride, affine, C_mid=None: DilConv(C, C, 3, stride, 2, 2, affine=affine),
  'dil_conv_5x5': lambda C, C_out, stride, affine, C_mid=None: DilConv(C, C, 5, stride, 4, 2, affine=affine),
  'conv_7x1_1x7': lambda C, C_out, stride, affine, C_mid=None: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
    nn.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
    nn.BatchNorm2d(C, affine=affine)
    ),
  'nor_conv_3x3': lambda C, C_out, stride, affine, C_mid=None: ConvBNReLU(C, C, 3, stride, 1, affine=affine),
  'nor_conv_5x5': lambda C, C_out, stride, affine, C_mid=None: ConvBNReLU(C, C, 5, stride, 2, affine=affine),
  'nor_conv_7x7': lambda C, C_out, stride, affine, C_mid=None: ConvBNReLU(C, C, 7, stride, 3, affine=affine),
}
# Ops for the multichannelnet search space (keys match
# MULTICHANNELNET_PRIMITIVES in genotypes.py).
# NOTE(review): unlike OPS / DARTS_OPS these lambdas take no 'affine'
# argument (affine is hard-coded True), i.e. the calling convention is
# (C_in, C_out, stride, C_mid) — confirm at the multi_channel call sites.
MULTICHANNELNET_OPS = {
  'ResizablePool': lambda C_in, C_out, stride, C_mid=None: ResizablePool(C_in, C_out, 3, stride, padding=1, affine=True),
  'SharpSepConv': lambda C_in, C_out, stride, C_mid=None: SharpSepConv(C_in, C_out, 3, stride, padding=1, affine=True),
}
class ResizablePool(nn.Module):
  """Pooling op that can also change the channel count.

  When C_in == C_out the op is the pooling layer alone; otherwise the pooled
  features are projected to C_out channels via a 1x1 conv + BatchNorm.
  """

  def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, affine=True, pool_type=nn.MaxPool2d):
    super(ResizablePool, self).__init__()
    pool = pool_type(kernel_size=kernel_size, stride=stride, padding=padding)
    if C_in == C_out:
      # Channel count is unchanged, so pooling alone suffices.
      self.op = pool
    else:
      # Pool, then project to the requested number of channels.
      self.op = nn.Sequential(
        pool,
        nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(C_out, eps=1e-3, affine=affine))

  def forward(self, x):
    return self.op(x)
class ConvBNReLU(nn.Module):
  """Standard Conv2d -> BatchNorm -> ReLU block (no bias on the conv)."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(ConvBNReLU, self).__init__()
    layers = [
      nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
      nn.ReLU(inplace=False),
    ]
    self.op = nn.Sequential(*layers)

  def forward(self, x):
    return self.op(x)
class ReLUConvBN(nn.Module):
  """Pre-activation block: ReLU -> Conv2d (no bias) -> BatchNorm."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(ReLUConvBN, self).__init__()
    layers = [
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
    ]
    self.op = nn.Sequential(*layers)

  def forward(self, x):
    return self.op(x)
class DilConv(nn.Module):
  """Dilated depthwise-separable conv: ReLU -> depthwise dilated conv ->
  1x1 pointwise conv -> BatchNorm."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
    super(DilConv, self).__init__()
    # groups=C_in makes the first conv depthwise; the 1x1 conv mixes channels.
    depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                          padding=padding, dilation=dilation, groups=C_in, bias=False)
    pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False)
    self.op = nn.Sequential(
      nn.ReLU(inplace=False),
      depthwise,
      pointwise,
      nn.BatchNorm2d(C_out, affine=affine))

  def forward(self, x):
    return self.op(x)
class SepConv(nn.Module):
  """Depthwise-separable conv: ReLU -> depthwise conv -> 1x1 pointwise conv
  -> BatchNorm. If padding is None it defaults to (kernel_size - 1) // 2,
  which preserves spatial size at stride 1 and dilation 1."""

  def __init__(self, C_in, C_out, kernel_size=1, stride=1, padding=None, dilation=1, affine=True):
    super(SepConv, self).__init__()
    if padding is None:
      # "Same"-style padding for stride-1, non-dilated convolutions.
      padding = (kernel_size - 1) // 2
    self.op = nn.Sequential(
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, groups=C_in, bias=False),
      nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(C_out, affine=affine))

  def forward(self, x):
    return self.op(x)
class SharpSepConv(nn.Module):
  """Two stacked depthwise-separable convolutions with a configurable middle
  channel count.

  Structure: ReLU -> depthwise conv (stride/dilation applied here) -> 1x1 to
  c_mid -> BN -> ReLU -> depthwise conv (always stride 1, dilation 1) ->
  1x1 to C_out -> BN. c_mid is C_mid when given, otherwise C_out * C_mid_mult.
  """

  def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, dilation=1,
               affine=True, C_mid_mult=1, C_mid=None):
    super(SharpSepConv, self).__init__()
    # Explicit C_mid wins; otherwise scale the output channel count.
    c_mid = C_mid if C_mid is not None else int(C_out * C_mid_mult)
    # Padding for the second depthwise conv is derived from the kernel size;
    # that conv is always stride 1 and not dilated, so (k - 1) // 2 preserves
    # the spatial size (see https://pytorch.org/docs/stable/nn.html#conv2d:
    # H_out = (((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1)).
    self.op = nn.Sequential(
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, groups=C_in, bias=False),
      nn.Conv2d(C_in, c_mid, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(c_mid, affine=affine),
      nn.ReLU(inplace=False),
      nn.Conv2d(c_mid, c_mid, kernel_size=kernel_size, stride=1,
                padding=(kernel_size - 1) // 2, dilation=1, groups=c_mid, bias=False),
      nn.Conv2d(c_mid, C_out, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(C_out, affine=affine))

  def forward(self, x):
    return self.op(x)
class Identity(nn.Module):
  """Pass-through op: forward returns its input unchanged."""
  def __init__(self):
    super(Identity, self).__init__()
  def forward(self, x):
    # Identity mapping; used e.g. by 'skip_connect' when stride == 1.
    return x
class Zero(nn.Module):
  """The 'none' op: emits an all-zero tensor shaped like the (possibly
  spatially strided) input, so downstream shapes match a real op's output."""

  def __init__(self, stride):
    super(Zero, self).__init__()
    self.stride = stride

  def forward(self, x):
    # Subsample spatially first when stride > 1, then zero out (non-inplace,
    # so the autograd graph shape is preserved).
    source = x if self.stride == 1 else x[:, :, ::self.stride, ::self.stride]
    return source.mul(0.)
class FactorizedReduce(nn.Module):
  """Halve spatial resolution without losing pixel parity information.

  Two parallel stride-2 1x1 convolutions each produce C_out // 2 channels:
  conv_1 samples the even-offset pixels, conv_2 samples the odd-offset pixels
  (the input is padded by one on the right/bottom, then shifted one pixel
  down-right via slicing). Their outputs are concatenated to C_out channels
  and batch-normalized.
  """
  def __init__(self, C_in, C_out, kernel_size=1, stride=2, padding=0, affine=True):
    super(FactorizedReduce, self).__init__()
    # Each branch contributes half of the output channels.
    assert C_out % 2 == 0
    self.relu = nn.ReLU(inplace=False)
    self.conv_1 = nn.Conv2d(C_in, C_out // 2, kernel_size, stride=stride, padding=padding, bias=False)
    self.conv_2 = nn.Conv2d(C_in, C_out // 2, kernel_size, stride=stride, padding=padding, bias=False)
    self.bn = nn.BatchNorm2d(C_out, affine=affine)
    # Pads one zero column on the right and one zero row on the bottom, so the
    # shifted slice below keeps the original spatial size.
    self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
  def forward(self, x):
    x = self.relu(x)
    # y is x shifted one pixel up-left after the slice; combined with stride 2
    # this makes conv_2 see the pixels conv_1 skips.
    y = self.pad(x)
    out = torch.cat([self.conv_1(x), self.conv_2(y[:,:,1:,1:])], dim=1)
    out = self.bn(out)
    return out
| 9,428 | 46.38191 | 161 | py |
sharpDARTS | sharpDARTS-master/cnn/genotype_extractor.py | '''
This file includes code from the DARTS and sharpDARTS https://arxiv.org/abs/1903.09900 papers.
'''
import numpy as np
import genotypes
def parse_cell(weights, primitives, steps=4, skip_primitive='none'):
  """Turn a weight array into a list of (primitive_name, source_node) pairs.

  weights has one row per candidate edge and one column per primitive.
  Node 0 and 1 are the outputs of the two previous cells; each of the
  `steps` add-nodes selects its two strongest incoming edges (rows
  2, 3, 4, 5, ... candidates respectively) and, for each chosen edge,
  the highest-scoring primitive. When skip_primitive is a primitive name
  (the original DARTS 'none' hack) that column is excluded from both
  the edge ranking and the primitive choice; None disables the hack.
  """
  # Column index to exclude; -1 never matches a real column, so using it when
  # skip_primitive is None keeps every primitive eligible.
  skip_idx = primitives.index(skip_primitive) if skip_primitive is not None else -1
  gene = []
  row_start = 0
  num_rows = 2  # the first add-node can only see the two previous cells
  for node in range(steps):
    row_end = row_start + num_rows
    # Rows for this node only:
    #   node 0 -> rows 0-1, node 1 -> rows 2-4, node 2 -> rows 5-8, ...
    W = weights[row_start:row_end].copy()

    def edge_strength(edge):
      # Strength of an edge is its best eligible primitive score.
      return max(W[edge][k] for k in range(len(W[edge])) if k != skip_idx)

    # Keep the two strongest incoming edges (sorted() is stable, matching the
    # original tie-breaking; negate to rank highest first).
    chosen_edges = sorted(range(node + 2), key=lambda e: -edge_strength(e))[:2]
    for edge in chosen_edges:
      # Pick the best eligible primitive for this edge.
      best = None
      for k in range(len(W[edge])):
        if k != skip_idx and (best is None or W[edge][k] > W[edge][best]):
          best = k
      gene.append((primitives[best], edge))
    row_start = row_end
    num_rows += 1  # each later node can also see every earlier add-node
  return gene
def genotype_cell(alphas_normal, alphas_reduce, primitives, steps=4, multiplier=4, skip_primitive='none'):
  """Build a cell Genotype from raw normal/reduce alpha weight arrays.

  skip_primitive='none' reproduces the original DARTS hack that removes the
  no-op primitive from consideration; skip_primitive=None applies no hack.
  The alphas are expected to already be post-softmax (see
  Network.arch_weights() in model_search.py), so no softmax is applied here.
  """
  normal_gene = parse_cell(alphas_normal, primitives, steps, skip_primitive=skip_primitive)
  reduce_gene = parse_cell(alphas_reduce, primitives, steps, skip_primitive=skip_primitive)
  # The last `multiplier` intermediate nodes are concatenated as the cell output.
  concat = range(2 + steps - multiplier, steps + 2)
  return genotypes.Genotype(
    normal=normal_gene, normal_concat=concat,
    reduce=reduce_gene, reduce_concat=concat,
    layout='cell',
  )
def main():
  '''
  Parse raw weights with the hack that excludes the 'none' primitive, and without that hack.
  Then print out the final genotypes.

  Fix: the original body duplicated the whole extraction sequence per weight
  set and assigned several locals (skip_primitive, steps, multiplier) that were
  never used; the duplicated logic is now a single loop producing identical
  output lines.
  '''
  # for steps, see layers_in_cells in train_search.py
  steps = 4
  # for multiplier, see multiplier for Network class in model_search.py
  multiplier = 4
  primitives = genotypes.SHARPER_PRIMITIVES
  # note the printed weights from a call to Network::arch_weights() in model_search.py
  # are already post-softmax, so we don't need to apply softmax again
  weight_sets = [
      ('SHARPER_SCALAR', genotypes.SHARPER_SCALAR_WEIGHTS),
      ('SHARPER_MAX_W', genotypes.SHARPER_MAX_W_WEIGHTS),
  ]
  for name, raw_weights_genotype in weight_sets:
    # get the normal and reduce weights as a numpy array
    alphas_normal = np.array(raw_weights_genotype.normal)
    alphas_reduce = np.array(raw_weights_genotype.reduce)
    print('#################')
    # skip_primitive = 'none' is a hack in original DARTS, which removes a no-op primitive.
    genotype = genotype_cell(alphas_normal, alphas_reduce, primitives, steps=steps, multiplier=multiplier, skip_primitive='none')
    print(name + '_genotype_skip_none = ' + str(genotype))
    # skip_primitive = None means no hack is applied
    genotype = genotype_cell(alphas_normal, alphas_reduce, primitives, steps=steps, multiplier=multiplier, skip_primitive=None)
    print(name + '_genotype_no_hack = ' + str(genotype))
if __name__ == '__main__':
main()
| 7,601 | 54.086957 | 148 | py |
sharpDARTS | sharpDARTS-master/rnn/test.py | import argparse
import os, sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import data
import model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
# Command-line options for evaluating a trained language model on the test set.
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
# Data and model-size options.
parser.add_argument('--data', type=str, default='../data/penn/',
                    help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
                    help='number of hidden units for the last rnn layer')
# Optimization options.
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
# Regularization (dropout) options.
parser.add_argument('--dropout', type=float, default=0.75,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
                    help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.2,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
                    help='random seed')
# NOTE(review): the help text below ('random seed') looks copy-pasted from
# --seed; a doc-only change cannot fix the runtime string.
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
# store_false: CUDA is ON by default; passing --cuda disables it.
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--model_path', type=str, default='EXP/model.pt',
                    help='path to load the pretrained model')
parser.add_argument('--alpha', type=float, default=0,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
                    help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
                    help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=1,
                    help='number of experts')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
                    help='max sequence length')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
args = parser.parse_args()
def logging(s, print_=True, log_=True):
  """Log message s to the console.

  Fix: the original implementation printed unconditionally and ignored both
  flags. print_ now controls console output (default behavior is unchanged).
  log_ is kept for interface compatibility; this evaluation script has no log
  file, so it is accepted but unused.
  """
  if print_:
    print(s)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
  if not args.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
  else:
    # Pin the requested GPU and enable cudnn autotuning before seeding CUDA.
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    torch.cuda.manual_seed_all(args.seed)
# Load the corpus and lay out the test split as one long column (batch size 1)
# so evaluation sees an unbroken token stream.
corpus = data.Corpus(args.data)
test_batch_size = 1
test_data = batchify(corpus.test, test_batch_size, args)
def evaluate(data_source, batch_size=10):
  """Return the average per-token NLL of the global `model` on data_source.

  Relies on module-level globals: model, parallel_model, corpus, args.
  Iterates the data in BPTT-sized windows, detaching the hidden state
  between windows so memory does not grow with sequence length.
  """
  # Turn on evaluation mode which disables dropout.
  model.eval()
  total_loss = 0
  ntokens = len(corpus.dictionary)
  hidden = model.init_hidden(batch_size)
  for i in range(0, data_source.size(0) - 1, args.bptt):
    # Progress trace: current offset vs. total tokens.
    print(i, data_source.size(0)-1)
    data, targets = get_batch(data_source, i, args, evaluation=True)
    targets = targets.view(-1)
    log_prob, hidden = parallel_model(data, hidden)
    # Weight each window's mean loss by its length for a correct overall mean.
    loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
    total_loss += loss * len(data)
    # Detach hidden state so the graph from this window can be freed.
    hidden = repackage_hidden(hidden)
  # NOTE(review): total_loss[0] relies on pre-0.4 PyTorch scalar indexing;
  # on modern PyTorch this would need total_loss.item() — confirm torch version.
  return total_loss[0] / len(data_source)
# Load the best saved model.
# NOTE(review): torch.load of a whole pickled model requires the defining
# classes (model.py etc.) to be importable at load time.
model = torch.load(args.model_path)
total_params = sum(x.data.nelement() for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))
# Despite the name, this just moves the model to the GPU (no DataParallel here).
parallel_model = model.cuda()
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging('=' * 89)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging('=' * 89)
| 5,048 | 40.385246 | 118 | py |
sharpDARTS | sharpDARTS-master/rnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _clip(grads, max_norm):
total_norm = 0
for g in grads:
param_norm = g.data.norm(2)
total_norm += param_norm ** 2
total_norm = total_norm ** 0.5
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grads:
g.data.mul_(clip_coef)
return clip_coef
class Architect(object):
  """Architecture (alpha) optimizer for DARTS RNN search.

  Holds the search model plus an Adam optimizer over its architecture
  parameters. step() performs one alpha update, either first-order
  (gradient of the validation loss directly) or second-order ("unrolled":
  differentiate through one simulated SGD step on the training batch).
  """
  def __init__(self, model, args):
    # Decay/clipping applied to the *network* weights when simulating the
    # unrolled weight step (not to the alphas).
    self.network_weight_decay = args.wdecay
    self.network_clip = args.clip
    self.model = model
    # Adam over the architecture parameters only.
    self.optimizer = torch.optim.Adam(self.model.arch_parameters(), lr=args.arch_lr, weight_decay=args.arch_wdecay)
  def _compute_unrolled_model(self, hidden, input, target, eta):
    # Build w' = w - eta * (dL_train/dw + wdecay * w): one simulated SGD step.
    loss, hidden_next = self.model._loss(hidden, input, target)
    theta = _concat(self.model.parameters()).data
    grads = torch.autograd.grad(loss, self.model.parameters())
    clip_coef = _clip(grads, self.network_clip)
    dtheta = _concat(grads).data + self.network_weight_decay*theta
    # NOTE(review): theta.sub(eta, dtheta) uses the legacy two-argument
    # Tensor.sub overload (theta - eta * dtheta); newer PyTorch spells this
    # theta.sub(dtheta, alpha=eta) — confirm the targeted torch version.
    unrolled_model = self._construct_model_from_theta(theta.sub(eta, dtheta))
    return unrolled_model, clip_coef
  def step(self,
          hidden_train, input_train, target_train,
          hidden_valid, input_valid, target_valid,
          network_optimizer, unrolled):
    """Run one architecture update; returns (next hidden state, None)."""
    # Use the network optimizer's current learning rate as the unroll step size.
    eta = network_optimizer.param_groups[0]['lr']
    self.optimizer.zero_grad()
    if unrolled:
      hidden = self._backward_step_unrolled(hidden_train, input_train, target_train, hidden_valid, input_valid, target_valid, eta)
    else:
      hidden = self._backward_step(hidden_valid, input_valid, target_valid)
    self.optimizer.step()
    return hidden, None
  def _backward_step(self, hidden, input, target):
    # First-order approximation: backprop the validation loss into the alphas.
    loss, hidden_next = self.model._loss(hidden, input, target)
    loss.backward()
    return hidden_next
  def _backward_step_unrolled(self,
          hidden_train, input_train, target_train,
          hidden_valid, input_valid, target_valid, eta):
    # Second-order DARTS update:
    # 1) simulate one weight step on the training batch,
    unrolled_model, clip_coef = self._compute_unrolled_model(hidden_train, input_train, target_train, eta)
    # 2) backprop the validation loss of the unrolled model,
    unrolled_loss, hidden_next = unrolled_model._loss(hidden_valid, input_valid, target_valid)
    unrolled_loss.backward()
    dalpha = [v.grad for v in unrolled_model.arch_parameters()]
    dtheta = [v.grad for v in unrolled_model.parameters()]
    _clip(dtheta, self.network_clip)
    vector = [dt.data for dt in dtheta]
    # 3) correct dalpha with a finite-difference Hessian-vector product.
    implicit_grads = self._hessian_vector_product(vector, hidden_train, input_train, target_train, r=1e-2)
    for g, ig in zip(dalpha, implicit_grads):
      # NOTE(review): legacy two-argument sub_: g -= (eta * clip_coef) * ig.
      g.data.sub_(eta * clip_coef, ig.data)
    # Copy the corrected gradients onto the live model's alpha parameters.
    for v, g in zip(self.model.arch_parameters(), dalpha):
      if v.grad is None:
        v.grad = Variable(g.data)
      else:
        v.grad.data.copy_(g.data)
    return hidden_next
  def _construct_model_from_theta(self, theta):
    # Clone the search model and load the flattened parameter vector theta
    # back into the clone's state dict, slice by slice.
    model_new = self.model.new()
    model_dict = self.model.state_dict()
    params, offset = {}, 0
    for k, v in self.model.named_parameters():
      v_length = np.prod(v.size())
      params[k] = theta[offset: offset+v_length].view(v.size())
      offset += v_length
    assert offset == len(theta)
    model_dict.update(params)
    model_new.load_state_dict(model_dict)
    return model_new.cuda()
  def _hessian_vector_product(self, vector, hidden, input, target, r=1e-2):
    # Central finite difference of dL_train/dalpha around w +/- R*v,
    # approximating (d^2 L_train / dw dalpha) . v  (DARTS paper, eq. 8).
    R = r / _concat(vector).norm()
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)
    loss, _ = self.model._loss(hidden, input, target)
    grads_p = torch.autograd.grad(loss, self.model.arch_parameters())
    for p, v in zip(self.model.parameters(), vector):
      p.data.sub_(2*R, v)
    loss, _ = self.model._loss(hidden, input, target)
    grads_n = torch.autograd.grad(loss, self.model.arch_parameters())
    # Restore the original weights before returning.
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)
    return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 4,003 | 34.122807 | 132 | py |
sharpDARTS | sharpDARTS-master/rnn/utils.py | import torch
import torch.nn as nn
import os, shutil
import numpy as np
from torch.autograd import Variable
def repackage_hidden(h):
  """Detach hidden state(s) from their autograd history.

  Recursively rebuilds tuples of hidden states, wrapping each leaf's .data in
  a fresh Variable so backprop does not extend through previous BPTT windows.
  NOTE(review): the `type(h) == Variable` check is a pre-0.4 PyTorch idiom;
  on modern torch, tensors are not instances of the Variable *type*, so this
  leaf test would not trigger — confirm the targeted torch version.
  """
  if type(h) == Variable:
    return Variable(h.data)
  else:
    return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, args):
  """Reshape a 1-D token stream into a (nbatch, bsz) matrix.

  Trailing tokens that do not fill a whole batch are dropped; each column is
  a contiguous slice of the original stream. Moves the result to the GPU
  when args.cuda is set.
  """
  nbatch = data.size(0) // bsz
  # Drop the remainder so the stream divides evenly into bsz columns.
  trimmed = data.narrow(0, 0, nbatch * bsz)
  batched = trimmed.view(bsz, -1).t().contiguous()
  print(batched.size())
  if args.cuda:
    batched = batched.cuda()
  return batched
def get_batch(source, i, args, seq_len=None, evaluation=False):
  """Slice one BPTT window starting at row i from a batchified source.

  Returns (data, target) where target is data shifted by one time step.
  The window is truncated near the end of the source so both slices fit.
  NOTE(review): Variable(..., volatile=...) is a pre-0.4 PyTorch construct
  (volatile was removed in 0.4) — confirm the targeted torch version.
  """
  seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
  data = Variable(source[i:i+seq_len], volatile=evaluation)
  target = Variable(source[i+1:i+1+seq_len])
  return data, target
def create_exp_dir(path, scripts_to_save=None):
  """Create the experiment directory (if missing) and optionally snapshot
  the given script files into a 'scripts' subdirectory for reproducibility."""
  if not os.path.exists(path):
    os.mkdir(path)
  print('Experiment dir : {}'.format(path))
  if scripts_to_save is not None:
    scripts_dir = os.path.join(path, 'scripts')
    os.mkdir(scripts_dir)
    for script in scripts_to_save:
      # Copy each script by basename into the snapshot directory.
      shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
def save_checkpoint(model, optimizer, epoch, path, finetune=False):
  """Persist the model, optimizer state, and next-epoch marker under path.

  finetune=True prefixes the model/optimizer filenames with 'finetune_';
  misc.pt always stores {'epoch': epoch + 1} (the epoch to resume from).
  """
  prefix = 'finetune_' if finetune else ''
  torch.save(model, os.path.join(path, prefix + 'model.pt'))
  torch.save(optimizer.state_dict(), os.path.join(path, prefix + 'optimizer.pt'))
  torch.save({'epoch': epoch+1}, os.path.join(path, 'misc.pt'))
def embedded_dropout(embed, words, dropout=0.1, scale=None):
  """Embedding lookup with word-level dropout on the embedding matrix.

  Zeroes entire embedding rows (whole words) with probability `dropout` and
  rescales the survivors by 1/(1 - dropout); optional `scale` multiplies the
  masked weight. NOTE(review): embed._backend.Embedding.apply is a pre-0.4
  PyTorch internal that no longer exists on modern torch (replaced by
  torch.nn.functional.embedding) — confirm the targeted torch version.
  """
  if dropout:
    # One Bernoulli draw per vocabulary row, broadcast across the embedding dim.
    mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
    mask = Variable(mask)
    masked_embed_weight = mask * embed.weight
  else:
    masked_embed_weight = embed.weight
  if scale:
    masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
  padding_idx = embed.padding_idx
  if padding_idx is None:
      padding_idx = -1
  # Perform the lookup with the masked weight, preserving the embedding's
  # own configuration (max_norm, norm_type, sparse, ...).
  X = embed._backend.Embedding.apply(words, masked_embed_weight,
    padding_idx, embed.max_norm, embed.norm_type,
    embed.scale_grad_by_freq, embed.sparse
  )
  return X
class LockedDropout(nn.Module):
  """Variational (locked) dropout: samples one mask per (batch, feature) and
  reuses it across all time steps, instead of resampling per step."""

  def __init__(self):
    super(LockedDropout, self).__init__()

  def forward(self, x, dropout=0.5):
    # No-op outside training or when dropout is disabled.
    if not self.training or not dropout:
      return x
    keep = 1 - dropout
    # Time dimension is 1 so the same mask broadcasts over every step.
    noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep)
    mask = Variable(noise.div_(keep), requires_grad=False)
    return mask.expand_as(x) * x
def mask2d(B, D, keep_prob, cuda=True):
  """Sample a (B, D) inverted-dropout mask.

  Each entry is 1/keep_prob with probability keep_prob and 0 otherwise, so
  the expected value of a masked activation is unchanged.
  """
  m = torch.floor(torch.rand(B, D) + keep_prob) / keep_prob
  mask = Variable(m, requires_grad=False)
  return mask.cuda() if cuda else mask
sharpDARTS | sharpDARTS-master/rnn/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import STEPS
from utils import mask2d
from utils import LockedDropout
from utils import embedded_dropout
from torch.autograd import Variable
INITRANGE = 0.04
class DARTSCell(nn.Module):
  """Recurrent cell whose internal DAG is described by a DARTS genotype.

  Each internal node applies a highway-style update
  s = s_prev + c * (f(h) - s_prev), where c (sigmoid gate) and h come from a
  learned linear map of the chosen predecessor state and f is the node's
  activation from the genotype. The cell output averages the concat nodes.
  """
  def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
    super(DARTSCell, self).__init__()
    self.nhid = nhid
    self.dropouth = dropouth
    self.dropoutx = dropoutx
    self.genotype = genotype
    # genotype is None when doing arch search
    steps = len(self.genotype.recurrent) if self.genotype is not None else STEPS
    # _W0 maps [x, h_prev] -> (c0, h0) for the initial state; one _Ws matrix
    # per internal step maps a state -> (c, h).
    self._W0 = nn.Parameter(torch.Tensor(ninp+nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE))
    self._Ws = nn.ParameterList([
        nn.Parameter(torch.Tensor(nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE)) for i in range(steps)
    ])
  def forward(self, inputs, hidden):
    # inputs: (time, batch, ninp); hidden: list with one (1, batch, nhid) state.
    T, B = inputs.size(0), inputs.size(1)
    if self.training:
      # Variational dropout masks, fixed for the whole sequence.
      x_mask = mask2d(B, inputs.size(2), keep_prob=1.-self.dropoutx)
      h_mask = mask2d(B, hidden.size(2), keep_prob=1.-self.dropouth)
    else:
      x_mask = h_mask = None
    hidden = hidden[0]
    hiddens = []
    for t in range(T):
      hidden = self.cell(inputs[t], hidden, x_mask, h_mask)
      hiddens.append(hidden)
    hiddens = torch.stack(hiddens)
    # Return all step outputs plus the final state (with the layer dim restored).
    return hiddens, hiddens[-1].unsqueeze(0)
  def _compute_init_state(self, x, h_prev, x_mask, h_mask):
    # Gate the raw input and previous state (dropout only in training), then
    # apply the highway update to get the cell's node-0 state.
    if self.training:
      xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
    else:
      xh_prev = torch.cat([x, h_prev], dim=-1)
    c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)
    c0 = c0.sigmoid()
    h0 = h0.tanh()
    s0 = h_prev + c0 * (h0-h_prev)
    return s0
  def _get_activation(self, name):
    # Map a genotype primitive name to its activation function.
    if name == 'tanh':
      f = F.tanh
    elif name == 'relu':
      f = F.relu
    elif name == 'sigmoid':
      f = F.sigmoid
    elif name == 'identity':
      f = lambda x: x
    else:
      raise NotImplementedError
    return f
  def cell(self, x, h_prev, x_mask, h_mask):
    # One time step: walk the genotype DAG from the init state.
    s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
    states = [s0]
    for i, (name, pred) in enumerate(self.genotype.recurrent):
      s_prev = states[pred]
      if self.training:
        ch = (s_prev * h_mask).mm(self._Ws[i])
      else:
        ch = s_prev.mm(self._Ws[i])
      c, h = torch.split(ch, self.nhid, dim=-1)
      c = c.sigmoid()
      fn = self._get_activation(name)
      h = fn(h)
      # Highway-style node update gated by c.
      s = s_prev + c * (h-s_prev)
      states += [s]
    # Average the designated concat nodes to form the cell output.
    output = torch.mean(torch.stack([states[i] for i in self.genotype.concat], -1), -1)
    return output
class RNNModel(nn.Module):
  """Container module with an encoder, a recurrent module, and a decoder.

  Embeds tokens, runs a single DARTS recurrent cell layer over the sequence,
  and decodes to log-probabilities with weights tied to the embedding.
  """
  def __init__(self, ntoken, ninp, nhid, nhidlast,
               dropout=0.5, dropouth=0.5, dropoutx=0.5, dropouti=0.5, dropoute=0.1,
               cell_cls=DARTSCell, genotype=None):
    super(RNNModel, self).__init__()
    self.lockdrop = LockedDropout()
    self.encoder = nn.Embedding(ntoken, ninp)
    # Weight tying (decoder.weight = encoder.weight) requires equal dims.
    assert ninp == nhid == nhidlast
    if cell_cls == DARTSCell:
      # Evaluation cell: needs a concrete genotype.
      assert genotype is not None
      self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]
    else:
      # Search cell: architecture is learned, so no genotype.
      assert genotype is None
      self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]
    self.rnns = torch.nn.ModuleList(self.rnns)
    self.decoder = nn.Linear(ninp, ntoken)
    # Tie decoder weights to the input embedding.
    self.decoder.weight = self.encoder.weight
    self.init_weights()
    self.ninp = ninp
    self.nhid = nhid
    self.nhidlast = nhidlast
    self.dropout = dropout
    self.dropouti = dropouti
    self.dropoute = dropoute
    self.ntoken = ntoken
    self.cell_cls = cell_cls
  def init_weights(self):
    # Small uniform init; decoder weight is shared with the encoder.
    self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
    self.decoder.bias.data.fill_(0)
    self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)
  def forward(self, input, hidden, return_h=False):
    """Run the model; input is (time, batch) token ids, hidden a list of
    per-layer states. Returns log-probs (time, batch, ntoken) and the new
    hidden state; with return_h=True also the raw/dropped layer outputs
    (used by activation-regularization terms in training)."""
    batch_size = input.size(1)
    # Word-level embedding dropout (training only), then input dropout.
    emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
    emb = self.lockdrop(emb, self.dropouti)
    raw_output = emb
    new_hidden = []
    raw_outputs = []
    outputs = []
    for l, rnn in enumerate(self.rnns):
      current_input = raw_output
      raw_output, new_h = rnn(raw_output, hidden[l])
      new_hidden.append(new_h)
      raw_outputs.append(raw_output)
    hidden = new_hidden
    output = self.lockdrop(raw_output, self.dropout)
    outputs.append(output)
    # Decode every time step at once, then restore the (T, B, V) shape.
    logit = self.decoder(output.view(-1, self.ninp))
    log_prob = nn.functional.log_softmax(logit, dim=-1)
    model_output = log_prob
    model_output = model_output.view(-1, batch_size, self.ntoken)
    if return_h:
      return model_output, hidden, raw_outputs, outputs
    return model_output, hidden
  def init_hidden(self, bsz):
    # Zero initial state matching the model's parameter dtype/device.
    weight = next(self.parameters()).data
    return [Variable(weight.new(1, bsz, self.nhid).zero_())]
| 5,148 | 30.981366 | 102 | py |
sharpDARTS | sharpDARTS-master/rnn/data.py | import os
import torch
from collections import Counter
class Dictionary(object):
  """Bidirectional word<->index mapping with per-word occurrence counts."""

  def __init__(self):
    self.word2idx = {}        # word -> integer id
    self.idx2word = []        # integer id -> word
    self.counter = Counter()  # integer id -> occurrence count
    self.total = 0            # total number of tokens ever added

  def add_word(self, word):
    """Register word (assigning a new id if unseen), count one occurrence,
    and return its id."""
    if word not in self.word2idx:
      self.word2idx[word] = len(self.idx2word)
      self.idx2word.append(word)
    idx = self.word2idx[word]
    self.counter[idx] += 1
    self.total += 1
    return idx

  def __len__(self):
    return len(self.idx2word)
class Corpus(object):
  """Word-level corpus: builds a shared Dictionary over the train/valid/test
  splits and tokenizes each split into one long LongTensor of word ids."""
  def __init__(self, path):
    # path: directory containing train.txt, valid.txt, and test.txt.
    self.dictionary = Dictionary()
    self.train = self.tokenize(os.path.join(path, 'train.txt'))
    self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
    self.test = self.tokenize(os.path.join(path, 'test.txt'))
  def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Add words to the dictionary
    # First pass: register every word (appending <eos> per line) and count
    # the total tokens so the id tensor can be pre-allocated.
    with open(path, 'r', encoding='utf-8') as f:
      tokens = 0
      for line in f:
        words = line.split() + ['<eos>']
        tokens += len(words)
        for word in words:
          self.dictionary.add_word(word)
    # Tokenize file content
    # Second pass: fill the flat id tensor in reading order.
    with open(path, 'r', encoding='utf-8') as f:
      ids = torch.LongTensor(tokens)
      token = 0
      for line in f:
        words = line.split() + ['<eos>']
        for word in words:
          ids[token] = self.dictionary.word2idx[word]
          token += 1
    return ids
class SentCorpus(object):
  """Sentence-level corpus: like Corpus, but tokenize() returns a list of
  per-sentence LongTensors instead of one flat token stream."""
  def __init__(self, path):
    # path: directory containing train.txt, valid.txt, and test.txt.
    self.dictionary = Dictionary()
    self.train = self.tokenize(os.path.join(path, 'train.txt'))
    self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
    self.test = self.tokenize(os.path.join(path, 'test.txt'))
  def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Add words to the dictionary
    # First pass: register every word (appending <eos> per line).
    with open(path, 'r', encoding='utf-8') as f:
      tokens = 0
      for line in f:
        words = line.split() + ['<eos>']
        tokens += len(words)
        for word in words:
          self.dictionary.add_word(word)
    # Tokenize file content
    # Second pass: build one id tensor per (non-empty) line.
    sents = []
    with open(path, 'r', encoding='utf-8') as f:
      for line in f:
        if not line:
          continue
        words = line.split() + ['<eos>']
        sent = torch.LongTensor(len(words))
        for i, word in enumerate(words):
          sent[i] = self.dictionary.word2idx[word]
        sents.append(sent)
    return sents
class BatchSentLoader(object):
  """Iterates over sentences in length-sorted order, yielding padded
  (max_len, batch_size) LongTensor batches.

  Sentences are sorted by length so each batch needs minimal padding; short
  sentences in a batch are right-padded with pad_id. The final batch may be
  smaller than batch_size.

  Fix: the original only initialized the cursor (self.idx) in __iter__, so
  calling next(loader) before iter(loader) raised AttributeError; the cursor
  is now also initialized in __init__.
  """
  def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
    self.sents = sents
    self.batch_size = batch_size
    self.sort_sents = sorted(sents, key=lambda x: x.size(0))
    self.cuda = cuda
    # Accepted for interface compatibility; not used by this loader.
    self.volatile = volatile
    self.pad_id = pad_id
    # Start the cursor at the beginning so next() works without iter().
    self.idx = 0
  def __next__(self):
    if self.idx >= len(self.sort_sents):
      raise StopIteration
    # The last batch may be smaller than batch_size.
    batch_size = min(self.batch_size, len(self.sort_sents)-self.idx)
    batch = self.sort_sents[self.idx:self.idx+batch_size]
    max_len = max([s.size(0) for s in batch])
    # One column per sentence, padded to the longest sentence in the batch.
    tensor = torch.LongTensor(max_len, batch_size).fill_(self.pad_id)
    for i in range(len(batch)):
      s = batch[i]
      tensor[:s.size(0),i].copy_(s)
    if self.cuda:
      tensor = tensor.cuda()
    self.idx += batch_size
    return tensor
  # Python 2 style alias.
  next = __next__
  def __iter__(self):
    # Restart iteration from the beginning.
    self.idx = 0
    return self
if __name__ == '__main__':
  # Smoke test: tokenize the PTB test split and print each batch's shape.
  corpus = SentCorpus('../penn')
  loader = BatchSentLoader(corpus.test, 10)
  for i, d in enumerate(loader):
    print(i, d.size())
| 4,005 | 30.054264 | 80 | py |
sharpDARTS | sharpDARTS-master/rnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import PRIMITIVES, STEPS, CONCAT, Genotype
from torch.autograd import Variable
from collections import namedtuple
from model import DARTSCell, RNNModel
class DARTSCellSearch(DARTSCell):
  """Search-phase DARTS cell: instead of following a fixed genotype, each
  step mixes every candidate activation over every predecessor state,
  weighted by softmaxed architecture weights (self.weights, assigned by
  RNNModelSearch)."""
  def __init__(self, ninp, nhid, dropouth, dropoutx):
    super(DARTSCellSearch, self).__init__(ninp, nhid, dropouth, dropoutx, genotype=None)
    # BatchNorm stabilizes the mixed states during search (no affine params).
    self.bn = nn.BatchNorm1d(nhid, affine=False)
  def cell(self, x, h_prev, x_mask, h_mask):
    s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
    s0 = self.bn(s0)
    # Softmax the alpha rows so each edge's primitive weights sum to 1.
    probs = F.softmax(self.weights, dim=-1)
    offset = 0
    states = s0.unsqueeze(0)
    for i in range(STEPS):
      if self.training:
        masked_states = states * h_mask.unsqueeze(0)
      else:
        masked_states = states
      # Compute (c, h) for every predecessor state of step i in one matmul.
      ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid)
      c, h = torch.split(ch, self.nhid, dim=-1)
      c = c.sigmoid()
      s = torch.zeros_like(s0)
      # Mix all primitives (except 'none') over all predecessor edges,
      # weighted by probs rows [offset, offset+i+1).
      for k, name in enumerate(PRIMITIVES):
        if name == 'none':
          continue
        fn = self._get_activation(name)
        unweighted = states + c * (fn(h) - states)
        s += torch.sum(probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)
      s = self.bn(s)
      states = torch.cat([states, s.unsqueeze(0)], 0)
      offset += i+1
    # Output averages the last CONCAT intermediate states.
    output = torch.mean(states[-CONCAT:], dim=0)
    return output
class RNNModelSearch(RNNModel):
  """Search-phase RNN model: an RNNModel built with DARTSCellSearch plus a
  learnable architecture-weight tensor shared with every search cell."""
  def __init__(self, *args):
    super(RNNModelSearch, self).__init__(*args, cell_cls=DARTSCellSearch, genotype=None)
    # Keep the constructor args so new() can rebuild an identical model.
    self._args = args
    self._initialize_arch_parameters()
  def new(self):
    """Return a fresh model with the same shape and the same alpha values
    (used by the unrolled architecture step)."""
    model_new = RNNModelSearch(*self._args)
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
      x.data.copy_(y.data)
    return model_new
  def _initialize_arch_parameters(self):
    # One alpha row per candidate edge (1 + 2 + ... + STEPS rows), one
    # column per primitive; small random init.
    k = sum(i for i in range(1, STEPS+1))
    weights_data = torch.randn(k, len(PRIMITIVES)).mul_(1e-3)
    self.weights = Variable(weights_data.cuda(), requires_grad=True)
    self._arch_parameters = [self.weights]
    # Share the single alpha tensor with every search cell.
    for rnn in self.rnns:
      rnn.weights = self.weights
  def arch_parameters(self):
    return self._arch_parameters
  def _loss(self, hidden, input, target):
    # NLL of the model's log-probabilities, plus the next hidden state.
    log_prob, hidden_next = self(input, hidden, return_h=False)
    loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), target)
    return loss, hidden_next
  def genotype(self):
    """Derive the discrete recurrent Genotype from the softmaxed alphas."""
    def _parse(probs):
      gene = []
      start = 0
      for i in range(STEPS):
        end = start + i + 1
        W = probs[start:end].copy()
        # Choose the single strongest incoming edge, ignoring 'none'.
        j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[0]
        # Then the best primitive on that edge, again ignoring 'none'.
        k_best = None
        for k in range(len(W[j])):
          if k != PRIMITIVES.index('none'):
            if k_best is None or W[j][k] > W[j][k_best]:
              k_best = k
        gene.append((PRIMITIVES[k_best], j))
        start = end
      return gene
    gene = _parse(F.softmax(self.weights, dim=-1).data.cpu().numpy())
    genotype = Genotype(recurrent=gene, concat=range(STEPS+1)[-CONCAT:])
    return genotype
| 3,278 | 32.804124 | 125 | py |
sharpDARTS | sharpDARTS-master/rnn/train_search.py | import argparse
import os, sys, glob
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from architect import Architect
import gc
import data
import model_search as model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=3,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
architect = Architect(parallel_model, args)
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
  """Return the average per-position NLL of the model over ``data_source``.

  Uses module-level globals: model, parallel_model, corpus, args.
  Hidden state is detached between bptt-sized chunks so no graph spans them.
  """
  model.eval()  # evaluation mode: disables dropout
  ntokens = len(corpus.dictionary)  # retained from original; unused below
  hidden = model.init_hidden(batch_size)
  accumulated = 0
  for start in range(0, data_source.size(0) - 1, args.bptt):
    chunk, labels = get_batch(data_source, start, args, evaluation=True)
    labels = labels.view(-1)
    log_prob, hidden = parallel_model(chunk, hidden)
    step_nll = nn.functional.nll_loss(
        log_prob.view(-1, log_prob.size(2)), labels).data
    accumulated += step_nll * len(chunk)
    hidden = repackage_hidden(hidden)
  return accumulated[0] / len(data_source)
def train():
  """Run one epoch of joint weight/architecture search training.

  Per minibatch: an architecture (alpha) step on a validation batch via
  ``architect.step``, followed by a weight step on a training batch.
  Relies on module-level globals: model, parallel_model, architect,
  optimizer, corpus, train_data, search_data, args, epoch.
  """
  assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
  # Turn on training mode which enables dropout.
  total_loss = 0
  start_time = time.time()
  ntokens = len(corpus.dictionary)
  # Separate hidden states per small-batch shard, for train and search data.
  hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
  hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
  batch, i = 0, 0
  while i < train_data.size(0) - 1 - 1:
    # Occasionally halve the sequence length (regularization trick).
    bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
    # Prevent excessively small or negative sequence lengths
    # seq_len = max(5, int(np.random.normal(bptt, 5)))
    # # There's a very small chance that it could select a very long sequence length resulting in OOM
    # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
    seq_len = int(bptt)
    # Scale the learning rate by the actual sequence length, restore later.
    lr2 = optimizer.param_groups[0]['lr']
    optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
    model.train()
    # Validation batch for the architecture step; index wraps around search_data.
    data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
    data, targets = get_batch(train_data, i, args, seq_len=seq_len)
    optimizer.zero_grad()
    start, end, s_id = 0, args.small_batch_size, 0
    while start < args.batch_size:
      cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
      cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)
      # Starting each batch, we detach the hidden state from how it was previously produced.
      # If we didn't, the model would try backpropagating all the way to start of the dataset.
      hidden[s_id] = repackage_hidden(hidden[s_id])
      hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])
      # Architecture (alpha) update on the validation shard.
      hidden_valid[s_id], grad_norm = architect.step(
          hidden[s_id], cur_data, cur_targets,
          hidden_valid[s_id], cur_data_valid, cur_targets_valid,
          optimizer,
          args.unrolled)
      # assuming small_batch_size = batch_size so we don't accumulate gradients
      optimizer.zero_grad()
      hidden[s_id] = repackage_hidden(hidden[s_id])
      log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
      raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
      loss = raw_loss
      # Activation Regularization: L2 penalty on the dropped RNN activations.
      if args.alpha > 0:
        loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
      # Temporal Activation Regularization (slowness): penalize step-to-step change.
      loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
      # Scale so gradients accumulated over shards match a full batch.
      loss *= args.small_batch_size / args.batch_size
      total_loss += raw_loss.data * args.small_batch_size / args.batch_size
      loss.backward()
      s_id += 1
      start = end
      end = start + args.small_batch_size
      gc.collect()
    # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
    torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
    optimizer.step()
    # total_loss += raw_loss.data
    optimizer.param_groups[0]['lr'] = lr2  # restore the unscaled learning rate
    if batch % args.log_interval == 0 and batch > 0:
      # Log the current discrete genotype and the softmaxed alpha weights.
      logging.info(parallel_model.genotype())
      print(F.softmax(parallel_model.weights, dim=-1))
      cur_loss = total_loss[0] / args.log_interval
      elapsed = time.time() - start_time
      logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
          'loss {:5.2f} | ppl {:8.2f}'.format(
            epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
            elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
      total_loss = 0
      start_time = time.time()
    batch += 1
    i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000  # sentinel: any real validation loss is lower

# Build (or restore) the optimizer; a saved ASGD state is detected by 't0'.
if args.continue_train:
  optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
  if 't0' in optimizer_state['param_groups'][0]:
    optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
  else:
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
  optimizer.load_state_dict(optimizer_state)
else:
  optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)

for epoch in range(1, args.epochs+1):
  epoch_start_time = time.time()
  train()
  val_loss = evaluate(val_data, eval_batch_size)
  logging.info('-' * 89)
  logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
      'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                 val_loss, math.exp(val_loss)))
  logging.info('-' * 89)
  # Checkpoint whenever validation improves.
  if val_loss < stored_loss:
    save_checkpoint(model, optimizer, epoch, args.save)
    logging.info('Saving Normal!')
    stored_loss = val_loss
  best_val_loss.append(val_loss)
| 12,639 | 43.041812 | 132 | py |
sharpDARTS | sharpDARTS-master/rnn/train.py | import os
import gc
import sys
import glob
import time
import math
import numpy as np
import torch
import torch.nn as nn
import logging
import argparse
import genotypes
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import data
import model
from torch.autograd import Variable
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=8e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
genotype = eval("genotypes.%s" % args.arch)
model = model.RNNModel(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute,
cell_cls=model.DARTSCell, genotype=genotype)
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
logging.info('Genotype: {}'.format(genotype))
def evaluate(data_source, batch_size=10):
  """Return the mean per-position NLL of the model over ``data_source``.

  Uses module-level globals: model, parallel_model, corpus, args.
  """
  # Turn on evaluation mode which disables dropout.
  model.eval()
  ntokens = len(corpus.dictionary)  # retained from original; unused here
  hidden = model.init_hidden(batch_size)
  loss_sum = 0
  pos = 0
  while pos < data_source.size(0) - 1:
    seq, flat_targets = get_batch(data_source, pos, args, evaluation=True)
    flat_targets = flat_targets.view(-1)
    log_prob, hidden = parallel_model(seq, hidden)
    nll = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), flat_targets).data
    loss_sum += nll * len(seq)
    hidden = repackage_hidden(hidden)
    pos += args.bptt
  return loss_sum[0] / len(data_source)
def train():
  """Run one training epoch over ``train_data``.

  Relies on module-level globals: model, parallel_model, optimizer,
  corpus, train_data, args, epoch.
  """
  assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
  # Turn on training mode which enables dropout.
  total_loss = 0
  start_time = time.time()
  ntokens = len(corpus.dictionary)
  # One hidden state per small-batch shard.
  hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
  batch, i = 0, 0
  while i < train_data.size(0) - 1 - 1:
    # Occasionally halve the base sequence length (regularization trick).
    bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
    # Prevent excessively small or negative sequence lengths
    seq_len = max(5, int(np.random.normal(bptt, 5)))
    # There's a very small chance that it could select a very long sequence length resulting in OOM
    seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
    # Scale the learning rate by the sampled sequence length, restore later.
    lr2 = optimizer.param_groups[0]['lr']
    optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
    model.train()
    data, targets = get_batch(train_data, i, args, seq_len=seq_len)
    optimizer.zero_grad()
    start, end, s_id = 0, args.small_batch_size, 0
    while start < args.batch_size:
      cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
      # Starting each batch, we detach the hidden state from how it was previously produced.
      # If we didn't, the model would try backpropagating all the way to start of the dataset.
      hidden[s_id] = repackage_hidden(hidden[s_id])
      log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
      raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
      loss = raw_loss
      # Activation Regularization: L2 penalty on the dropped RNN activations.
      if args.alpha > 0:
        loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
      # Temporal Activation Regularization (slowness): penalize step-to-step change.
      loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
      # Scale so gradients accumulated over shards match a full batch.
      loss *= args.small_batch_size / args.batch_size
      total_loss += raw_loss.data * args.small_batch_size / args.batch_size
      loss.backward()
      s_id += 1
      start = end
      end = start + args.small_batch_size
      gc.collect()
    # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
    torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
    optimizer.step()
    # total_loss += raw_loss.data
    optimizer.param_groups[0]['lr'] = lr2  # restore the unscaled learning rate
    # Deliberate bare raise on a NaN loss: aborts the epoch so the caller's
    # except-handler can roll back to the previous best checkpoint.
    if np.isnan(total_loss[0]):
      raise
    if batch % args.log_interval == 0 and batch > 0:
      cur_loss = total_loss[0] / args.log_interval
      elapsed = time.time() - start_time
      logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
          'loss {:5.2f} | ppl {:8.2f}'.format(
            epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
            elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
      total_loss = 0
      start_time = time.time()
    batch += 1
    i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000  # sentinel: any real validation loss is lower

# At any point you can hit Ctrl + C to break out of training early.
try:
  if args.continue_train:
    # Resume: rebuild the optimizer type that was saved (ASGD state has 't0').
    optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
    if 't0' in optimizer_state['param_groups'][0]:
      optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
    else:
      optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    optimizer.load_state_dict(optimizer_state)
  else:
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
  epoch = 1
  while epoch < args.epochs + 1:
    epoch_start_time = time.time()
    try:
      train()
    except Exception:
      # BUGFIX: was a bare `except:`, which also caught KeyboardInterrupt
      # (and SystemExit), so Ctrl+C during train() triggered this rollback
      # instead of reaching the outer handler that exits training early.
      # train() deliberately raises on a NaN loss; that (and any other
      # runtime failure) still lands here and rolls back to the best model.
      logging.info('rolling back to the previous best model ...')
      model = torch.load(os.path.join(args.save, 'model.pt'))
      parallel_model = model.cuda()
      optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
      if 't0' in optimizer_state['param_groups'][0]:
        optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
      else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
      optimizer.load_state_dict(optimizer_state)
      epoch = torch.load(os.path.join(args.save, 'misc.pt'))['epoch']
      continue
    if 't0' in optimizer.param_groups[0]:
      # ASGD phase: swap in the averaged weights ('ax') for evaluation,
      # then restore the raw weights afterwards.
      tmp = {}
      for prm in model.parameters():
        tmp[prm] = prm.data.clone()
        prm.data = optimizer.state[prm]['ax'].clone()
      val_loss2 = evaluate(val_data)
      logging.info('-' * 89)
      logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
          'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                     val_loss2, math.exp(val_loss2)))
      logging.info('-' * 89)
      if val_loss2 < stored_loss:
        save_checkpoint(model, optimizer, epoch, args.save)
        logging.info('Saving Averaged!')
        stored_loss = val_loss2
      for prm in model.parameters():
        prm.data = tmp[prm].clone()
    else:
      val_loss = evaluate(val_data, eval_batch_size)
      logging.info('-' * 89)
      logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
          'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                     val_loss, math.exp(val_loss)))
      logging.info('-' * 89)
      if val_loss < stored_loss:
        save_checkpoint(model, optimizer, epoch, args.save)
        logging.info('Saving Normal!')
        stored_loss = val_loss
      # Switch SGD -> ASGD once validation stops improving over the last
      # `nonmono` epochs (non-monotone trigger, as in AWD-LSTM).
      if 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):
        logging.info('Switching!')
        optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
      best_val_loss.append(val_loss)
    epoch += 1
except KeyboardInterrupt:
  logging.info('-' * 89)
  logging.info('Exiting from training early')

# Load the best saved model.
model = torch.load(os.path.join(args.save, 'model.pt'))
parallel_model = model.cuda()

# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging.info('=' * 89)
| 13,900 | 42.037152 | 141 | py |
lenspack | lenspack-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# package_name documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 24 16:46:22 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'numpydoc'
]
# Include init in class documentation.
autoclass_content = 'both'
# Order docstrings as in the source
autodoc_member_order = 'bysource'
# Include private class methods
autodoc_default_flags = ['members', 'private-members']
# Suppress class members in toctree.
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lenspack'
copyright = u'2019, Austin Peel'
author = u'Austin Peel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'navigation_depth': 3,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'], }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Consistency fix: was 'sftoolsdoc', a leftover from the template this
# conf.py was copied from; the project here is 'lenspack'.
htmlhelp_basename = 'lenspackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lenspack.tex', u'lenspack Documentation',
u'Austin Peel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lenspack', u'lenspack Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lenspack', u'lenspack Documentation',
author, 'lenspack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 12,182 | 30.158568 | 100 | py |
from six.moves import cPickle as pickle
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import SimpleImputer
import torch as torch
from torch.utils.data import Dataset
def load_dict(filename_):
    """Deserialize and return the pickled object stored at ``filename_``."""
    with open(filename_, "rb") as handle:
        return pickle.load(handle)
class RealDataset(Dataset):
    """Tabular anomaly-detection dataset loaded from a ``.npy`` pickled dict.

    The file at ``path`` must hold a dict with keys ``"x"`` (2-D feature
    matrix) and ``"y"`` (labels; nonzero = anomaly). Features are min-max
    scaled to [0, 1]; when ``missing_ratio > 0`` a random fraction of entries
    is first knocked out and mean-imputed column-wise.
    """

    def __init__(self, path, missing_ratio):
        data = np.load(path, allow_pickle=True)
        data = data.item()
        self.missing_ratio = missing_ratio
        self.x = data["x"]
        self.y = data["y"]
        n, d = self.x.shape
        # The mask is always drawn, even when missing_ratio == 0, so the
        # global NumPy RNG stream stays identical to earlier behavior for
        # callers that seed np.random before constructing the dataset.
        mask = np.random.rand(n, d)
        mask = (mask > self.missing_ratio).astype(float)
        if missing_ratio > 0.0:
            # Knock out masked entries, then mean-impute them per column.
            self.x[mask == 0] = np.nan
            imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
            self.x = imputer.fit_transform(self.x)
        # Min-max scale to [0, 1] (previously duplicated in both branches).
        scaler = MinMaxScaler()
        self.x = scaler.fit_transform(self.x)

    def __len__(self):
        """Number of samples."""
        return self.x.shape[0]

    def __dim__(self):
        """Feature dimensionality; only 2-D (single-channel) data is supported."""
        if len(self.x.shape) > 2:
            raise Exception("only handles single channel data")
        else:
            return self.x.shape[1]

    def __getitem__(self, idx):
        """Return ``(features, label)`` for ``idx`` as torch tensors."""
        return (
            torch.from_numpy(np.array(self.x[idx, :])),
            torch.from_numpy(np.array(self.y[idx])),
        )

    def __sample__(self, num):
        """Draw ``num`` distinct samples uniformly at random."""
        # Renamed the local from `len` to avoid shadowing the builtin.
        n_total = self.__len__()
        index = np.random.choice(n_total, num, replace=False)
        return self.__getitem__(index)

    def __anomalyratio__(self):
        """Fraction of anomalous (nonzero) labels in the dataset."""
        return self.y.sum() / self.y.shape[0]
| 1,721 | 27.7 | 75 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import argparse
import time
import datetime
import torch.utils.data as data
from torch.autograd import grad
from torch.autograd import Variable
from models.DAGMM import DaGMM
from data_process import RealDataset
import matplotlib.pyplot as plt
from utils import *
from tqdm import tqdm
'''
This implementation is based on https://github.com/danieltan07/dagmm and https://github.com/tnakae/DAGMM
We noticed that the training process is highly numerically unstable, and both implementations above mention the
same problem. Specifically, we found that the issue becomes severe when the bottleneck dimension is high. In the
original DAGMM paper, the bottleneck dimension is 1 (without counting the cosine similarity and reconstruction loss).
For example, if we increase it to 10, numerical issues appear on many datasets.
Also, for unsupervised AD it is very tricky to pick lambda, gmm_k, and lambda_cov_diag, since there is no clean
data with which to evaluate the performance.
'''
class Solver():
    """DAGMM trainer/evaluator for unsupervised anomaly detection.

    Wires up the data pipeline, builds the DaGMM network, runs the training
    loop, and scores test samples by GMM sample energy, thresholding at the
    dataset's normal-data percentile.
    """
    DEFAULTS = {}
    def __init__(self, data_name, lambda_energy=0.1, lambda_cov_diag=0.005, hidden_dim=128, z_dim=10, seed=0, learning_rate=1e-3, gmm_k=2,
                 batch_size=128, training_ratio=0.8, validation_ratio=0.1, max_epochs=100, missing_ratio=0.0):
        # Data loader
        self.gmm_k = gmm_k
        self.lambda_energy = lambda_energy
        self.lambda_cov_diag = lambda_cov_diag
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/DAGMM/{}/".format(data_name, missing_ratio, seed)
        self.result_path = "./results/{}/{}/DAGMM/{}/".format(data_name, missing_ratio, seed)
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print('|data dimension: {}|data noise ratio:{}'.format(self.dataset.__dim__(), self.data_anomaly_ratio))
        training_data, testing_data = data.random_split(dataset=self.dataset,
                                                        lengths=[
                                                            self.n_train, self.n_test
                                                        ])
        self.training_loader = data.DataLoader(training_data, batch_size=batch_size, shuffle=True)
        # self.validation_loader = data.DataLoader(validation_data, batch_size=self.n_validation, shuffle=False)
        self.testing_loader = data.DataLoader(testing_data, batch_size=self.n_test, shuffle=False)
        self.build_model()
        # NOTE(review): build_model() already calls print_network(), so the
        # parameter count is printed twice — presumably just extra logging.
        self.print_network()
    def build_model(self):
        """Instantiate the DaGMM network and its Adam optimizer."""
        # Define model
        self.dagmm = DaGMM(input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim, n_gmm=self.gmm_k)
        # Optimizers
        self.optimizer = torch.optim.Adam(self.dagmm.parameters(), lr=self.learning_rate)
        # Print networks
        self.print_network()
        if torch.cuda.is_available():
            self.dagmm.cuda()
    def print_network(self):
        """Log the total number of model parameters."""
        num_params = 0
        for p in self.dagmm.parameters():
            num_params += p.numel()
        # print(name)
        # print(model)
        print("The number of parameters: {}".format(num_params))
    def reset_grad(self):
        """Zero all gradients of the model."""
        self.dagmm.zero_grad()
    def to_var(self, x, volatile=False):
        """Move ``x`` to the GPU when available and wrap it in a Variable."""
        if torch.cuda.is_available():
            x = x.cuda()
        return Variable(x, volatile=volatile)
    def train(self):
        """Run the DAGMM training loop for ``max_epochs`` epochs."""
        iters_per_epoch = len(self.training_loader)
        start = 0
        # Start training
        iter_ctr = 0
        start_time = time.time()
        min_val_loss = 1e+15
        for e in tqdm(range(start, self.max_epochs)):
            for i, (input_data, labels) in enumerate(self.training_loader):
                iter_ctr += 1
                start_time = time.time()
                input_data = self.to_var(input_data)
                # training
                total_loss, sample_energy, recon_error, cov_diag = self.dagmm_step(input_data)
                # Logging
                loss = {}
                loss['total_loss'] = total_loss.data.item()
                loss['sample_energy'] = sample_energy.item()
                loss['recon_error'] = recon_error.item()
                loss['cov_diag'] = cov_diag.item()
            # Switch to eval mode at the end of each epoch; dagmm_step()
            # flips back to train mode on the next optimization step.
            self.dagmm.eval()
    def dagmm_step(self, input_data, validation_flag=False):
        """One optimization (or, with ``validation_flag``, evaluation) step.

        Returns the tuple ``(total_loss, sample_energy, recon_error,
        cov_diag)`` produced by ``DaGMM.loss_function``.
        """
        input_data = input_data.float()
        if not validation_flag:
            self.optimizer.zero_grad()
            self.dagmm.train()
            enc, dec, z, gamma = self.dagmm(input_data)
            # Debug aid for the numerical instability described in the module
            # docstring: dump parameters when the latent code goes NaN.
            if torch.isnan(z.sum()):
                for p in self.dagmm.parameters():
                    print(p)
                print("pause")
            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(input_data, dec, z, gamma,
                                                                                        self.lambda_energy,
                                                                                        self.lambda_cov_diag)
            total_loss.backward()
            # Gradient clipping guards against the instability of the GMM term.
            torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), 5)
            self.optimizer.step()
        else:
            self.dagmm.eval()
            enc, dec, z, gamma = self.dagmm(input_data)
            total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(input_data, dec, z, gamma,
                                                                                        self.lambda_energy,
                                                                                        self.lambda_cov_diag)
        return total_loss, sample_energy, recon_error, cov_diag
    def test(self):
        """Estimate GMM parameters on the training split, then score and
        threshold the test split; returns (accuracy, precision, recall,
        f_score, auc) and saves them under ``self.result_path``."""
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        # self.dagmm.load_state_dict(torch.load(self.model_save_path + 'parameter.pth'))
        self.dagmm.eval()
        # self.data_loader.dataset.mode = "train"
        # compute the parameter of density estimation by using training and validation set
        N = 0
        mu_sum = 0
        cov_sum = 0
        gamma_sum = 0
        for it, (input_data, labels) in enumerate(self.training_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            phi, mu, cov = self.dagmm.compute_gmm_params(z, gamma)
            batch_gamma_sum = torch.sum(gamma, dim=0)
            gamma_sum += batch_gamma_sum
            mu_sum += mu * batch_gamma_sum.unsqueeze(-1) # keep sums of the numerator only
            cov_sum += cov * batch_gamma_sum.unsqueeze(-1).unsqueeze(-1) # keep sums of the numerator only
            N += input_data.size(0)
        train_phi = gamma_sum / N
        train_mu = mu_sum / gamma_sum.unsqueeze(-1)
        train_cov = cov_sum / gamma_sum.unsqueeze(-1).unsqueeze(-1)
        print("N:", N)
        print("phi :\n", train_phi)
        print("mu :\n", train_mu)
        print("cov :\n", train_cov)
        train_energy = []
        train_labels = []
        train_z = []
        for it, (input_data, labels) in enumerate(self.training_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            sample_energy, cov_diag = self.dagmm.compute_energy(z, phi=train_phi, mu=train_mu, cov=train_cov,
                                                                size_average=False)
            train_energy.append(sample_energy.data.cpu().numpy())
            train_z.append(z.data.cpu().numpy())
            train_labels.append(labels.numpy())
        train_energy = np.concatenate(train_energy, axis=0)
        train_z = np.concatenate(train_z, axis=0)
        train_labels = np.concatenate(train_labels, axis=0)
        test_energy = []
        test_labels = []
        test_z = []
        for it, (input_data, labels) in enumerate(self.testing_loader):
            input_data = self.to_var(input_data)
            input_data = input_data.float()
            enc, dec, z, gamma = self.dagmm(input_data)
            # NOTE(review): no explicit phi/mu/cov here — presumably
            # compute_energy falls back to parameters cached by
            # compute_gmm_params; confirm in models.DAGMM.
            sample_energy, cov_diag = self.dagmm.compute_energy(z, size_average=False)
            test_energy.append(sample_energy.data.cpu().numpy())
            test_z.append(z.data.cpu().numpy())
            test_labels.append(labels.numpy())
        test_energy = np.concatenate(test_energy, axis=0)
        test_z = np.concatenate(test_z, axis=0)
        test_labels = np.concatenate(test_labels, axis=0)
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        combined_labels = np.concatenate([train_labels, test_labels], axis=0)
        # Threshold at the percentile implied by the known anomaly ratio.
        thresh = np.percentile(combined_energy, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)
        pred = (test_energy > thresh).astype(int)
        gt = test_labels.astype(int)
        from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score
        from sklearn.metrics import roc_auc_score
        auc = roc_auc_score(gt, test_energy)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average='binary')
        os.makedirs(self.result_path, exist_ok=True)
        np.save(self.result_path + "result.npy", {
            'auc': auc,
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f_score
        })
        print("Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} auc:{:0.3f}".format(
            accuracy, precision, recall, f_score, auc))
        return accuracy, precision, recall, f_score, auc
# CLI entry point: parse hyperparameters, seed all RNGs, then train and
# evaluate a DAGMM solver on the chosen dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="AnomalyDetection")
    parser.add_argument(
        "--seed", type=int, default=0, required=False
    )
    parser.add_argument(
        "--data", type=str, default="sensor", required=False
    )
    parser.add_argument(
        "--max_epochs", type=int, default=200, required=False
    )
    parser.add_argument(
        "--hidden_dim", type=int, default=256, required=False
    )
    parser.add_argument(
        "--z_dim", type=int, default=10, required=False
    )
    parser.add_argument(
        "--batch_size", type=int, default=128, required=False
    )
    parser.add_argument(
        "--training_ratio", type=float, default=0.6, required=False
    )
    parser.add_argument(
        "--learning_rate", type=float, default=3e-4, required=False
    )
    parser.add_argument(
        "--start_ratio", type=float, default=0.0, required=False
    )
    parser.add_argument(
        "--data_anomaly_ratio", type=float, default=0.01, required=False
    )
    parser.add_argument(
        "--gmm_k", type=int, default=2, required=False
    )
    parser.add_argument(
        "--missing_ratio", type=float, default=0.0, required=False
    )
    config = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # Seed NumPy and torch (CPU + CUDA) for reproducibility.
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.backends.cudnn.benchmark = True
    DAGMM_Solver = Solver(data_name=config.data, hidden_dim=config.hidden_dim, z_dim=config.z_dim, seed=config.seed,
                          learning_rate=config.learning_rate, gmm_k=config.gmm_k, missing_ratio=config.missing_ratio,
                          batch_size=config.batch_size, training_ratio=config.training_ratio, max_epochs=config.max_epochs)
    DAGMM_Solver.train()
    DAGMM_Solver.test()
print("Data {} finished".format(config.data)) | 12,923 | 39.012384 | 138 | py |
import torch as torch
import os
import torch.utils.data as data
import numpy as np
from tqdm import tqdm
import argparse
from models.SVDD import SVDD, SVMLoss
from data_process import RealDataset
"""Deep One Class SVM"""
class Solver_SVDD:
    """Deep SVDD trainer: pretrains an autoencoder with a reconstruction
    loss, fixes a hypersphere center from the mean encoding, minimizes the
    encoding-to-center distance, and scores anomalies by that distance."""
    def __init__(
        self,
        data_name,
        start_ratio=0.0,
        decay_ratio=0.01,
        hidden_dim=128,
        z_dim=10,
        seed=0,
        learning_rate=1e-3,
        batch_size=128,
        training_ratio=0.8,
        validation_ratio=0.1,
        max_epochs=100,
        coteaching=0.0,
        knn_impute=False,
        missing_ratio=0.0,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.model_save_path = "./trained_model/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed
        )
        self.result_path = "./results/{}/{}/SVDD/{}/".format(
            data_name, missing_ratio, seed
        )
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.missing_ratio = missing_ratio
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.start_ratio = start_ratio
        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__()
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )
        # Step size that anneals start_ratio towards the anomaly ratio over
        # half the training epochs.
        self.decay_ratio = abs(self.start_ratio - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )
        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )
        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()
    def build_model(self):
        """Instantiate the SVDD network and move it to the target device."""
        self.ae = SVDD(
            input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim
        )
        self.ae = self.ae.to(self.device)
    def print_network(self):
        """Log the total number of model parameters."""
        num_params = 0
        for p in self.ae.parameters():
            num_params += p.numel()
        print("The number of parameters: {}".format(num_params))
    def train(self):
        """Pretrain the autoencoder, initialize the SVDD center, then train
        with the SVM loss, checkpointing on best test-split loss."""
        optimizer = torch.optim.Adam(self.ae.parameters(), lr=self.learning_rate)
        # scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
        """
        pretrain autoencoder
        """
        # BCE reconstruction for optdigits (binary features), MSE otherwise.
        mse_loss = torch.nn.MSELoss()
        if self.data_name == "optdigits":
            mse_loss = torch.nn.BCELoss()
        min_val_error = 1e10
        for epoch in tqdm(range(50)): # pretrain
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                n = x.shape[0]
                optimizer.zero_grad()
                self.ae.train()
                z1, xhat1, _ = self.ae(x.float())
                loss = mse_loss(xhat1, x)
                loss.backward()
                optimizer.step()
        # scheduler.step()
        # svm
        # init c: the hypersphere center is the mean encoding of the
        # (pretrained) training data.
        svm_loss = SVMLoss()
        z = []
        with torch.no_grad():
            self.ae.eval()
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                z1, _, _ = self.ae(x.float())
                z.append(z1)
                # x_intersect = x[index_intersect, :]
        z = torch.cat(z).mean(dim=0)
        center = self.ae.init_c(z)
        self.ae.train()
        for epoch in tqdm(range(self.max_epochs)):
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                # x_missing = x * m + (1-m) * -10
                n = x.shape[0]
                optimizer.zero_grad()
                z1, _, _ = self.ae(x.float())
                loss = svm_loss(z1, center)
                loss.backward()
                optimizer.step()
            # NOTE(review): checkpoint selection uses the testing loader as a
            # validation proxy (no separate validation split is built).
            valerror = 0
            for i, (x, y) in enumerate(self.testing_loader):
                x = x.to(self.device).float()
                # x_missing = x * m + (1-m) * -10
                n = x.shape[0]
                optimizer.zero_grad()
                self.ae.train()
                z1, _, _ = self.ae(x.float())
                loss = svm_loss(z1, center)
                valerror = valerror + loss.item()
            if valerror < min_val_error:
                min_val_error = valerror
                torch.save(
                    self.ae.state_dict(),
                    os.path.join(self.model_save_path, "parameter.pth"),
                )
    def test(self):
        """Load the best checkpoint, score the test split by distance to the
        center, threshold at the normal-data percentile, and return
        (accuracy, precision, recall, f_score, auc)."""
        print("======================TEST MODE======================")
        self.ae.load_state_dict(torch.load(self.model_save_path + "parameter.pth"))
        self.ae.eval()
        loss = SVMLoss()
        for _, (x, y) in enumerate(self.testing_loader):
            y = y.data.cpu().numpy()
            x = x.to(self.device).float()
            z1, _, _ = self.ae(x.float())
            # Anomaly score: squared distance to the learned center c1.
            error = (z1 - self.ae.c1) ** 2
            error = error.sum(dim=1)
        error = error.data.cpu().numpy()
        thresh = np.percentile(error, self.data_normaly_ratio * 100)
        print("Threshold :", thresh)
        pred = (error > thresh).astype(int)
        gt = y.astype(int)
        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )
        gt = gt.squeeze()
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC :{:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )
        os.makedirs(self.result_path, exist_ok=True)
        np.save(
            self.result_path + "result.npy",
            {
                "auc": auc,
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
            },
        )
        return accuracy, precision, recall, f_score, auc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="AnomalyDetection")
parser.add_argument("--algorithm", type=str, default="Deep-SVDD", required=False)
parser.add_argument("--seed", type=int, default=0, required=False)
parser.add_argument("--decay", type=float, default=0.001, required=False)
parser.add_argument("--data", type=str, default="vowels", required=False)
parser.add_argument("--max_epochs", type=int, default=200, required=False)
parser.add_argument("--hidden_dim", type=int, default=128, required=False)
parser.add_argument("--batch_size", type=int, default=128, required=False)
parser.add_argument("--training_ratio", type=float, default=0.6, required=False)
parser.add_argument("--learning_rate", type=float, default=3e-4, required=False)
parser.add_argument("--start_ratio", type=float, default=0.0, required=False)
parser.add_argument("--z_dim", type=int, default=10, required=False)
parser.add_argument("--missing_ratio", type=float, default=0.0, required=False)
config = parser.parse_args()
"""
read data
"""
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.benchmark = True
Solver = Solver_SVDD(
data_name=config.data,
hidden_dim=config.hidden_dim,
z_dim=config.z_dim,
seed=config.seed,
start_ratio=config.start_ratio,
learning_rate=config.learning_rate,
batch_size=config.batch_size,
decay_ratio=config.decay,
training_ratio=config.training_ratio,
max_epochs=config.max_epochs,
missing_ratio=config.missing_ratio,
)
Solver.train()
Solver.test()
print("Data {} finished".format(config.data))
| 9,095 | 32.441176 | 112 | py |
import torch.nn as nn
import torch as torch
import torch.nn.functional as F
class Loss_Knn(nn.Module):
    """Hinged k-NN loss: for each sample, the mean squared distance to its
    neighbours (summed over feature dims) is hinged at ``margin`` and the
    result is summed over the batch."""

    def __init__(self, margin):
        super(Loss_Knn, self).__init__()
        self.margin = margin

    def forward(self, z, z_nn):
        _, n_neighbors, _ = z_nn.shape
        # Broadcast each embedding against its n_neighbors neighbours.
        tiled = z.unsqueeze(dim=1).repeat(1, n_neighbors, 1)
        per_sample = ((tiled - z_nn) ** 2).mean(dim=1).sum(dim=1)
        hinged = F.relu(per_sample - self.margin)
        return hinged.sum(dim=0)
class Dist_KNN(nn.Module):
    """Per-sample hinged mean squared distance between each embedding and
    its k nearest-neighbour embeddings (no batch reduction)."""

    def __init__(self, margin):
        super(Dist_KNN, self).__init__()
        self.margin = margin

    def forward(self, z, z_nn):
        _, n_neighbors, _ = z_nn.shape
        tiled = z.unsqueeze(dim=1).repeat(1, n_neighbors, 1)
        per_sample = ((tiled - z_nn) ** 2).mean(dim=1).sum(dim=1)
        return F.relu(per_sample - self.margin)
class VAE_LOSS(nn.Module):
    """Standard VAE objective: summed reconstruction error plus a
    ``gamma``-weighted KL divergence to the unit Gaussian prior."""

    def __init__(self):
        super(VAE_LOSS, self).__init__()

    def forward(self, recon_x, x, mu, logvar, rec_type='MSE', gamma=1):
        # Reconstruction term, summed over every element of the batch.
        if rec_type == 'BCE':
            rec = F.binary_cross_entropy(recon_x, x, reduction='sum')
        elif rec_type == 'MSE':
            rec = F.mse_loss(recon_x, x, reduction='sum')
        # KL(q(z|x) || N(0, I)); Kingma & Welling, "Auto-Encoding Variational
        # Bayes" (ICLR 2014), Appendix B:
        #   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return rec + gamma * kld
class VAE_LOSS_SCORE(nn.Module):
    """Per-sample VAE objective (reconstruction + KL), usable as an anomaly
    score: returns a length-``batch`` tensor instead of a scalar."""

    def __init__(self):
        super(VAE_LOSS_SCORE, self).__init__()

    def forward(self, recon_x, x, mu, logvar, rec_type='MSE'):
        """Return per-sample scores ``rec_error + KL`` (shape ``[batch]``).

        Fix: the deprecated ``reduce=False`` keyword is replaced by the
        supported ``reduction='none'`` spelling — numerically identical,
        but future-proof against its removal.
        """
        if rec_type == 'BCE':
            BCE = F.binary_cross_entropy(recon_x, x, reduction='none')
            BCE = BCE.sum(dim=1)
        elif rec_type == 'MSE':
            BCE = F.mse_loss(recon_x, x, reduction='none')
            BCE = BCE.sum(dim=1)
        # KL(q(z|x) || N(0, I)); Kingma & Welling, "Auto-Encoding Variational
        # Bayes" (ICLR 2014), Appendix B:
        #   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
        KLD = KLD.sum(dim=1)
        return BCE + KLD
class VAE_Outlier_SCORE(nn.Module):
    """Per-sample reconstruction error, used as an outlier score.

    ``mu``/``logvar`` are accepted only for signature parity with
    ``VAE_LOSS_SCORE``; the KL term is intentionally not included in the
    returned score.
    """

    def __init__(self):
        super(VAE_Outlier_SCORE, self).__init__()

    def forward(self, recon_x, x, mu, logvar, rec_type='MSE'):
        """Return per-sample reconstruction errors (shape ``[batch]``).

        Fix: the deprecated ``reduce=False`` keyword is replaced by the
        supported ``reduction='none'`` spelling — numerically identical.
        """
        if rec_type == 'BCE':
            BCE = F.binary_cross_entropy(recon_x, x, reduction='none')
            BCE = BCE.sum(dim=1)
        elif rec_type == 'MSE':
            BCE = F.mse_loss(recon_x, x, reduction='none')
            BCE = BCE.sum(dim=1)
        return BCE
import os
import torch
from torch.autograd import Variable
def to_var(x, volatile=False):
    """Move ``x`` to the GPU when one is available and return it.

    The ``volatile`` parameter is kept for backward compatibility only: the
    ``torch.autograd.Variable`` API it belonged to was merged into Tensor in
    PyTorch 0.4 and the flag has no effect (wrap inference in
    ``torch.no_grad()`` instead), so the deprecated wrapper is dropped.
    """
    if torch.cuda.is_available():
        x = x.cuda()
    return x
def mkdir(directory):
    """Create ``directory`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so the call is idempotent and free of the
    check-then-create race present in the original ``os.path.exists`` +
    ``os.makedirs`` sequence.
    """
    os.makedirs(directory, exist_ok=True)
| 282 | 17.866667 | 41 | py |
import torch as torch
import os
import random
import torch.utils.data as data
import numpy as np
from tqdm import tqdm
import argparse
from models.RCA import SingleAE
from data_process import RealDataset
class Solver_RCA_Multi:
    """RCA trainer with an ensemble of ``n_member`` autoencoders.

    Each batch, every member scores all samples by reconstruction error,
    keeps the lowest-error subset (collaborative sample selection), the
    subsets are shuffled among members, and each member trains on the subset
    it receives. Anomalies are scored by the ensemble reconstruction error.
    """
    def __init__(
        self,
        data_name,
        n_member=2,
        start_ratio=0.0,
        decay_ratio=0.01,
        hidden_dim=128,
        z_dim=10,
        seed=0,
        learning_rate=1e-3,
        batch_size=128,
        training_ratio=0.8,
        validation_ratio=0.1,
        max_epochs=100,
        coteaching=1.0,
        oe=0.0,
        missing_ratio=0.0,
        knn_impute=False,
    ):
        # Data loader
        # read data here
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.knn_impute = knn_impute
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/{}-RCA/{}/".format(
            data_name, missing_ratio, n_member, seed
        )
        # oe (outlier exposure) gets its own results directory when nonzero.
        if oe == 0.0:
            self.result_path = "./results/{}/{}/{}-RCA/{}/".format(
                data_name, missing_ratio, n_member, seed
            )
        else:
            self.result_path = "./results/{}/{}/{}-RCA_{}/{}/".format(
                data_name, missing_ratio, n_member, oe, seed
            )
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.start_ratio = start_ratio
        self.decay_ratio = decay_ratio
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.start_ratio = start_ratio
        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__() + oe
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * (training_ratio))
        # self.n_validation = int(n_sample * validation_ratio)
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )
        # Step size that anneals start_ratio towards the anomaly ratio over
        # half the training epochs.
        self.decay_ratio = abs(self.start_ratio - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )
        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )
        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.n_member = n_member
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()
    def build_model(self):
        """Instantiate the ensemble of ``n_member`` SingleAE networks."""
        self.ae = []
        for _ in range(self.n_member):
            ae = SingleAE(
                input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim
            )
            ae = ae.to(self.device)
            self.ae.append(ae)
    def print_network(self):
        """Log the per-member parameter count."""
        num_params = 0
        for p in self.ae[0].parameters():
            num_params += p.numel()
        # NOTE(review): the format string has one placeholder but two
        # arguments — self.n_member is silently dropped from the message.
        print(
            "The number of parameters: {}, number of networks".format(
                num_params, self.n_member
            )
        )
    def train(self):
        """Train the ensemble with collaborative sample selection."""
        optimizer = []
        for i in range(self.n_member):
            optimizer.append(
                torch.optim.Adam(self.ae[i].parameters(), lr=self.learning_rate)
            )
            self.ae[i].eval()
        # Per-element losses: selection needs a score per sample.
        loss_mse = torch.nn.MSELoss(reduction="none")
        if self.data_name == "optdigits":
            loss_mse = torch.nn.BCELoss(reduction="none")
        min_val_error = 1e10
        for epoch in tqdm(range(self.max_epochs)): # train 3 time classifier
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                # m = m.to(self.device).float()
                n = x.shape[0]
                # Keep the (1 - start_ratio) fraction of lowest-error samples.
                n_selected = int(n * (1 - self.start_ratio))
                # NOTE(review): reads the module-level ``config`` (argparse
                # result) instead of self.coteaching — this fails if the
                # class is imported and used without the CLI; confirm intent.
                if config.coteaching == 0.0:
                    n_selected = n
                if i == 0:
                    current_ratio = "{}/{}".format(n_selected, n)
                selected_all_model = []
                with torch.no_grad():
                    # Each member ranks samples by its own reconstruction error.
                    for model_idx in range(self.n_member):
                        self.ae[model_idx].eval()
                        xhat = self.ae[model_idx](x.float())
                        error = loss_mse(xhat, x)
                        error = error.sum(dim=1)
                        _, index = torch.sort(error)
                        index = index[:n_selected]
                        selected_all_model.append(index)
                # Exchange selections randomly among members before training.
                random.shuffle(selected_all_model)
                for model_idx in range(self.n_member):
                    optimizer[model_idx].zero_grad()
                    self.ae[model_idx].train()
                    xhat = self.ae[model_idx](x[selected_all_model[model_idx]])
                    error = loss_mse(xhat, x[selected_all_model[model_idx]])
                    error = error.mean()
                    error.backward()
                    optimizer[model_idx].step()
                # Anneal start_ratio towards the (assumed) anomaly ratio.
                if self.start_ratio < self.data_anomaly_ratio:
                    self.start_ratio = min(
                        self.data_anomaly_ratio, self.start_ratio + self.decay_ratio
                    )
                if self.start_ratio > self.data_anomaly_ratio:
                    self.start_ratio = max(
                        self.data_anomaly_ratio, self.start_ratio - self.decay_ratio
                    )  # 0.0005 for 0.1 anomaly, 0.0001 for 0.001 anomaly
            # with torch.no_grad():
            #     self.ae.eval()
            #     for i, (x, y, m) in enumerate(self.testing_loader):
            #         x = x.to(self.device).float()
            #         m = m.to(self.device).float()
            #         # y = y.to(device)
            #         x = x.float()
            #         _, _, xhat1, xhat2 = self.ae(x, x, m, m)
            #         error1 = loss_mse(xhat1, x)
            #         error2 = loss_mse(xhat2, x)
            #         error1 = error1.sum(dim=1)
            #         error2 = error2.sum(dim=1)
            #
            #         n_val = x.shape[0]
            #         n_selected = int(n_val * (1 - self.data_anomaly_ratio))
            #         if self.coteaching == 0.0:
            #             n_selected = n
            #         # n_selected = n_val
            #         _, index1 = torch.sort(error1)
            #         _, index2 = torch.sort(error2)
            #         index1 = index1[:n_selected]
            #         index2 = index2[:n_selected]
            #
            #         x1 = x[index2, :]
            #         x2 = x[index1, :]
            #         m1 = m[index2, :]
            #         m2 = m[index1, :]
            #         z1, z2, xhat1, xhat2 = self.ae(x1, x2, m1, m2)
            #         val_loss = loss_mse(x1, xhat1) + loss_mse(x2, xhat2)
            #         val_loss = val_loss.sum()
            #         if val_loss < min_val_error:
            #             # print(epoch)
            #             min_val_error = val_loss
            #             torch.save(
            #                 self.ae.state_dict(),
            #                 os.path.join(self.model_save_path, "parameter.pth"),
            #             )
        # scheduler.step()
    def test(self):
        """Score the test split with the ensemble (dropout kept active via
        ``model.train()``) and return (accuracy, precision, recall,
        f_score, auc)."""
        print("======================TEST MODE======================")
        # self.dagmm.load_stat
        # self.ae.load_state_dict(torch.load(self.model_save_path + "parameter.pth"))
        # self.ae.eval()
        mse_loss = torch.nn.MSELoss(reduction="none")
        if self.data_name == "optdigits":
            mse_loss = torch.nn.BCELoss(reduction="none")
        error_list = []
        for _ in range(1000): # ensemble score over 1000 stochastic forward passes
            with torch.no_grad():
                error_average = torch.zeros(self.n_test).cuda()
                for model in self.ae:
                    # train() keeps dropout active for stochastic scoring.
                    model.train()
                    for _, (x, y) in enumerate(self.testing_loader):
                        y = y.data.cpu().numpy()
                        x = x.to(self.device).float()
                        # m = m.to(self.device).float()
                        xhat = model(x.float())
                        error = mse_loss(xhat, x)
                        error = error.sum(dim=1)
                        error_average = error_average + error
                error = error_average.data.cpu().numpy()
                error_list.append(error)
        error_list = np.array(error_list)
        # error_list = np.percentile(error, )
        error = error_list.mean(axis=0)
        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )
        gt = y.astype(int)
        auc = roc_auc_score(gt, error)
        # NOTE(review): threshold uses the anomaly-ratio percentile here,
        # whereas the sibling solvers use the normal-data percentile
        # (data_normaly_ratio * 100) — confirm which is intended.
        thresh = np.percentile(error, self.dataset.__anomalyratio__() * 100)
        print("Threshold :", thresh)
        pred = (error > thresh).astype(int)
        gt = y.astype(int)
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )
        os.makedirs(self.result_path, exist_ok=True)
        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        print("result save to {}".format(self.result_path))
        return accuracy, precision, recall, f_score, auc
if __name__ == "__main__":
    # CLI configuration for the multi-member RCA experiment.
    parser = argparse.ArgumentParser(description="RCA")
    parser.add_argument("--algorithm", type=str, default="RCA", required=False)
    parser.add_argument("--seed", type=int, default=5, required=False)
    parser.add_argument("--decay", type=float, default=0.001, required=False)
    parser.add_argument("--data", type=str, default="letter", required=False)
    parser.add_argument("--max_epochs", type=int, default=50, required=False)
    # NOTE(review): argparse's type=bool treats any non-empty string as True,
    # so "--knn_impute False" still yields True -- consider action="store_true".
    parser.add_argument("--knn_impute", type=bool, default=False, required=False)
    parser.add_argument("--hidden_dim", type=int, default=256, required=False)
    parser.add_argument("--batch_size", type=int, default=128, required=False)
    parser.add_argument("--oe", type=float, default=0.0, required=False)
    parser.add_argument("--training_ratio", type=float, default=0.599, required=False)
    parser.add_argument("--validation_ratio", type=float, default=0.001, required=False)
    parser.add_argument("--learning_rate", type=float, default=3e-4, required=False)
    parser.add_argument("--start_ratio", type=float, default=0.0, required=False)
    parser.add_argument("--z_dim", type=int, default=10, required=False)
    parser.add_argument("--coteaching", type=float, default=1.0, required=False)
    parser.add_argument("--n_member", type=int, default=3, required=False)
    parser.add_argument("--missing_ratio", type=float, default=0.0, required=False)
    config = parser.parse_args()
    """
    read data
    """
    # seed all RNGs so the data split and initialisation are reproducible
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.backends.cudnn.benchmark = True
    # build the solver, then run the full train/evaluate cycle
    Solver = Solver_RCA_Multi(
        data_name=config.data,
        hidden_dim=config.hidden_dim,
        z_dim=config.z_dim,
        seed=config.seed,
        start_ratio=config.start_ratio,
        learning_rate=config.learning_rate,
        batch_size=config.batch_size,
        decay_ratio=config.decay,
        training_ratio=config.training_ratio,
        validation_ratio=config.validation_ratio,
        max_epochs=config.max_epochs,
        missing_ratio=config.missing_ratio,
        knn_impute=config.knn_impute,
        n_member=config.n_member,
        oe=config.oe,
    )
    Solver.train()
    Solver.test()
    print("Data {} finished".format(config.data))
# --- end of file (dataset separator removed) ---
import torch as torch
import os
import torch.utils.data as data
import numpy as np
from tqdm import tqdm
import argparse
from models.RCA import AE
from data_process import RealDataset
class Solver_RCA:
    """Trainer / evaluator for RCA (Robust Collaborative Autoencoders).

    Two autoencoder branches co-teach each other: on each step every branch
    keeps the samples the *other* branch reconstructs best, and the kept
    fraction shrinks as ``beta`` anneals towards the (possibly
    over-estimated) anomaly ratio.
    """

    def __init__(
        self,
        data_name,
        hidden_dim=128,  # number of hidden neurons in RCA
        z_dim=10,  # bottleneck dimension
        seed=0,  # random seed
        learning_rate=1e-3,  # learning rate
        batch_size=128,  # batchsize
        training_ratio=0.8,  # training data percentage
        max_epochs=100,  # training epochs
        coteaching=1.0,  # whether selects sample based on loss value
        oe=0.0,  # how much we overestimate the ground-truth anomaly ratio
        missing_ratio=0.0,  # missing ratio in the data
    ):
        # reproducibility: seed all RNGs before the data split
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        use_cuda = torch.cuda.is_available()
        self.data_name = data_name
        self.device = torch.device("cuda" if use_cuda else "cpu")
        data_path = "./data/" + data_name + ".npy"
        self.missing_ratio = missing_ratio
        self.model_save_path = "./trained_model/{}/{}/RCA/{}/".format(
            data_name, missing_ratio, seed
        )
        if oe == 0.0:
            self.result_path = "./results/{}/{}/RCA/{}/".format(
                data_name, missing_ratio, seed
            )
        else:
            self.result_path = "./results/{}/{}/RCA_{}/{}/".format(
                data_name, missing_ratio, oe, seed
            )
        os.makedirs(self.model_save_path, exist_ok=True)
        self.learning_rate = learning_rate
        self.dataset = RealDataset(data_path, missing_ratio=self.missing_ratio)
        self.seed = seed
        self.hidden_dim = hidden_dim
        self.z_dim = z_dim
        self.max_epochs = max_epochs
        self.coteaching = coteaching
        self.beta = 0.0  # initially, select all data
        self.alpha = 0.5
        self.data_path = data_path
        self.data_anomaly_ratio = self.dataset.__anomalyratio__() + oe
        self.input_dim = self.dataset.__dim__()
        self.data_normaly_ratio = 1 - self.data_anomaly_ratio
        n_sample = self.dataset.__len__()
        self.n_train = int(n_sample * training_ratio)
        self.n_test = n_sample - self.n_train
        print(
            "|data dimension: {}|data noise ratio:{}".format(
                self.dataset.__dim__(), self.data_anomaly_ratio
            )
        )
        # step size that anneals beta over the first half of training.
        # NOTE(review): the numerator uses (1 - anomaly_ratio) while train()
        # only anneals beta up to anomaly_ratio -- confirm this is intended.
        self.decay_ratio = abs(self.beta - (1 - self.data_anomaly_ratio)) / (
            self.max_epochs / 2
        )
        training_data, testing_data = data.random_split(
            dataset=self.dataset, lengths=[self.n_train, self.n_test]
        )
        self.training_loader = data.DataLoader(
            training_data, batch_size=batch_size, shuffle=True
        )
        # one test batch holds the whole held-out split
        self.testing_loader = data.DataLoader(
            testing_data, batch_size=self.n_test, shuffle=False
        )
        self.ae = None
        self.discriminator = None
        self.build_model()
        self.print_network()

    def build_model(self):
        """Instantiate the twin autoencoder and move it to the device."""
        self.ae = AE(
            input_dim=self.input_dim, hidden_dim=self.hidden_dim, z_dim=self.z_dim
        )
        self.ae = self.ae.to(self.device)

    def print_network(self):
        """Report the total number of model parameters."""
        num_params = sum(p.numel() for p in self.ae.parameters())
        print("The number of parameters: {}".format(num_params))

    def train(self):
        """Co-teaching training loop; anneals the rejection rate ``beta`` per epoch."""
        optimizer = torch.optim.Adam(self.ae.parameters(), lr=self.learning_rate)
        self.ae.eval()
        loss_mse = torch.nn.MSELoss(reduction='none')
        if self.data_name == 'optdigits':
            # optdigits features are binary-like, so use BCE instead of MSE
            loss_mse = torch.nn.BCELoss(reduction='none')
        for epoch in tqdm(range(self.max_epochs)):
            for i, (x, y) in enumerate(self.training_loader):
                x = x.to(self.device).float()
                n = x.shape[0]
                n_selected = int(n * (1 - self.beta))
                # bug fix: was ``config.coteaching`` (a module-level global),
                # which breaks when this class is used outside the script
                if self.coteaching == 0.0:
                    n_selected = n
                optimizer.zero_grad()
                with torch.no_grad():
                    # rank samples by reconstruction error without dropout noise
                    self.ae.eval()
                    z1, z2, xhat1, xhat2 = self.ae(x.float(), x.float())
                    error1 = loss_mse(xhat1, x).sum(dim=1)
                    error2 = loss_mse(xhat2, x).sum(dim=1)
                    _, index1 = torch.sort(error1)
                    _, index2 = torch.sort(error2)
                    index1 = index1[:n_selected]
                    index2 = index2[:n_selected]
                # cross-update: each branch trains on the other's selection
                x1 = x[index2, :]
                x2 = x[index1, :]
                self.ae.train()
                z1, z2, xhat1, xhat2 = self.ae(x1.float(), x2.float())
                loss = loss_mse(xhat1, x1) + loss_mse(xhat2, x2)
                loss = loss.sum()
                loss.backward()
                optimizer.step()
            if self.beta < self.data_anomaly_ratio:
                self.beta = min(
                    self.data_anomaly_ratio, self.beta + self.decay_ratio
                )

    def test(self):
        """Score the held-out split and save accuracy/precision/recall/F1/AUC.

        The model is deliberately kept in ``train()`` mode so dropout stays
        active, and the anomaly score is ensembled over 1000 stochastic
        forward passes (MC dropout).
        """
        print("======================TEST MODE======================")
        self.ae.train()  # intentional: keep dropout active for MC-dropout scoring
        mse_loss = torch.nn.MSELoss(reduction='none')
        if self.data_name == 'optdigits':
            mse_loss = torch.nn.BCELoss(reduction='none')
        error_list = []
        for _ in range(1000):  # ensemble score over 1000 stochastic feedforward passes
            with torch.no_grad():
                # testing_loader yields a single batch with the whole test
                # split; image data would need a different accumulation scheme
                for _, (x, y) in enumerate(self.testing_loader):
                    y = y.data.cpu().numpy()
                    x = x.to(self.device).float()
                    _, _, xhat1, xhat2 = self.ae(x.float(), x.float())
                    error = mse_loss(xhat1, x) + mse_loss(xhat2, x)
                    error = error.mean(dim=1)
                    error = error.data.cpu().numpy()
                    error_list.append(error)
        error_list = np.array(error_list)
        error = error_list.mean(axis=0)

        from sklearn.metrics import (
            precision_recall_fscore_support as prf,
            accuracy_score,
            roc_auc_score,
        )

        gt = y.astype(int)  # labels of the (single) test batch
        # NOTE(review): threshold at the anomaly_ratio*100 percentile flags the
        # top (100 - anomaly_ratio*100)% of scores -- confirm the direction.
        thresh = np.percentile(error, self.dataset.__anomalyratio__() * 100)
        print("Threshold :", thresh)
        pred = (error > thresh).astype(int)
        auc = roc_auc_score(gt, error)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = prf(gt, pred, average="binary")
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC : {:0.4f}".format(
                accuracy, precision, recall, f_score, auc
            )
        )
        os.makedirs(self.result_path, exist_ok=True)
        np.save(
            self.result_path + "result.npy",
            {
                "accuracy": accuracy,
                "precision": precision,
                "recall": recall,
                "f1": f_score,
                "auc": auc,
            },
        )
        print("result save to {}".format(self.result_path))
        return accuracy, precision, recall, f_score, auc
if __name__ == "__main__":
    # CLI configuration for the single-pair RCA experiment.
    parser = argparse.ArgumentParser(description="RCA")
    parser.add_argument("--algorithm", type=str, default="RCA", required=False)
    parser.add_argument("--seed", type=int, default=0, required=False)
    parser.add_argument("--data", type=str, default="pima", required=False)
    parser.add_argument("--max_epochs", type=int, default=200, required=False)
    parser.add_argument("--hidden_dim", type=int, default=256, required=False)
    parser.add_argument("--batch_size", type=int, default=128, required=False)
    parser.add_argument("--oe", type=float, default=0.0, required=False)
    parser.add_argument("--training_ratio", type=float, default=0.6, required=False)
    parser.add_argument("--learning_rate", type=float, default=3e-4, required=False)
    parser.add_argument("--z_dim", type=int, default=10, required=False)
    parser.add_argument("--coteaching", type=float, default=1.0, required=False)
    parser.add_argument("--missing_ratio", type=float, default=0.0, required=False)
    config = parser.parse_args()
    """
    read data
    """
    # seed all RNGs so the data split and initialisation are reproducible
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.backends.cudnn.benchmark = True
    # build the solver, then run the full train/evaluate cycle
    Solver = Solver_RCA(
        data_name=config.data,
        hidden_dim=config.hidden_dim,
        z_dim=config.z_dim,
        seed=config.seed,
        learning_rate=config.learning_rate,
        batch_size=config.batch_size,
        training_ratio=config.training_ratio,
        max_epochs=config.max_epochs,
        missing_ratio=config.missing_ratio,
        oe=config.oe,
    )
    Solver.train()
    Solver.test()
    print("Data {} finished".format(config.data))
# --- end of file (dataset separator removed) ---
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import torch as torch
class SVDD(nn.Module):
    """Autoencoder with an auxiliary Deep-SVDD scoring head.

    ``c1`` is the hypersphere centre in latent space (set via ``init_c``);
    ``distance`` measures how far each embedding lies from that centre.
    """

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(SVDD, self).__init__()
        # hypersphere centre and radius, filled in later by the training code
        self.c1 = torch.zeros(z_dim)
        self.R1 = None
        encoder_layers = [
            nn.Linear(input_dim, hidden_dim, bias=False),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim, bias=False),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim, bias=False),
            nn.LeakyReLU(0.1),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        decoder_layers = [
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim, bias=False),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim, bias=False),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim, bias=False),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*decoder_layers)
        self.svdd_layer = nn.Linear(z_dim, 1, bias=False)

    def forward(self, x1):
        """Return (latent code, reconstruction, svdd score)."""
        latent = self.encoder(x1)
        reconstruction = self.decoder(latent)
        score = self.svdd_layer(latent)
        return latent, reconstruction, score

    def init_c(self, c1):
        """Set the hypersphere centre and echo it back."""
        self.c1 = c1
        return c1

    def distance(self, z1):
        """Per-sample Euclidean distance from the centre ``c1``."""
        return torch.sqrt(((z1 - self.c1) ** 2).sum(dim=1))
class SVMLoss(torch.nn.Module):
    """RMS distance between latent embeddings and the hypersphere centre."""

    def __init__(self):
        super(SVMLoss, self).__init__()

    def forward(self, z1, c1):
        # sqrt of the mean squared deviation over every element
        squared_dev = (z1 - c1) ** 2
        return torch.sqrt(squared_dev.mean())
# --- end of file (dataset separator removed) ---
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import torch as torch
class SingleAE(nn.Module):
    """Plain MLP autoencoder with dropout and a sigmoid-bounded output."""

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(SingleAE, self).__init__()
        encoder_layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        ]
        self.encoder1 = nn.Sequential(*encoder_layers)
        decoder_layers = [
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        ]
        self.decoder1 = nn.Sequential(*decoder_layers)

    def forward(self, x1):
        """Encode then decode; returns the reconstruction only."""
        code = self.encoder1(x1)
        return self.decoder1(code)
class AE(nn.Module):
    """Twin autoencoder used for co-teaching-style sample selection."""

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(AE, self).__init__()
        # branch 1: encoder/decoder actually used by forward()
        self.encoder1 = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        )
        self.decoder1 = nn.Sequential(
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )
        # NOTE(review): encoder2/decoder2 are constructed but never used --
        # forward() routes both inputs through encoder1/decoder1.  Confirm
        # whether weight sharing between the two branches is intended.
        self.encoder2 = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        )
        self.decoder2 = nn.Sequential(
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )

    def forward(self, x1, x2):
        """Return latent codes and reconstructions for both input batches."""
        # d is unused; kept as-is
        _, d = x1.shape
        z1 = self.encoder1(x1)
        xhat1 = self.decoder1(z1)
        z2 = self.encoder1(x2)
        xhat2 = self.decoder1(z2)
        return z1, z2, xhat1, xhat2
# --- end of file (dataset separator removed) ---
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import torch as torch
class SingleAE(nn.Module):
    """Plain MLP autoencoder with dropout and a sigmoid-bounded output."""

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(SingleAE, self).__init__()
        encoder_layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        ]
        self.encoder1 = nn.Sequential(*encoder_layers)
        decoder_layers = [
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        ]
        self.decoder1 = nn.Sequential(*decoder_layers)

    def forward(self, x1):
        """Encode then decode; returns the reconstruction only."""
        code = self.encoder1(x1)
        return self.decoder1(code)
class AE(nn.Module):
    """Twin autoencoder used by the RCA solver (one branch per view)."""

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(AE, self).__init__()
        # branch 1: encoder/decoder actually used by forward()
        self.encoder1 = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        )
        self.decoder1 = nn.Sequential(
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )
        # NOTE(review): encoder2/decoder2 are constructed but never used --
        # forward() routes both inputs through encoder1/decoder1.  Confirm
        # whether weight sharing between the two branches is intended.
        self.encoder2 = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        )
        self.decoder2 = nn.Sequential(
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )

    def forward(self, x1, x2):
        """Return latent codes and reconstructions for both input batches."""
        # d is unused; kept as-is
        _, d = x1.shape
        z1 = self.encoder1(x1)
        xhat1 = self.decoder1(z1)
        z2 = self.encoder1(x2)
        xhat2 = self.decoder1(z2)
        return z1, z2, xhat1, xhat2
# --- end of file (dataset separator removed) ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torchvision
from torch.autograd import Variable
import itertools
from utils import *
class Cholesky(torch.autograd.Function):
    # Differentiable Cholesky factorisation.
    # NOTE(review): written in the legacy autograd style -- forward/backward
    # lack @staticmethod, so this only works as Cholesky()(a) on older
    # PyTorch; modern releases require static methods plus .apply().
    def forward(ctx, a):
        # lower-triangular factor of `a`.  torch.cholesky is deprecated in
        # favour of torch.linalg.cholesky in recent PyTorch -- verify the
        # target version before reuse.
        l = torch.cholesky(a, False)
        ctx.save_for_backward(l)
        return l

    def backward(ctx, grad_output):
        # propagate grad_output through the factorisation; the 0.5-diagonal
        # mask halves the diagonal contribution of the triangular product
        l, = ctx.saved_variables  # deprecated alias of ctx.saved_tensors
        linv = l.inverse()
        inner = torch.tril(torch.mm(l.t(), grad_output)) * torch.tril(
            1.0 - Variable(l.data.new(l.size(1)).fill_(0.5).diag()))
        s = torch.mm(linv.t(), torch.mm(inner, linv))
        return s
class DaGMM(nn.Module):
    """Deep Autoencoding Gaussian Mixture Model (anomaly detector).

    An autoencoder compresses the input; the latent code is concatenated
    with two reconstruction features (relative Euclidean distance and cosine
    similarity) and fed to an estimation network that predicts soft GMM
    memberships.  A sample's energy under the fitted GMM is its anomaly
    score.
    """

    def __init__(self, input_dim, hidden_dim, z_dim, n_gmm=2):
        super(DaGMM, self).__init__()
        # hidden representation plus reconstruction loss and cos similarity
        latent_dim = z_dim + 2

        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, z_dim),
        )
        self.decoder = nn.Sequential(
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, input_dim),
            nn.Sigmoid(),
        )
        # estimation network: augmented latent -> soft mixture memberships
        layers = []
        layers += [nn.Linear(latent_dim, 10)]
        layers += [nn.Tanh()]
        layers += [nn.Dropout(p=0.5)]
        layers += [nn.Linear(10, n_gmm)]
        layers += [nn.Softmax(dim=1)]
        self.estimation = nn.Sequential(*layers)
        # cached GMM parameters, refreshed by compute_gmm_params()
        self.register_buffer("phi", torch.zeros(n_gmm))
        self.register_buffer("mu", torch.zeros(n_gmm, latent_dim))
        self.register_buffer("cov", torch.zeros(n_gmm, latent_dim, latent_dim))

    def relative_euclidean_distance(self, a, b):
        """Per-row ||a - b|| / ||a||."""
        return (a - b).norm(2, dim=1) / a.norm(2, dim=1)

    def forward(self, x):
        """Return (code, reconstruction, augmented latent z, memberships gamma)."""
        enc = self.encoder(x)
        dec = self.decoder(enc)
        rec_cosine = F.cosine_similarity(x, dec, dim=1)
        rec_euclidean = self.relative_euclidean_distance(x, dec)
        # z = [code | relative euclidean | cosine similarity]
        z = torch.cat([enc, rec_euclidean.unsqueeze(-1), rec_cosine.unsqueeze(-1)], dim=1)
        gamma = self.estimation(z)
        return enc, dec, z, gamma

    def compute_gmm_params(self, z, gamma):
        """Fit mixture weights/means/covariances from (z, gamma) and cache them."""
        if torch.isnan(gamma.sum()):
            print("pause")  # debug guard left in place for NaN memberships
        gamma = torch.clamp(gamma, 0.0001, 0.9999)
        N = gamma.size(0)
        # K: per-component responsibility mass
        sum_gamma = torch.sum(gamma, dim=0)
        # K: mixture weights
        phi = (sum_gamma / N)
        self.phi = phi.data
        # K x D: responsibility-weighted means
        mu = torch.sum(gamma.unsqueeze(-1) * z.unsqueeze(1), dim=0) / (sum_gamma.unsqueeze(-1))
        self.mu = mu.data
        # z = N x D, mu = K x D, gamma = N x K -> z_mu = N x K x D
        z_mu = (z.unsqueeze(1) - mu.unsqueeze(0))
        # z_mu_outer = N x K x D x D
        z_mu_outer = z_mu.unsqueeze(-1) * z_mu.unsqueeze(-2)
        # K x D x D: responsibility-weighted covariances
        cov = torch.sum(gamma.unsqueeze(-1).unsqueeze(-1) * z_mu_outer, dim=0) / sum_gamma.unsqueeze(-1).unsqueeze(-1)
        self.cov = cov.data
        return phi, mu, cov

    def compute_energy(self, z, phi=None, mu=None, cov=None, size_average=True):
        """GMM energy (negative log-likelihood) of each z; higher = more anomalous.

        Falls back to the cached phi/mu/cov buffers when the parameters are
        not given explicitly.  Also returns the summed inverse-covariance
        diagonal used as a regulariser.
        """
        if phi is None:
            phi = to_var(self.phi)
        if mu is None:
            mu = to_var(self.mu)
        if cov is None:
            cov = to_var(self.cov)
        k, D, _ = cov.size()
        z_mu = (z.unsqueeze(1) - mu.unsqueeze(0))
        cov_inverse = []
        det_cov = []
        cov_diag = 0
        eps = 1e-8
        for i in range(k):
            # jitter the diagonal so every component covariance is invertible
            cov_k = cov[i] + to_var(torch.eye(D) * eps)
            cov_inverse.append(torch.inverse(cov_k).unsqueeze(0))
            det = cov_k.data.cpu().numpy() * (2 * np.pi)
            det_a = np.linalg.det(det)
            if np.isnan(np.array(det_a)):
                print('pause')  # debug guard for a singular covariance
            det_cov.append(np.linalg.det(cov_k.data.cpu().numpy() * (2 * np.pi)))
            cov_diag = cov_diag + torch.sum(1 / cov_k.diag())
        # K x D x D
        cov_inverse = torch.cat(cov_inverse, dim=0)
        # K
        det_cov = to_var(torch.from_numpy(np.float32(np.array(det_cov))))
        # N x K: Mahalanobis-style exponent per (sample, component) pair
        exp_term_tmp = -0.5 * torch.sum(torch.sum(z_mu.unsqueeze(-1) * cov_inverse.unsqueeze(0), dim=-2) * z_mu, dim=-1)
        # for stability (logsumexp trick)
        max_val = torch.max((exp_term_tmp).clamp(min=0), dim=1, keepdim=True)[0]
        exp_term = torch.exp(exp_term_tmp - max_val)
        sample_energy = -max_val.squeeze() - torch.log(
            torch.sum(phi.unsqueeze(0) * exp_term / (torch.sqrt(det_cov)).unsqueeze(0), dim=1) + eps)
        if size_average:
            sample_energy = torch.mean(sample_energy)
        return sample_energy, cov_diag

    def loss_function(self, x, x_hat, z, gamma, lambda_energy, lambda_cov_diag):
        """Reconstruction MSE + lambda_energy * energy + lambda_cov_diag * cov penalty."""
        recon_error = torch.mean((x - x_hat) ** 2)
        phi, mu, cov = self.compute_gmm_params(z, gamma)
        sample_energy, cov_diag = self.compute_energy(z, phi, mu, cov)
        loss = recon_error + lambda_energy * sample_energy + lambda_cov_diag * cov_diag
        return loss, sample_energy, recon_error, cov_diag
import os
import argparse
import torch
import torch.nn.functional as F
from pytorch_msssim import ssim
from torch.utils.data import DataLoader
from collections import OrderedDict
from utils import AverageMeter, write_img, chw_to_hwc
from datasets.loader import PairLoader
from models import *
# CLI configuration for stand-alone model evaluation; executed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='LKD-s', type=str, help='model name')
parser.add_argument('--model_weight', default='./result/RESIDE-OUT/LKD-s/LKD-s.pth', type=str,
                    help='model weight file name')
parser.add_argument('--num_workers', default=8, type=int, help='number of workers')
parser.add_argument('--datasets_dir', default='./data', type=str, help='path to dataset')
parser.add_argument('--save_dir', default='./result',
                    type=str, help='path to models saving')
parser.add_argument('--dataset', default='SOTS', type=str, help='dataset name')
parser.add_argument('--subset', default='outdoor', type=str, help='subset')
parser.add_argument('--mode', default='valid', type=str, help='dataset mode')
args = parser.parse_args()

# pin the run to GPU 0; fall back to CPU when CUDA is unavailable
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def single(save_dir, map_location=None):
    """Load a checkpoint and strip any ``nn.DataParallel`` ``module.`` prefix.

    Args:
        save_dir: path to the ``.pth`` state-dict file.
        map_location: optional device for ``torch.load``; defaults to the
            module-level ``device`` (backward-compatible new parameter).

    Returns:
        OrderedDict with prefix-free parameter names, suitable for loading
        into a plain (non-DataParallel) module.
    """
    if map_location is None:
        map_location = torch.device(device)
    state_dict = torch.load(save_dir, map_location=map_location)
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # bug fix: only strip when the prefix is actually present; the old
        # unconditional k[7:] corrupted keys of non-DataParallel checkpoints
        name = k[len('module.'):] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict
def test(test_loader, network, result_dir):
    """Evaluate ``network`` on ``test_loader``; report PSNR/SSIM per image.

    Writes each dehazed image to ``result_dir/imgs`` and a per-file
    PSNR/SSIM table (plus the overall averages) to ``result_dir/results.csv``.
    Relies on the module-level ``device``.
    """
    PSNR = AverageMeter()
    SSIM = AverageMeter()
    torch.cuda.empty_cache()
    network.eval()

    os.makedirs(os.path.join(result_dir, 'imgs'), exist_ok=True)
    f_result = open(os.path.join(result_dir, 'results.csv'), 'w')

    for idx, batch in enumerate(test_loader):
        input = batch['source'].to(device)
        target = batch['target'].to(device)

        filename = batch['filename'][0]

        with torch.no_grad():
            output = network(input).clamp_(-1, 1)

        # [-1, 1] to [0, 1]
        output = output * 0.5 + 0.5
        target = target * 0.5 + 0.5

        psnr_val = 10 * torch.log10(1 / F.mse_loss(output, target)).item()

        _, _, H, W = output.size()
        # SSIM is measured at a downsampled resolution (shorter side ~256)
        down_ratio = max(1, round(min(H, W) / 256))
        ssim_val = ssim(F.adaptive_avg_pool2d(output, (int(H / down_ratio), int(W / down_ratio))),
                        F.adaptive_avg_pool2d(target, (int(H / down_ratio), int(W / down_ratio))),
                        data_range=1, size_average=False).item()

        PSNR.update(psnr_val)
        SSIM.update(ssim_val)

        print('Test: [{0}]\t'
              'PSNR: {psnr.val:.02f} ({psnr.avg:.02f})\t'
              'SSIM: {ssim.val:.03f} ({ssim.avg:.03f})'
              .format(idx, psnr=PSNR, ssim=SSIM))

        f_result.write('%s,%.02f,%.03f\n' % (filename, psnr_val, ssim_val))

        out_img = chw_to_hwc(output.detach().cpu().squeeze(0).numpy())
        write_img(os.path.join(result_dir, 'imgs', filename), out_img)

    f_result.write('Avg_all,%.02f,%.03f\n' % (PSNR.avg, SSIM.avg))
    f_result.close()
if __name__ == '__main__':
    # load model
    # NOTE(review): eval() on a CLI-supplied model name executes arbitrary
    # code if the argument is untrusted -- consider a lookup table instead.
    network = eval(args.model.replace('-', '_'))()
    network.to(device)

    # load pre-trained weight (DataParallel prefix stripped by single())
    if os.path.exists(args.model_weight):
        print('==> Start testing, current model name: ' + args.model)
        network.load_state_dict(single(args.model_weight), strict=False)
    else:
        print('==> No existing trained model!')
        exit(0)

    # load dataset
    test_dataset = PairLoader(args.datasets_dir, os.path.join(args.dataset, args.subset), args.mode)
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             num_workers=args.num_workers,
                             pin_memory=True)

    result_dir = os.path.join(args.save_dir, 'test_result', args.model, args.dataset, args.subset)

    # begin test
    test(test_loader, network, result_dir)
# --- end of file (dataset separator removed) ---
import os
import argparse
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
import logging
import time
import shutil
import torch.backends.cudnn as cudnn
from utils import AverageMeter
from datasets.loader import PairLoader
from utils.utils import create_logger, summary_model, \
save_checkpoint, resume_checkpoint, save_model, \
set_seed_torch
from models import *
# CLI configuration for training; executed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='LKD-t', type=str, help='model')
parser.add_argument('--model_name', default='LKD.py', type=str, help='model name')
parser.add_argument('--num_workers', default=8, type=int, help='number of workers')
parser.add_argument('--no_autocast', action='store_false', default=True, help='disable autocast')
parser.add_argument('--save_dir', default='./result', type=str,
                    help='path to models saving')
# NOTE(review): argparse's type=bool treats any non-empty string as True --
# "--resume_checkpoint False" still yields True; consider store_true/false.
parser.add_argument('--resume_checkpoint', default=True, type=bool,
                    help='resume checkpoint')
# dataset config
parser.add_argument('--datasets_dir', default='./data', type=str, help='path to datasets dir')
parser.add_argument('--train_dataset', default='ITS', type=str, help='train dataset name')
parser.add_argument('--valid_dataset', default='SOTS', type=str, help='valid dataset name')
parser.add_argument('--exp_config', default='indoor', type=str, help='experiment configuration')
parser.add_argument('--exp_name', default='test', type=str, help='experiment name')
parser.add_argument('--gpu', default='0', type=str, help='GPUs used for training')
# cudnn flags consumed by setup_cudnn()
parser.add_argument('--cudnn_BENCHMARK', default=True)
parser.add_argument('--cudnn_DETERMINISTIC', default=False)
parser.add_argument('--cudnn_ENABLED', default=True)

args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
def train(train_loader, network, criterion, optimizer, scaler):
    """Run one training epoch with AMP mixed precision; returns the mean loss.

    NOTE(review): reads the module-level globals ``epoch`` (progress-bar
    label) and ``args`` (autocast toggle) set by the __main__ block --
    confirm before reusing this function elsewhere.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    torch.cuda.empty_cache()

    network.train()

    pbar = tqdm(desc="Epoch[{0}]".format(epoch), total=len(train_loader), leave=True,
                ncols=160)
    end = time.time()
    for batch in train_loader:
        source_img = batch['source'].cuda()
        target_img = batch['target'].cuda()

        with autocast(args.no_autocast):
            output = network(source_img)
            loss1 = criterion[0](output, target_img)
            # loss2 = criterion[1](output, target_img)
            loss = loss1

        losses.update(loss.item())

        # scaled backward pass (GradScaler handles fp16 gradient scaling)
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        pbar.set_postfix(Speed="{:.1f} samples/s".format(output.size(0) / batch_time.val),
                         Loss="{:.5f}".format(loss))
        pbar.update()
    pbar.close()

    return losses.avg
def valid(val_loader, network):
    """Evaluate on the validation set; returns (mean loss, mean PSNR).

    NOTE(review): reads the module-level global ``criterion`` built in the
    __main__ block (unlike train(), which takes it as a parameter) --
    consider passing it explicitly.
    """
    losses = AverageMeter()
    PSNR = AverageMeter()

    torch.cuda.empty_cache()

    network.eval()
    end = time.time()  # currently unused

    # init progress bar
    pbar = tqdm(desc="Testing", total=len(val_loader), leave=True, ncols=160)

    for batch in val_loader:
        source_img = batch['source'].cuda()
        target_img = batch['target'].cuda()

        with torch.no_grad():  # torch.no_grad() may cause warning
            output = network(source_img)
            loss1 = criterion[0](output, target_img)
            # loss2 = criterion[1](output, target_img)
            loss = loss1
        losses.update(loss.item())

        # outputs are in [-1, 1]; rescale to [0, 1] before computing PSNR
        mse_loss = F.mse_loss(output * 0.5 + 0.5, target_img * 0.5 + 0.5, reduction='none').mean((1, 2, 3))
        # mse_loss = F.mse_loss(output, target_img, reduction='none').mean((1, 2, 3))
        psnr = 10 * torch.log10(1 / mse_loss).mean()
        pbar.set_postfix(PSNR="{:.2f}db".format(psnr))
        pbar.update()
        PSNR.update(psnr.item(), source_img.size(0))
    pbar.close()
    return losses.avg, PSNR.avg
def setup_cudnn(config):
    """Apply the cuDNN flags carried on ``config`` to the global backend."""
    torch.backends.cudnn.benchmark = config.cudnn_BENCHMARK
    torch.backends.cudnn.deterministic = config.cudnn_DETERMINISTIC
    torch.backends.cudnn.enabled = config.cudnn_ENABLED
if __name__ == '__main__':
    # load per-model hyperparameters from configs/<exp_config>/<model>.json
    setting_filename = os.path.join('configs', args.exp_config, args.model + '.json')
    with open(setting_filename, 'r') as f:
        setting = json.load(f)

    # set random seed
    set_seed_torch()
    setup_cudnn(args)

    # Create logger
    final_output_dir = os.path.join(args.save_dir, args.train_dataset, args.exp_name)
    create_logger(final_output_dir)

    # build network
    network = eval(args.model.replace('-', '_'))()

    # copy config file
    shutil.copy2(
        setting_filename,
        final_output_dir
    )
    # copy model file
    summary_model(network, args.model_name, final_output_dir, [256, 256])
    network = nn.DataParallel(network).cuda()

    # build criterion
    criterion = []
    criterion.append(nn.L1Loss().cuda())

    # build optimizer
    if setting['optimizer'] == 'adam':
        optimizer = torch.optim.Adam(network.parameters(), lr=setting['lr'])
    elif setting['optimizer'] == 'adamw':
        optimizer = torch.optim.AdamW(network.parameters(), lr=setting['lr'])
    else:
        raise Exception("ERROR: unsupported optimizer")

    # resume checkpoint
    best_psnr, begin_epoch = resume_checkpoint(network, optimizer, args, final_output_dir, True)

    # build scheduler (cosine decay down to 1% of the base lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=setting['epochs'],
                                                           eta_min=setting['lr'] * 1e-2, last_epoch=begin_epoch - 1)
    # build scaler
    scaler = GradScaler()

    # build dataloader
    train_dataset = PairLoader(args.datasets_dir, args.train_dataset, 'train',
                               setting['patch_size'], setting['only_h_flip'])
    # NOTE(review): the positional arguments after 'valid' differ in order
    # from the 'train' call above (valid_mode before patch_size) -- confirm
    # against the PairLoader signature.
    val_dataset = PairLoader(args.datasets_dir, os.path.join(args.valid_dataset, args.exp_config), 'valid',
                             setting['valid_mode'], setting['patch_size'])
    train_loader = DataLoader(train_dataset,
                              batch_size=setting['batch_size'],
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=1,
                            num_workers=args.num_workers,
                            pin_memory=True)

    # init SummaryWriter
    writer = SummaryWriter(log_dir=final_output_dir)

    # begin epoch
    logging.info('=> start training')
    for epoch in range(begin_epoch, setting['epochs'] + 1):
        head = 'Epoch[{}]:'.format(epoch)
        logging.info('=> {} train start'.format(head))
        lr = scheduler.get_last_lr()[0]
        logging.info(f'=> lr: {lr}')
        start = time.time()
        train_loss = train(train_loader, network, criterion, optimizer, scaler)
        writer.add_scalars('Loss', {'train Loss': train_loss}, epoch)
        msg = '=> Train:\t' \
              'Loss {:.4f}\t'.format(train_loss)
        logging.info(msg)
        logging.info('=> {} train end, duration: {:.2f}s'.format(head, time.time() - start))
        scheduler.step(epoch=epoch + 1)
        save_checkpoint(model=network, model_name=args.model.replace('-', '_'), optimizer=optimizer,
                        output_dir=final_output_dir, in_epoch=True, epoch_or_step=epoch, best_perf=best_psnr)
        # periodic validation; keep the best-PSNR snapshot
        if epoch % setting['eval_freq'] == 0:
            logging.info('=> {} validate start'.format(head))
            val_start = time.time()
            valid_loss, avg_psnr = valid(val_loader, network)
            writer.add_scalars('Loss', {'valid Loss': valid_loss}, epoch)
            msg = '=> Valid:\t' \
                  'Loss {:.4f}\t' \
                  'PSNR {:.2f}\t'.format(valid_loss, avg_psnr)
            logging.info(msg)
            logging.info('=> {} validate end, duration: {:.2f}s'.format(head, time.time() - val_start))
            writer.add_scalar('valid_psnr', avg_psnr, epoch)
            if avg_psnr > best_psnr:
                best_psnr = avg_psnr
                save_model(network, final_output_dir, 'best_model.pth')
            writer.add_scalar('best_psnr', best_psnr, epoch)
    writer.close()
    save_model(network, final_output_dir, 'final_model.pth')
    logging.info('=> finish training')
    logging.info("=> Highest PSNR:{:.2f}".format(best_psnr))
# --- end of file (dataset separator removed) ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.nn.init import _calculate_fan_in_and_fan_out
from timm.models.layers import trunc_normal_
class LayerNorm(nn.Module):
    """Normalise each sample over all of its (C, H, W) elements, then apply a
    learnable per-channel affine transform (weight init 1, bias init 0)."""

    def __init__(self, dim, eps=1e-5):
        super(LayerNorm, self).__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones((1, dim, 1, 1)))
        self.bias = nn.Parameter(torch.zeros((1, dim, 1, 1)))

    def forward(self, input):
        mu = input.mean(dim=(1, 2, 3), keepdim=True)
        var = (input - mu).pow(2).mean(dim=(1, 2, 3), keepdim=True)
        sigma = torch.sqrt(var + self.eps)
        return ((input - mu) / sigma) * self.weight + self.bias
class Mlp(nn.Module):
    """1x1 conv -> depth-wise 3x3 conv -> ReLU -> 1x1 conv.

    Conv weights are initialised with a truncated normal whose std shrinks
    with total network depth: gain = (8 * network_depth) ** -0.25.
    """

    def __init__(self, network_depth, in_features, hidden_features=None, out_features=None):
        super().__init__()
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.network_depth = network_depth
        self.mlp = nn.Sequential(
            nn.Conv2d(in_features, hidden, 1),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=True, groups=hidden),
            nn.ReLU(True),
            nn.Conv2d(hidden, out, 1),
        )
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if not isinstance(m, nn.Conv2d):
            return
        gain = (8 * self.network_depth) ** (-1 / 4)
        fan_in, fan_out = _calculate_fan_in_and_fan_out(m.weight)
        std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
        trunc_normal_(m.weight, std=std)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)

    def forward(self, x):
        return self.mlp(x)
class CEFN(nn.Module):
    """Channel-Enhanced Feed-forward Network.

    Runs Mlp + LayerNorm on the input and gates the result with a
    squeeze-and-excitation channel attention computed from the *input*
    (reduction factor 8), scaled by a learnable per-channel parameter.
    """

    def __init__(self, dim, network_depth, hidden_features=None, out_features=None):
        super(CEFN, self).__init__()
        self.mlp = Mlp(network_depth=network_depth, in_features=dim,
                       hidden_features=hidden_features, out_features=out_features)
        self.norm = LayerNorm(dim, eps=1e-5)
        # SE-style gate: global pool -> bottleneck 1x1 convs -> sigmoid.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, dim // 8, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim // 8, dim, 1, padding=0, bias=True),
            nn.Sigmoid(),
        )
        self.scaler = nn.Parameter(torch.ones(dim, 1, 1))

    def forward(self, x):
        gate = self.scaler * self.ca(x)
        return self.norm(self.mlp(x)) * gate
class LKDBlock(nn.Module):
    """Large-Kernel-Decomposed block: a DLKCB branch then a CEFN branch,
    each preceded by BatchNorm and wrapped in a residual connection.

    The DW 5x5 conv followed by a dilated (d=3) DW 7x7 conv approximates a
    large receptive field at depth-wise cost.
    """

    def __init__(self, network_depth, dim, mlp_ratio=4.):
        super().__init__()
        # DLKCB: 1x1 -> DW 5x5 -> DW dilated 7x7 -> 1x1 (reflect padding).
        self.norm1 = nn.BatchNorm2d(dim)
        self.Linear1 = nn.Conv2d(dim, dim, 1)
        self.DWConv = nn.Conv2d(dim, dim, 5, padding=2, groups=dim, padding_mode='reflect')
        self.DWDConv = nn.Conv2d(dim, dim, 7, stride=1, padding=9, groups=dim,
                                 dilation=3, padding_mode='reflect')
        self.Linear2 = nn.Conv2d(dim, dim, 1)
        # CEFN branch.
        self.norm2 = nn.BatchNorm2d(dim)
        self.cemlp = CEFN(network_depth=network_depth, dim=dim,
                          hidden_features=int(mlp_ratio) * dim, out_features=dim)

    def forward(self, x):
        shortcut = x
        y = self.Linear2(self.DWDConv(self.DWConv(self.Linear1(self.norm1(x)))))
        x = y + shortcut
        return self.cemlp(self.norm2(x)) + x
class LKDBlocks(nn.Module):
    """A stage of `depth` sequential LKDBlock modules sharing one width."""

    def __init__(self, network_depth, dim, depth, mlp_ratio=4.):
        super().__init__()
        self.dim = dim
        self.depth = depth
        self.blocks = nn.ModuleList(
            LKDBlock(network_depth=network_depth, dim=dim, mlp_ratio=mlp_ratio)
            for _ in range(depth)
        )

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x
class PatchEmbed(nn.Module):
    """Strided conv that downsamples by `patch_size` and maps to `embed_dim`.

    With the default kernel (== patch_size) the padding is 0; for a larger
    kernel the padding keeps the output at H/patch_size x W/patch_size.
    """

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, kernel_size=None):
        super().__init__()
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        k = patch_size if kernel_size is None else kernel_size
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=k, stride=patch_size,
                              padding=(k - patch_size + 1) // 2, padding_mode='reflect')

    def forward(self, x):
        return self.proj(x)
class PatchUnEmbed(nn.Module):
    """Upsamples by `patch_size` via conv + PixelShuffle, mapping to `out_chans`."""

    def __init__(self, patch_size=4, out_chans=3, embed_dim=96, kernel_size=None):
        super().__init__()
        self.out_chans = out_chans
        self.embed_dim = embed_dim
        k = 1 if kernel_size is None else kernel_size
        self.proj = nn.Sequential(
            # Expand channels by patch_size**2, then shuffle them into space.
            nn.Conv2d(embed_dim, out_chans * patch_size ** 2, kernel_size=k,
                      padding=k // 2, padding_mode='reflect'),
            nn.PixelShuffle(patch_size),
        )

    def forward(self, x):
        return self.proj(x)
class SKFusion(nn.Module):
    """Selective-kernel fusion of `height` same-shape feature maps.

    A global average pool of the summed inputs drives a small bottleneck MLP
    that emits one softmax weight per (branch, channel); the output is the
    weighted sum of the branches.
    """

    def __init__(self, dim, height=2, reduction=8):
        super(SKFusion, self).__init__()
        self.height = height
        hidden = max(int(dim / reduction), 4)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.mlp = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(hidden, dim * self.height, 1, bias=False),
        )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, in_feats):
        batch, channels, h, w = in_feats[0].shape
        stacked = torch.cat(in_feats, dim=1).view(batch, self.height, channels, h, w)
        attn = self.mlp(self.avg_pool(stacked.sum(dim=1)))
        attn = self.softmax(attn.view(batch, self.height, channels, 1, 1))
        return (stacked * attn).sum(dim=1)
class Dehaze(nn.Module):
    """5-stage U-shaped dehazing network built from LKDBlocks.

    Encoder: two 2x downsamples (patch_merge1/2); decoder: two 2x upsamples
    (patch_split1/2) with SKFusion skip connections.  The head produces
    `out_chans` = 4 channels that `forward` splits into a 1-channel scale
    map K and a 3-channel offset map B for the recovery x' = K*x - B + x.
    """

    def __init__(self, in_chans=3, out_chans=4,
                 embed_dims=[24, 48, 96, 48, 24],
                 mlp_ratios=[2., 4., 4., 2., 2.],
                 depths=[16, 16, 16, 8, 8], ):
        # NOTE(review): mutable list defaults are shared between calls;
        # harmless here since they are never mutated, but fragile.
        super(Dehaze, self).__init__()
        self.patch_size = 4  # total downsample factor (two stride-2 merges)
        self.mlp_ratios = mlp_ratios
        # Stem: stride-1 3x3 conv to embed_dims[0] (no resolution change).
        self.patch_embed = PatchEmbed(
            patch_size=1, in_chans=in_chans, embed_dim=embed_dims[0], kernel_size=3)
        # backbone
        self.layer1 = LKDBlocks(network_depth=sum(depths), dim=embed_dims[0], depth=depths[0], mlp_ratio=mlp_ratios[0])
        self.patch_merge1 = PatchEmbed(
            patch_size=2, in_chans=embed_dims[0], embed_dim=embed_dims[1])
        # 1x1 conv applied to the stage-1 skip before fusion.
        self.skip1 = nn.Conv2d(embed_dims[0], embed_dims[0], 1)
        self.layer2 = LKDBlocks(network_depth=sum(depths), dim=embed_dims[1], depth=depths[1], mlp_ratio=mlp_ratios[1])
        self.patch_merge2 = PatchEmbed(
            patch_size=2, in_chans=embed_dims[1], embed_dim=embed_dims[2])
        self.skip2 = nn.Conv2d(embed_dims[1], embed_dims[1], 1)
        self.layer3 = LKDBlocks(network_depth=sum(depths), dim=embed_dims[2], depth=depths[2], mlp_ratio=mlp_ratios[2])
        self.patch_split1 = PatchUnEmbed(
            patch_size=2, out_chans=embed_dims[3], embed_dim=embed_dims[2])
        # Decoder widths must match the corresponding encoder widths so the
        # SKFusion skip connections line up.
        assert embed_dims[1] == embed_dims[3]
        self.fusion1 = SKFusion(embed_dims[3])
        self.layer4 = LKDBlocks(network_depth=sum(depths), dim=embed_dims[3], depth=depths[3], mlp_ratio=mlp_ratios[3])
        self.patch_split2 = PatchUnEmbed(
            patch_size=2, out_chans=embed_dims[4], embed_dim=embed_dims[3])
        assert embed_dims[0] == embed_dims[4]
        self.fusion2 = SKFusion(embed_dims[4])
        self.layer5 = LKDBlocks(network_depth=sum(depths), dim=embed_dims[4], depth=depths[4], mlp_ratio=mlp_ratios[4])
        self.patch_unembed = PatchUnEmbed(
            patch_size=1, out_chans=out_chans, embed_dim=embed_dims[4], kernel_size=3)

    def check_image_size(self, x):
        """Reflect-pad H and W up to the next multiple of self.patch_size."""
        _, _, h, w = x.size()
        patch_size = self.patch_size
        mod_pad_h = (patch_size - h % patch_size) % patch_size
        mod_pad_w = (patch_size - w % patch_size) % patch_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        """Run the encoder-decoder backbone at the stem's resolution."""
        x = self.patch_embed(x)
        x = self.layer1(x)
        skip1 = x
        x = self.patch_merge1(x)
        x = self.layer2(x)
        skip2 = x
        x = self.patch_merge2(x)
        x = self.layer3(x)
        x = self.patch_split1(x)
        # SK-fuse upsampled features with the projected skip, plus residual.
        x = self.fusion1([x, self.skip2(skip2)]) + x
        x = self.layer4(x)
        x = self.patch_split2(x)
        x = self.fusion2([x, self.skip1(skip1)]) + x
        x = self.layer5(x)
        x = self.patch_unembed(x)
        return x

    def forward(self, x):
        """Dehaze *x* of shape (N, 3, H, W); output is cropped back to (H, W)."""
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        feat = self.forward_features(x)
        # Split the 4-channel head into a scale map K and an offset map B.
        K, B = torch.split(feat, (1, 3), dim=1)
        x = K * x - B + x
        x = x[:, :, :H, :W]
        return x
def LKD_t():
    """Tiny LKD-Net variant (stage depths 1/1/2/1/1)."""
    cfg = dict(
        embed_dims=[24, 48, 96, 48, 24],
        mlp_ratios=[4., 4., 4., 4., 4.],
        depths=[1, 1, 2, 1, 1],
    )
    return Dehaze(**cfg)
def LKD_s():
    """Small LKD-Net variant (stage depths 2/2/4/2/2)."""
    cfg = dict(
        embed_dims=[24, 48, 96, 48, 24],
        mlp_ratios=[4., 4., 4., 4., 4.],
        depths=[2, 2, 4, 2, 2],
    )
    return Dehaze(**cfg)
def LKD_b():
    """Base LKD-Net variant (stage depths 4/4/8/4/4)."""
    cfg = dict(
        embed_dims=[24, 48, 96, 48, 24],
        mlp_ratios=[4., 4., 4., 4., 4.],
        depths=[4, 4, 8, 4, 4],
    )
    return Dehaze(**cfg)
def LKD_l():
    """Large LKD-Net variant (stage depths 8/8/16/8/8)."""
    cfg = dict(
        embed_dims=[24, 48, 96, 48, 24],
        mlp_ratios=[4., 4., 4., 4., 4.],
        depths=[8, 8, 16, 8, 8],
    )
    return Dehaze(**cfg)
| 9,738 | 30.115016 | 119 | py |
LKD-Net | LKD-Net-main/datasets/loader.py | import os
import random
import numpy as np
import cv2
from torch.utils.data import Dataset
from utils import hwc_to_chw, read_img
import torchvision.transforms as tfs
def augment(imgs=None, size=256, edge_decay=0., only_h_flip=False):
    """Apply one shared random crop/flip/rotation to a list of aligned images.

    All images must share the same HxWxC shape; the identical transform is
    applied to every image so hazy/ground-truth pairs stay aligned.

    Args:
        imgs: list of HxWxC numpy arrays (elements are replaced in place and
            the list is also returned).
        size: side length of the square crop.
        edge_decay: probability boost for sampling crops touching the border.
        only_h_flip: if True, skip the random 90-degree rotations.

    Returns:
        The (mutated) list of augmented images.
    """
    # BUG FIX: the original signature used a mutable default (imgs=[]),
    # which is shared across calls; use None as the sentinel instead.
    if imgs is None:
        imgs = []
    H, W, _ = imgs[0].shape
    Hc, Wc = [size, size]

    # simple re-weight for the edge: with probability ~edge_decay, snap the
    # crop to the top/bottom (left/right) border instead of sampling freely.
    if random.random() < Hc / H * edge_decay:
        Hs = 0 if random.randint(0, 1) == 0 else H - Hc
    else:
        Hs = random.randint(0, H - Hc)

    if random.random() < Wc / W * edge_decay:
        Ws = 0 if random.randint(0, 1) == 0 else W - Wc
    else:
        Ws = random.randint(0, W - Wc)

    for i in range(len(imgs)):
        imgs[i] = imgs[i][Hs:(Hs + Hc), Ws:(Ws + Wc), :]

    # horizontal flip (50% chance)
    if random.randint(0, 1) == 1:
        for i in range(len(imgs)):
            imgs[i] = np.flip(imgs[i], axis=1)

    if not only_h_flip:
        # bad data augmentations for outdoor: random multiple of 90 degrees
        rot_deg = random.randint(0, 3)
        for i in range(len(imgs)):
            imgs[i] = np.rot90(imgs[i], rot_deg, (0, 1))

    return imgs
def align(imgs=None, size=256):
    """Center-crop every image in *imgs* to a square of side *size*.

    Args:
        imgs: list of HxWxC numpy arrays (elements are replaced in place and
            the list is also returned).
        size: side length of the square crop.

    Returns:
        The (mutated) list of center-cropped images.
    """
    # BUG FIX: avoid the shared mutable-default pitfall (was imgs=[]).
    if imgs is None:
        imgs = []
    H, W, _ = imgs[0].shape
    Hc, Wc = [size, size]

    top = (H - Hc) // 2
    left = (W - Wc) // 2
    for i in range(len(imgs)):
        imgs[i] = imgs[i][top:(top + Hc), left:(left + Wc), :]

    return imgs
class PairLoader(Dataset):
    """Paired hazy/ground-truth image dataset.

    Expects <data_dir>/<dataset_name>/hazy and .../gt directories.  Images
    are read via `read_img`, rescaled from [0, 1] to [-1, 1] and (in 'train'
    mode) jointly cropped/flipped; samples are returned in CHW layout.
    """

    def __init__(self, data_dir, dataset_name, mode, size=256, only_h_flip=False):
        assert mode in ['train', 'valid', 'test']
        self.data_dir = data_dir
        self.dataset_name = dataset_name
        self.mode = mode
        self.size = size              # crop size used in 'train' mode
        self.edge_decay = 0           # border-biased cropping disabled
        self.only_h_flip = only_h_flip
        self.img_names = sorted(os.listdir(os.path.join(self.data_dir, dataset_name, 'hazy')))
        # read exclude files
        exclude_file = os.path.abspath(os.path.join('datasets','exclude_files', self.dataset_name + '_exclude_file.txt'))
        if os.path.exists(exclude_file):
            with open(exclude_file, 'r') as f:
                # SECURITY NOTE(review): eval() on file contents executes
                # arbitrary code; for a plain list of filenames prefer
                # ast.literal_eval or json.load.
                exclude_filenames = eval(f.read())
            # filter out exclude files
            for exclude_filename in exclude_filenames:
                if exclude_filename in self.img_names:
                    self.img_names.remove(exclude_filename)
        self.gt_names = sorted(os.listdir(os.path.join(self.data_dir, dataset_name, 'gt')))
        self.img_num = len(self.img_names)

    def __len__(self):
        return self.img_num

    def __getitem__(self, idx):
        # Disable OpenCV threading/OpenCL inside DataLoader worker processes.
        cv2.setNumThreads(0)
        cv2.ocl.setUseOpenCL(False)
        hazy_name = self.img_names[idx]
        # GT filename is derived from the hazy name's prefix before '_' for
        # these dataset layouts; otherwise pair by sorted listing order.
        if self.dataset_name == 'ITS':
            gt_name = hazy_name.split('_')[0] + '.png'
        elif self.dataset_name == 'OTS':
            gt_name = hazy_name.split('_')[0] + '.jpg'
        elif self.dataset_name == 'SOTS/indoor' or self.dataset_name == 'SOTS/outdoor':
            gt_name = hazy_name.split('_')[0] + '.png'
        else:
            gt_name = self.gt_names[idx]
        source_img = read_img(os.path.join(self.data_dir, self.dataset_name, 'hazy', hazy_name))
        target_img = read_img(os.path.join(self.data_dir, self.dataset_name, 'gt', gt_name))
        # scale [0, 1] to [-1, 1]
        source_img = source_img * 2 - 1
        target_img = target_img * 2 - 1
        if self.mode == 'train':
            # Same random crop/flip for both images keeps the pair aligned.
            [source_img, target_img] = augment([source_img, target_img], self.size, self.edge_decay, self.only_h_flip)
        return {'source': hwc_to_chw(source_img), 'target': hwc_to_chw(target_img), 'filename': hazy_name}
| 3,568 | 31.445455 | 122 | py |
LKD-Net | LKD-Net-main/utils/data_parallel.py | from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.

    Tensors are split along `dim` into `chunk_sizes`; tuples/lists/dicts
    are scattered recursively, and any other object is replicated by
    reference once per target device.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # BUG FIX: the original used a bare `except:` followed by
                # quit(), which also caught KeyboardInterrupt/SystemExit and
                # discarded the real traceback.  Print diagnostics, then
                # re-raise the original error.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                raise
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-collection objects: one shared reference per device.
        return [obj for targets in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary"""
    scattered_args = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad the shorter list so positional args and kwargs line up per device.
    while len(scattered_args) < len(scattered_kw):
        scattered_args.append(())
    while len(scattered_kw) < len(scattered_args):
        scattered_kw.append({})
    return tuple(scattered_args), tuple(scattered_kw)
class BalancedDataParallel(DataParallel):
    """DataParallel variant giving the first GPU a custom (smaller) batch share.

    `gpu0_bsz` is the number of samples placed on device_ids[0]; the rest of
    the batch is split evenly over the remaining devices.  With gpu0_bsz == 0
    device 0 receives no data chunk (its replica is dropped before apply).
    """

    def __init__(self, gpu0_bsz, *args, **kwargs):
        # Per-device batch size reserved for device_ids[0].
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        if len(self.device_ids) == 1:
            # Single device: no balancing needed, run the module directly.
            inputs, kwargs = super().scatter(inputs, kwargs, self.device_ids)
            return self.module(*inputs[0], **kwargs[0])
        # With gpu0_bsz == 0, exclude device 0 from the scatter targets.
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)

        if self.gpu0_bsz == 0:
            replicas = self.replicate(self.module, self.device_ids)
        else:
            replicas = self.replicate(self.module, self.device_ids[:len(inputs)])

        # replicas = self.replicate(self.module, device_ids[:len(inputs)])
        if self.gpu0_bsz == 0:
            # Drop the replica on device 0, which received no input chunk.
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids[:len(inputs)])

    def scatter(self, inputs, kwargs, device_ids):
        # Build per-device chunk sizes: gpu0_bsz on the first device, the
        # remainder split evenly with leftovers going to later devices.
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # gpu0's share is at least the even split: stock scatter is fine.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
LKD-Net | LKD-Net-main/utils/utils.py | from pathlib import Path
import os
import logging
import shutil
import time
import torch
from thop import profile
from fvcore.nn import FlopCountAnalysis
import numpy as np
def setup_logger(final_output_dir, phase):
    """Configure root logging to write to a timestamped file and to the console.

    The log file is named '<phase>_<YYYY-mm-dd-HH-MM>.txt' inside
    *final_output_dir*; the root logger level is set to INFO.
    """
    timestamp = time.strftime('%Y-%m-%d-%H-%M')
    final_log_file = os.path.join(final_output_dir, '{}_{}.txt'.format(phase, timestamp))
    fmt = '%(asctime)-15s:[P:%(process)d]:' + ' %(message)s'
    logging.basicConfig(filename=str(final_log_file), format=fmt)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(fmt))
    logging.getLogger('').addHandler(console)
def create_logger(final_output_dir, phase='train'):
    """Ensure *final_output_dir* exists, then configure logging for *phase*."""
    out_dir = Path(final_output_dir)
    print('=> creating {} ...'.format(out_dir))
    out_dir.mkdir(parents=True, exist_ok=True)
    print('=> setup logger ...')
    setup_logger(out_dir, phase)
def set_seed_torch(seed=2022):
    """Seed Python, NumPy and PyTorch (CPU + current CUDA device) RNGs.

    Also sets PYTHONHASHSEED and forces deterministic cuDNN kernels so that
    training runs are reproducible.
    """
    import random  # local import: this module does not import `random` at top level

    os.environ['PYTHONHASHSEED'] = str(seed)
    # BUG FIX: the original seeded np.random twice (duplicate line) but
    # never seeded Python's own `random` module.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
def summary_model(model, model_name, output_dir, image_size=(256, 256)):
    """Archive the model source file and log FLOPs/params for a 1x3xHxW input.

    Copies models/<model_name> into *output_dir* for reproducibility, then
    measures complexity with fvcore (FLOPs) and thop (params).  Any failure
    during measurement is logged rather than raised.
    """
    this_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    # copy model file
    shutil.copy2(
        os.path.join(this_dir, 'models', model_name),
        output_dir
    )
    try:
        logging.info('== get_model_complexity_info by thop and fvcore ==')
        input = torch.randn(1, 3, image_size[0], image_size[1])
        flops = FlopCountAnalysis(model, input)
        _, params = profile(model, inputs=(input,))
        flops = flops.total() / 1e9    # GFLOPs
        params = params / 1e6          # millions of parameters
        logging.info(f'=> FLOPs: {flops:<8}G, params: {params:<8}M')
        logging.info('== get_model_complexity_info by thop and fvcore ==')
    except Exception:
        logging.error('=> error when run get_model_complexity_info')
def resume_checkpoint(model,
                      optimizer,
                      config,
                      output_dir,
                      in_epoch):
    """Restore model/optimizer state from '<output_dir>/checkpoint.pth'.

    Loading happens only when config.resume_checkpoint is truthy and the
    file exists.

    Returns:
        (best_perf, begin_epoch_or_step): best metric so far and the epoch
        (or step, per `in_epoch`) to resume from; (0.0, 0) when nothing was
        loaded.
    """
    best_perf = 0.0
    begin_epoch_or_step = 0
    checkpoint = os.path.join(output_dir, 'checkpoint.pth')
    if config.resume_checkpoint and os.path.exists(checkpoint):
        logging.info(
            "=> loading checkpoint '{}'".format(checkpoint)
        )
        checkpoint_dict = torch.load(checkpoint, map_location='cpu')
        key = 'epoch' if in_epoch else 'step'
        best_perf = checkpoint_dict['perf']
        begin_epoch_or_step = checkpoint_dict[key]
        model.load_state_dict(checkpoint_dict['state_dict'])
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
        logging.info(
            "=> loaded checkpoint '{}' ({}: {})".format(checkpoint, key,
                                                        begin_epoch_or_step)
        )
    return best_perf, begin_epoch_or_step
def save_checkpoint(model,
                    *,
                    model_name,
                    optimizer,
                    output_dir,
                    in_epoch,
                    epoch_or_step,
                    best_perf):
    """Write a resumable training checkpoint to '<output_dir>/checkpoint.pth'.

    The stored epoch/step is epoch_or_step + 1, i.e. the next one to run.
    Write errors are logged rather than raised.
    """
    logging.info('=> saving checkpoint to {}'.format(output_dir))
    payload = {
        'epoch' if in_epoch else 'step': epoch_or_step + 1,
        'model': model_name,
        'state_dict': model.state_dict(),
        'perf': best_perf,
        'optimizer': optimizer.state_dict(),
    }
    try:
        torch.save(payload, os.path.join(output_dir, 'checkpoint.pth'))
    except Exception:
        logging.error('=> error when saving checkpoint!')
def save_model(model, out_dir, fname):
    """Serialize *model*'s state_dict to <out_dir>/<fname>; log failures."""
    fname_full = os.path.join(out_dir, fname)
    try:
        logging.info(f'=> save model to {fname_full}')
        torch.save(model.state_dict(), fname_full)
    except Exception:
        logging.error('=> error when saving checkpoint!')
| 4,149 | 30.923077 | 100 | py |
UA-MT | UA-MT-master/code/train_LA.py | import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet import VNet
from utils.losses import dice_loss
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='vnet_supervisedonly_dp', help='model_name')
parser.add_argument('--max_iterations', type=int, default=6000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
# NOTE(review): help text below looks copy-pasted from --max_iterations;
# this flag is actually the initial learning rate.
parser.add_argument('--base_lr', type=float, default=0.01, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
args = parser.parse_args()

train_data_path = args.root_path
snapshot_path = "../model/" + args.exp + "/"  # checkpoints and logs go here

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Total batch size scales with the number of GPUs listed in --gpu.
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr

if args.deterministic:
    # Trade cuDNN autotuning speed for run-to-run reproducibility.
    cudnn.benchmark = False
    cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

patch_size = (112, 112, 80)  # (H, W, D) crop fed to the 3D V-Net
num_classes = 2
if __name__ == "__main__":
## make logger file
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git','__pycache__']))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
net = net.cuda()
db_train = LAHeart(base_dir=train_data_path,
split='train',
num=16,
transform = transforms.Compose([
RandomRotFlip(),
RandomCrop(patch_size),
ToTensor(),
]))
db_test = LAHeart(base_dir=train_data_path,
split='test',
transform = transforms.Compose([
CenterCrop(patch_size),
ToTensor()
]))
def worker_init_fn(worker_id):
random.seed(args.seed+worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
net.train()
optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
writer = SummaryWriter(snapshot_path+'/log')
logging.info("{} itertations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations//len(trainloader)+1
lr_ = base_lr
net.train()
for epoch_num in tqdm(range(max_epoch), ncols=70):
time1 = time.time()
for i_batch, sampled_batch in enumerate(trainloader):
time2 = time.time()
# print('fetch data cost {}'.format(time2-time1))
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs = net(volume_batch)
loss_seg = F.cross_entropy(outputs, label_batch)
outputs_soft = F.softmax(outputs, dim=1)
loss_seg_dice = dice_loss(outputs_soft[:, 1, :, :, :], label_batch == 1)
loss = 0.5*(loss_seg+loss_seg_dice)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_num = iter_num + 1
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
writer.add_scalar('loss/loss', loss, iter_num)
logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
if iter_num % 50 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3,0,1,2).repeat(1,3,1,1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
outputs_soft = F.softmax(outputs, 1)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label', grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label', grid_image, iter_num)
## change lr
if iter_num % 2500 == 0:
lr_ = base_lr * 0.1 ** (iter_num // 2500)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
if iter_num % 1000 == 0:
save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(net.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num > max_iterations:
break
time1 = time.time()
if iter_num > max_iterations:
break
save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations+1)+'.pth')
torch.save(net.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
writer.close()
| 6,569 | 41.115385 | 139 | py |
UA-MT | UA-MT-master/code/test_util.py | import h5py
import math
import nibabel as nib
import numpy as np
from medpy import metric
import torch
import torch.nn.functional as F
from tqdm import tqdm
def test_all_case(net, image_list, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4, save_result=True, test_save_path=None, preproc_fn=None):
    """Evaluate `net` on every h5 volume in `image_list` with sliding-window inference.

    Each file must contain 'image' and 'label' datasets.  Per-case metrics
    (dice, jaccard, 95% Hausdorff, ASD) are averaged over all cases; a case
    with an empty prediction scores (0, 0, 0, 0).  Optionally saves the
    prediction, image and ground truth as NIfTI files under `test_save_path`.

    Returns:
        np.ndarray with the mean (dice, jaccard, hd95, asd) over the list.
    """
    total_metric = 0.0
    for image_path in tqdm(image_list):
        id = image_path.split('/')[-1]
        h5f = h5py.File(image_path, 'r')
        image = h5f['image'][:]
        label = h5f['label'][:]
        if preproc_fn is not None:
            image = preproc_fn(image)
        prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)

        if np.sum(prediction)==0:
            single_metric = (0,0,0,0)
        else:
            single_metric = calculate_metric_percase(prediction, label[:])
        total_metric += np.asarray(single_metric)

        if save_result:
            # Identity affine: spacing metadata from the source scan is not kept.
            nib.save(nib.Nifti1Image(prediction.astype(np.float32), np.eye(4)), test_save_path + id + "_pred.nii.gz")
            nib.save(nib.Nifti1Image(image[:].astype(np.float32), np.eye(4)), test_save_path + id + "_img.nii.gz")
            nib.save(nib.Nifti1Image(label[:].astype(np.float32), np.eye(4)), test_save_path + id + "_gt.nii.gz")
    avg_metric = total_metric / len(image_list)
    print('average metric is {}'.format(avg_metric))

    return avg_metric
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    """Sliding-window inference over a single 3D volume.

    The volume is zero-padded up to `patch_size` if needed, then overlapping
    patches (strides `stride_xy` in-plane, `stride_z` axially) are scored
    with softmax; per-voxel probabilities are averaged over overlapping
    windows and argmax-ed into a label map.

    NOTE(review): inference runs without torch.no_grad(); wrapping the
    forward pass would reduce memory -- confirm before changing.

    Returns:
        (label_map, score_map): integer prediction with `image`'s shape and
        per-class probabilities of shape (num_classes,) + image.shape.
    """
    w, h, d = image.shape

    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # Split each axis's padding evenly between its two sides.
    wl_pad, wr_pad = w_pad//2,w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2,h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2,d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad,wr_pad),(hl_pad,hr_pad), (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww,hh,dd = image.shape

    # Number of window positions per axis; the last window is clamped to the
    # border below, so the whole volume is covered.
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    print("{}, {}, {}".format(sx, sy, sz))
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)

    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y,hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # Add batch and channel dims: (1, 1, W, H, D).
                test_patch = np.expand_dims(np.expand_dims(test_patch,axis=0),axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()
                y1 = net(test_patch)
                y = F.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0,:,:,:,:]
                # Accumulate probabilities and visit counts for averaging.
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                  = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                  = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    score_map = score_map/np.expand_dims(cnt,axis=0)
    label_map = np.argmax(score_map, axis = 0)
    if add_pad:
        # Crop the padding back off so outputs match the input shape.
        label_map = label_map[wl_pad:wl_pad+w,hl_pad:hl_pad+h,dl_pad:dl_pad+d]
        score_map = score_map[:,wl_pad:wl_pad+w,hl_pad:hl_pad+h,dl_pad:dl_pad+d]
    return label_map, score_map
def cal_dice(prediction, label, num=2):
    """Per-class Dice coefficients for integer label maps.

    Args:
        prediction: integer array of predicted class ids.
        label: integer array of ground-truth class ids (same shape).
        num: number of classes including background (class 0 is skipped).

    Returns:
        np.ndarray of shape (num - 1,) with the Dice score of classes
        1..num-1.  A class absent from both volumes yields nan (0/0).
    """
    total_dice = np.zeros(num - 1)
    for cls in range(1, num):
        # BUG FIX: `np.float` was removed in NumPy 1.24; use an explicit
        # float dtype instead.
        pred_mask = (prediction == cls).astype(np.float64)
        gt_mask = (label == cls).astype(np.float64)
        dice = 2 * np.sum(pred_mask * gt_mask) / (np.sum(pred_mask) + np.sum(gt_mask))
        total_dice[cls - 1] += dice

    return total_dice
def calculate_metric_percase(pred, gt):
    """Dice, Jaccard, 95% Hausdorff distance and ASD for one binary pair."""
    return (
        metric.binary.dc(pred, gt),
        metric.binary.jc(pred, gt),
        metric.binary.hd95(pred, gt),
        metric.binary.asd(pred, gt),
    )
| 4,596 | 38.290598 | 157 | py |
UA-MT | UA-MT-master/code/train_LA_meanteacher_certainty.py | import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet import VNet
from dataloaders import utils
from utils import ramps, losses
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='UAMT', help='model_name')
parser.add_argument('--max_iterations', type=int, default=6000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument('--labeled_bs', type=int, default=2, help='labeled_batch_size per gpu')
# NOTE(review): help text below looks copy-pasted from --max_iterations;
# this flag is actually the initial learning rate.
parser.add_argument('--base_lr', type=float, default=0.01, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
### costs (mean-teacher consistency hyper-parameters)
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str, default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float, default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float, default=40.0, help='consistency_rampup')
args = parser.parse_args()

train_data_path = args.root_path
snapshot_path = "../model/" + args.exp + "/"  # checkpoints and logs go here

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Total batch size scales with the number of GPUs listed in --gpu.
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr
labeled_bs = args.labeled_bs  # labeled samples per batch; rest are unlabeled

if args.deterministic:
    # Trade cuDNN autotuning speed for run-to-run reproducibility.
    cudnn.benchmark = False
    cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

num_classes = 2
patch_size = (112, 112, 80)  # (H, W, D) crop fed to the 3D V-Net
def get_current_consistency_weight(epoch):
    """Consistency weight with sigmoid ramp-up (https://arxiv.org/abs/1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """EMA update of `ema_model`'s parameters toward `model`'s parameters.

    Uses the true running average early on (effective alpha ramps up as
    1 - 1/(step+1)) so the EMA teacher is unbiased before the exponential
    average takes over.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param.
        # BUG FIX: the positional `add_(1 - alpha, param.data)` overload is
        # deprecated (removed in recent PyTorch); pass the scale via `alpha=`.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
if __name__ == "__main__":
    ## make logger file
    # --- run directory, code snapshot and logging setup ---
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # NOTE(review): shutil.ignore_patterns expects pattern strings as
    # positional *args; passing a single list here may not actually ignore
    # '.git' / '__pycache__' as intended — TODO confirm.
    shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git','__pycache__']))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    # Build the student (trainable) and teacher (EMA, gradient-free) networks.
    def create_model(ema=False):
        # Network definition
        net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
        model = net.cuda()
        if ema:
            # The teacher is never trained by backprop; detach its
            # parameters so no gradients are ever accumulated for them.
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    # --- LA-heart datasets: augmented training split, center-cropped test split ---
    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       transform = transforms.Compose([
                          RandomRotFlip(),
                          RandomCrop(patch_size),
                          ToTensor(),
                          ]))
    db_test = LAHeart(base_dir=train_data_path,
                      split='test',
                      transform = transforms.Compose([
                          CenterCrop(patch_size),
                          ToTensor()
                      ]))
    # Cases 0..15 are labeled, 16..79 unlabeled; every batch mixes
    # ``labeled_bs`` labeled samples with ``batch_size - labeled_bs`` unlabeled.
    labeled_idxs = list(range(16))
    unlabeled_idxs = list(range(16, 80))
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size-labeled_bs)
    def worker_init_fn(worker_id):
        # Deterministic per-worker seeding for reproducible augmentation.
        random.seed(args.seed+worker_id)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler, num_workers=4, pin_memory=True,worker_init_fn=worker_init_fn)
    model.train()
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    if args.consistency_type == 'mse':
        consistency_criterion = losses.softmax_mse_loss
    elif args.consistency_type == 'kl':
        consistency_criterion = losses.softmax_kl_loss
    else:
        assert False, args.consistency_type
    writer = SummaryWriter(snapshot_path+'/log')
    logging.info("{} itertations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations//len(trainloader)+1
    lr_ = base_lr
    model.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch in enumerate(trainloader):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # Teacher sees a noise-perturbed copy of the batch.
            noise = torch.clamp(torch.randn_like(volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = volume_batch + noise
            outputs = model(volume_batch)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
            # Monte-Carlo estimate of the teacher's predictive entropy:
            # T = 8 stochastic forward passes with fresh input noise.
            # NOTE(review): here the uncertainty is estimated on the whole
            # batch (labeled + unlabeled); the companion "unlabel" variant
            # restricts it to the unlabeled half — confirm intended.
            T = 8
            volume_batch_r = volume_batch.repeat(2, 1, 1, 1, 1)
            stride = volume_batch_r.shape[0] // 2
            preds = torch.zeros([stride * T, 2, 112, 112, 80]).cuda()
            for i in range(T//2):
                ema_inputs = volume_batch_r + torch.clamp(torch.randn_like(volume_batch_r) * 0.1, -0.2, 0.2)
                with torch.no_grad():
                    preds[2 * stride * i:2 * stride * (i + 1)] = ema_model(ema_inputs)
            preds = F.softmax(preds, dim=1)
            preds = preds.reshape(T, stride, 2, 112, 112, 80)
            preds = torch.mean(preds, dim=0) #(batch, 2, 112,112,80)
            # Voxel-wise entropy of the mean prediction (epsilon for log(0)).
            uncertainty = -1.0*torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True) #(batch, 1, 112,112,80)
            ## calculate the loss
            loss_seg = F.cross_entropy(outputs[:labeled_bs], label_batch[:labeled_bs])
            outputs_soft = F.softmax(outputs, dim=1)
            loss_seg_dice = losses.dice_loss(outputs_soft[:labeled_bs, 1, :, :, :], label_batch[:labeled_bs] == 1)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_dist = consistency_criterion(outputs, ema_output) #(batch, 2, 112,112,80)
            # Uncertainty-aware masking: only voxels whose entropy is below
            # the ramped threshold contribute to the consistency loss.
            threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num, max_iterations))*np.log(2)
            mask = (uncertainty<threshold).float()
            consistency_dist = torch.sum(mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
            consistency_loss = consistency_weight * consistency_dist
            loss = 0.5*(loss_seg+loss_seg_dice) + consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            iter_num = iter_num + 1
            # --- tensorboard scalar logging ---
            writer.add_scalar('uncertainty/mean', uncertainty[0,0].mean(), iter_num)
            writer.add_scalar('uncertainty/max', uncertainty[0,0].max(), iter_num)
            writer.add_scalar('uncertainty/min', uncertainty[0,0].min(), iter_num)
            writer.add_scalar('uncertainty/mask_per', torch.sum(mask)/mask.numel(), iter_num)
            writer.add_scalar('uncertainty/threshold', threshold, iter_num)
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('train/consistency_loss', consistency_loss, iter_num)
            writer.add_scalar('train/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('train/consistency_dist', consistency_dist, iter_num)
            logging.info('iteration %d : loss : %f cons_dist: %f, loss_weight: %f' %
                         (iter_num, loss.item(), consistency_dist.item(), consistency_weight))
            # Periodically log image slices (every 10th slice in depth 20..60)
            # for the first (labeled) and last (unlabeled) batch element.
            if iter_num % 50 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                # image = outputs_soft[0, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[0, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)
                image = uncertainty[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/uncertainty', grid_image, iter_num)
                mask2 = (uncertainty > threshold).float()
                image = mask2[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/mask', grid_image, iter_num)
                #####
                image = volume_batch[-1, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('unlabel/Image', grid_image, iter_num)
                # image = outputs_soft[-1, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[-1, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('unlabel/Predicted_label', grid_image, iter_num)
                image = label_batch[-1, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('unlabel/Groundtruth_label', grid_image, iter_num)
            ## change lr
            # Step decay: divide the learning rate by 10 every 2500 iterations.
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1 ** (iter_num // 2500)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()
        if iter_num >= max_iterations:
            break
    # Final checkpoint after the training budget is exhausted.
    save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations)+'.pth')
    torch.save(model.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
| 12,308 | 47.652174 | 129 | py |
UA-MT | UA-MT-master/code/train_LA_meanteacher_certainty_unlabel.py | import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet import VNet
from dataloaders import utils
from utils import ramps, losses
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='UAMT_unlabel', help='model_name')
parser.add_argument('--max_iterations', type=int, default=6000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument('--labeled_bs', type=int, default=2, help='labeled_batch_size per gpu')
parser.add_argument('--base_lr', type=float, default=0.01, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
### costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str, default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float, default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float, default=40.0, help='consistency_rampup')
args = parser.parse_args()
train_data_path = args.root_path
snapshot_path = "../model/" + args.exp + "/"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr
labeled_bs = args.labeled_bs
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
num_classes = 2
patch_size = (112, 112, 80)
def get_current_consistency_weight(epoch):
    """Return the weight of the consistency loss at *epoch*.

    Follows the sigmoid ramp-up schedule of Laine & Aila
    (https://arxiv.org/abs/1610.02242): grows from 0 to
    ``args.consistency`` over ``args.consistency_rampup`` epochs.
    """
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update ``ema_model`` in place as an exponential moving average of ``model``.

    Args:
        model: student network being trained by the optimizer.
        ema_model: teacher network whose parameters track an EMA of the
            student's; updated without gradients.
        alpha: target EMA decay rate.
        global_step: number of optimizer steps taken so far; warms up the
            decay so early steps follow the true running average.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # In-place EMA: ema = alpha * ema + (1 - alpha) * param.
        # Keyword form replaces the deprecated Tensor.add_(scalar, tensor)
        # overload, which was removed in recent PyTorch releases.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
if __name__ == "__main__":
    ## make logger file
    # --- run directory, code snapshot and logging setup ---
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # NOTE(review): shutil.ignore_patterns expects pattern strings as
    # positional *args; passing a single list here may not actually ignore
    # '.git' / '__pycache__' as intended — TODO confirm.
    shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git','__pycache__']))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    # Build the student (trainable) and teacher (EMA, gradient-free) networks.
    def create_model(ema=False):
        # Network definition
        net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
        model = net.cuda()
        if ema:
            # The teacher is never trained by backprop; detach its
            # parameters so no gradients are ever accumulated for them.
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    # --- LA-heart datasets: augmented training split, center-cropped test split ---
    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       transform = transforms.Compose([
                          RandomRotFlip(),
                          RandomCrop(patch_size),
                          ToTensor(),
                          ]))
    db_test = LAHeart(base_dir=train_data_path,
                      split='test',
                      transform = transforms.Compose([
                          CenterCrop(patch_size),
                          ToTensor()
                      ]))
    # Cases 0..15 are labeled, 16..79 unlabeled; every batch mixes
    # ``labeled_bs`` labeled samples with ``batch_size - labeled_bs`` unlabeled.
    labeled_idxs = list(range(16))
    unlabeled_idxs = list(range(16, 80))
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size-labeled_bs)
    def worker_init_fn(worker_id):
        # Deterministic per-worker seeding for reproducible augmentation.
        random.seed(args.seed+worker_id)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    if args.consistency_type == 'mse':
        consistency_criterion = losses.softmax_mse_loss
    elif args.consistency_type == 'kl':
        consistency_criterion = losses.softmax_kl_loss
    else:
        assert False, args.consistency_type
    writer = SummaryWriter(snapshot_path+'/log')
    logging.info("{} itertations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations//len(trainloader)+1
    lr_ = base_lr
    model.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch in enumerate(trainloader):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # Consistency is enforced only on the unlabeled half of the batch;
            # the teacher sees a noise-perturbed copy of it.
            unlabeled_volume_batch = volume_batch[labeled_bs:]
            noise = torch.clamp(torch.randn_like(unlabeled_volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = unlabeled_volume_batch + noise
            outputs = model(volume_batch)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
            # Monte-Carlo estimate of the teacher's predictive entropy on the
            # unlabeled samples: T = 8 stochastic forward passes with noise.
            T = 8
            volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1, 1)
            stride = volume_batch_r.shape[0] // 2
            preds = torch.zeros([stride * T, 2, 112, 112, 80]).cuda()
            for i in range(T//2):
                ema_inputs = volume_batch_r + torch.clamp(torch.randn_like(volume_batch_r) * 0.1, -0.2, 0.2)
                with torch.no_grad():
                    preds[2 * stride * i:2 * stride * (i + 1)] = ema_model(ema_inputs)
            preds = F.softmax(preds, dim=1)
            preds = preds.reshape(T, stride, 2, 112, 112, 80)
            preds = torch.mean(preds, dim=0) #(batch, 2, 112,112,80)
            # Voxel-wise entropy of the mean prediction (epsilon for log(0)).
            uncertainty = -1.0*torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True) #(batch, 1, 112,112,80)
            ## calculate the loss
            loss_seg = F.cross_entropy(outputs[:labeled_bs], label_batch[:labeled_bs])
            outputs_soft = F.softmax(outputs, dim=1)
            loss_seg_dice = losses.dice_loss(outputs_soft[:labeled_bs, 1, :, :, :], label_batch[:labeled_bs] == 1)
            supervised_loss = 0.5*(loss_seg+loss_seg_dice)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_dist = consistency_criterion(outputs[labeled_bs:], ema_output) #(batch, 2, 112,112,80)
            # Uncertainty-aware masking: only voxels whose entropy is below
            # the ramped threshold contribute to the consistency loss.
            threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num, max_iterations))*np.log(2)
            mask = (uncertainty<threshold).float()
            consistency_dist = torch.sum(mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
            consistency_loss = consistency_weight * consistency_dist
            loss = supervised_loss + consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            iter_num = iter_num + 1
            # --- tensorboard scalar logging ---
            writer.add_scalar('uncertainty/mean', uncertainty[0,0].mean(), iter_num)
            writer.add_scalar('uncertainty/max', uncertainty[0,0].max(), iter_num)
            writer.add_scalar('uncertainty/min', uncertainty[0,0].min(), iter_num)
            writer.add_scalar('uncertainty/mask_per', torch.sum(mask)/mask.numel(), iter_num)
            writer.add_scalar('uncertainty/threshold', threshold, iter_num)
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('train/consistency_loss', consistency_loss, iter_num)
            writer.add_scalar('train/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('train/consistency_dist', consistency_dist, iter_num)
            logging.info('iteration %d : loss : %f cons_dist: %f, loss_weight: %f' %
                         (iter_num, loss.item(), consistency_dist.item(), consistency_weight))
            # Periodically log image slices (every 10th slice in depth 20..60)
            # for the first (labeled) and last (unlabeled) batch element.
            if iter_num % 50 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                # image = outputs_soft[0, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[0, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)
                image = uncertainty[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/uncertainty', grid_image, iter_num)
                mask2 = (uncertainty > threshold).float()
                image = mask2[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/mask', grid_image, iter_num)
                #####
                image = volume_batch[-1, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('unlabel/Image', grid_image, iter_num)
                # image = outputs_soft[-1, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[-1, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('unlabel/Predicted_label', grid_image, iter_num)
                image = label_batch[-1, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('unlabel/Groundtruth_label', grid_image, iter_num)
            ## change lr
            # Step decay: divide the learning rate by 10 every 2500 iterations.
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1 ** (iter_num // 2500)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()
        if iter_num >= max_iterations:
            break
    # Final checkpoint after the training budget is exhausted.
    save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations)+'.pth')
    torch.save(model.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
| 12,471 | 47.529183 | 130 | py |
UA-MT | UA-MT-master/code/test_LA.py | import os
import argparse
import torch
from networks.vnet import VNet
from test_util import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--model', type=str, default='vnet_supervisedonly_dp', help='model_name')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
FLAGS = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
snapshot_path = "../model/"+FLAGS.model+"/"
test_save_path = "../model/prediction/"+FLAGS.model+"_post/"
if not os.path.exists(test_save_path):
os.makedirs(test_save_path)
num_classes = 2
with open(FLAGS.root_path + '/../test.list', 'r') as f:
image_list = f.readlines()
image_list = [FLAGS.root_path +item.replace('\n', '')+"/mri_norm2.h5" for item in image_list]
def test_calculate_metric(epoch_num):
    """Load the checkpoint saved at iteration *epoch_num* and evaluate it.

    Builds a VNet without dropout, restores the weights from
    ``snapshot_path/iter_<epoch_num>.pth`` and returns the average metric
    reported by ``test_all_case`` over the held-out image list.
    """
    checkpoint = os.path.join(snapshot_path, 'iter_' + str(epoch_num) + '.pth')
    model = VNet(n_channels=1, n_classes=num_classes,
                 normalization='batchnorm', has_dropout=False).cuda()
    model.load_state_dict(torch.load(checkpoint))
    print("init weight from {}".format(checkpoint))
    model.eval()
    return test_all_case(model, image_list, num_classes=num_classes,
                         patch_size=(112, 112, 80), stride_xy=18, stride_z=4,
                         save_result=True, test_save_path=test_save_path)
if __name__ == '__main__':
metric = test_calculate_metric(6000)
print(metric) | 1,576 | 36.547619 | 115 | py |
UA-MT | UA-MT-master/code/networks/vnet.py | import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """``n_stages`` repetitions of Conv3d(3x3x3) -> normalization -> ReLU.

    Args:
        n_stages: number of conv/norm/ReLU stages stacked sequentially.
        n_filters_in: channels of the input tensor.
        n_filters_out: channels produced by every stage.
        normalization: one of 'batchnorm', 'groupnorm' (16 groups),
            'instancenorm' or 'none'.
    """

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ConvBlock, self).__init__()
        ops = []
        for i in range(n_stages):
            # Only the first stage sees the block's input channel count.
            input_channel = n_filters_in if i == 0 else n_filters_out
            ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            elif normalization != 'none':
                # Raise instead of ``assert False`` so the check survives
                # ``python -O`` (a stripped assert would silently skip
                # normalization).
                raise ValueError('unknown normalization: ' + str(normalization))
            ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        """Apply the stacked stages to a 5-D tensor (N, C, D, H, W)."""
        return self.conv(x)
class ResidualConvBlock(nn.Module):
    """Residual stack of ``n_stages`` Conv3d(3x3x3)+norm stages.

    The last stage omits its ReLU; the block computes
    ``relu(conv_stack(x) + x)``, so ``n_filters_in`` must equal
    ``n_filters_out`` for the residual addition to be valid.

    Args:
        n_stages: number of conv/norm stages.
        n_filters_in: channels of the input tensor.
        n_filters_out: channels produced by every stage.
        normalization: one of 'batchnorm', 'groupnorm' (16 groups),
            'instancenorm' or 'none'.
    """

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ResidualConvBlock, self).__init__()
        ops = []
        for i in range(n_stages):
            # Only the first stage sees the block's input channel count.
            input_channel = n_filters_in if i == 0 else n_filters_out
            ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            elif normalization != 'none':
                # Raise instead of ``assert False`` so the check survives
                # ``python -O``.
                raise ValueError('unknown normalization: ' + str(normalization))
            if i != n_stages-1:
                # No activation after the last stage: ReLU is applied
                # after the residual addition instead.
                ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return ``relu(conv(x) + x)`` for a 5-D tensor (N, C, D, H, W)."""
        x = (self.conv(x) + x)
        x = self.relu(x)
        return x
class DownsamplingConvBlock(nn.Module):
    """Strided Conv3d (kernel size == stride) followed by norm and ReLU.

    Because kernel size equals stride this is a non-overlapping
    (patch-wise) downsampling convolution: spatial dims shrink by
    ``stride``.

    Args:
        n_filters_in: channels of the input tensor.
        n_filters_out: channels of the output tensor.
        stride: downsampling factor (also the kernel size).
        normalization: one of 'batchnorm', 'groupnorm' (16 groups),
            'instancenorm' or 'none'.
    """

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(DownsamplingConvBlock, self).__init__()
        # The conv layer is identical in every branch; build it once
        # instead of duplicating the line in both arms of the if.
        ops = [nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            # Raise instead of ``assert False`` so the check survives -O.
            raise ValueError('unknown normalization: ' + str(normalization))
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        """Downsample a 5-D tensor (N, C, D, H, W) by ``stride``."""
        return self.conv(x)
class UpsamplingDeconvBlock(nn.Module):
    """Transposed Conv3d (kernel size == stride) followed by norm and ReLU.

    Exact inverse geometry of :class:`DownsamplingConvBlock`: spatial
    dims grow by ``stride``.

    Args:
        n_filters_in: channels of the input tensor.
        n_filters_out: channels of the output tensor.
        stride: upsampling factor (also the kernel size).
        normalization: one of 'batchnorm', 'groupnorm' (16 groups),
            'instancenorm' or 'none'.
    """

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(UpsamplingDeconvBlock, self).__init__()
        # Build the deconv once instead of duplicating it per branch.
        ops = [nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            # Raise instead of ``assert False`` so the check survives -O.
            raise ValueError('unknown normalization: ' + str(normalization))
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        """Upsample a 5-D tensor (N, C, D, H, W) by ``stride``."""
        return self.conv(x)
class Upsampling(nn.Module):
    """Trilinear upsampling by ``stride`` then Conv3d(3x3x3) + norm + ReLU.

    Interpolate-then-convolve alternative to transposed convolution;
    spatial dims grow by ``stride``, channels map to ``n_filters_out``.

    Args:
        n_filters_in: channels of the input tensor.
        n_filters_out: channels of the output tensor.
        stride: upsampling scale factor.
        normalization: one of 'batchnorm', 'groupnorm' (16 groups),
            'instancenorm' or 'none'.
    """

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(Upsampling, self).__init__()
        ops = [
            nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False),
            nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1),
        ]
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            # Raise instead of ``assert False`` so the check survives -O.
            raise ValueError('unknown normalization: ' + str(normalization))
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        """Upsample a 5-D tensor (N, C, D, H, W) by ``stride``."""
        return self.conv(x)
class VNet(nn.Module):
    """3-D V-Net: 4-level encoder/decoder with additive skip connections.

    Channel widths double at each downsampling (n_filters .. 16*n_filters);
    the decoder mirrors the encoder with transposed-conv upsampling and
    adds (not concatenates) the matching encoder feature map.
    """

    def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
        """Args:
            n_channels: input image channels.
            n_classes: output segmentation channels (logits, no softmax).
            n_filters: base channel width of the first level.
            normalization: normalization mode passed to every block.
            has_dropout: if True, apply Dropout3d(0.5) at the bottleneck
                and before the output conv.
        """
        super(VNet, self).__init__()
        self.has_dropout = has_dropout
        # --- encoder: ConvBlock stages with strided-conv downsampling ---
        self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
        self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
        self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
        self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
        self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
        # --- bottleneck ---
        self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
        # --- decoder: transposed-conv upsampling with additive skips ---
        self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
        self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
        self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
        self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
        self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization)
        # 1x1x1 projection to class logits.
        self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
        self.dropout = nn.Dropout3d(p=0.5, inplace=False)
        # self.__init_weight()

    def encoder(self, input):
        """Run the contracting path; return the five per-level feature maps."""
        x1 = self.block_one(input)
        x1_dw = self.block_one_dw(x1)
        x2 = self.block_two(x1_dw)
        x2_dw = self.block_two_dw(x2)
        x3 = self.block_three(x2_dw)
        x3_dw = self.block_three_dw(x3)
        x4 = self.block_four(x3_dw)
        x4_dw = self.block_four_dw(x4)
        x5 = self.block_five(x4_dw)
        # x5 = F.dropout3d(x5, p=0.5, training=True)
        if self.has_dropout:
            x5 = self.dropout(x5)
        res = [x1, x2, x3, x4, x5]
        return res

    def decoder(self, features):
        """Run the expanding path on encoder features; return class logits."""
        x1 = features[0]
        x2 = features[1]
        x3 = features[2]
        x4 = features[3]
        x5 = features[4]
        x5_up = self.block_five_up(x5)
        # Additive skip connections (V-Net style), not concatenation.
        x5_up = x5_up + x4
        x6 = self.block_six(x5_up)
        x6_up = self.block_six_up(x6)
        x6_up = x6_up + x3
        x7 = self.block_seven(x6_up)
        x7_up = self.block_seven_up(x7)
        x7_up = x7_up + x2
        x8 = self.block_eight(x7_up)
        x8_up = self.block_eight_up(x8)
        x8_up = x8_up + x1
        x9 = self.block_nine(x8_up)
        # x9 = F.dropout3d(x9, p=0.5, training=True)
        if self.has_dropout:
            x9 = self.dropout(x9)
        out = self.out_conv(x9)
        return out

    def forward(self, input, turnoff_drop=False):
        """Full forward pass.

        ``turnoff_drop`` temporarily disables dropout for this call by
        toggling ``self.has_dropout`` (note: mutates instance state for
        the duration of the call).
        """
        if turnoff_drop:
            has_dropout = self.has_dropout
            self.has_dropout = False
        features = self.encoder(input)
        out = self.decoder(features)
        if turnoff_drop:
            self.has_dropout = has_dropout
        return out

    # def __init_weight(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv3d):
    #             torch.nn.init.kaiming_normal_(m.weight)
    #         elif isinstance(m, nn.BatchNorm3d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
| 9,073 | 35.58871 | 110 | py |
UA-MT | UA-MT-master/code/dataloaders/utils.py | import os
import torch
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from skimage import measure
import scipy.ndimage as nd
def recursive_glob(rootdir='.', suffix=''):
    """Return the paths of all files under *rootdir* whose name ends with *suffix*.

    :param rootdir: root directory of the recursive search
    :param suffix: filename suffix to match (e.g. '.png')
    """
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        for fname in filenames:
            if fname.endswith(suffix):
                matches.append(os.path.join(dirpath, fname))
    return matches
def get_cityscapes_labels():
    """Return the 19 Cityscapes class colours as a (19, 3) integer array."""
    colours = [
        # [0, 0, 0],
        [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
        [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
        [107, 142, 35], [152, 251, 152], [0, 130, 180], [220, 20, 60],
        [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
        [0, 80, 100], [0, 0, 230], [119, 11, 32],
    ]
    return np.array(colours)
def get_pascal_labels():
    """Load the mapping that associates pascal classes with label colors

    Returns:
        np.ndarray with dimensions (21, 3)
    """
    colours = [
        [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
        [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
        [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
        [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
        [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
        [0, 64, 128],
    ]
    return np.asarray(colours)
def encode_segmap(mask):
    """Encode segmentation label images as pascal classes

    Args:
        mask (np.ndarray): raw segmentation label image of dimension
          (M, N, 3), in which the Pascal classes are encoded as colours.

    Returns:
        (np.ndarray): class map with dimensions (M,N), where the value at
        a given location is the integer denoting the class index.
    """
    mask = mask.astype(int)
    label_mask = np.zeros(mask.shape[:2], dtype=np.int16)
    for class_idx, colour in enumerate(get_pascal_labels()):
        # Pixels whose RGB triple matches this class colour get its index.
        selected = np.all(mask == colour, axis=-1)
        label_mask[np.where(selected)[:2]] = class_idx
    return label_mask.astype(int)
def decode_seg_map_sequence(label_masks, dataset='pascal'):
    """Decode a batch of class-index maps into a (B, 3, H, W) colour tensor."""
    decoded = [decode_segmap(label_mask, dataset) for label_mask in label_masks]
    # (B, H, W, 3) -> (B, 3, H, W) to match torch's channel-first layout.
    stacked = np.array(decoded).transpose([0, 3, 1, 2])
    return torch.from_numpy(stacked)
def decode_segmap(label_mask, dataset, plot=False):
    """Decode segmentation class labels into a color image

    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
          the class label at each spatial location.
        dataset (str): 'pascal' or 'cityscapes' colour palette.
        plot (bool, optional): whether to show the resulting color image
          in a figure (in that case nothing is returned).

    Returns:
        (np.ndarray, optional): the resulting decoded color image.
    """
    if dataset == 'pascal':
        n_classes = 21
        label_colours = get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes = 19
        label_colours = get_cityscapes_labels()
    else:
        raise NotImplementedError
    # Start each channel as a copy of the label map so out-of-range labels
    # keep their raw value (matching the original behaviour), then paint
    # every known class with its palette colour.
    r = label_mask.copy()
    g = label_mask.copy()
    b = label_mask.copy()
    for class_idx in range(n_classes):
        selected = label_mask == class_idx
        r[selected] = label_colours[class_idx, 0]
        g[selected] = label_colours[class_idx, 1]
        b[selected] = label_colours[class_idx, 2]
    rgb = np.stack([r, g, b], axis=-1) / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def generate_param_report(logfile, param):
    """Write ``str(param)`` to *logfile*, overwriting any previous content.

    Args:
        logfile: path of the report file to (re)create.
        param: any object; its ``str()`` representation is written.
    """
    # Context manager guarantees the handle is closed even if write raises.
    with open(logfile, 'w') as log_file:
        log_file.write(str(param))
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """2-D cross-entropy over (N, C, H, W) logits.

    Args:
        logit: raw class scores of shape (N, C, H, W).
        target: label map; a singleton channel dim (N, 1, H, W) is squeezed.
        ignore_index: label value excluded from the loss.
        weight: optional per-class weights (sequence convertible by numpy).
        size_average: divide the summed loss by H*W.
        batch_average: divide the summed loss by N.
    """
    n, c, h, w = logit.size()
    # logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)
    # ``reduction='sum'`` replaces the deprecated ``size_average=False``
    # keyword, which was removed from nn.CrossEntropyLoss in modern PyTorch;
    # the manual divisions below reproduce the old averaging semantics.
    if weight is None:
        criterion = nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='sum')
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(),
                                        ignore_index=ignore_index, reduction='sum')
    loss = criterion(logit, target.long())
    if size_average:
        loss /= (h * w)
    if batch_average:
        loss /= n
    return loss
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
    """Polynomial learning-rate decay: ``base_lr * (1 - iter_/max_iter) ** power``."""
    decay = (1 - float(iter_) / max_iter) ** power
    return base_lr * decay
def get_iou(pred, gt, n_classes=21):
    """Sum over the batch of each image's mean per-class IoU.

    Args:
        pred: integer prediction tensor, iterated per-sample along dim 0
            (assumed (N, H, W) — TODO confirm against callers).
        gt: ground-truth tensor of the same shape.
        n_classes: number of classes considered.

    Classes absent from both prediction and ground truth (union == 0) are
    excluded from an image's mean.
    """
    total_iou = 0.0
    for pred_map, gt_map in zip(pred, gt):
        per_class_iou = []
        for cls in range(n_classes):
            # Cast boolean masks to integers before adding: on modern
            # PyTorch, bool + bool is a logical OR, so the original
            # ``match == 2`` intersection test could never fire.
            match = (pred_map == cls).long() + (gt_map == cls).long()
            union = torch.sum(match > 0).item()
            if union == 0:
                continue  # class absent everywhere: skip, as before
            intersect = torch.sum(match == 2).item()
            per_class_iou.append(intersect / union)
        total_iou += sum(per_class_iou) / len(per_class_iou)
    return total_iou
def get_dice(pred, gt):
    """Sum over the batch of per-image smoothed Dice scores (prints each)."""
    total_dice = 0.0
    pred = pred.long()
    gt = gt.long()
    for pred_map, gt_map in zip(pred, gt):
        overlap = torch.sum(pred_map * gt_map).item()
        # +1 smoothing in the denominator avoids division by zero.
        denom = (1.0 + torch.sum(pred_map ** 2) + torch.sum(gt_map ** 2)).item()
        dice = 2.0 * overlap / denom
        print(dice)
        total_dice += dice
    return total_dice
def get_mc_dice(pred, gt, num=2):
    """Per-class summed Dice scores for multi-class label maps.

    Args:
        pred: integer prediction tensor, iterated per-sample along dim 0.
        gt: ground-truth tensor of the same shape.
        num: total number of classes including background; Dice is
            computed for classes 1 .. num-1.

    Returns:
        np.ndarray of length ``num - 1`` with the batch-summed Dice per
        foreground class.
    """
    # num is the total number of classes, include the background
    total_dice = np.zeros(num - 1)
    pred = pred.long()
    gt = gt.long()
    for i in range(len(pred)):
        for j in range(1, num):
            # Cast boolean masks to integers so elementwise product and
            # power are well-defined on every PyTorch version (consistent
            # with get_dice, which also works on long tensors).
            pred_tmp = (pred[i] == j).long()
            gt_tmp = (gt[i] == j).long()
            dice = 2.0 * torch.sum(pred_tmp * gt_tmp).item() / (1.0 + torch.sum(pred_tmp ** 2) + torch.sum(gt_tmp ** 2)).item()
            total_dice[j - 1] += dice
    return total_dice
def post_processing(prediction):
    """Clean a binary segmentation: fill holes, then drop connected
    components smaller than 20% of the total foreground volume."""
    prediction = nd.binary_fill_holes(prediction)
    label_cc, num_cc = measure.label(prediction, return_num=True)
    total_cc = np.sum(prediction)
    measure.regionprops(label_cc)
    # Component labels start at 1 (0 is background).
    for cc_id in range(1, num_cc + 1):
        component = (label_cc == cc_id)
        if np.sum(component) / total_cc < 0.2:
            prediction[component] = 0
    return prediction
| 6,729 | 30.302326 | 144 | py |
UA-MT | UA-MT-master/code/dataloaders/la_heart.py | import os
import torch
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
import itertools
from torch.utils.data.sampler import Sampler
class LAHeart(Dataset):
    """ LA Dataset """

    def __init__(self, base_dir=None, split='train', num=None, transform=None):
        """Args:
            base_dir: folder containing one sub-folder per scan.
            split: 'train' or 'test'; selects the case list file.
            num: optional cap on the number of cases used.
            transform: optional callable applied to each sample dict.
        """
        self._base_dir = base_dir
        self.transform = transform
        self.sample_list = []
        if split == 'train':
            with open(self._base_dir + '/../train.list', 'r') as f:
                self.image_list = f.readlines()
        elif split == 'test':
            with open(self._base_dir + '/../test.list', 'r') as f:
                self.image_list = f.readlines()
        self.image_list = [name.replace('\n', '') for name in self.image_list]
        if num is not None:
            self.image_list = self.image_list[:num]
        print("total {} samples".format(len(self.image_list)))

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        case_name = self.image_list[idx]
        h5f = h5py.File(self._base_dir + "/" + case_name + "/mri_norm2.h5", 'r')
        sample = {'image': h5f['image'][:], 'label': h5f['label'][:]}
        if self.transform:
            sample = self.transform(sample)
        return sample
class CenterCrop(object):
    """Deterministically crop the central ``output_size`` region of a sample.

    If the volume is not strictly larger than the target size along every
    axis, it is first zero-padded symmetrically (with a 3-voxel margin).
    """
    def __init__(self, output_size):
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        out_w, out_h, out_d = self.output_size
        # Pad the whole volume when any axis is too small for the crop.
        needs_pad = (label.shape[0] <= out_w or
                     label.shape[1] <= out_h or
                     label.shape[2] <= out_d)
        if needs_pad:
            pads = [max((size - have) // 2 + 3, 0)
                    for size, have in zip(self.output_size, label.shape)]
            pad_spec = [(p, p) for p in pads]
            image = np.pad(image, pad_spec, mode='constant', constant_values=0)
            label = np.pad(label, pad_spec, mode='constant', constant_values=0)
        w, h, d = image.shape
        # Anchor the crop at the (rounded) center of the padded volume.
        w0 = int(round((w - out_w) / 2.))
        h0 = int(round((h - out_h) / 2.))
        d0 = int(round((d - out_d) / 2.))
        window = (slice(w0, w0 + out_w),
                  slice(h0, h0 + out_h),
                  slice(d0, d0 + out_d))
        return {'image': image[window], 'label': label[window]}
class RandomCrop(object):
    """
    Crop a random ``output_size`` window out of the sample volume.

    Args:
        output_size (tuple): desired (W, H, D) output size
    """
    def __init__(self, output_size):
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        out_w, out_h, out_d = self.output_size
        # Zero-pad first whenever the volume cannot contain the crop window.
        if label.shape[0] <= out_w or label.shape[1] <= out_h or label.shape[2] <= out_d:
            pw = max((out_w - label.shape[0]) // 2 + 3, 0)
            ph = max((out_h - label.shape[1]) // 2 + 3, 0)
            pd = max((out_d - label.shape[2]) // 2 + 3, 0)
            pad_spec = [(pw, pw), (ph, ph), (pd, pd)]
            image = np.pad(image, pad_spec, mode='constant', constant_values=0)
            label = np.pad(label, pad_spec, mode='constant', constant_values=0)
        w, h, d = image.shape
        # Draw the crop origin uniformly at random (RNG call order unchanged).
        w1 = np.random.randint(0, w - out_w)
        h1 = np.random.randint(0, h - out_h)
        d1 = np.random.randint(0, d - out_d)
        window = (slice(w1, w1 + out_w),
                  slice(h1, h1 + out_h),
                  slice(d1, d1 + out_d))
        return {'image': image[window], 'label': label[window]}
class RandomRotFlip(object):
    """
    Randomly rotate the sample by a multiple of 90 degrees, then flip it
    along a random axis; image and label get the identical transform.
    """
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # Draw the number of quarter turns, then the flip axis
        # (RNG call order kept identical to the original implementation).
        turns = np.random.randint(0, 4)
        image = np.rot90(image, turns)
        label = np.rot90(label, turns)
        flip_axis = np.random.randint(0, 2)
        # .copy() materialises a contiguous array (np.flip returns a view).
        image = np.flip(image, axis=flip_axis).copy()
        label = np.flip(label, axis=flip_axis).copy()
        return {'image': image, 'label': label}
class RandomNoise(object):
    """Add clipped Gaussian noise (mean ``mu``, std ``sigma``) to the image.

    The label passes through untouched; noise is clipped to +/- 2 sigma
    before the mean shift is applied.
    """
    def __init__(self, mu=0, sigma=0.1):
        self.mu = mu
        self.sigma = sigma
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        shape = (image.shape[0], image.shape[1], image.shape[2])
        # Draw N(0, sigma) noise, clip it, then shift by mu.
        noise = np.clip(self.sigma * np.random.randn(*shape),
                        -2 * self.sigma, 2 * self.sigma)
        noise = noise + self.mu
        return {'image': image + noise, 'label': label}
class CreateOnehotLabel(object):
    """Attach a one-hot encoded copy of the label volume to the sample.

    Adds key 'onehot_label' of shape (num_classes, *label.shape), float32.
    """
    def __init__(self, num_classes):
        self.num_classes = num_classes
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # One binary float32 mask per class, stacked on a new leading axis.
        planes = [(label == cls).astype(np.float32)
                  for cls in range(self.num_classes)]
        onehot_label = np.stack(planes, axis=0)
        return {'image': image, 'label': label, 'onehot_label': onehot_label}
class ToTensor(object):
    """Convert ndarrays in sample to Tensors; image gains a channel axis."""
    def __call__(self, sample):
        image = sample['image']
        # (W, H, D) -> float32 (1, W, H, D): add a singleton channel dim.
        image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2]).astype(np.float32)
        result = {
            'image': torch.from_numpy(image),
            'label': torch.from_numpy(sample['label']).long(),
        }
        if 'onehot_label' in sample:
            result['onehot_label'] = torch.from_numpy(sample['onehot_label']).long()
        return result
class TwoStreamBatchSampler(Sampler):
    """Iterate two sets of indices
    An 'epoch' is one iteration through the primary indices.
    During the epoch, the secondary indices are iterated through
    as many times as needed.
    Each yielded batch concatenates ``batch_size - secondary_batch_size``
    primary indices with ``secondary_batch_size`` secondary indices.
    """
    def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
        self.primary_indices = primary_indices
        self.secondary_indices = secondary_indices
        self.secondary_batch_size = secondary_batch_size
        self.primary_batch_size = batch_size - secondary_batch_size
        # Both streams must be able to fill at least one batch.
        assert len(self.primary_indices) >= self.primary_batch_size > 0
        assert len(self.secondary_indices) >= self.secondary_batch_size > 0
    def __iter__(self):
        # Primary: one shuffled pass; secondary: endless reshuffled stream.
        primary_iter = iterate_once(self.primary_indices)
        secondary_iter = iterate_eternally(self.secondary_indices)
        # grouper() drops any incomplete trailing batch from either stream.
        return (
            primary_batch + secondary_batch
            for (primary_batch, secondary_batch)
            in zip(grouper(primary_iter, self.primary_batch_size),
                   grouper(secondary_iter, self.secondary_batch_size))
        )
    def __len__(self):
        return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
    """Return a single random permutation of *iterable* (one shuffled pass)."""
    shuffled = np.random.permutation(iterable)
    return shuffled
def iterate_eternally(indices):
    """Yield elements of *indices* forever, reshuffling on every full pass."""
    def endless():
        while True:
            for idx in np.random.permutation(indices):
                yield idx
    return endless()
def grouper(iterable, n):
    """Collect data into fixed-length tuples, dropping any incomplete tail.

    grouper('ABCDEFG', 3) --> ('A','B','C') ('D','E','F')
    """
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if len(chunk) < n:
            return
        yield chunk
| 7,965 | 37.483092 | 130 | py |
UA-MT | UA-MT-master/code/utils/losses.py | import torch
from torch.nn import functional as F
import numpy as np
def dice_loss(score, target):
    """Soft Dice loss: 1 - (2|S·T| + eps) / (|S|^2 + |T|^2 + eps).

    ``score`` may be a soft prediction; ``target`` is cast to float.
    The eps smoothing term avoids division by zero on empty masks.
    """
    target = target.float()
    eps = 1e-5
    overlap = torch.sum(score * target)
    denom = torch.sum(score * score) + torch.sum(target * target)
    dice = (2 * overlap + eps) / (denom + eps)
    return 1 - dice
def dice_loss1(score, target):
    """Soft Dice loss variant with plain (not squared) sums in the denominator.

    Computes 1 - (2|S·T| + eps) / (sum(S) + sum(T) + eps).
    """
    target = target.float()
    eps = 1e-5
    overlap = torch.sum(score * target)
    denom = torch.sum(score) + torch.sum(target) + eps
    return 1 - (2 * overlap + eps) / denom
def entropy_loss(p,C=2):
    """Mean per-voxel entropy of a softmax distribution, normalized to [0, 1].

    Args:
        p: probability tensor of shape N*C*W*H*D (channel dim = 1).
        C: number of classes; log(C) normalizes the entropy to at most 1.

    Returns:
        Scalar tensor: mean normalized entropy over all voxels.

    NOTE: builds the log(C) normalizer with ``.cuda()``; requires a GPU.
    """
    # -sum(p log p) over the channel axis; 1e-6 guards log(0).
    y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1)/torch.tensor(np.log(C)).cuda()
    ent = torch.mean(y1)
    return ent
def softmax_dice_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns the mean per-class Dice loss.

    Applies :func:`dice_loss1` to each channel of the softmaxed logits and
    averages over the channels. (The previous docstring wrongly described
    this as an MSE loss.)

    Note:
    - Neither side is detached, so gradients flow to both inputs.
    """
    assert input_logits.size() == target_logits.size()
    input_softmax = F.softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)
    n = input_logits.shape[1]
    dice = 0
    for i in range(0, n):
        dice += dice_loss1(input_softmax[:, i], target_softmax[:, i])
    mean_dice = dice / n
    return mean_dice
def entropy_loss_map(p, C=2):
    """Per-voxel normalized entropy map of a softmax distribution ``p``.

    Unlike :func:`entropy_loss`, keeps the spatial dimensions (the channel
    axis is reduced with ``keepdim=True``) instead of averaging to a scalar.

    NOTE: builds the log(C) normalizer with ``.cuda()``; requires a GPU.
    """
    ent = -1*torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True)/torch.tensor(np.log(C)).cuda()
    return ent
def softmax_mse_loss(input_logits, target_logits):
    """Element-wise squared difference between the two softmax distributions.

    Note:
    - Returns the un-reduced, element-wise tensor; sum or average it
      yourself as appropriate.
    - Neither side is detached, so gradients flow to both inputs.
    """
    assert input_logits.size() == target_logits.size()
    diff = F.softmax(input_logits, dim=1) - F.softmax(target_logits, dim=1)
    return diff ** 2
def softmax_kl_loss(input_logits, target_logits):
    """Element-wise KL divergence KL(softmax(target) || softmax(input)).

    Note:
    - Returns the un-reduced tensor (``reduction='none'``); reduce it
      afterwards as needed.
    """
    assert input_logits.size() == target_logits.size()
    log_q = F.log_softmax(input_logits, dim=1)
    p = F.softmax(target_logits, dim=1)
    return F.kl_div(log_q, p, reduction='none')
def symmetric_mse_loss(input1, input2):
    """Like F.mse_loss but sends gradients to both directions
    Note:
    - Returns the MEAN squared difference over all elements (the previous
      docstring claimed a sum; the code uses ``torch.mean``).
    - Sends gradients to both input1 and input2 (neither side is detached).
    """
    assert input1.size() == input2.size()
    return torch.mean((input1 - input2)**2)
| 3,141 | 31.061224 | 99 | py |
UA-MT | UA-MT-master/code/utils/util.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
import networks
def load_model(path):
    """Load a checkpointed model from *path*, stripping DataParallel prefixes.

    Args:
        path: filesystem path to a ``torch.save`` checkpoint containing
            'arch' (constructor name) and 'state_dict'.

    Returns:
        The reconstructed model, or ``None`` if *path* does not exist.
    """
    if os.path.isfile(path):
        print("=> loading checkpoint '{}'".format(path))
        checkpoint = torch.load(path)
        # size of the top layer
        N = checkpoint['state_dict']['top_layer.bias'].size()
        # build skeleton of the model
        sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()
        # Fixed: the original referenced an undefined name ``models``; the
        # model constructors live in the imported ``networks`` module.
        model = networks.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))
        # Strip the '.module' infix that DataParallel adds to state-dict keys.
        def rename_key(key):
            if not 'module' in key:
                return key
            return ''.join(key.split('.module'))
        checkpoint['state_dict'] = {rename_key(key): val
                                    for key, val
                                    in checkpoint['state_dict'].items()}
        # load weights
        model.load_state_dict(checkpoint['state_dict'])
        print("Loaded")
    else:
        model = None
        print("=> no checkpoint found at '{}'".format(path))
    return model
class UnifLabelSampler(Sampler):
    """Samples elements uniformly across pseudolabel clusters.

    Args:
        N (int): number of indices the iterator yields.
        images_lists: mapping of pseudolabel -> list of data indices with
            that pseudolabel.
    """
    def __init__(self, N, images_lists):
        self.N = N
        self.images_lists = images_lists
        self.indexes = self.generate_indexes_epoch()

    def generate_indexes_epoch(self):
        """Draw an (almost) equal number of indices from every cluster."""
        n_clusters = len(self.images_lists)
        per_cluster = int(self.N / n_clusters) + 1
        res = np.zeros(per_cluster * n_clusters)
        for i in range(n_clusters):
            cluster = self.images_lists[i]
            # Sample with replacement only when the cluster is too small.
            drawn = np.random.choice(
                cluster,
                per_cluster,
                replace=(len(cluster) <= per_cluster)
            )
            res[i * per_cluster: (i + 1) * per_cluster] = drawn
        np.random.shuffle(res)
        return res[:self.N].astype('int')

    def __iter__(self):
        return iter(self.indexes)

    def __len__(self):
        return self.N
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def learning_rate_decay(optimizer, t, lr_0):
    """Set every param group's lr to lr_0 / sqrt(1 + lr_0 * weight_decay * t)."""
    for group in optimizer.param_groups:
        decayed = lr_0 / np.sqrt(1 + lr_0 * group['weight_decay'] * t)
        group['lr'] = decayed
class Logger():
    """Accumulates one record per call and pickles the full history to disk.

    Methods:
        - log() append a record and rewrite the pickle file
    """
    def __init__(self, path):
        self.path = path
        self.data = []

    def log(self, train_point):
        """Append *train_point* and persist the whole history to ``self.path``."""
        self.data.append(train_point)
        target = os.path.join(self.path)
        with open(target, 'wb') as handle:
            # Highest available pickle protocol (-1).
            pickle.dump(self.data, handle, -1)
| 3,449 | 27.75 | 85 | py |
pymdptoolbox | pymdptoolbox-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Python Markov Decision Process Toolbox documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 26 16:15:31 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# Read the Docs sets READTHEDOCS=True in the build environment; detect it so
# heavy compiled dependencies can be stubbed out below.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# When on read the docs stub out the modules that cannot be loaded
if on_rtd:
    try:
        from unittest.mock import MagicMock
    except ImportError:
        # Python 2 fallback: the third-party ``mock`` package.
        from mock import MagicMock
    class Mock(MagicMock):
        @classmethod
        def __getattr__(cls, name):
            # Every attribute access yields another Mock, so arbitrary
            # submodule/attribute chains resolve while autodoc imports code.
            return Mock()
    MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'cvxopt']
    sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc'
]
if on_rtd:
extensions.extend([
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon',
'sphinx.ext.viewcode',
])
else:
extensions.extend([
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
])
# Napoleon settings
napoleon_google_docstrings = False
napoleon_use_param = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Python Markov Decision Process Toolbox'
copyright = '2015, Steven A W Cordwell'
author = 'Steven A W Cordwell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0-b4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonMarkovDecisionProcessToolboxdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PythonMarkovDecisionProcessToolbox.tex', 'Python Markov Decision Process Toolbox Documentation',
'Steven A W Cordwell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythonmarkovdecisionprocesstoolbox', 'Python Markov Decision Process Toolbox Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonMarkovDecisionProcessToolbox', 'Python Markov Decision Process Toolbox Documentation',
author, 'PythonMarkovDecisionProcessToolbox', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 10,423 | 31.073846 | 112 | py |
OpenAttack | OpenAttack-master/examples/adversarial_training.py | '''
This example code shows how to conduct adversarial training to improve the robustness of a sentiment analysis model.
The most important part is the "attack()" function, in which adversarial examples are easily generated with an API "attack_eval.generate_adv()"
'''
import OpenAttack
import torch
import datasets
import tqdm
from OpenAttack.text_process.tokenizer import PunctTokenizer
tokenizer = PunctTokenizer()
class MyClassifier(OpenAttack.Classifier):
    """Adapts the trained pytorch model + vocab to OpenAttack's Classifier API."""
    def __init__(self, model, vocab) -> None:
        # model: trained sentiment model; vocab: token -> id mapping.
        self.model = model
        self.vocab = vocab
    def get_prob(self, sentences):
        """Return class-probability rows (numpy array) for raw sentences."""
        with torch.no_grad():
            # Tokenize, map to a padded id matrix, then run the model.
            token_ids = make_batch_tokens([
                tokenizer.tokenize(sent, pos_tagging=False) for sent in sentences
            ], self.vocab)
            token_ids = torch.LongTensor(token_ids)
            return self.model(token_ids).cpu().numpy()
    def get_pred(self, sentences):
        """Return the argmax class id for each sentence."""
        return self.get_prob(sentences).argmax(axis=1)
# Design a feedforward neural network as the the victim sentiment analysis model
def make_model(vocab_size):
    """Build a minimal bag-of-embeddings sentiment classifier.

    see `tutorial - pytorch <https://pytorch.org/tutorials/beginner/text_sentiment_ngrams_tutorial.html#define-the-model>`__

    Args:
        vocab_size: number of rows in the embedding table.

    Returns:
        A fresh module mapping LongTensor token ids of shape (batch, seq)
        to class probabilities of shape (batch, 2).
    """
    import torch.nn as nn

    class TextSentiment(nn.Module):
        def __init__(self, vocab_size, embed_dim=32, num_class=2):
            super().__init__()
            # EmbeddingBag averages the token embeddings of each sequence.
            self.embedding = nn.EmbeddingBag(vocab_size, embed_dim)
            self.fc = nn.Linear(embed_dim, num_class)
            self.softmax = nn.Softmax(dim=1)
            self.init_weights()

        def init_weights(self):
            # Uniform init in [-0.5, 0.5]; zero bias.
            bound = 0.5
            self.embedding.weight.data.uniform_(-bound, bound)
            self.fc.weight.data.uniform_(-bound, bound)
            self.fc.bias.data.zero_()

        def forward(self, text):
            pooled = self.embedding(text, None)
            return self.softmax(self.fc(pooled))

    return TextSentiment(vocab_size)
def dataset_mapping(x):
    """Map an SST record to {x: sentence, y: binary label, tokens: word list}."""
    return {
        "x": x["sentence"],
        "y": 1 if x["label"] > 0.5 else 0,
        "tokens": tokenizer.tokenize(x["sentence"], pos_tagging=False)
    }
# Choose SST-2 as the dataset
def prepare_data():
    """Load the SST dataset and build a word->id vocabulary over all splits.

    Returns:
        (train, validation, test, vocab): the three mapped splits plus a
        dict mapping every seen token (and "<UNK>"/"<PAD>") to an id.
    """
    vocab = {
        "<UNK>": 0,
        "<PAD>": 1
    }
    dataset = datasets.load_dataset("sst").map(function=dataset_mapping).remove_columns(["label", "sentence", "tree"])
    # Assign ids in order of first appearance across every split.
    for dataset_name in ["train", "validation", "test"]:
        for inst in dataset[dataset_name]:
            for token in inst["tokens"]:
                if token not in vocab:
                    vocab[token] = len(vocab)
    return dataset["train"], dataset["validation"], dataset["test"], vocab
def make_batch_tokens(tokens_list, vocab):
    """Map token sequences to id sequences, right-padded to a uniform length.

    Unknown tokens map to ``vocab["<UNK>"]``; padding uses ``vocab["<PAD>"]``.
    """
    unk = vocab["<UNK>"]
    pad = vocab["<PAD>"]
    max_len = max(len(tokens) for tokens in tokens_list)
    batch = []
    for tokens in tokens_list:
        ids = [vocab.get(token, unk) for token in tokens]
        ids.extend([pad] * (max_len - len(ids)))
        batch.append(ids)
    return batch
# Batch data
def make_batch(data, vocab):
    """Convert a dataset slice into padded (input ids, label) LongTensors."""
    batch_x = make_batch_tokens(data["tokens"], vocab)
    batch_y = data["y"]
    return torch.LongTensor(batch_x), torch.LongTensor(batch_y)
# Train the victim model for one epoch
def train_epoch(model, dataset, vocab, batch_size=128, learning_rate=5e-3):
    """Run one shuffled training epoch; return total loss / dataset size.

    Uses Adam with NLLLoss applied to the log of the model's softmax output.
    """
    dataset = dataset.shuffle()
    model.train()
    criterion = torch.nn.NLLLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    avg_loss = 0
    for start in range(0, len(dataset), batch_size):
        train_x, train_y = make_batch(dataset[start: start + batch_size], vocab)
        pred = model(train_x)
        # The model outputs probabilities, so take log() for NLLLoss.
        loss = criterion(pred.log(), train_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        avg_loss += loss.item()
    return avg_loss / len(dataset)
def eval_classifier_acc(dataset, victim):
    """Fraction of dataset instances the victim classifier labels correctly."""
    hits = sum(
        victim.get_pred([inst["x"]])[0] == inst["y"]
        for inst in dataset
    )
    return hits / len(dataset)
# Train the victim model and conduct evaluation
def train_model(model, data_train, data_valid, vocab, num_epoch=10):
    """Train *model* for ``num_epoch`` epochs, keeping the best-validation weights.

    Returns:
        The model with the state dict of its best validation-accuracy epoch
        loaded back in (the model object is mutated in place).
    """
    mx_acc = None
    mx_model = None
    for i in range(num_epoch):
        loss = train_epoch(model, data_train, vocab)
        victim = MyClassifier(model, vocab)
        accuracy = eval_classifier_acc(data_valid, victim)
        print("Epoch %d: loss: %lf, accuracy %lf" % (i, loss, accuracy))
        if mx_acc is None or mx_acc < accuracy:
            # Bug fix: record the best accuracy. Previously mx_acc was never
            # updated, so the condition was always true, every epoch overwrote
            # the "best" snapshot, and the LAST epoch was returned instead of
            # the best one.
            mx_acc = accuracy
            mx_model = model.state_dict()
    model.load_state_dict(mx_model)
    return model
# Launch adversarial attacks and generate adversarial examples
def attack(classifier, dataset, attacker = OpenAttack.attackers.PWWSAttacker()):
    """Attack *classifier* on the correctly-classified part of *dataset*.

    Prints the clean accuracy and attack success rate, and returns a
    ``datasets.Dataset`` of the successful adversarial examples with fields
    x (adversarial text), y (gold label) and tokens.

    NOTE(review): the mutable default ``attacker`` is constructed once at
    import time and shared across calls -- confirm that is acceptable.
    """
    attack_eval = OpenAttack.AttackEval(
        attacker,
        classifier,
    )
    # Only attack samples the classifier already gets right.
    correct_samples = [
        inst for inst in dataset if classifier.get_pred( [inst["x"]] )[0] == inst["y"]
    ]
    accuracy = len(correct_samples) / len(dataset)
    adversarial_samples = {
        "x": [],
        "y": [],
        "tokens": []
    }
    # Collect every successful attack result, re-tokenizing the new text.
    for result in tqdm.tqdm(attack_eval.ieval(correct_samples), total=len(correct_samples)):
        if result["success"]:
            adversarial_samples["x"].append(result["result"])
            adversarial_samples["y"].append(result["data"]["y"])
            adversarial_samples["tokens"].append(tokenizer.tokenize(result["result"], pos_tagging=False))
    attack_success_rate = len(adversarial_samples["x"]) / len(correct_samples)
    print("Accuracy: %lf%%\nAttack success rate: %lf%%" % (accuracy * 100, attack_success_rate * 100))
    return datasets.Dataset.from_dict(adversarial_samples)
def main():
    """End-to-end adversarial-training demo: train, attack, retrain, re-attack."""
    print("Loading data")
    train, valid, test, vocab = prepare_data() # Load dataset
    model = make_model(len(vocab)) # Design a victim model
    print("Training")
    trained_model = train_model(model, train, valid, vocab) # Train the victim model
    print("Generating adversarial samples (this step will take dozens of minutes)")
    victim = MyClassifier(trained_model, vocab) # Wrap the victim model
    adversarial_samples = attack(victim, train) # Conduct adversarial attacks and generate adversarial examples
    print("Adversarially training classifier")
    print(train.features)
    print(adversarial_samples.features)
    # Concatenate the clean training set with the adversarial examples.
    new_dataset = {
        "x": [],
        "y": [],
        "tokens": []
    }
    for it in train:
        new_dataset["x"].append( it["x"] )
        new_dataset["y"].append( it["y"] )
        new_dataset["tokens"].append( it["tokens"] )
    for it in adversarial_samples:
        new_dataset["x"].append( it["x"] )
        new_dataset["y"].append( it["y"] )
        new_dataset["tokens"].append( it["tokens"] )
    finetune_model = train_model(trained_model, datasets.Dataset.from_dict(new_dataset), valid, vocab) # Retrain the classifier with additional adversarial examples
    print("Testing enhanced model (this step will take dozens of minutes)")
    # ``train_model`` updates ``trained_model`` in place via load_state_dict,
    # and ``victim`` wraps that same object, so this re-attack evaluates the
    # adversarially fine-tuned weights (``finetune_model`` is the same object).
    attack(victim, train) # Re-attack the victim model to measure the effect of adversarial training
if __name__ == '__main__':
main() | 7,284 | 35.243781 | 164 | py |
OpenAttack | OpenAttack-master/examples/multiprocess_eval.py | '''
This example code shows how to using multiprocessing to accelerate adversarial attacks
'''
import OpenAttack
import datasets
def dataset_mapping(x):
    """Rename SST fields: sentence -> x, binarized sentiment score -> y."""
    label = 1 if x["label"] > 0.5 else 0
    return {"x": x["sentence"], "y": label}
def main():
    """Run a GeneticAttacker against BERT.SST with multiprocess evaluation."""
    victim = OpenAttack.loadVictim("BERT.SST")
    # BERT.SST is a pytorch model fine-tuned on SST.
    # loadVictim returns a classifier usable by Attacker and AttackEval.
    dataset = datasets.load_dataset("sst", split="train[:20]").map(function=dataset_mapping)
    # The first 20 SST training sentences, mapped to the x/y schema.
    attacker = OpenAttack.attackers.GeneticAttacker()
    # A GeneticAttacker with its default configuration.
    attack_eval = OpenAttack.AttackEval(attacker, victim)
    # AttackEval is the default evaluation harness with its basic metrics.
    attack_eval.eval(dataset, visualize=True, num_workers=4)
    # num_workers enables multiprocessing to parallelize the attacks.
if __name__ == "__main__":
main() | 1,217 | 37.0625 | 122 | py |
OpenAttack | OpenAttack-master/examples/workflow.py | '''
This example code shows how to how to use the PWWS attack model to attack BERT on the SST-2 dataset.
'''
import OpenAttack
import datasets
def dataset_mapping(x):
    """Rename SST fields to the x/y schema OpenAttack expects (binary label)."""
    return {
        "x": x["sentence"],
        "y": int(x["label"] > 0.5),
    }
def main():
    """Run a PWWS attack against BERT fine-tuned on SST-2 and visualize it."""
    victim = OpenAttack.loadVictim("BERT.SST")
    # BERT.SST is a pytorch model which is fine-tuned on SST-2.
    # loadVictim returns a classifier usable by Attacker and AttackEval.
    dataset = datasets.load_dataset("sst", split="train[:20]").map(function=dataset_mapping)
    # We load the sst-2 dataset using `datasets` package, and map the fields.
    attacker = OpenAttack.attackers.PWWSAttacker()
    # A PWWSAttacker with its default configuration.
    attack_eval = OpenAttack.AttackEval(attacker, victim)
    # Use the default implementation for AttackEval which supports seven basic metrics.
    attack_eval.eval(dataset, visualize=True)
    # visualize=True prints a per-sample visualization -- useful for small datasets.
if __name__ == "__main__":
main() | 1,266 | 38.59375 | 149 | py |
OpenAttack | OpenAttack-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
# Sphinx build configuration for the OpenAttack documentation.
# Only the most common options are set here; the complete list is at
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup ---------------------------------------------------------------
# Make the package importable so autodoc can locate the modules.
import os
import sys

sys.path.insert(0, os.path.abspath('../../'))

# Heavy / optional runtime dependencies are mocked out so that the docs can be
# built without installing them.
autodoc_mock_imports = ["onmt", "onmt_model", "editdistance", "transformers", "torch", "datasets"]

# -- Project information ------------------------------------------------------
project = 'OpenAttack'
copyright = '2020, THUNLP'
author = 'THUNLP'

# -- General configuration ----------------------------------------------------
# Sphinx extensions: built-in ones plus the offline MathJax bundle.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.mathjax',
    'sphinx-mathjax-offline',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Patterns (relative to the source dir) to ignore when looking for sources.
exclude_patterns = []

# -- Options for HTML output --------------------------------------------------
html_theme = "sphinx_thunlp_theme"
html_codeblock_linenos_style = "table"
add_module_names = False
autodoc_member_order = "groupwise"
autodoc_typehints = "description"
html_static_path = ['_static']
html_css_files = ["css/custom.css"]
master_doc = 'index'
| 2,248 | 33.075758 | 98 | py |
OpenAttack | OpenAttack-master/OpenAttack/metric/algorithms/sentence_sim.py | from .base import AttackMetric
from ...tags import *
class SentenceSim(AttackMetric):
    """Semantic-similarity metric backed by a SentenceTransformer encoder."""

    NAME = "Sentence Similarity"
    TAGS = { TAG_English }

    def __init__(self, device=None):
        """
        :Package Requirements:
            * sentence_transformers
        :Language: english

        Args:
            device: Device for the encoder (e.g. ``"cuda"`` or ``"cpu"``).
                Defaults to ``"cuda"`` when available, otherwise ``"cpu"``.
                (The previous implementation hard-coded ``'cuda'`` and
                crashed on CUDA-less machines.)
        """
        import torch
        from sentence_transformers import SentenceTransformer
        from ...data_manager import DataManager
        if device is None:
            # Fall back to CPU instead of failing when no GPU is present.
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model = SentenceTransformer(DataManager.load("AttackAssist.SentenceTransformer"), device=device)

    def calc_score(self, sen1 : str, sen2 : str) -> float:
        """
        Args:
            sen1: The first sentence.
            sen2: The second sentence.
        Returns:
            Cosine similarity of the two sentence embeddings.
        """
        from sentence_transformers import util
        emb1, emb2 = self.model.encode([sen1, sen2], show_progress_bar=False)
        cos_sim = util.pytorch_cos_sim(emb1, emb2)
        return cos_sim.cpu().numpy()[0][0]

    def after_attack(self, input, adversarial_sample):
        # Score only successful attacks; a None sample means the attack failed.
        if adversarial_sample is not None:
            return self.calc_score(input["x"], adversarial_sample)
| 1,142 | 29.078947 | 109 | py |
OpenAttack | OpenAttack-master/OpenAttack/metric/algorithms/levenshtein.py | from typing import List
from .base import AttackMetric
import torch
from ...text_process.tokenizer import Tokenizer
class Levenshtein(AttackMetric):
    """Levenshtein edit distance between the tokenized original input and the
    adversarial sample."""

    NAME = "Levenshtein Edit Distance"

    def __init__(self, tokenizer : Tokenizer) -> None:
        """
        Args:
            tokenizer: A tokenizer that will be used in this metric. Must be an instance of :py:class:`.Tokenizer`
        """
        self.tokenizer = tokenizer

    @property
    def TAGS(self):
        # Inherit the language tags of the underlying tokenizer, if it has any.
        if hasattr(self.tokenizer, "TAGS"):
            return self.tokenizer.TAGS
        return set()

    def calc_score(self, a : List[str], b : List[str]) -> int:
        """
        Args:
            a: The first list.
            b: The second list.
        Returns:
            Levenshtein edit distance between two sentences.

        Both parameters can be str or list, str for char-level edit distance while list for token-level edit distance.
        """
        # Classic O(len(a)*len(b)) dynamic program. The previous version
        # filled a full torch.LongTensor one element at a time from Python,
        # which is both slow and memory-hungry; two plain Python rows give
        # identical results with O(len(b)) memory.
        la, lb = len(a), len(b)
        prev = list(range(lb + 1))          # distances for the empty prefix of a
        for i in range(1, la + 1):
            cur = [i] + [0] * lb
            ai = a[i - 1]
            for j in range(1, lb + 1):
                if ai == b[j - 1]:
                    cur[j] = prev[j - 1]    # match: no edit needed
                else:
                    # substitution, deletion, insertion
                    cur[j] = min(prev[j - 1], prev[j], cur[j - 1]) + 1
            prev = cur
        return prev[lb]

    def after_attack(self, input, adversarial_sample):
        # Only successful attacks (non-None samples) are scored.
        if adversarial_sample is not None:
            return self.calc_score( self.tokenizer.tokenize(input["x"], pos_tagging=False), self.tokenizer.tokenize(adversarial_sample, pos_tagging=False) )
| 1,727 | 32.230769 | 156 | py |
OpenAttack | OpenAttack-master/OpenAttack/metric/algorithms/gptlm.py | import math
import transformers
from ...tags import *
from .base import AttackMetric
class GPT2LM(AttackMetric):
    """Fluency metric: GPT-2 perplexity of the adversarial sample."""

    NAME = "Fluency (ppl)"
    TAGS = { TAG_English }

    def __init__(self):
        """
        Language Models are Unsupervised Multitask Learners.
        `[pdf] <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`__
        `[code] <https://github.com/openai/gpt-2>`__

        :Language: english
        """
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2")
        self.lm = transformers.GPT2LMHeadModel.from_pretrained("gpt2")

    def after_attack(self, input, adversarial_sample):
        # Perplexity = exp(mean token-level cross-entropy of the LM on the sample).
        if adversarial_sample is not None:
            import torch
            ipt = self.tokenizer(adversarial_sample, return_tensors="pt", verbose=False)
            # Inference only: disable autograd so no graph is built for the
            # forward pass (same value, far less memory).
            with torch.no_grad():
                loss = self.lm(**ipt, labels=ipt.input_ids)[0]
            return math.exp(loss)
        return None
class GPT2LMChinese(AttackMetric):
    """Chinese fluency metric based on a TF-converted Chinese GPT-2 checkpoint.

    NOTE(review): ``after_attack`` is not implemented for this class yet (the
    original source carries a FIXME), so instances currently only load the model.
    """

    NAME = "Fluency (ppl)"
    TAGS = { TAG_Chinese }

    def __init__(self):
        """
        Language Models are Unsupervised Multitask Learners.
        `[pdf] <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`__
        `[code] <https://github.com/openai/gpt-2>`__

        :Package Requirements:
            * tensorflow>=2
        :Language: chinese
        """
        ## TODO train a pytorch chinese gpt-2 model
        model_name = "mymusise/EasternFantasyNoval"
        self.tokenizer = transformers.BertTokenizerFast.from_pretrained(model_name)
        # The checkpoint only ships TensorFlow weights, hence from_tf=True.
        self.lm = transformers.GPT2LMHeadModel.from_pretrained(model_name, from_tf=True)
## FIXME after_attack | 1,644 | 31.254902 | 108 | py |
OpenAttack | OpenAttack-master/OpenAttack/metric/algorithms/usencoder.py | from .base import AttackMetric
import numpy as np
from ...tags import *
from ...data_manager import DataManager
## TODO use a pytorch model instead
class UniversalSentenceEncoder(AttackMetric):
    """Semantic-similarity metric based on Google's Universal Sentence Encoder."""

    NAME = "Semantic Similarity"
    TAGS = { TAG_English }

    def __init__(self):
        """
        Universal Sentence Encoder in tensorflow_hub.
        `[pdf] <https://arxiv.org/pdf/1803.11175>`__
        `[page] <https://tfhub.dev/google/universal-sentence-encoder/4>`__

        :Data Requirements: :py:data:`.AttackAssist.UniversalSentenceEncoder`
        :Package Requirements:
            * **tensorflow** >= 2.0.0
            * **tensorflow_hub**
        :Language: english
        """
        import tensorflow_hub as hub
        self.embed = hub.load( DataManager.load("AttackAssist.UniversalSentenceEncoder") )

    def calc_score(self, sentA : str, sentB : str) -> float:
        """
        Args:
            sentA: The first sentence.
            sentB: The second sentence.
        Returns:
            Cosine distance between two sentences.
        """
        # Embed both sentences in one batch, then compute the cosine
        # similarity: dot product over the product of the norms.
        vecs = self.embed([sentA, sentB]).numpy()
        va, vb = vecs[0], vecs[1]
        return va.dot(vb) / (np.linalg.norm(va) * np.linalg.norm(vb))

    def after_attack(self, input, adversarial_sample):
        # A None sample means the attack failed; report no score then.
        if adversarial_sample is not None:
            return self.calc_score(input["x"], adversarial_sample)
| 1,410 | 28.395833 | 90 | py |
OpenAttack | OpenAttack-master/OpenAttack/attackers/bert_attack/__init__.py | import copy
from typing import List, Optional, Union
import numpy as np
from transformers import BertConfig, BertTokenizerFast, BertForMaskedLM
import torch
from ..classification import ClassificationAttacker, Classifier, ClassifierGoal
from ...tags import TAG_English, Tag
from ...exceptions import WordNotInDictionaryException
from ...attack_assist.substitute.word import get_default_substitute, WordSubstitute
from ...attack_assist.filter_words import get_default_filter_words
class Feature(object):
    """Bookkeeping record for one attack run: the original sequence, the best
    adversarial candidate so far, and query/change counters."""

    def __init__(self, seq_a, label):
        self.label = label            # target label of the attack goal
        self.seq = seq_a              # original input text (kept unchanged)
        self.final_adverse = seq_a    # best adversarial text found so far
        self.query = 0                # number of victim-model queries issued
        self.change = 0               # number of word substitutions applied
        self.success = 0              # outcome status code of the attack
        self.sim = 0.0                # similarity score placeholder
        self.changes = []             # log entries: [position, new_word, old_word]
class BERTAttacker(ClassificationAttacker):
    """BERT-ATTACK (Li et al., EMNLP 2020): greedy word-substitution attack
    whose candidate words come from a masked language model."""

    @property
    def TAGS(self):
        # Language of the attacker plus the victim capabilities it requires.
        return { self.__lang_tag, Tag("get_pred", "victim"), Tag("get_prob", "victim") }
    def __init__(self,
            mlm_path : str = 'bert-base-uncased',
            k : int = 36,
            use_bpe : bool = True,
            sim_mat : Union[None, bool, WordSubstitute] = None,
            threshold_pred_score : float = 0.3,
            max_length : int = 512,
            device : Optional[torch.device] = None,
            filter_words : List[str] = None
        ):
        """
        BERT-ATTACK: Adversarial Attack Against BERT Using BERT, Linyang Li, Ruotian Ma, Qipeng Guo, Xiangyang Xue, Xipeng Qiu, EMNLP2020
        `[pdf] <https://arxiv.org/abs/2004.09984>`__
        `[code] <https://github.com/LinyangLee/BERT-Attack>`__

        Args:
            mlm_path: The path to the masked language model. **Default:** 'bert-base-uncased'
            k: The k most important words / sub-words to substitute for. **Default:** 36
            use_bpe: Whether use bpe. **Default:** `True`
            sim_mat: Whether use cosine_similarity to filter out antonyms. Keep `None` for not using a sim_mat.
            threshold_pred_score: Threshold used in substitute module. **Default:** 0.3
            max_length: The maximum length of an input sentence for bert. **Default:** 512
            device: A computing device for bert.
            filter_words: A list of words that will be preserved in the attack procedure.

        :Classifier Capacity:
            * get_pred
            * get_prob
        """
        self.tokenizer_mlm = BertTokenizerFast.from_pretrained(mlm_path, do_lower_case=True)
        if device is not None:
            self.device = device
        else:
            # Default to the first GPU when available, otherwise run on CPU.
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        config_atk = BertConfig.from_pretrained(mlm_path)
        self.mlm_model = BertForMaskedLM.from_pretrained(mlm_path, config=config_atk).to(self.device)
        self.k = k
        self.use_bpe = use_bpe
        self.threshold_pred_score = threshold_pred_score
        self.max_length = max_length
        self.__lang_tag = TAG_English
        if filter_words is None:
            filter_words = get_default_filter_words(self.__lang_tag)
        self.filter_words = set(filter_words)
        # sim_mat semantics: None/False -> no synonym filtering; True -> use
        # the default substitute for English; otherwise use the given object.
        if sim_mat is None or sim_mat is False:
            self.use_sim_mat = False
        else:
            self.use_sim_mat = True
            if sim_mat is True:
                self.substitute = get_default_substitute(self.__lang_tag)
            else:
                self.substitute = sim_mat
    def attack(self, victim: Classifier, sentence, goal: ClassifierGoal):
        """Greedy word-substitution attack guided by MLM predictions.

        Words are ranked by leave-one-out importance (confidence drop when a
        word is masked); for each word, substitutes proposed by the masked LM
        are tried until the victim's prediction satisfies ``goal``.
        Returns the adversarial text on success, ``None`` otherwise.
        """
        x_orig = sentence.lower()
        tokenizer = self.tokenizer_mlm
        # MLM-process: per-word sub-token spans are needed to slice predictions.
        feature = Feature(x_orig, goal.target)
        words, sub_words, keys = self._tokenize(feature.seq, tokenizer)
        max_length = self.max_length
        # Victim's probabilities for the original input.
        inputs = tokenizer.encode_plus(feature.seq, None, add_special_tokens=True, max_length=max_length, truncation=True)
        input_ids, _ = torch.tensor(inputs["input_ids"]), torch.tensor(inputs["token_type_ids"])
        orig_probs = torch.Tensor(victim.get_prob([feature.seq]))
        orig_probs = orig_probs[0].squeeze()
        current_prob = orig_probs.max()
        # Top-k MLM predictions per sub-token position ([CLS]/[SEP] added here,
        # stripped again by the 1: slices below).
        sub_words = ['[CLS]'] + sub_words[:max_length - 2] + ['[SEP]']
        input_ids_ = torch.tensor([tokenizer.convert_tokens_to_ids(sub_words)])
        word_predictions = self.mlm_model(input_ids_.to(self.device))[0].squeeze()  # seq-len(sub) vocab
        word_pred_scores_all, word_predictions = torch.topk(word_predictions, self.k, -1)  # seq-len k
        word_predictions = word_predictions[1:len(sub_words) + 1, :]
        word_pred_scores_all = word_pred_scores_all[1:len(sub_words) + 1, :]
        important_scores = self.get_important_scores(words, victim, current_prob, goal.target, orig_probs)
        feature.query += int(len(words))
        # Attack the most important words first.
        list_of_index = sorted(enumerate(important_scores), key=lambda x: x[1], reverse=True)
        final_words = copy.deepcopy(words)
        for top_index in list_of_index:
            # Give up once more than 20% of the words have been changed.
            if feature.change > int(0.2 * (len(words))):
                feature.success = 1  # exceed
                return None
            tgt_word = words[top_index[0]]
            if tgt_word in self.filter_words:
                continue
            if keys[top_index[0]][0] > max_length - 2:
                continue
            substitutes = word_predictions[keys[top_index[0]][0]:keys[top_index[0]][1]]  # L, k
            word_pred_scores = word_pred_scores_all[keys[top_index[0]][0]:keys[top_index[0]][1]]
            substitutes = self.get_substitues(substitutes, tokenizer, self.mlm_model, self.use_bpe, word_pred_scores, self.threshold_pred_score)
            if self.use_sim_mat:
                # Keep only substitutes that the similarity-based substitute
                # module also proposes (filters antonyms); best-effort.
                try:
                    cfs_output = self.substitute(tgt_word)
                    cos_sim_subtitutes = [elem[0] for elem in cfs_output]
                    substitutes = list(set(substitutes) & set(cos_sim_subtitutes))
                except WordNotInDictionaryException:
                    pass
            most_gap = 0.0
            candidate = None
            for substitute in substitutes:
                if substitute == tgt_word:
                    continue  # filter out original word
                if '##' in substitute:
                    continue  # filter out sub-word
                if substitute in self.filter_words:
                    continue
                # NOTE(review): temp_replace aliases final_words, so this trial
                # substitution mutates final_words directly; each later candidate
                # overwrites the same index, so the loop stays consistent.
                temp_replace = final_words
                temp_replace[top_index[0]] = substitute
                temp_text = tokenizer.convert_tokens_to_string(temp_replace)
                inputs = tokenizer.encode_plus(temp_text, None, add_special_tokens=True, max_length=max_length, truncation=True)
                input_ids = torch.tensor(inputs["input_ids"]).unsqueeze(0).to(self.device)
                seq_len = input_ids.size(1)
                temp_prob = torch.Tensor(victim.get_prob([temp_text]))[0].squeeze()
                temp_label = torch.argmax(temp_prob)
                feature.query += 1
                if goal.check(feature.final_adverse, temp_label):
                    # Goal reached: commit the substitution and return.
                    feature.change += 1
                    final_words[top_index[0]] = substitute
                    feature.changes.append([keys[top_index[0]][0], substitute, tgt_word])
                    feature.final_adverse = temp_text
                    feature.success = 4
                    return feature.final_adverse
                else:
                    # Otherwise remember the candidate with the largest
                    # confidence drop for this position.
                    label_prob = temp_prob[goal.target]
                    gap = current_prob - label_prob
                    if gap > most_gap:
                        most_gap = gap
                        candidate = substitute
            if most_gap > 0:
                feature.change += 1
                feature.changes.append([keys[top_index[0]][0], candidate, tgt_word])
                current_prob = current_prob - most_gap
                final_words[top_index[0]] = candidate
        feature.final_adverse = (tokenizer.convert_tokens_to_string(final_words))
        feature.success = 2
        return None
def _tokenize(self, seq, tokenizer):
seq = seq.replace('\n', '').lower()
words = seq.split(' ')
sub_words = []
keys = []
index = 0
for word in words:
sub = tokenizer.tokenize(word)
sub_words += sub
keys.append([index, index + len(sub)])
index += len(sub)
return words, sub_words, keys
def _get_masked(self, words):
len_text = max(len(words), 2)
masked_words = []
for i in range(len_text - 1):
masked_words.append(words[0:i] + ['[UNK]'] + words[i + 1:])
# list of words
return masked_words
    def get_important_scores(self, words, tgt_model, orig_prob, orig_label, orig_probs):
        """Leave-one-out importance per word: drop in the victim's confidence
        for the original label when the word is masked, plus a bonus term
        when masking flips the predicted label."""
        masked_words = self._get_masked(words)
        texts = [' '.join(words) for words in masked_words]  # list of text of masked words
        leave_1_probs = torch.Tensor(tgt_model.get_prob(texts))
        leave_1_probs_argmax = torch.argmax(leave_1_probs, dim=-1)
        import_scores = (orig_prob
                         - leave_1_probs[:, orig_label]
                         +
                         (leave_1_probs_argmax != orig_label).float()
                         * (leave_1_probs.max(dim=-1)[0] - torch.index_select(orig_probs, 0, leave_1_probs_argmax))
                         ).data.cpu().numpy()
        return import_scores
    def get_bpe_substitues(self, substitutes, tokenizer, mlm_model):
        """Expand per-piece candidates (L pieces x k ids) into full multi-piece
        words and rank them by masked-LM perplexity (lowest first)."""
        # substitutes L, k
        substitutes = substitutes[0:12, 0:4]  # maximum BPE candidates

        # Build the cartesian product of the per-position candidate ids.
        all_substitutes = []
        for i in range(substitutes.size(0)):
            if len(all_substitutes) == 0:
                lev_i = substitutes[i]
                all_substitutes = [[int(c)] for c in lev_i]
            else:
                lev_i = []
                for all_sub in all_substitutes:
                    for j in substitutes[i]:
                        lev_i.append(all_sub + [int(j)])
                all_substitutes = lev_i

        # all_substitutes: list of token-id lists (all candidate sequences)
        c_loss = torch.nn.CrossEntropyLoss(reduction='none')
        word_list = []
        all_substitutes = torch.tensor(all_substitutes)  # [ N, L ]
        all_substitutes = all_substitutes[:24].to(self.device)  # cap the batch
        N, L = all_substitutes.size()
        # Score each candidate sequence by the MLM's own perplexity on it.
        word_predictions = mlm_model(all_substitutes)[0]  # N L vocab-size
        ppl = c_loss(word_predictions.view(N*L, -1), all_substitutes.view(-1))  # [ N*L ]
        ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))  # N
        _, word_list = torch.sort(ppl)
        word_list = [all_substitutes[i] for i in word_list]
        final_words = []
        for word in word_list:
            tokens = [tokenizer._convert_id_to_token(int(i)) for i in word]
            text = tokenizer.convert_tokens_to_string(tokens)
            final_words.append(text)
        return final_words
    def get_substitues(self, substitutes, tokenizer, mlm_model, use_bpe, substitutes_score=None, threshold=3.0):
        """Recover candidate words from a (sub-token-count x k) prediction matrix.

        Single-piece words keep their top-k ids above ``threshold``; multi-piece
        words are delegated to :py:meth:`get_bpe_substitues` when ``use_bpe`` is
        enabled, otherwise no candidates are returned.
        """
        # substitutes: L x k matrix of predicted token ids
        words = []
        sub_len, k = substitutes.size()  # sub-len, k
        if sub_len == 0:
            return words
        elif sub_len == 1:
            # One sub-token: take ids in score order, stop below the threshold.
            for (i,j) in zip(substitutes[0], substitutes_score[0]):
                if threshold != 0 and j < threshold:
                    break
                words.append(tokenizer._convert_id_to_token(int(i)))
        else:
            if use_bpe == 1:
                words = self.get_bpe_substitues(substitutes, tokenizer, mlm_model)
            else:
                return words
        return words
def get_sim_embed(self, embed_path, sim_path):
id2word = {}
word2id = {}
with open(embed_path, 'r', encoding='utf-8') as ifile:
for line in ifile:
word = line.split()[0]
if word not in id2word:
id2word[len(id2word)] = word
word2id[word] = len(id2word) - 1
cos_sim = np.load(sim_path)
return cos_sim, word2id, id2word
| 12,769 | 40.193548 | 144 | py |
OpenAttack | OpenAttack-master/OpenAttack/attackers/bae/__init__.py | from typing import List, Optional
from ...data_manager import DataManager
from ..classification import ClassificationAttacker, Classifier, ClassifierGoal
from ...metric import UniversalSentenceEncoder
from ...utils import check_language
from ...tags import Tag, TAG_English
from ...attack_assist.filter_words import get_default_filter_words
import copy
import random
import numpy as np
import torch
from transformers import BertConfig, BertTokenizerFast, BertForMaskedLM
class Feature(object):
    """Per-attack bookkeeping: original text, current best adversarial text,
    and counters for queries, edits and outcome status."""

    def __init__(self, seq_a, label):
        self.label = label            # target label of the attack goal
        self.seq = seq_a              # original input text (immutable)
        self.final_adverse = seq_a    # best adversarial text found so far
        self.query = 0                # victim-model queries issued
        self.change = 0               # edits (replacements/insertions) applied
        self.success = 0              # outcome status code
        self.sim = 0.0                # similarity score placeholder
        self.changes = []             # log entries: [position, new_word, old_word]
class BAEAttacker(ClassificationAttacker):
    """BAE (Garg & Ramakrishnan, EMNLP 2020): BERT-based adversarial examples
    via masked-LM word replacement and/or insertion."""

    @property
    def TAGS(self):
        # Language of the attacker plus the victim capabilities it requires.
        return { self.__lang_tag, Tag("get_pred", "victim"), Tag("get_prob", "victim") }
    def __init__(self,
            mlm_path : str = "bert-base-uncased",
            k : int = 50,
            threshold_pred_score : float = 0.3,
            max_length : int = 512,
            batch_size : int = 32,
            replace_rate : float = 1.0,
            insert_rate : float = 0.0,
            device : Optional[torch.device] = None,
            sentence_encoder = None,
            filter_words : List[str] = None
        ):
        """
        BAE: BERT-based Adversarial Examples for Text Classification. Siddhant Garg, Goutham Ramakrishnan. EMNLP 2020.
        `[pdf] <https://arxiv.org/abs/2004.01970>`__
        `[code] <https://github.com/QData/TextAttack/blob/master/textattack/attack_recipes/bae_garg_2019.py>`__

        This script is adapted from <https://github.com/LinyangLee/BERT-Attack> given the high similarity between the two attack methods.
        This attacker supports the 4 attack methods (BAE-R, BAE-I, BAE-R/I, BAE-R+I) in the paper.

        Args:
            mlm_path: The path to the masked language model. **Default:** 'bert-base-uncased'
            k: The k most important words / sub-words to substitute for. **Default:** 50
            threshold_pred_score: Threshold used in substitute module. **Default:** 0.3
            max_length: The maximum length of an input sentence for bert. **Default:** 512
            batch_size: The size of a batch of input sentences for bert. **Default:** 32
            replace_rate: Replace rate.
            insert_rate: Insert rate.
            device: A computing device for bert.
            sentence_encoder: A sentence encoder to calculate the semantic similarity of two sentences. Default: :py:class:`.UniversalSentenceEncoder`
            filter_words: A list of words that will be preserved in the attack procedure.

        :Data Requirements: :py:data:`.TProcess.NLTKPerceptronPosTagger`
        :Classifier Capacity:
            * get_pred
            * get_prob

        :Language: english
        """
        if sentence_encoder is None:
            self.encoder = UniversalSentenceEncoder()
        else:
            self.encoder = sentence_encoder
        self.tokenizer_mlm = BertTokenizerFast.from_pretrained(mlm_path, do_lower_case=True)
        if device is not None:
            self.device = device
        else:
            # Default to the first GPU when available, otherwise run on CPU.
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        config_atk = BertConfig.from_pretrained(mlm_path)
        self.mlm_model = BertForMaskedLM.from_pretrained(mlm_path, config=config_atk).to(self.device)
        self.k = k
        self.threshold_pred_score = threshold_pred_score
        self.max_length = max_length
        self.batch_size = batch_size
        self.replace_rate = replace_rate
        self.insert_rate = insert_rate
        # Map (replace_rate, insert_rate) onto the four BAE variants.
        if self.replace_rate == 1.0 and self.insert_rate == 0.0:
            self.sub_mode = 0  # only using replacement (BAE-R)
        elif self.replace_rate == 0.0 and self.insert_rate == 1.0:
            self.sub_mode = 1  # only using insertion (BAE-I)
        elif self.replace_rate + self.insert_rate == 1.0:
            self.sub_mode = 2  # replacement OR insertion for each token / subword (BAE-R/I)
        elif self.replace_rate == 1.0 and self.insert_rate == 1.0:
            self.sub_mode = 3  # first replacement AND then insertion for each token / subword (BAE-R+I)
        else:
            raise NotImplementedError()
        self.__lang_tag = TAG_English
        if filter_words is None:
            filter_words = get_default_filter_words(self.__lang_tag)
        self.filter_words = set(filter_words)
        check_language([self.encoder], self.__lang_tag)
    def attack(self, victim: Classifier, sentence, goal: ClassifierGoal):
        """Run the BAE attack on one sentence.

        Ranks words by leave-one-out importance, then tries MLM-proposed
        replacements and/or insertions (chosen by ``self.sub_mode``), keeping
        only candidates whose POS tags and USE similarity pass the filters,
        until the victim's prediction satisfies ``goal``. Returns the
        adversarial text on success, ``None`` otherwise.
        """
        x_orig = sentence.lower()
        tokenizer = self.tokenizer_mlm
        # MLM-process: per-word sub-token spans are needed to slice predictions.
        feature = Feature(x_orig, goal.target)
        words, sub_words, keys = self._tokenize(feature.seq, tokenizer)
        max_length = self.max_length
        # Victim's (softmaxed) probabilities for the original input.
        inputs = tokenizer.encode_plus(feature.seq, None, add_special_tokens=True, max_length=max_length, truncation=True)
        input_ids, token_type_ids = torch.tensor(inputs["input_ids"]), torch.tensor(inputs["token_type_ids"])
        attention_mask = torch.tensor([1] * len(input_ids))
        seq_len = input_ids.size(0)
        orig_probs = torch.Tensor(victim.get_prob([feature.seq]))
        orig_probs = orig_probs[0].squeeze()
        orig_probs = torch.softmax(orig_probs, -1)
        current_prob = orig_probs.max()
        # Top-k MLM predictions per sub-token position.
        sub_words = ['[CLS]'] + sub_words[:max_length - 2] + ['[SEP]']
        input_ids_ = torch.tensor([tokenizer.convert_tokens_to_ids(sub_words)])
        word_predictions = self.mlm_model(input_ids_.to(self.device))[0].squeeze()  # seq-len(sub) vocab
        word_pred_scores_all, word_predictions = torch.topk(word_predictions, self.k, -1)  # seq-len k
        word_predictions = word_predictions[1:len(sub_words) + 1, :]
        word_pred_scores_all = word_pred_scores_all[1:len(sub_words) + 1, :]
        important_scores = self.get_important_scores(words, victim, current_prob, goal.target, orig_probs,
                                                     tokenizer, self.batch_size, max_length)
        feature.query += int(len(words))
        # Attack the most important words first.
        list_of_index = sorted(enumerate(important_scores), key=lambda x: x[1], reverse=True)
        final_words = copy.deepcopy(words)
        # `offset` counts committed insertions so later insertions land at the
        # right position inside the growing `final_words`.
        offset = 0
        for top_index in list_of_index:
            # Give up once more than 20% of the words have been changed.
            if feature.change > int(0.2 * (len(words))):
                feature.success = 1  # exceed
                return None
            tgt_word = words[top_index[0]]
            if tgt_word in self.filter_words:
                continue
            substitutes = word_predictions[keys[top_index[0]][0]:keys[top_index[0]][1]]  # L, k
            word_pred_scores = word_pred_scores_all[keys[top_index[0]][0]:keys[top_index[0]][1]]
            # in the substitute function, masked_index = top_index[0] + 1, because "[CLS]" has been inserted into sub_words
            replace_sub_len, insert_sub_len = 0, 0
            temp_sub_mode = -1
            if self.sub_mode == 0:
                substitutes = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'r', self.k, self.threshold_pred_score)
            elif self.sub_mode == 1:
                substitutes = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'i', self.k, self.threshold_pred_score)
            elif self.sub_mode == 2:
                # Randomly choose replacement vs insertion per position.
                rand_num = random.random()
                if rand_num < self.replace_rate:
                    substitutes = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'r', self.k, self.threshold_pred_score)
                    temp_sub_mode = 0
                else:
                    substitutes = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'i', self.k, self.threshold_pred_score)
                    temp_sub_mode = 1
            elif self.sub_mode == 3:
                # Half the budget for replacements, the rest for insertions.
                substitutes_replace = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'r', self.k / 2, self.threshold_pred_score)
                substitutes_insert = self.get_substitues(top_index[0] + 1, sub_words, tokenizer, self.mlm_model, 'i', self.k - self.k / 2, self.threshold_pred_score)
                replace_sub_len, insert_sub_len = len(substitutes_replace), len(substitutes_insert)
                substitutes = substitutes_replace + substitutes_insert
            else:
                raise NotImplementedError
            most_gap = 0.0
            candidate = None
            for i, substitute in enumerate(substitutes):
                if substitute == tgt_word:
                    continue  # filter out original word
                if '##' in substitute:
                    continue  # filter out sub-word
                if substitute in self.filter_words:
                    continue
                if self.sub_mode == 3:
                    # First replace_sub_len candidates are replacements.
                    if i < replace_sub_len:
                        temp_sub_mode = 0
                    else:
                        temp_sub_mode = 1
                # NOTE(review): temp_replace aliases final_words, so the trial
                # edits below mutate final_words directly.
                temp_replace = final_words
                # Check if we should REPLACE or INSERT the substitute into the original word list
                is_replace = self.sub_mode == 0 or temp_sub_mode == 0
                is_insert = self.sub_mode == 1 or temp_sub_mode == 1
                if is_replace:
                    orig_word = temp_replace[top_index[0]]
                    pos_tagger = DataManager.load("TProcess.NLTKPerceptronPosTagger")
                    pos_tag_list_before = [elem[1] for elem in pos_tagger(temp_replace)]
                    temp_replace[top_index[0]] = substitute
                    pos_tag_list_after = [elem[1] for elem in pos_tagger(temp_replace)]
                    # reverse temp_replace back to its original if pos_tag changes, and continue
                    # searching for the next best substitute
                    if pos_tag_list_after != pos_tag_list_before:
                        temp_replace[top_index[0]] = orig_word
                        continue
                elif is_insert:
                    temp_replace.insert(top_index[0] + offset, substitute)
                else:
                    raise NotImplementedError
                temp_text = tokenizer.convert_tokens_to_string(temp_replace)
                use_score = self.encoder.calc_score(temp_text, x_orig)
                # From TextAttack's implementation: Finally, since the BAE code is based on the TextFooler code, we need to
                # adjust the threshold to account for the missing / pi in the cosine
                # similarity comparison. So the final threshold is 1 - (1 - 0.8) / pi
                # = 1 - (0.2 / pi) = 0.936338023.
                if use_score < 0.936:
                    # NOTE(review): when is_insert holds, the trial word inserted
                    # into final_words (aliased by temp_replace) is NOT popped
                    # before this continue — confirm that mutation is intended.
                    continue
                inputs = tokenizer.encode_plus(temp_text, None, add_special_tokens=True, max_length=max_length, truncation=True)
                input_ids = torch.tensor(inputs["input_ids"]).unsqueeze(0).to(self.device)
                seq_len = input_ids.size(1)
                temp_prob = torch.Tensor(victim.get_prob([temp_text]))[0].squeeze()
                feature.query += 1
                temp_prob = torch.softmax(temp_prob, -1)
                temp_label = torch.argmax(temp_prob)
                if goal.check(feature.final_adverse, temp_label):
                    # Goal reached: commit the edit and return.
                    feature.change += 1
                    if is_replace:
                        final_words[top_index[0]] = substitute
                    elif is_insert:
                        final_words.insert(top_index[0] + offset, substitute)
                    else:
                        raise NotImplementedError()
                    feature.changes.append([keys[top_index[0]][0], substitute, tgt_word])
                    feature.final_adverse = temp_text
                    feature.success = 4
                    return feature.final_adverse
                else:
                    # Otherwise remember the candidate with the largest
                    # confidence drop for this position.
                    label_prob = temp_prob[goal.target]
                    gap = current_prob - label_prob
                    if gap > most_gap:
                        most_gap = gap
                        candidate = substitute
                if is_insert:
                    # Undo the trial insertion before trying the next candidate.
                    final_words.pop(top_index[0] + offset)
            if most_gap > 0:
                # NOTE(review): is_replace/is_insert here carry their values from
                # the last executed loop iteration — confirm this matches intent.
                feature.change += 1
                feature.changes.append([keys[top_index[0]][0], candidate, tgt_word])
                current_prob = current_prob - most_gap
                if is_replace:
                    final_words[top_index[0]] = candidate
                elif is_insert:
                    final_words.insert(top_index[0] + offset, candidate)
                    offset += 1
                else:
                    raise NotImplementedError()
        feature.final_adverse = (tokenizer.convert_tokens_to_string(final_words))
        feature.success = 2
        return None
def _tokenize(self, seq, tokenizer):
seq = seq.replace('\n', '').lower()
words = seq.split(' ')
sub_words = []
keys = []
index = 0
for word in words:
sub = tokenizer.tokenize(word)
sub_words += sub
keys.append([index, index + len(sub)])
index += len(sub)
return words, sub_words, keys
def _get_masked_insert(self, words):
len_text = max(len(words), 2)
masked_words = []
for i in range(len_text - 1):
masked_words.append(words[0:i + 1] + ['[UNK]'] + words[i + 1:])
# list of words
return masked_words
    def get_important_scores(self, words, tgt_model, orig_prob, orig_label, orig_probs, tokenizer, batch_size, max_length):
        """Leave-one-out importance per insertion point: confidence drop for the
        original label when '[UNK]' is inserted, plus a bonus when the
        predicted label flips.

        NOTE(review): `tokenizer`, `batch_size` and `max_length` are accepted
        but unused in this implementation.
        """
        masked_words = self._get_masked_insert(words)
        texts = [' '.join(words) for words in masked_words]  # list of text of masked words
        leave_1_probs = torch.Tensor(tgt_model.get_prob(texts))
        leave_1_probs = torch.softmax(leave_1_probs, -1)  #
        leave_1_probs_argmax = torch.argmax(leave_1_probs, dim=-1)
        import_scores = (orig_prob
                         - leave_1_probs[:, orig_label]
                         +
                         (leave_1_probs_argmax != orig_label).float()
                         * (leave_1_probs.max(dim=-1)[0] - torch.index_select(orig_probs, 0, leave_1_probs_argmax))
                         ).data.cpu().numpy()
        return import_scores
##### TODO: make this one of the substitute unit under ./substitures #####
    def get_substitues(self, masked_index, tokens, tokenizer, model, sub_mode, k, threshold=3.0):
        """Propose substitute tokens for one position via masked-LM prediction.

        ``sub_mode`` 'r' masks the token in place (replacement candidates);
        'i' inserts a new '[MASK]' at the position (insertion candidates).

        NOTE(review): the `k` and `threshold` parameters are accepted but
        unused — the method always takes the top ``self.k`` predictions.
        """
        masked_tokens = copy.deepcopy(tokens)
        if sub_mode == "r":
            masked_tokens[masked_index] = '[MASK]'
        elif sub_mode == "i":
            masked_tokens.insert(masked_index, '[MASK]')
        else:
            raise NotImplementedError()
        # Convert token to vocabulary indices
        indexed_tokens = tokenizer.convert_tokens_to_ids(masked_tokens)
        # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
        segments_ids = [0] * len(indexed_tokens)
        # Convert inputs to PyTorch tensors
        tokens_tensor = torch.tensor([indexed_tokens]).to(self.device)
        segments_tensors = torch.tensor([segments_ids]).to(self.device)
        model.eval()
        # Predict all tokens (inference only, no autograd graph).
        with torch.no_grad():
            outputs = model(tokens_tensor, token_type_ids=segments_tensors)
            predictions = outputs[0]
        predicted_indices = torch.topk(predictions[0, masked_index], self.k)[1]
        predicted_tokens = tokenizer.convert_ids_to_tokens(predicted_indices)
        return predicted_tokens
def get_sim_embed(self, embed_path, sim_path):
id2word = {}
word2id = {}
with open(embed_path, 'r', encoding='utf-8') as ifile:
for line in ifile:
word = line.split()[0]
if word not in id2word:
id2word[len(id2word)] = word
word2id[word] = len(id2word) - 1
cos_sim = np.load(sim_path)
return cos_sim, word2id, id2word
| 16,398 | 44.052198 | 166 | py |
OpenAttack | OpenAttack-master/OpenAttack/attackers/geometry/__init__.py | import numpy as np
import torch
import torch.nn as nn
import copy
from typing import List, Optional
from ...text_process.tokenizer import Tokenizer, get_default_tokenizer
from ...attack_assist.substitute.word import WordSubstitute, get_default_substitute
from ...utils import get_language, check_language, language_by_name
from ..classification import ClassificationAttacker, Classifier
from ...attack_assist.goal import ClassifierGoal
from ...tags import TAG_English, Tag
from ...exceptions import WordNotInDictionaryException
from transformers import BertConfig, BertTokenizerFast
from transformers import BertForSequenceClassification, BertForMaskedLM
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
import time
# specific to geometry attack
from nltk.corpus import stopwords
import string
from collections import Counter
from copy import deepcopy
from torch.nn import CosineSimilarity
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from tqdm import tqdm
# torch.autograd.gradcheck.zero_gradients was removed in torch 1.9, so provide
# a local fallback on newer versions.
# Bug fix: the original gate compared version STRINGS ('1.10.0' < '1.9.0' is
# True lexicographically), which sent torch >= 1.10 down the removed-import
# path and raised ImportError. Compare numeric (major, minor) instead.
try:
    _TORCH_VERSION = tuple(int(p) for p in torch.__version__.split("+")[0].split(".")[:2])
except ValueError:
    # Unparsable/dev version string: assume a modern torch and use the fallback.
    _TORCH_VERSION = (1, 9)
if _TORCH_VERSION < (1, 9):
    from torch.autograd.gradcheck import zero_gradients
else:
    import collections.abc as container_abcs

    def zero_gradients(x):
        """Recursively detach and zero the .grad of a tensor or an iterable of tensors."""
        if isinstance(x, torch.Tensor):
            if x.grad is not None:
                x.grad.detach_()
                x.grad.zero_()
        elif isinstance(x, container_abcs.Iterable):
            for elem in x:
                zero_gradients(elem)
# Default hyper-parameters for the geometry attack: substitute/tokenizer
# settings, masked-LM options, victim-model architecture and embedding paths.
# User-supplied configuration overrides these values.
DEFAULT_CONFIG = {
    "threshold": 0.5,
    "substitute": None,
    "token_unk": "<UNK>",
    "token_pad": "<PAD>",
    "mlm_path": 'bert-base-uncased',
    "num_label": 2,
    "k": 5,
    "use_bpe": 0,
    "threshold_pred_score": 0,
    "use_sim_mat": 0,
    "max_length": 50,
    "max_steps": 50,
    "model": 'lstm',
    "embedding": 'random',
    "hidden_size": 128,
    "bidirectional": False,
    "dataset": 'imdb',
    "vocab_size": 60000,
    "attack": 'deepfool',
    "splits": 1500,
    "max_loops": 5,
    "abandon_stopwords": True,
    "metric": 'projection',
    "model_path": 'models/9.pth',
    "embedding_file": 'glove.6B.300d.txt',
    "embedding_size": 100
}
# English stop-/function-words that the attack never substitutes (kept as a
# set for O(1) membership tests).
filter_words = ['a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost',
                'alone', 'along', 'already', 'also', 'although', 'am', 'among', 'amongst', 'an', 'and', 'another',
                'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', "aren't", 'around', 'as',
                'at', 'back', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides',
                'between', 'beyond', 'both', 'but', 'by', 'can', 'cannot', 'could', 'couldn', "couldn't", 'd', 'didn',
                "didn't", 'doesn', "doesn't", 'don', "don't", 'down', 'due', 'during', 'either', 'else', 'elsewhere',
                'empty', 'enough', 'even', 'ever', 'everyone', 'everything', 'everywhere', 'except', 'first', 'for',
                'former', 'formerly', 'from', 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'he', 'hence',
                'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
                'how', 'however', 'hundred', 'i', 'if', 'in', 'indeed', 'into', 'is', 'isn', "isn't", 'it', "it's",
                'its', 'itself', 'just', 'latter', 'latterly', 'least', 'll', 'may', 'me', 'meanwhile', 'mightn',
                "mightn't", 'mine', 'more', 'moreover', 'most', 'mostly', 'must', 'mustn', "mustn't", 'my', 'myself',
                'namely', 'needn', "needn't", 'neither', 'never', 'nevertheless', 'next', 'no', 'nobody', 'none',
                'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'on', 'once', 'one', 'only',
                'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'per',
                'please', 's', 'same', 'shan', "shan't", 'she', "she's", "should've", 'shouldn', "shouldn't", 'somehow',
                'something', 'sometime', 'somewhere', 'such', 't', 'than', 'that', "that'll", 'the', 'their', 'theirs',
                'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
                'thereupon', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'thus', 'to', 'too',
                'toward', 'towards', 'under', 'unless', 'until', 'up', 'upon', 'used', 've', 'was', 'wasn', "wasn't",
                'we', 'were', 'weren', "weren't", 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
                'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while',
                'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'with', 'within', 'without', 'won',
                "won't", 'would', 'wouldn', "wouldn't", 'y', 'yet', 'you', "you'd", "you'll", "you're", "you've",
                'your', 'yours', 'yourself', 'yourselves']
filter_words = set(filter_words)
class Sample:
    """Container for one classification example plus attack bookkeeping.

    Note: ``sentence`` is only defined when ``words`` is provided; callers
    that rely on it must construct the sample with tokens.
    """

    def __init__(self, data, words, steps, label, length, id):
        # Keep only the first `steps` positions of ids (and tokens, if given).
        self.word_ids = data[:steps]
        if words is not None:
            self.sentence = words[:steps]
        self.label = label
        self.length = length
        self.id = id
        # Bookkeeping filled in later by the attacker.
        self.history = []
        self.new_info = None
        self.mask = None
        self.stopwords_mask = None
        # Resources used when building perturbation masks.
        self.punctuations = string.punctuation
        self.stopwords = set(stopwords.words('english'))

    def set_mask(self, mask, stopwords_mask):
        """Record which positions may be perturbed and which are stopwords."""
        self.mask = mask
        self.stopwords_mask = stopwords_mask

    def set_new_info(self, new_info):
        """Record the latest substitution: [new_id, new_word, old_id, old_word, idx]."""
        self.new_info = new_info
class DeepFool(nn.Module):
    """Batched DeepFool attack applied to fixed sentence vectors.

    Iteratively estimates the (approximately) minimal perturbation ``r_tot``
    that moves each input vector across the classifier head's decision
    boundary; ``overshoot`` pushes the final point slightly past it.
    """
    def __init__(self, config, num_classes, max_iters, overshoot=0.02):
        """
        :param config: dict with at least key 'dataset' ('imdb' or 'agnews')
        :param num_classes: number of top-scoring classes considered as
            candidate attack classes in the untargeted setting
        :param max_iters: hard cap on DeepFool iterations
        :param overshoot: multiplicative margin (1 + overshoot) applied to
            r_tot so the perturbed point lands on the far side of the boundary
        """
        super(DeepFool, self).__init__()
        self.config = config
        self.num_classes = num_classes
        # Per-sample iteration counts from the most recent forward() call.
        self.loops_needed = None
        self.max_iters = max_iters
        self.overshoot = overshoot
        # Total iterations accumulated across all forward() calls.
        self.loops = 0
    def forward(self, vecs, net_, target=None):
        """
        :param vecs: [batch_size, vec_size]
        :param net_: FFNN in our case
        :param target: optional [batch_size] target labels; when given, the
            attack is targeted (perturb toward `target` instead of any class)
        :return: (normalized gradient at the perturbed point,
                  perturbed vectors, original predicted labels)
        """
        # Work on deep copies so neither the classifier head nor the input
        # vectors are modified in place.
        net = deepcopy(net_.classifier)
        sent_vecs = deepcopy(vecs.data)
        input_shape = sent_vecs.size()
        f_vecs = net.forward(sent_vecs).data
        # print("input and output:", sent_vecs.shape, f_vecs.shape)
        # Classes sorted by descending logit; column 0 is the prediction.
        I = torch.argsort(f_vecs, dim=1, descending=True)
        # I = torch.argsort(f_vecs, dim=-1, descending=True)
        I = I[:, 0:self.num_classes]
        # print("I shape:", I.shape)
        # this is actually the predicted label
        label = I[:, 0]
        # print("label:", label)
        if target is not None:
            # Targeted mode: the only candidate class is the target itself.
            I = target.unsqueeze(1)
            if self.config['dataset'] == 'imdb':
                num_classes = 2
            elif self.config['dataset'] == 'agnews':
                num_classes = 4
            else:
                # NOTE(review): num_classes is left undefined on this path,
                # which raises NameError below — confirm datasets are
                # validated upstream.
                print('Unrecognized dataset {}'.format(self.config['dataset']))
        else:
            num_classes = I.size(1)
        pert_vecs = deepcopy(sent_vecs)
        r_tot = torch.zeros(input_shape)
        check_fool = deepcopy(sent_vecs)
        k_i = label
        loop_i = 0
        # pre-define an finish_mask, [batch_size], all samples are not finished at first
        finish_mask = torch.zeros((input_shape[0], 1), dtype=torch.float)
        finished = torch.ones_like(finish_mask)
        self.loops_needed = torch.zeros((input_shape[0],))
        if torch.cuda.is_available():
            r_tot = r_tot.cuda()
            finish_mask = finish_mask.cuda()
            finished = finished.cuda()
            self.loops_needed = self.loops_needed.cuda()
        # every sample needs to be finished, and total loops should be smaller than max_iters
        while torch.sum(finish_mask >= finished) != input_shape[0] and loop_i < self.max_iters:
            x = pert_vecs.requires_grad_(True)
            fs = net.forward(x)
            # Smallest perturbation magnitude found so far for each sample.
            pert = torch.ones(input_shape[0]) * np.inf
            w = torch.zeros(input_shape)
            if torch.cuda.is_available():
                pert = pert.cuda()
                w = w.cuda()
            # fs[sample_index, I[sample_index, sample_label]]
            # Summing the per-sample logits lets one backward() yield all
            # per-sample gradients at once.
            logits_label_sum = torch.gather(
                fs, dim=1, index=label.unsqueeze(1)).sum()
            logits_label_sum.backward(retain_graph=True)
            grad_orig = deepcopy(x.grad.data)
            for k in range(1, num_classes):
                if target is not None:
                    # Targeted mode: force k = 0 (the single target column)
                    # and run the loop body exactly once.
                    k = k - 1
                    if k > 0:
                        break
                # NOTE(review): zero_gradients comes from
                # torch.autograd.gradcheck and was removed in newer torch
                # releases — confirm the pinned torch version provides it.
                zero_gradients(x)
                # fs[sample_index, I[sample_index, sample_class]]
                logits_class_sum = torch.gather(
                    fs, dim=1, index=I[:, k].unsqueeze(1)).sum()
                logits_class_sum.backward(retain_graph=True)
                # [batch_size, n_channels, height, width]
                cur_grad = deepcopy(x.grad.data)
                # Direction of the boundary between class k and the label.
                w_k = cur_grad - grad_orig
                # fs[sample_index, I[sample_index, sample_class]] - fs[sample_index, I[sample_index, sample_label]]
                f_k = torch.gather(fs, dim=1, index=I[:, k].unsqueeze(
                    1)) - torch.gather(fs, dim=1, index=label.unsqueeze(1))
                f_k = f_k.squeeze(-1)
                # element-wise division
                # Distance to the linearized boundary of class k.
                pert_k = torch.div(torch.abs(f_k), self.norm_dim(w_k))
                # Keep, per sample, the closest boundary found so far.
                valid_pert_mask = pert_k < pert
                new_pert = pert_k + 0.
                new_w = w_k + 0.
                valid_pert_mask = valid_pert_mask.bool()
                pert = torch.where(valid_pert_mask, new_pert, pert)
                # index by valid_pert_mask
                valid_w_mask = torch.reshape(
                    valid_pert_mask, shape=(input_shape[0], 1)).float()
                valid_w_mask = valid_w_mask.bool()
                w = torch.where(valid_w_mask, new_w, w)
            # Step of length pert (clamped away from 0) along unit vector w.
            r_i = torch.mul(torch.clamp(pert, min=1e-4).reshape(-1, 1), w)
            r_i = torch.div(r_i, self.norm_dim(w).reshape((-1, 1)))
            r_tot_new = r_tot + r_i
            # if get 1 for cur_update_mask, then the sample has never changed its label, we need to update it
            cur_update_mask = (finish_mask < 1.0).byte()
            if torch.cuda.is_available():
                cur_update_mask = cur_update_mask.cuda()
            cur_update_mask = cur_update_mask.bool()
            r_tot = torch.where(cur_update_mask, r_tot_new, r_tot)
            # r_tot already filtered with cur_update_mask, no need to do again
            pert_vecs = sent_vecs + r_tot
            check_fool = sent_vecs + (1.0 + self.overshoot) * r_tot
            k_i = torch.argmax(net.forward(
                check_fool.requires_grad_(True)), dim=-1).data
            if target is None:
                # in untargeted version, we finish perturbing when the network changes its predictions to the advs
                finish_mask += ((k_i != label) * 1.0).reshape((-1, 1)).float()
                # print(torch.sum(finish_mask >= finished))
            else:
                # in targeted version, we finish perturbing when the network classifies the advs as the target class
                finish_mask += ((k_i == target) * 1.0).reshape((-1, 1)).float()
            loop_i += 1
            self.loops += 1
            # Samples updated this iteration record the current loop count.
            self.loops_needed[cur_update_mask.squeeze()] = loop_i
        # Detach everything: no gradient should flow out of the attack.
        r_tot.detach_()
        check_fool.detach_()
        r_i.detach_()
        pert_vecs.detach_()
        # grad is not really need for deepfool, used here as an additional check
        x = pert_vecs.requires_grad_(True)
        fs = net.forward(x)
        torch.sum(torch.gather(fs, dim=1, index=k_i.unsqueeze(
            1)) - torch.gather(fs, dim=1, index=label.unsqueeze(1))).backward(retain_graph=True)
        grad = deepcopy(x.grad.data)
        # Normalize the gradient per sample (boundary normal direction).
        grad = torch.div(grad, self.norm_dim(grad).unsqueeze(1))
        label = deepcopy(label.data)
        if target is not None:
            # in targeted version, we move an adv towards the true class, but we do not want to cross the boundary
            pert_vecs = deepcopy(pert_vecs.data)
            return grad, pert_vecs, label
        else:
            # check_fool should be on the other side of the decision boundary
            check_fool_vecs = deepcopy(check_fool.data)
            return grad, check_fool_vecs, label
    @staticmethod
    def norm_dim(w):
        """Per-row L2 norm: [batch_size, d] -> [batch_size]."""
        norms = []
        for idx in range(w.size(0)):
            norms.append(w[idx].norm())
        norms = torch.stack(tuple(norms), dim=0)
        return norms
class WordSaliencyBatch:
def __init__(self, config, word2id):
self.config = config
self.word2id = word2id
self.UNK_WORD = self.config['token_unk']
self.model = None
def split_forward(self, new_word_ids, new_lengths):
# split new_word_ids and new_lengths
new_word_ids_splits = new_word_ids.split(1, dim=0)
new_lengths_splits = new_lengths.split(1, dim=0)
new_logits = []
for idx in range(len(new_lengths_splits)):
outputs = self.model(new_word_ids_splits[idx])
new_logits_split = outputs.logits
new_logits.append(new_logits_split)
new_logits = torch.cat(new_logits, dim=0)
return new_logits
def compute_saliency(self, model_, word_ids, labels, lengths, mask, order=False):
"""
compute saliency for a batch of examples
# TODO: implement batch to more than one examples
:param model_:
:param word_ids: [batch_size, max_steps]
:param labels: [batch_size]
:param lengths: [batch_size]
:param mask: [batch_size, max_steps]
:param order:
:return:
"""
# with torch.no_grad():
print('start')
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_max_memory_cached()
print(torch.cuda.max_memory_allocated() / 1024 / 1024)
print(torch.cuda.memory_allocated() / 1024 / 1024)
self.model = deepcopy(model_)
# self.model.eval()
# self.model = model_
# cur_batch_size = word_ids.size(0)
cur_batch_size = 1
unk_id = self.word2id[self.UNK_WORD]
unk_id = torch.tensor(unk_id)
if torch.cuda.is_available():
unk_id = unk_id.cuda()
# compute the original probs for true class
# logits: [batch_size, num_classes]
# predictions: [batch_size]
# probs: [batch_size, num_classes]
# logits, _ = self.model(word_ids, lengths)
logits = self.model(word_ids).logits
predictions = torch.argmax(logits, dim=-1)
probs = torch.softmax(logits, dim=-1)
# [batch_size, num_classes]
one_hot_mask = torch.arange(logits.size(1)).unsqueeze(
0).repeat(cur_batch_size, 1)
if torch.cuda.is_available():
one_hot_mask = one_hot_mask.cuda()
one_hot_mask = one_hot_mask == predictions.unsqueeze(1)
# [batch_size, 1]
true_probs = torch.masked_select(probs, one_hot_mask)
# unsqueeze word_ids
# [batch_size, 1, max_steps]
new_word_ids = word_ids.unsqueeze(0)
# new_word_ids = new_word_ids.unsqueeze(1)
# print("before word id shape:", new_word_ids.shape)
# [batch_size, max_steps, max_steps]
# dim 1 used to indicate which word is replaced by unk
new_word_ids = new_word_ids.repeat(1, self.config['max_steps'], 1)
# then replace word by unk
# [max_steps, max_steps]
# diagonal elements = 1
diag_mask = torch.diag(torch.ones(self.config['max_steps']))
# [1, max_steps, max_steps]
diag_mask = diag_mask.unsqueeze(0)
# [batch_size, max_steps, max_steps]
# for elements with a mask of 1, replace with unk_id
diag_mask = diag_mask.repeat(cur_batch_size, 1, 1).bool()
if torch.cuda.is_available():
diag_mask = diag_mask.cuda()
# [batch_size, max_steps, max_steps]
# replace with unk_id
# print(diag_mask.shape, unk_id.shape,new_word_ids.shape)
new_word_ids = diag_mask * unk_id + (~diag_mask) * new_word_ids
# compute probs for new_word_ids
# [batch_size*max_steps, max_steps]
new_word_ids = new_word_ids.view(
cur_batch_size * self.config['max_steps'], -1)
# construct new_lengths
# [batch_size, 1]
new_lengths = torch.ones([cur_batch_size, 1]) * lengths
# print("size 1:", new_lengths.size())
# new_lengths = lengths.view(cur_batch_size, 1)
# new_lengths = torch.ones((cur_batch_size, 1)) * lengths
# repeat
# [batch_size*max_steps]
new_lengths = new_lengths.repeat(
1, self.config['max_steps']).view(-1)
# print("size 2:", new_lengths.size())
# the same applies to new_predictions
# [batch_size, 1]
new_predictions = predictions.view(cur_batch_size, 1)
# repeat
# [batch_size*max_steps]
new_predictions = new_predictions.repeat(
1, self.config['max_steps']).view(-1)
# [batch_size*max_steps, num_classes]
one_hot_mask = torch.arange(logits.size(1)).unsqueeze(
0).repeat(new_predictions.size(0), 1)
if torch.cuda.is_available():
one_hot_mask = one_hot_mask.cuda()
one_hot_mask = one_hot_mask == new_predictions.unsqueeze(1)
# [batch_size*max_steps, num_classes]
# new_logits, _ = self.model(new_word_ids, new_lengths)
# start = timer()
new_logits = self.split_forward(new_word_ids, new_lengths)
# end = timer()
# print('time = {}'.format(end - start))
# sys.stdout.flush()
# [batch_size*max_steps, num_classes]
all_probs = torch.softmax(new_logits, dim=-1)
# print('end')
# print(torch.cuda.max_memory_allocated()/1024/1024)
# print(torch.cuda.memory_allocated()/1024/1024)
# [batch_size, max_steps]
all_true_probs = torch.masked_select(
all_probs, one_hot_mask).view(cur_batch_size, -1)
# only words with a mask of 1 will be considered
# setting the prob of unqualified words to a large number
all_true_probs[~mask] = 100.0
if torch.cuda.is_available():
all_true_probs = all_true_probs.cuda()
# [batch_size, max_steps]
saliency = true_probs.unsqueeze(1) - all_true_probs
# select the word with the largest saliency
# [batch_size]
best_word_idx = torch.argmax(saliency, dim=1)
replace_order = torch.argsort(saliency, descending=True)
# check
check = (best_word_idx < lengths).sum().data.cpu().numpy()
# assert check == cur_batch_size
if order:
return best_word_idx, replace_order
else:
return best_word_idx
class GreedyAttack:
    """
    Select words greedily as an attack.

    At each step: score words by saliency, try every synonym of the best
    word, and keep the substitution that moves the sentence vector furthest
    along the decision-boundary normal found by DeepFool (or closest to the
    boundary, depending on config['metric']).
    """
    def __init__(self, config, word2id, id2word, vocab, wordid2synonyms):
        # config: dict; must provide 'max_loops', 'attack', 'metric',
        # 'abandon_stopwords', 'max_steps' (used below and in adv_attack).
        self.config = config
        self.stopwords = set(stopwords.words('english'))
        self.mode = None
        self.samples = None
        self.cosine_similarity = CosineSimilarity(dim=1, eps=1e-6)
        self.global_step = 0
        self.word2id = word2id
        self.id2word = id2word
        self.vocab = vocab
        # Maps a word id to the list of ids of its synonyms.
        self.wordid2synonyms = wordid2synonyms
        self.max_loops = self.config['max_loops']
        if self.config['attack'] == 'deepfool':
            # in fact, deepfool will finish far more quicker than this
            self.attack = DeepFool(config, num_classes=2, max_iters=20)
        else:
            # NOTE(review): self.attack stays undefined here; adv_attack would
            # fail later — confirm configs are validated upstream.
            print('Attack {} not recognized'.format(self.config['attack']))
        self.model = None
        self.word_saliency = WordSaliencyBatch(config, word2id)
    def select_word_batch(self, all_word_ids, cur_available, labels, lengths, finish_mask, stopwords_mask, mask,
                          previous_replaced_words=None):
        """
        select words in a batch fashion
        :param all_word_ids: [batch_size, max_steps]
        :param cur_available: [batch_size, max_steps] 1 = not yet replaced
        :param labels: [batch_size]
        :param lengths: [batch_size]
        :param finish_mask: [batch_size]
        :param stopwords_mask: [batch_size, max_steps]
        :param mask: [batch_size, max_steps]
        :param previous_replaced_words: a list of length batch_size (unused)
        :return: [batch_size, max_steps] word indices ordered by saliency
        """
        # currently, batched word_saliency is too mem consuming
        cur_batch_size = 1
        all_replace_orders = []
        # t = self.select_word(word_ids=all_word_ids, cur_available=cur_available,
        #                      label=labels, length=lengths)
        if self.config['abandon_stopwords']:
            # Exclude stopword positions from replacement.
            mask = torch.mul(mask, stopwords_mask)
        if torch.cuda.is_available():
            mask = mask.cuda()
            cur_available = cur_available.cuda()
        # Exclude positions that were already replaced in earlier loops.
        mask = torch.mul(mask, cur_available)
        mask = mask.bool()
        _, all_replace_orders = self.word_saliency.compute_saliency(model_=self.model, word_ids=all_word_ids,
                                                                    labels=labels, lengths=lengths, mask=mask,
                                                                    order=True)
        if torch.cuda.is_available():
            all_replace_orders = all_replace_orders.cuda()
        # [batch_size, max_steps]
        return all_replace_orders
    def construct_new_sample_batch2(self, word_ids, labels, lengths, word_indices, sample_ids, finish_mask):
        """
        Build, for each example, all candidate sentences obtained by swapping
        the selected word for each of its synonyms.
        :param word_ids: [batch_size, max_steps]
        :param labels: [batch_size]
        :param lengths: [batch_size]
        :param word_indices: [batch_size], the best word in each example to replace
        :param sample_ids: [batch_size]
        :param finish_mask: [batch_size]
        :return: (all_new_word_ids, all_new_lengths, all_new_labels,
                  n_new_samples) where n_new_samples[i] is the candidate count
                  for example i
        """
        # cur_batch_size = sample_ids.size(0)
        cur_batch_size = 1
        all_new_lengths = []
        all_new_labels = []
        all_new_word_ids = []
        n_new_samples = []
        for idx in range(cur_batch_size):
            new_word_ids, new_lengths, new_labels = self.construct_new_sample2(word_ids=word_ids[idx], label=labels,
                                                                               length=lengths,
                                                                               word_idx=word_indices[idx],
                                                                               sample_id=sample_ids[idx],
                                                                               finish_mask=finish_mask[idx])
            all_new_word_ids.append(new_word_ids)
            all_new_lengths.append(new_lengths)
            all_new_labels.append(new_labels)
            n_new_samples.append(new_labels.size(0))
        all_new_word_ids = torch.cat(all_new_word_ids)
        all_new_lengths = torch.cat(all_new_lengths)
        all_new_labels = torch.cat(all_new_labels)
        return all_new_word_ids, all_new_lengths, all_new_labels, n_new_samples
    def construct_new_sample2(self, word_ids, label, length, word_idx, sample_id, finish_mask):
        """
        Build one candidate sentence per synonym of the word at `word_idx`.
        :param word_ids: [max_steps]
        :param label: scalar label
        :param length: scalar true length
        :param word_idx: position of the word to replace
        :param sample_id: example id (used only for debugging)
        :param finish_mask: when truthy, the example is done — return it as-is
        :return: all_new_word_ids, [N, max_steps]
                 all_new_lengths, []
                 all_new_labels
        """
        label = torch.tensor([label])
        length = torch.tensor([length])
        all_new_lengths = []
        all_new_labels = []
        all_new_word_ids = []
        if finish_mask:
            # Finished example: return the single unchanged sentence.
            word_ids = word_ids.unsqueeze(0)
            length = length.unsqueeze(0)
            label = label.unsqueeze(0)
            return word_ids, length, label
        old_id = int(word_ids[word_idx].data.cpu().numpy())
        syn_word_ids = self.wordid2synonyms[old_id]
        # if sample_id == 33:
        #     for i in syn_word_ids:
        #         w = id2word[i]
        #         print(w)
        for i in range(len(syn_word_ids)):
            new_id = syn_word_ids[i]
            new_word_ids = deepcopy(word_ids)
            new_word_ids[word_idx] = new_id
            all_new_word_ids.append(new_word_ids)
            all_new_lengths.append(length)
            all_new_labels.append(label)
        all_new_word_ids = torch.stack(all_new_word_ids)
        all_new_lengths = torch.stack(all_new_lengths)
        all_new_labels = torch.stack(all_new_labels)
        # return all_new_word_ids, torch.Tensor([all_new_lengths]), torch.Tensor([all_new_labels])
        return all_new_word_ids, all_new_lengths, all_new_labels
    def adv_attack(self, word_ids, lengths, labels, sample_ids, model, samples, stopwords_mask, mask):
        """
        attack a batch of words
        :param word_ids: [batch_size, max_steps]
        :param lengths: [batch_size]
        :param labels: [batch_size]
        :param sample_ids: [batch_size]
        :param model: current model, deepcopy before use
        :param samples: unused here (kept for interface compatibility)
        :param stopwords_mask: [batch_size, max_steps]
        :param mask: [batch_size, max_steps]
        :return: see the tuple at the end of this method
        """
        # NOTE(review): duplicated docstring left over from an older
        # signature; kept as-is because it is a (dead) expression statement.
        """
        :param word_ids: [batch_size, max_length]
        :param lengths: [batch_size]
        :param labels: [batch_size]
        :param sample_ids: [batch_size]
        :param model: current model, deepcopy before use
        :param mode:
        :return:
        """
        # important, set model to eval mode
        self.model = deepcopy(model)
        # self.model.eval()
        # self.samples = samples
        word_ids = word_ids.unsqueeze(0)
        cur_batch_size = word_ids.size(0)  # 1
        # --- Phase 1: original forward pass -------------------------------
        # logits: [batch_size, num_classes]
        # sent_vecs: [batch_size, hidden_size]
        # logits, sent_vecs = model(word_ids, lengths)
        outputs = model(word_ids)
        logits = outputs.logits
        # Sentence vector = mean-pooled final hidden states.
        sent_vecs = torch.mean(outputs.hidden_states[-1], dim=1)
        # preds: [batch_size], original predictions before perturbing
        original_predictions = torch.argmax(logits, dim=-1)
        num_classes = logits.size(1)
        # [batch_size, num_classes]
        # select by original prediction
        one_hot_mask = torch.arange(num_classes).unsqueeze(
            0).repeat(cur_batch_size, 1)
        if torch.cuda.is_available():
            one_hot_mask = one_hot_mask.cuda()
        one_hot_mask = one_hot_mask == original_predictions.unsqueeze(1)
        original_probs = torch.nn.functional.softmax(logits, dim=-1)
        pred_probs = torch.masked_select(original_probs, one_hot_mask)
        intermediate_pred_probs = []
        intermediate_pred_probs.append(pred_probs)
        # # find the boundary point
        # self.model.zero_grad()
        # normals, pert_vecs, all_original_predictions = self.attack(vecs=sent_vecs, net_=model.hidden)
        # # [batch_size, hidden_size]
        # r_tot = pert_vecs - sent_vecs
        # --- Phase 2: initialize per-sample attack state -------------------
        cur_available = torch.ones(cur_batch_size, self.config['max_steps'])
        # [batch_size]
        finish_mask = torch.zeros(cur_batch_size).bool()
        cur_projections = torch.zeros(cur_batch_size)
        cur_predictions = deepcopy(original_predictions.data)
        # [batch_size, max_steps]
        cur_word_ids = deepcopy(word_ids)
        # [batch_size, hidden_size]
        cur_sent_vecs = deepcopy(sent_vecs.data)
        if torch.cuda.is_available():
            finish_mask = finish_mask.cuda()
            cur_predictions = cur_predictions.cuda()
            cur_projections = cur_projections.cuda()
            cur_word_ids = cur_word_ids.cuda()
            cur_available = cur_available.cuda()
            cur_sent_vecs = cur_sent_vecs.cuda()
        # print("original:", [self.text_data.id2word[idx.item()] for idx in word_ids[0]])
        intermediate_projections = []
        intermediate_normals = []
        intermediate_cosines = []
        intermediate_distances = []
        # [batch_size, iter_idx]
        intermediate_word_ids = []
        intermediate_update_masks = []
        # all_word_ids, cur_available, labels, lengths, finish_mask
        # [batch_size, max_steps]
        # all_replace_orders = self.select_word_batch(all_word_ids=word_ids, cur_available=cur_available,
        #                                             labels=labels, lengths=lengths, finish_mask=finish_mask,
        #                                             stopwords_mask=stopwords_mask, mask=mask)
        previous_replaced_words = []
        intermediate_word_ids.append(word_ids)
        # --- Phase 3: greedy substitution loop ----------------------------
        for iter_idx in range(self.max_loops):
            # print(f'Running the {iter_idx}th loop...')
            if finish_mask.sum() == cur_batch_size:
                break
            self.model.zero_grad()
            # for cur_samples, find boundary point
            # cur_normals: [batch_size, hidden_size]
            # cur_pert_vecs: [batch_size, hidden_size]
            # cur_original_predictions: [batch_size]
            cur_normals, cur_pert_vecs, cur_original_predictions = self.attack(
                vecs=cur_sent_vecs, net_=self.model)
            intermediate_normals.append(cur_normals)
            # [batch_size, hidden_size]
            cur_r_tot = cur_pert_vecs - cur_sent_vecs
            # print("cur r total:", cur_r_tot)
            # [batch_size], distances to decision boundary
            cur_r_tot_distance = self.norm_dim(cur_r_tot)
            intermediate_distances.append(cur_r_tot_distance)
            # words_to_replace: [batch_size]
            # cur_available: [batch_size, max_steps]
            # cur_available is updated in selected_word_batch
            all_replace_orders = self.select_word_batch(all_word_ids=cur_word_ids, cur_available=cur_available,
                                                        labels=labels, lengths=lengths, finish_mask=finish_mask,
                                                        stopwords_mask=stopwords_mask, mask=mask)
            words_to_replace = all_replace_orders[:, 0]
            # print("words:", words_to_replace)
            # Mark the chosen position as used so it is not picked again.
            words_to_replace_one_hot = torch.nn.functional.one_hot(
                words_to_replace, num_classes=word_ids.size(1))
            cur_available = torch.mul(
                cur_available, 1 - words_to_replace_one_hot)
            # all_new_samples have N samples inside
            # n_new_samples: [batch_size], number of new samples for each old sample
            # def construct_new_sample_batch(self, word_ids, labels, lengths, word_indices, sample_ids, finish_mask):
            # start = timer()
            all_new_word_ids, all_new_lengths, all_new_labels, n_new_samples = self.construct_new_sample_batch2(
                word_ids=cur_word_ids,
                labels=labels, lengths=lengths,
                word_indices=words_to_replace,
                sample_ids=sample_ids, finish_mask=finish_mask)
            assert all_new_word_ids.size(0) == all_new_labels.size(0)
            if torch.cuda.is_available():
                # [N, max_steps]
                all_new_word_ids = all_new_word_ids.cuda()
                # [N]
                # all_new_lengths = all_new_lengths.cuda()
                # all_new_labels = all_new_labels.cuda()
            # compute new sent_vecs
            # all_new_logits: [N, num_classes]
            # all_new_sent_vectors: [N, hidden_size]
            # all_new_logits, all_new_sent_vectors = model(all_new_word_ids, all_new_lengths)
            outputs = model(all_new_word_ids)
            all_new_logits = outputs.logits
            all_new_sent_vectors = torch.mean(outputs.hidden_states[-1], dim=1)
            # [N]
            all_new_predictions = torch.argmax(all_new_logits, dim=-1)
            # [N, num_classes]
            all_new_probs = torch.softmax(all_new_logits, dim=-1).data
            # get new r_tot
            # [N, hidden_size]
            repeats = torch.tensor(n_new_samples)
            if torch.cuda.is_available():
                repeats = repeats.cuda()
            all_cur_sent_vecs = torch.repeat_interleave(
                cur_sent_vecs, repeats=repeats, dim=0)
            all_cur_normals = torch.repeat_interleave(
                cur_normals, repeats=repeats, dim=0)
            all_new_r_tot = all_new_sent_vectors - all_cur_sent_vecs
            # [N]
            all_new_r_tot_length = self.norm_dim(all_new_r_tot)
            all_cosines = self.cosine_similarity(
                all_new_r_tot, all_cur_normals)
            # Projection of the candidate's displacement onto the boundary
            # normal: how far it moves toward the decision boundary.
            all_projections = torch.mul(all_new_r_tot_length, all_cosines)
            # TODO: instead of projections, use nearest point
            if self.config['metric'] != 'projection':
                # Alternative metric: the candidate's own distance to the
                # decision boundary (re-run DeepFool per candidate).
                all_cur_normals, all_cur_pert_vecs, all_cur_original_predictions = self.attack(
                    vecs=all_new_sent_vectors, net_=model)
                # [N, hidden_size]
                all_cur_r_tot = all_cur_pert_vecs - all_cur_sent_vecs
                # [N]
                all_cur_r_tot_distance = self.norm_dim(all_cur_r_tot)
                all_projections = all_cur_r_tot_distance
            # split all_projections to match individual examples
            # list of tensors, list length: [batch_size]
            all_projections_splited = torch.split(
                all_projections, split_size_or_sections=n_new_samples)
            all_new_predictions_splited = torch.split(
                all_new_predictions, split_size_or_sections=n_new_samples)
            all_new_lengths_splited = torch.split(
                all_new_lengths, split_size_or_sections=n_new_samples)
            all_new_labels_splited = torch.split(
                all_new_labels, split_size_or_sections=n_new_samples)
            all_cosines_splited = torch.split(
                all_cosines, split_size_or_sections=n_new_samples)
            # list length: [batch_size]
            # each item in the list is a tensor, which consists of several tensors of length max_steps
            all_new_word_ids_splited = torch.split(
                all_new_word_ids, split_size_or_sections=n_new_samples, dim=0)
            all_new_sent_vectors_splited = torch.split(
                all_new_sent_vectors, split_size_or_sections=n_new_samples, dim=0)
            all_new_probs_splited = torch.split(
                all_new_probs, split_size_or_sections=n_new_samples, dim=0)
            # for each tensor, pick the one with largest projection
            assert len(all_projections_splited) == cur_batch_size
            # [batch_size]
            selected_indices = []
            selected_projections = []
            selected_predictions = []
            selected_cosines = []
            # [batch_size, max_steps]
            selected_word_ids = []
            # [batch_size, hidden_size]
            selected_sent_vecs = []
            selected_new_probs = []
            for i in range(cur_batch_size):
                # Best candidate: max projection, or min boundary distance
                # when the metric is distance-based.
                selected_idx = torch.argmax(all_projections_splited[i])
                selected_projection = torch.max(all_projections_splited[i])
                if self.config['metric'] != 'projection':
                    selected_idx = torch.argmin(all_projections_splited[i])
                    selected_projection = torch.min(all_projections_splited[i])
                selected_prediction = all_new_predictions_splited[i][selected_idx]
                selected_cosine = all_cosines_splited[i][selected_idx]
                selected_word_ids_for_cur_sample = all_new_word_ids_splited[i][selected_idx]
                selected_sent_vec_for_cur_sample = all_new_sent_vectors_splited[i][selected_idx]
                selected_probs_for_cur_sample = all_new_probs_splited[i][selected_idx]
                selected_indices.append(selected_idx)
                selected_projections.append(selected_projection)
                selected_predictions.append(selected_prediction)
                selected_word_ids.append(selected_word_ids_for_cur_sample)
                selected_sent_vecs.append(selected_sent_vec_for_cur_sample)
                selected_cosines.append(selected_cosine)
                selected_new_probs.append(selected_probs_for_cur_sample)
            # [batch_size]
            selected_indices = torch.tensor(selected_indices)
            selected_projections = torch.tensor(selected_projections)
            selected_predictions = torch.tensor(selected_predictions)
            selected_cosines = torch.tensor(selected_cosines)
            # [batch_size, max_steps]
            selected_word_ids = torch.stack(selected_word_ids, 0)
            # [batch_size, hidden_size]
            selected_sent_vecs = torch.stack(selected_sent_vecs, 0)
            # [batch_size, num_classes]
            selected_new_probs = torch.stack(selected_new_probs, 0)
            # [batch_size]
            cur_pred_probs = torch.masked_select(
                selected_new_probs, one_hot_mask)
            intermediate_pred_probs.append(cur_pred_probs)
            if torch.cuda.is_available():
                selected_indices = selected_indices.cuda()
                selected_projections = selected_projections.cuda()
                selected_predictions = selected_predictions.cuda()
                selected_word_ids = selected_word_ids.cuda()
                selected_sent_vecs = selected_sent_vecs.cuda()
            # update cur_projections, cur_predictions, and cur_word_ids by ~finish_mask
            # all unfinished samples need to be updated
            # [batch_size]
            cur_update_mask = ~finish_mask
            cur_update_mask = torch.mul(
                cur_update_mask, selected_projections > 0)
            # torch.where(condition, x, y) → Tensor
            # x if condition else y
            # [batch_size]
            cur_projections = torch.where(
                cur_update_mask, selected_projections, cur_projections)
            cur_predictions = torch.where(
                cur_update_mask, selected_predictions, cur_predictions)
            # [batch_size, max_steps]
            cur_word_ids = torch.where(
                cur_update_mask.view(-1, 1), selected_word_ids, cur_word_ids)
            intermediate_word_ids.append(cur_word_ids)
            # [batch_size, hidden_size]
            cur_sent_vecs = torch.where(
                cur_update_mask.view(-1, 1), selected_sent_vecs, cur_sent_vecs)
            cur_sent_vecs.detach_()
            cur_word_ids.detach_()
            cur_projections.detach_()
            cur_predictions.detach_()
            intermediate_projections.append(cur_projections.data)
            intermediate_cosines.append(selected_cosines.data)
            # if torch.cuda.is_available():
            #     print(torch.cuda.max_memory_allocated())
            #     print(torch.cuda.memory_allocated())
            # sys.stdout.flush()
            # finish if we successfully fool the model
            # [batch_size]
            cur_finish_mask = (selected_predictions != original_predictions)
            intermediate_update_masks.append(cur_finish_mask)
            # Accumulate: once finished, a sample stays finished.
            finish_mask += cur_finish_mask
            finish_mask = finish_mask.bool()
        # --- Phase 4: final boundary measurement and bookkeeping -----------
        # for the last sent_vec, calculate its distance to decision boundary
        final_normals, final_pert_vecs, final_original_predictions = self.attack(
            vecs=cur_sent_vecs, net_=model)
        intermediate_normals.append(final_normals)
        # [batch_size, hidden_size]
        final_r_tot = final_pert_vecs - cur_sent_vecs
        # [batch_size], distances to decision boundary
        final_r_tot_distance = self.norm_dim(final_r_tot)
        intermediate_distances.append(final_r_tot_distance)
        # [batch_size, hidden_size]
        # Reassigned: total displacement from the ORIGINAL sentence vector.
        final_r_tot = cur_sent_vecs - sent_vecs
        # [batch_size, max_steps]
        final_word_ids = deepcopy(cur_word_ids)
        # [batch_size]
        final_predictions = deepcopy(cur_predictions)
        # [batch_size, n_loops]
        intermediate_cosines = torch.stack(
            intermediate_cosines).transpose(0, 1)
        intermediate_projections = torch.stack(
            intermediate_projections).transpose(0, 1)
        # [batch_size, n_loops+1]
        intermediate_distances = torch.stack(
            intermediate_distances).transpose(0, 1)
        intermediate_pred_probs = torch.stack(
            intermediate_pred_probs).transpose(0, 1)
        # [batch_size, loops+1, max_steps]
        intermediate_word_ids = torch.stack(
            intermediate_word_ids).transpose(0, 1)
        intermediate_normals = torch.stack(
            intermediate_normals).transpose(0, 1)
        if torch.cuda.is_available():
            final_r_tot = final_r_tot.cuda()
            final_word_ids = final_word_ids.cuda()
            final_predictions = final_predictions.cuda()
            intermediate_normals = intermediate_normals.cuda()
            intermediate_cosines = intermediate_cosines.cuda()
            intermediate_projections = intermediate_projections.cuda()
            intermediate_distances = intermediate_distances.cuda()
        return final_r_tot, final_word_ids, final_predictions, intermediate_normals, \
            intermediate_cosines, intermediate_distances, original_predictions, intermediate_word_ids, intermediate_pred_probs
    @staticmethod
    def norm_dim(w):
        """Per-row L2 norm: [batch_size, d] -> [batch_size]."""
        norms = []
        for idx in range(w.size(0)):
            norms.append(w[idx].norm())
        norms = torch.stack(tuple(norms), dim=0)
        return norms
class GEOAttacker(ClassificationAttacker):
    @property
    def TAGS(self):
        # Capabilities this attacker requires from the victim classifier,
        # plus the attacker's language tag (`__lang_tag` is name-mangled
        # to `_GEOAttacker__lang_tag` inside this class).
        return {self.__lang_tag, Tag("get_pred", "victim"), Tag("get_prob", "victim")}
    def __init__(self,
                 tokenizer: Optional[Tokenizer] = None,
                 substitute: Optional[WordSubstitute] = None,
                 lang=None,
                 **kwargs):
        """
        Geometry-guided greedy word-substitution attacker.

        :param Tokenizer tokenizer: tokenizer used to split and detokenize
            text; defaults to the language's default tokenizer.
        :param WordSubstitute substitute: substitute method used in this
            attacker; defaults to the language's default substitute.
        :param lang: language name; used only when neither tokenizer nor
            substitute determines the language.
        :param kwargs: overrides for DEFAULT_CONFIG (e.g. max_length,
            max_steps, max_loops, token_unk, token_pad, vocab_size, data,
            embedding_size).
        :Classifier Capacity: Probability

        NOTE(review): the original docstring here cited PWWS (Ren et al.,
        ACL 2019, https://www.aclweb.org/anthology/P19-1103.pdf) — likely a
        copy-paste from another attacker; confirm which paper this class
        actually implements.
        """
        # Start from the package defaults and apply caller overrides.
        self.config = DEFAULT_CONFIG.copy()
        self.config.update(kwargs)
        '''
        if self.config["substitute"] is None:
            self.config["substitute"] = WordNetSubstitute()
        '''
        # check_parameters(self.config.keys(), DEFAULT_CONFIG)
        # self.processor = self.config["processor"]
        # Infer the language tag from whichever components were provided.
        lst = []
        if tokenizer is not None:
            lst.append(tokenizer)
        if substitute is not None:
            lst.append(substitute)
        if len(lst) > 0:
            self.__lang_tag = get_language(lst)
        else:
            self.__lang_tag = language_by_name(lang)
            if self.__lang_tag is None:
                raise ValueError("Unknown language `%s`" % lang)
        if substitute is None:
            substitute = get_default_substitute(self.__lang_tag)
        self.substitute = substitute
        if tokenizer is None:
            tokenizer = get_default_tokenizer(self.__lang_tag)
        self.tokenizer = tokenizer
        # self.substitute = self.config["substitute"]
        self.max_length = self.config["max_length"]
        self.max_steps = self.config["max_steps"]
        self.max_loops = self.config["max_loops"]
        self.UNK_WORD = self.config["token_unk"]
        self.PAD_WORD = self.config["token_pad"]
        self.vocab_size = self.config['vocab_size']
        # Vocabulary and synonym tables built from the configured corpus.
        self.word2id, self.id2word = self.build_vocab(self.config['data'])
        self.vocab = self.get_vocab()
        self.word2synonyms, self.wordid2synonyms = self.construct_synonyms()
        self.embedding_size = self.config['embedding_size']
        # [vocab_size, embed_dim]
        # self.pre_trained_embedding = self.create_embedding()
        self.pre_trained_embedding = None
        self.greedy_attack = GreedyAttack(
            self.config, word2id=self.word2id, id2word=self.id2word, wordid2synonyms=self.wordid2synonyms,
            vocab=self.vocab)
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
def preprocess(self, x_batch):
ls_of_words = [list(map(
lambda x: x[0], self.tokenizer.tokenize(sent))) for sent in x_batch]
words = ls_of_words[0]
seq_len = list(map(lambda x: len(x), x_batch))
max_len = max(seq_len)
if self.config["max_len"] is not None:
max_len = min(max_len, self.config["max_len"])
words = words[:max_len]
length = len(words)
word_ids = []
for word in words:
if word in self.word2id.keys():
id_ = self.word2id[word]
else:
id_ = self.word2id[self.config['token_unk']]
word_ids.append(id_)
while len(word_ids) < max_len:
word_ids.append(self.word2id[self.config['token_pad']])
while len(words) < max_len:
words.append(self.config['token_pad'])
return torch.tensor(word_ids), max_len
def attack(self, clsf: Classifier, x_orig: str, goal: ClassifierGoal):
# torch.cuda.empty_cache()
x_orig = x_orig.lower()
if goal.target is None:
targeted = False
target = clsf.get_pred([x_orig])[0] # calc x_orig's prediction
else:
targeted = True
words = list(
map(lambda x: x[0], self.tokenizer.tokenize(x_orig)))
# words = self.config["processor"].get_tokens(x_orig)
words = words[:self.max_length]
length = len(words)
word_ids = []
for word in words:
if word in self.word2id.keys():
id_ = self.word2id[word]
else:
id_ = self.word2id[self.UNK_WORD]
word_ids.append(id_)
while len(word_ids) < self.max_length:
word_ids.append(self.word2id[self.PAD_WORD])
while len(words) < self.max_length:
words.append(self.PAD_WORD)
word_ids = [self.word2id[word] for word in words]
# logits, _ = self.model(torch.tensor(word_ids), length)
# target = torch.argmax(logits, -1)
# word_ids, length = clsf.preprocess([x_orig])
# logits, _ = clsf(torch.tensor(word_ids), length)
stopwords_mask = []
sample = Sample(data=word_ids, words=words,
steps=self.max_length, label=goal.target, length=length, id=-1)
self.stopwords = set(stopwords.words('english'))
self.create_mask(sample, self.stopwords)
final_word_ids, final_pred = self.inner_attack(clsf.model, sample)
final_words = [self.id2word[word_id.item()]
for word_id in final_word_ids[0]]
final_sent = self.tokenizer.detokenize(final_words[:length])
final_pred_clsf = clsf.get_pred([final_sent])[0]
if final_pred_clsf == goal.target:
# return final_sent, final_pred_clsf
return None
else:
return final_sent
def create_mask(self, sample, stopwords):
mask = []
stopwords_mask = []
for idx, word in enumerate(sample.sentence):
if idx >= sample.length:
mask.append(0)
stopwords_mask.append(0)
continue
# word = word[0]
if word in string.punctuation or word not in self.word2id.keys() or word == self.PAD_WORD \
or word == self.UNK_WORD or word not in self.vocab:
mask.append(0)
elif len(self.word2synonyms[word]) <= 1:
mask.append(0)
else:
mask.append(1)
if word.lower() in stopwords:
stopwords_mask.append(0)
else:
stopwords_mask.append(1)
sample.set_mask(mask=mask, stopwords_mask=stopwords_mask)
def get_vocab(self):
vocab = []
for word, idx in self.word2id.items():
word_ = self.id2word[idx]
if word_ != word:
print()
if word == word_ and not (word == self.PAD_WORD or word == self.UNK_WORD):
vocab.append(word)
else:
continue
return vocab
def construct_synonyms(self):
"""
for each word in the vocab, find its synonyms
build a dictionary, where key is word, value is its synonyms
:return:
"""
word2synonyms, wordid2synonyms = {}, {}
for word_id in range(len(self.id2word)):
word = self.id2word[word_id]
# print("inside construct synonyms:", word)
if word == self.PAD_WORD or word == self.UNK_WORD:
word2synonyms[word] = [word]
wordid2synonyms[word_id] = [word_id]
continue
synonyms = []
synonyms_id = []
for syn in wordnet.synsets(word):
for l in syn.lemmas():
w = l.name()
if w not in self.word2id.keys():
continue
w_id = self.word2id[w]
# if synonym is PAD or UNK, continue
if w_id == self.word2id[self.PAD_WORD] or w_id == self.word2id[self.UNK_WORD]:
continue
synonyms.append(w)
synonyms_id.append(w_id)
# put original word in synonyms
synonyms.append(word)
synonyms_id.append(word_id)
synonyms = list(set(synonyms))
synonyms_id = list(set(synonyms_id))
word2synonyms[word] = synonyms
wordid2synonyms[word_id] = synonyms_id
return word2synonyms, wordid2synonyms
def build_vocab(self, data):
all_words = []
for elem in data:
# all_words += self.config["processor"].get_tokens(elem['x'])
all_words += self.tokenizer.tokenize(elem['x'])
counter = Counter([word[0].lower() for word in all_words])
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
# keep the most frequent vocabSize words, including the special tokens
# -1 means we have no limits on the number of words
if self.vocab_size != -1:
count_pairs = count_pairs[0:self.vocab_size - 2]
count_pairs.append((self.UNK_WORD, 100000))
count_pairs.append((self.PAD_WORD, 100000))
self.vocab_size = min(self.vocab_size, len(count_pairs))
if self.vocab_size != -1:
assert len(count_pairs) == self.vocab_size
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
id_to_word = {v: k for k, v in word_to_id.items()}
return word_to_id, id_to_word
# def load_model(self, model_path):
# if torch.cuda.is_available():
# model_state_dict, optimizer_state_dict, _, _, _ = torch.load(model_path)
# else:
# model_state_dict, optimizer_state_dict, _, _, _ = torch.load(model_path, map_location='cpu')
# self.model.load_state_dict(model_state_dict)
    def inner_attack(self, model, sample):
        """Run the greedy adversarial search on a single prepared ``sample``.

        Truncates the sample's ids and masks to ``self.max_steps``, moves the
        tensors to GPU when available, and delegates to
        ``self.greedy_attack.adv_attack``.

        Args:
            model: the victim's underlying torch model (moved to CUDA here
                when available).
            sample: a Sample carrying ``word_ids``, ``mask``,
                ``stopwords_mask``, ``label``, ``length`` and ``id``.

        Returns:
            (final_word_ids, perturbed_predictions) from the greedy attack.
        """
        # model.eval()
        if torch.cuda.is_available():
            model.cuda()
        # results = {'original_acc': 0.0, 'acc_perturbed': 0.0, 'change_rate': 0.0, 'n_samples': 0,
        #            'original_corrects': 0, 'perturbed_corrects:': 0, 'n_changed': 0, 'n_perturbed': 0}
        # all_replace_rate = []
        # all_n_change_words = []
        # Cap the effective length at the step budget.
        if sample.length > self.max_steps:
            length = self.max_steps
        else:
            length = sample.length
        word_ids = sample.word_ids[:self.max_steps]
        stopwords_mask = sample.stopwords_mask[:self.max_steps]
        mask = sample.mask[:self.max_steps]
        sample_ids, word_ids, lengths, labels, stopwords_mask, mask = sample.id, torch.tensor(
            word_ids), length, sample.label, torch.tensor(stopwords_mask), torch.tensor(mask)
        # cur_batch_size = lengths.size(0)
        if torch.cuda.is_available():
            word_ids = word_ids.cuda()
            # lengths = lengths.cuda()
            # labels = labels.cuda()
            stopwords_mask = stopwords_mask.cuda()
            # sample_ids = sample_ids.cuda()
            mask = mask.cuda()
        # [batch_size]
        # perturbed_samples: list of samples
        # perturbed_loops: list of ints
        # perturbed_predictions: tensor
        # original_predictions: tensor
        # perturbed_projections: tensor
        sample_ids = torch.Tensor([sample_ids])
        # Only the final ids and predictions are consumed by the caller; the
        # intermediate diagnostics returned by adv_attack are discarded.
        final_r_tot, final_word_ids, perturbed_predictions, intermediate_normals, intermediate_cosines, \
            intermediate_distances, original_predictions, intermediate_word_ids, intermediate_pred_probs = \
            self.greedy_attack.adv_attack(word_ids=word_ids, lengths=lengths, labels=labels,
                                          sample_ids=sample_ids, model=model, samples=sample,
                                          stopwords_mask=stopwords_mask, mask=mask)
        return final_word_ids, perturbed_predictions
| 53,860 | 40.178135 | 156 | py |
OpenAttack | OpenAttack-master/OpenAttack/attackers/gan/__init__.py | import numpy as np
from copy import deepcopy
from ..classification import ClassificationAttacker, Classifier, ClassifierGoal
from ...data_manager import DataManager
from ...tags import TAG_English, Tag
import torch
def get_min(indices_adv1, d):
    """Return the candidate index whose distance in ``d`` is smallest.

    Args:
        indices_adv1: indices of successful adversarial candidates
            (list or array of int).
        d: per-candidate distances, indexable by ``indices_adv1``
            (numpy array).

    Returns:
        The element of ``indices_adv1`` minimizing ``d``.
    """
    # Fix: the original deep-copied `d` before reading it, but `d` is never
    # mutated here, so the copy was pure overhead and has been dropped.
    return indices_adv1[np.argmin(np.asarray(d)[indices_adv1])]
# Default options for GANAttacker ("sst" selects the GAN trained on SST).
# NOTE(review): appears unused in this module — the constructor takes
# `gan_dataset` directly; confirm before relying on it.
DEFAULT_CONFIG = {
    "sst": False,
}
class GANAttacker(ClassificationAttacker):
    """Searches the latent space of a pretrained GAN for natural
    adversarial examples (Zhao et al., ICLR 2018)."""

    @property
    def TAGS(self):
        # Language tag plus the only victim capability required: get_pred.
        return { self.__lang_tag, Tag("get_pred", "victim") }

    def __init__(self, gan_dataset : str = "sst"):
        """
        Generating Natural Adversarial Examples. Zhengli Zhao, Dheeru Dua, Sameer Singh. ICLR 2018.
        `[pdf] <https://arxiv.org/pdf/1710.11342.pdf>`__
        `[code] <https://github.com/zhengliz/natural-adversary>`__

        Args:
            gan_dataset: The name of dataset which GAN model is trained on. Must be one of the following: ``["sst", "snli"]``. **Default:** sst

        :Language: english
        :Classifier Capacity:
            * get_pred
        """
        self.__lang_tag = TAG_English
        self.gan_dataset = gan_dataset
        # Load the pretrained GAN components (vocabulary, autoencoder,
        # inverter, generator, discriminator) for the chosen dataset.
        if gan_dataset == "snli": # snli
            self.word2idx, self.autoencoder, self.inverter, self.gan_gen, self.gan_disc = DataManager.load("AttackAssist.GAN")
            self.maxlen = 10
        elif gan_dataset == "sst":
            self.word2idx, self.autoencoder, self.inverter, self.gan_gen, self.gan_disc = DataManager.load("AttackAssist.SGAN")
            self.maxlen = 100
        else:
            raise ValueError("Unknown dataset `%s`" % self.gan_dataset)
        self.idx2word = {v: k for k, v in self.word2idx.items()}
        ## TODO: support GPU gan
        self.gan_gen = self.gan_gen.cpu()
        self.inverter = self.inverter.cpu()
        self.autoencoder.eval()
        self.autoencoder = self.autoencoder.cpu()
        # Initial perturbation radius in latent space; doubled on each
        # failed search loop in snli_call.
        self.right = 0.05 # ####
        # Number of latent samples drawn per search iteration.
        self.nsamples = 10
        self.autoencoder.gpu = False
        self.lowercase = True

    def attack(self, victim: Classifier, sentence, goal: ClassifierGoal):
        """Dispatch to the dataset-specific attack routine."""
        if self.gan_dataset == "snli":
            return self.snli_call(victim, sentence, goal)
        elif self.gan_dataset == "sst":
            return self.sst_call(victim, sentence, goal)
        else:
            raise ValueError("Unknown dataset `%s`" % self.gan_dataset)

    def snli_call(self, clsf : Classifier, hypothesis_orig, goal : ClassifierGoal):
        """SNLI attack: invert the hypothesis into latent space, then sample
        perturbations of growing radius until one satisfies ``goal``.

        Returns (sentence, prediction) on success, None after 6 failed
        radius-doubling rounds.
        NOTE(review): the success return is a tuple while sst_call returns a
        bare sentence — confirm callers handle both shapes.
        """
        # * **clsf** : **Classifier** .
        # * **x_orig** : Input sentence.
        # 'entailment': 0, 'neutral': 1, 'contradiction': 2
        # tokenization
        if self.lowercase:
            hypothesis_orig = hypothesis_orig.strip().lower()
        hypothesis_words = hypothesis_orig.strip().split(" ")
        hypothesis_words = ['<sos>'] + hypothesis_words
        hypothesis_words += ['<eos>']
        vocab = self.word2idx
        unk_idx = vocab['<oov>']
        # Map to ids with <oov> fallback, then pad/truncate to maxlen.
        hypothesis_indices = [vocab[w] if w in vocab else unk_idx for w in hypothesis_words]
        hypothesis_words = [w if w in vocab else '<oov>' for w in hypothesis_words]
        length = min(len(hypothesis_words), self.maxlen)
        if len(hypothesis_indices) < self.maxlen:
            hypothesis_indices += [0] * (self.maxlen - len(hypothesis_indices))
            hypothesis_words += ["<pad>"] * (self.maxlen - len(hypothesis_words))
        hypothesis = hypothesis_indices[:self.maxlen]
        hypothesis_words = hypothesis_words[:self.maxlen]
        # Encode to latent code c, then invert to the generator's z space.
        c = self.autoencoder.encode(torch.LongTensor([hypothesis, hypothesis]),
                                    torch.LongTensor([length, length]), noise=False)
        z = self.inverter(c).data.cpu()
        hypothesis = torch.LongTensor(hypothesis)
        hypothesis = hypothesis.unsqueeze(0)
        right_curr = self.right
        counter = 0
        # Up to 6 rounds; the search radius doubles after each failure.
        while counter <= 5:
            mus = z.repeat(self.nsamples, 1)
            delta = torch.FloatTensor(mus.size()).uniform_(-1 * right_curr, right_curr)
            # Euclidean norm of each perturbation, used to pick the closest hit.
            dist = np.array([np.sqrt(np.sum(x ** 2)) for x in delta.cpu().numpy()])
            perturb_z = mus + delta # #### volatile=True
            x_tilde = self.gan_gen(perturb_z) # perturb
            adv_prob = []
            index_adv = []
            sentences = []
            # Decode each perturbed latent sample back into a sentence and
            # query the victim for its prediction.
            for i in range(self.nsamples):
                x_adv = x_tilde[i]
                sample_idx = self.autoencoder.generate(x_adv, 10, True).data.cpu().numpy()[0]
                words = [self.idx2word[x] for x in sample_idx]
                if "<eos>" in words:
                    words = words[:words.index("<eos>")]
                adv_prob.append(clsf.get_pred([ " ".join(words) ])[0])
                sentences.append(" ".join(words))
            for i in range(self.nsamples):
                if goal.check(sentences[i], int(adv_prob[i])):
                    index_adv.append(i)
            if len(index_adv) == 0:
                counter += 1
                right_curr *= 2
            else:
                # Among all successful candidates, keep the one with the
                # smallest latent-space perturbation.
                idx_adv = get_min(index_adv, dist)
                return sentences[idx_adv], clsf.get_pred([sentences[idx_adv]])[0]
        return None

    def sst_call(self, clsf : Classifier, hypothesis_orig, target : ClassifierGoal):
        """SST attack: pass the encoded sentence through the noisy
        autoencoder once and test each decoded candidate against ``target``.

        Returns the first candidate sentence satisfying the goal, else None.
        """
        if self.lowercase:
            hypothesis_orig = hypothesis_orig.strip().lower()
        hypothesis_words = hypothesis_orig.strip().split(" ")
        hypothesis_words = ['<sos>'] + hypothesis_words
        hypothesis_words += ['<eos>']
        vocab = self.word2idx
        unk_idx = vocab['<oov>']
        hypothesis_indices = [vocab[w] if w in vocab else unk_idx for w in hypothesis_words]
        hypothesis_words = [w if w in vocab else '<oov>' for w in hypothesis_words]
        length = min(len(hypothesis_words), self.maxlen)
        if len(hypothesis_indices) < self.maxlen:
            hypothesis_indices += [0] * (self.maxlen - len(hypothesis_indices))
            hypothesis_words += ["<pad>"] * (self.maxlen - len(hypothesis_words))
        hypothesis = hypothesis_indices[:self.maxlen]
        hypothesis_words = hypothesis_words[:self.maxlen]
        # Drop the final token, then re-pad to maxlen for the autoencoder.
        source_orig = hypothesis[:-1]
        if len(source_orig) > self.maxlen:
            source_orig = source_orig[:self.maxlen]
        zeros = (self.maxlen - len(source_orig)) * [0]
        source_orig += zeros
        ## TODO Something maybe wrong here
        output = self.autoencoder(torch.LongTensor([source_orig]),
                                  torch.LongTensor([length]),
                                  noise=True)
        # Greedy decode: argmax over the vocabulary at every position.
        _, max_indices = torch.max(output, 2)
        max_indices = max_indices.view(output.size(0), -1).data.cpu().numpy()
        for idx in max_indices:
            words = [self.idx2word[x] for x in idx]
            # Trim at the first <eos> or '.', and blank out <oov> tokens.
            if "<eos>" in words:
                words = words[:words.index("<eos>")]
            if "." in words:
                words = words[:words.index(".")]
            for i in range(len(words)):
                if words[i] == "<oov>":
                    words[i] = ""
            sent = " ".join(words)
            pred = clsf.get_pred([sent])[0]
            if target.check(sent, pred):
                return sent
        return None
| 7,259 | 37.823529 | 143 | py |
OpenAttack | OpenAttack-master/OpenAttack/attackers/scpn/models.py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
import numpy as np
class ParseNet(nn.Module):
    """Seq2seq parse generator with attention and a copy mechanism.

    Encodes an input parse sequence and a "trimmed" template sequence with
    LSTMs, then beam-decodes a full parse, mixing a vocabulary softmax with
    a copy distribution over the input tokens.
    """

    def __init__(self, d_nt, d_hid, len_voc):
        # d_nt: nonterminal embedding size; d_hid: LSTM hidden size;
        # len_voc: size of the parse-tag vocabulary.
        super(ParseNet, self).__init__()
        self.d_nt = d_nt
        self.d_hid = d_hid
        self.len_voc = len_voc
        self.trans_embs = nn.Embedding(len_voc, d_nt)
        self.encoder = nn.LSTM(d_nt, d_hid, num_layers=1, batch_first=True)
        self.decoder = nn.LSTM(d_nt + d_hid, d_hid, num_layers=1, batch_first=True)
        self.out_dense_1 = nn.Linear(d_hid * 2, d_hid)
        self.out_dense_2 = nn.Linear(d_hid, len_voc)
        self.out_nonlin = nn.LogSoftmax(dim=1)
        # Bilinear attention weights and copy-gate parameter vectors.
        self.att_W = nn.Parameter(torch.Tensor(d_hid, d_hid))
        self.att_parse_W = nn.Parameter(torch.Tensor(d_hid, d_hid))
        self.copy_hid_v = nn.Parameter(torch.Tensor(d_hid, 1))
        self.copy_att_v = nn.Parameter(torch.Tensor(d_hid, 1))
        self.copy_inp_v = nn.Parameter(torch.Tensor(d_nt + d_hid, 1))

    def compute_mask(self, lengths):
        """Build a (batch, max_len) float mask: 1 inside each sequence, 0 in padding."""
        device = lengths.device
        max_len = torch.max(lengths)
        range_row = torch.arange(0, max_len, device=device).unsqueeze(0).expand(lengths.size(0), max_len)
        mask = lengths.unsqueeze(1).expand_as(range_row)
        mask = (range_row < mask).float()
        return mask

    def masked_softmax(self, vector, mask):
        """Softmax over dim 1 with padded positions zeroed and re-normalized."""
        result = nn.functional.softmax(vector, dim=1)
        result = result * mask
        result = result / (result.sum(dim=1, keepdim=True) + 1e-13) # avoid dividing zero
        return result

    # compute masked attention over enc hiddens with bilinear product
    def compute_decoder_attention(self, hid_previous, enc_hids, in_lens):
        mask = self.compute_mask(in_lens)
        b_hn = hid_previous[0].mm(self.att_W)
        scores = b_hn.unsqueeze(1) * enc_hids
        scores = torch.sum(scores, dim=2)
        scores = self.masked_softmax(scores, mask)
        return scores

    # compute masked attention over parse sequence with bilinear product
    def compute_transformation_attention(self, hid_previous, trans_embs, trans_lens):
        mask = self.compute_mask(trans_lens)
        b_hn = hid_previous[0].mm(self.att_parse_W)
        scores = b_hn.unsqueeze(1) * trans_embs
        scores = torch.sum(scores, dim=2)
        scores = self.masked_softmax(scores, mask)
        return scores

    # return encoding for an input batch
    def encode_batch(self, inputs, lengths):
        """Run the encoder LSTM over a padded batch.

        Sorts by length for packing, then restores the original order.
        Returns (all hidden states, last hidden state per sequence).
        """
        device = inputs.device
        bsz, max_len = inputs.size()
        in_embs = self.trans_embs(inputs)
        lens, indices = torch.sort(lengths, 0, True)
        e_hid_init = torch.zeros(1, bsz, self.d_hid, device=device)
        e_cell_init = torch.zeros(1, bsz, self.d_hid, device=device)
        all_hids, (enc_last_hid, _) = self.encoder(pack(in_embs[indices], lens.tolist(), batch_first=True), (e_hid_init, e_cell_init))
        _, _indices = torch.sort(indices, 0)
        all_hids = unpack(all_hids, batch_first=True)[0]
        return all_hids[_indices], enc_last_hid.squeeze(0)[_indices]

    # decode one timestep
    def decode_step(self, idx, prev_words, prev_hid, prev_cell,
                    enc_hids, trans_embs, in_sent_lens, trans_lens, bsz, max_len):
        """Advance the decoder one step; returns new state, attention, and copy gate."""
        device = self.trans_embs.parameters().__next__().device
        # initialize with zeros
        if idx == 0:
            word_input = torch.zeros(bsz, 1, self.d_nt, device=device)
        else:
            word_input = self.trans_embs(prev_words)
            word_input = word_input.view(bsz, 1, self.d_nt)
        # concatenate w/ transformation embeddings
        trans_weights = self.compute_transformation_attention(prev_hid, trans_embs, trans_lens)
        trans_ctx = torch.sum(trans_weights.unsqueeze(2) * trans_embs, dim=1)
        decoder_input = torch.cat([word_input, trans_ctx.unsqueeze(1)], dim=2)
        # feed to decoder lstm
        _, (hn, cn) = self.decoder(decoder_input, (prev_hid, prev_cell))
        # compute attention for next time step and att weighted ave of encoder hiddens
        attn_weights = self.compute_decoder_attention(hn, enc_hids, in_sent_lens)
        attn_ctx = torch.sum(attn_weights.unsqueeze(2) * enc_hids, dim=1)
        # compute copy prob as function of lotsa shit
        p_copy = decoder_input.squeeze(1).mm(self.copy_inp_v)
        p_copy += attn_ctx.mm(self.copy_att_v)
        p_copy += hn.squeeze(0).mm(self.copy_hid_v)
        p_copy = torch.sigmoid(p_copy).squeeze(1)
        return hn, cn, attn_weights, attn_ctx, p_copy

    def forward(self):
        # NOTE(review): `raise NotImplemented` raises a TypeError at runtime
        # (NotImplemented is not an exception) — should be NotImplementedError.
        raise NotImplemented

    # beam search given a single input parse and a batch of template transformations
    def batch_beam_search(self, inputs, out_trimmed, in_trans_lens,
                          out_trimmed_lens, eos_idx, beam_size=5, max_steps=250):
        """Beam-decode parses for each template; returns {batch_idx: beams},
        each beam a (log-prob, hidden, cell, token-id sequence) tuple."""
        device = inputs.device
        bsz, max_len = inputs.size()
        # chop input
        inputs = inputs[:, :in_trans_lens[0]]
        # encode inputs and trimmed outputs
        enc_hids, enc_last_hid = self.encode_batch(inputs, in_trans_lens)
        trim_hids, trim_last_hid = self.encode_batch(out_trimmed, out_trimmed_lens)
        # initialize decoder hidden to last encoder hidden
        hn = enc_last_hid.unsqueeze(0)
        cn = torch.zeros(1, 1, self.d_hid, device=device)
        # initialize beams (dictionary of batch_idx: beam params)
        beam_dict = {}
        for b_idx in range(trim_hids.size(0)):
            beam_dict[b_idx] = [(0.0, hn, cn, [])]
        nsteps = 0
        # loop til max_decode, do lstm tick using previous prediction
        while True:
            # set up accumulators for predictions
            # assumption: all examples have same number of beams at each timestep
            prev_words = []
            prev_hs = []
            prev_cs = []
            for b_idx in beam_dict:
                beams = beam_dict[b_idx]
                # loop over everything in the beam
                beam_candidates = []
                for b in beams:
                    curr_prob, prev_h, prev_c, seq = b
                    # start with last word in sequence, if eos end the beam
                    if len(seq) > 0:
                        prev_words.append(seq[-1])
                    else:
                        prev_words = None
                    prev_hs.append(prev_h)
                    prev_cs.append(prev_c)
            # now batch decoder computations
            hs = torch.cat(prev_hs, dim=1)
            cs = torch.cat(prev_cs, dim=1)
            num_examples = hs.size(1)
            if prev_words is not None:
                prev_words = torch.LongTensor(prev_words).to(device)
            # Tile the template encodings so every beam sees its template.
            if num_examples != trim_hids.size(0):
                d1, d2, d3 = trim_hids.size()
                rep_factor = num_examples // d1
                curr_out = trim_hids.unsqueeze(1).expand(d1, rep_factor, d2, d3).contiguous().view(-1, d2, d3)
                curr_out_lens = out_trimmed_lens.unsqueeze(1).expand(d1, rep_factor).contiguous().view(-1)
            else:
                curr_out = trim_hids
                curr_out_lens = out_trimmed_lens
            # expand out inputs and encoder hiddens
            _, in_len, hid_d = enc_hids.size()
            curr_enc_hids = enc_hids.expand(num_examples, in_len, hid_d)
            curr_enc_lens = in_trans_lens.expand(num_examples)
            curr_inputs = inputs.expand(num_examples, in_trans_lens[0])
            # concat prev word emb and prev attn input and feed to decoder lstm
            hn, cn, attn_weights, attn_ctx, p_copy = self.decode_step(nsteps, prev_words, hs, cs, curr_enc_hids, curr_out, curr_enc_lens, curr_out_lens, num_examples, max_len)
            # compute copy attn by scattering attn into vocab space
            vocab_scores = torch.zeros(num_examples, self.len_voc, device=device)
            vocab_scores = vocab_scores.scatter_add_(1, curr_inputs, attn_weights)
            vocab_scores = torch.log(vocab_scores + 1e-20).squeeze()
            # compute prediction over vocab for a single time step
            pred_inp = torch.cat([hn.squeeze(0), attn_ctx], dim=1)
            preds = self.out_dense_1(pred_inp)
            preds = self.out_dense_2(preds)
            preds = self.out_nonlin(preds).squeeze()
            # Mix generation and copy distributions via the copy gate.
            final_preds = p_copy.unsqueeze(1) * vocab_scores + (1 - p_copy.unsqueeze(1)) * preds
            # now loop over the examples and sort each separately
            for b_idx in beam_dict:
                beam_candidates = []
                # no words previously predicted
                if num_examples == len(beam_dict):
                    ex_hn = hn[:,b_idx,:].unsqueeze(0)
                    ex_cn = cn[:,b_idx,:].unsqueeze(0)
                    preds = final_preds[b_idx]
                    _, top_indices = torch.sort(-preds)
                    # add top n candidates
                    for z in range(beam_size):
                        word_idx = top_indices[z].item()
                        beam_candidates.append((preds[word_idx].item(), ex_hn, ex_cn, [word_idx]))
                    beam_dict[b_idx] = beam_candidates
                else:
                    origin_beams = beam_dict[b_idx]
                    start = b_idx * beam_size
                    end = (b_idx + 1) * beam_size
                    ex_hn = hn[:,start:end,:]
                    ex_cn = cn[:,start:end,:]
                    ex_preds = final_preds[start:end]
                    for o_idx, ob in enumerate(origin_beams):
                        curr_prob, _, _, seq = ob
                        # if one of the beams is already complete, add it to candidates
                        # note: this is inefficient, but whatever
                        if seq[-1] == eos_idx:
                            beam_candidates.append(ob)
                        preds = ex_preds[o_idx]
                        curr_hn = ex_hn[:,o_idx,:].unsqueeze(0)
                        curr_cn = ex_cn[:,o_idx,:].unsqueeze(0)
                        _, top_indices = torch.sort(-preds)
                        for z in range(beam_size):
                            word_idx = top_indices[z].item()
                            beam_candidates.append((curr_prob + float(preds[word_idx].cpu().item()), curr_hn, curr_cn, seq + [word_idx]))
                    # Keep only the top `beam_size` candidates by score.
                    s_inds = np.argsort([x[0] for x in beam_candidates])[::-1]
                    beam_candidates = [beam_candidates[x] for x in s_inds]
                    beam_dict[b_idx] = beam_candidates[:beam_size]
            nsteps += 1
            if nsteps > max_steps:
                break
        return beam_dict
class SCPN(nn.Module):
    """Syntactically Controlled Paraphrase Network (Iyyer et al., 2018).

    Bidirectional-LSTM sentence encoder, an LSTM encoder over the target
    parse ("transformation"), and a two-layer LSTM decoder with attention
    over both plus a copy mechanism over the input sentence.
    """

    def __init__(self, d_word, d_hid, d_nt, d_trans,
                 len_voc, len_trans_voc, use_input_parse):
        # d_word/d_hid/d_nt/d_trans: embedding and hidden sizes;
        # len_voc/len_trans_voc: word and parse-tag vocabulary sizes;
        # use_input_parse: whether the encoder also consumes the input parse.
        super(SCPN, self).__init__()
        self.d_word = d_word
        self.d_hid = d_hid
        self.d_trans = d_trans
        self.d_nt = d_nt + 1
        self.len_voc = len_voc
        self.len_trans_voc = len_trans_voc
        self.use_input_parse = use_input_parse
        # embeddings
        self.word_embs = nn.Embedding(len_voc, d_word)
        self.trans_embs = nn.Embedding(len_trans_voc, d_nt)
        # lstms
        if use_input_parse:
            self.encoder = nn.LSTM(d_word + d_trans, d_hid, num_layers=1, bidirectional=True, batch_first=True)
        else:
            self.encoder = nn.LSTM(d_word, d_hid, num_layers=1, bidirectional=True, batch_first=True)
        self.encoder_proj = nn.Linear(d_hid * 2, d_hid)
        self.decoder = nn.LSTM(d_word + d_hid, d_hid, num_layers=2, batch_first=True)
        self.trans_encoder = nn.LSTM(d_nt, d_trans, num_layers=1, batch_first=True)
        # output softmax
        self.out_dense_1 = nn.Linear(d_hid * 2, d_hid)
        self.out_dense_2 = nn.Linear(d_hid, len_voc)
        self.att_nonlin = nn.Softmax(dim=1)
        self.out_nonlin = nn.LogSoftmax(dim=1)
        # attention params
        self.att_parse_proj = nn.Linear(d_trans, d_hid)
        self.att_W = nn.Parameter(torch.Tensor(d_hid, d_hid))
        self.att_parse_W = nn.Parameter(torch.Tensor(d_hid, d_hid))
        self.copy_hid_v = nn.Parameter(torch.Tensor(d_hid, 1))
        self.copy_att_v = nn.Parameter(torch.Tensor(d_hid, 1))
        self.copy_inp_v = nn.Parameter(torch.Tensor(d_word + d_hid, 1))

    # create matrix mask from length vector
    def compute_mask(self, lengths):
        """Build a (batch, max_len) float mask: 1 inside each sequence, 0 in padding."""
        device = lengths.device
        max_len = torch.max(lengths)
        range_row = torch.arange(0, max_len, device=device).unsqueeze(0).expand(lengths.size(0), max_len)
        mask = lengths.unsqueeze(1).expand_as(range_row)
        mask = (range_row < mask).float()
        return mask

    # masked softmax for attention
    def masked_softmax(self, vector, mask):
        result = torch.nn.functional.softmax(vector, dim=1)
        result = result * mask
        result = result / (result.sum(dim=1, keepdim=True) + 1e-13)
        return result

    # compute masked attention over enc hiddens with bilinear product
    def compute_decoder_attention(self, hid_previous, enc_hids, in_lens):
        mask = self.compute_mask(in_lens)
        b_hn = hid_previous.mm(self.att_W)
        scores = b_hn.unsqueeze(1) * enc_hids
        scores = torch.sum(scores, dim=2)
        scores = self.masked_softmax(scores, mask)
        return scores

    # compute masked attention over parse sequence with bilinear product
    def compute_transformation_attention(self, hid_previous, trans_embs, trans_lens):
        mask = self.compute_mask(trans_lens)
        b_hn = hid_previous.mm(self.att_parse_W)
        scores = b_hn.unsqueeze(1) * trans_embs
        scores = torch.sum(scores, dim=2)
        scores = self.masked_softmax(scores, mask)
        return scores

    # return encoding for an input batch
    def encode_batch(self, inputs, trans, lengths):
        """Run the bidirectional encoder over a padded batch.

        Sorts by length for packing, restores original order, and projects
        the concatenated forward/backward states down to d_hid.
        Returns (all hidden states, last hidden state per sequence).
        """
        device = inputs.device
        bsz, max_len = inputs.size()
        in_embs = self.word_embs(inputs)
        lens, indices = torch.sort(lengths, 0, True)
        # concat word embs with trans hid
        if self.use_input_parse:
            in_embs = torch.cat([in_embs, trans.unsqueeze(1).expand(bsz, max_len, self.d_trans)], dim=2)
        e_hid_init = torch.zeros(2, bsz, self.d_hid, device=device)
        e_cell_init = torch.zeros(2, bsz, self.d_hid, device=device)
        all_hids, (enc_last_hid, _) = self.encoder(pack(in_embs[indices], lens.tolist(), batch_first=True), (e_hid_init, e_cell_init))
        _, _indices = torch.sort(indices, 0)
        all_hids = unpack(all_hids, batch_first=True)[0][_indices]
        all_hids = self.encoder_proj(all_hids.view(-1, self.d_hid * 2)).view(bsz, max_len, self.d_hid)
        enc_last_hid = torch.cat([enc_last_hid[0], enc_last_hid[1]], dim=1)
        enc_last_hid = self.encoder_proj(enc_last_hid)[_indices]
        return all_hids, enc_last_hid

    # return encoding for an input batch
    def encode_transformations(self, trans, lengths, return_last=True):
        """Encode the target-parse sequence; return either the final hidden
        state per sequence or all hidden states (return_last=False)."""
        device = trans.device
        bsz, _ = trans.size()
        lens, indices = torch.sort(lengths, 0, True)
        in_embs = self.trans_embs(trans)
        t_hid_init = torch.zeros(1, bsz, self.d_trans, device=device)
        t_cell_init = torch.zeros(1, bsz, self.d_trans, device=device)
        all_hids, (enc_last_hid, _) = self.trans_encoder(pack(in_embs[indices], lens.tolist(), batch_first=True), (t_hid_init, t_cell_init))
        _, _indices = torch.sort(indices, 0)
        if return_last:
            return enc_last_hid.squeeze(0)[_indices]
        else:
            all_hids = unpack(all_hids, batch_first=True)[0]
            return all_hids[_indices]

    # decode one timestep
    def decode_step(self, idx, prev_words, prev_hid, prev_cell,
                    enc_hids, trans_embs, in_sent_lens, trans_lens, bsz, max_len):
        """Advance the decoder one step; returns new state, attention, and copy gate.
        Attention is driven by the top (second) decoder layer's hidden state."""
        device = self.word_embs.parameters().__next__().device
        # initialize with zeros
        if idx == 0:
            word_input = torch.zeros(bsz, 1, self.d_word, device=device)
        else:
            word_input = self.word_embs(prev_words)
            word_input = word_input.view(bsz, 1, self.d_word)
        # concatenate w/ transformation embeddings
        trans_weights = self.compute_transformation_attention(prev_hid[1], trans_embs, trans_lens)
        trans_ctx = torch.sum(trans_weights.unsqueeze(2) * trans_embs, dim=1)
        decoder_input = torch.cat([word_input, trans_ctx.unsqueeze(1)], dim=2)
        # feed to decoder lstm
        _, (hn, cn) = self.decoder(decoder_input, (prev_hid, prev_cell))
        # compute attention for next time step and att weighted ave of encoder hiddens
        attn_weights = self.compute_decoder_attention(hn[1], enc_hids, in_sent_lens)
        attn_ctx = torch.sum(attn_weights.unsqueeze(2) * enc_hids, dim=1)
        # compute copy prob as function of lotsa shit
        p_copy = decoder_input.squeeze(1).mm(self.copy_inp_v)
        p_copy += attn_ctx.mm(self.copy_att_v)
        p_copy += hn[1].mm(self.copy_hid_v)
        p_copy = torch.sigmoid(p_copy).squeeze(1)
        return hn, cn, attn_weights, attn_ctx, p_copy

    def forward(self):
        # NOTE(review): `raise NotImplemented` raises a TypeError at runtime
        # (NotImplemented is not an exception) — should be NotImplementedError.
        raise NotImplemented

    # beam search given a single sentence and a batch of transformations
    def batch_beam_search(self, inputs, out_trans, in_sent_lens, out_trans_lens, eos_idx, beam_size=5, max_steps=70):
        """Beam-decode one paraphrase per target parse; returns
        {batch_idx: beams}, each beam (log-prob, hidden, cell, id sequence)."""
        device = inputs.device
        bsz, max_len = inputs.size()
        # chop input
        inputs = inputs[:, :in_sent_lens[0]]
        # encode transformations
        out_trans_hids = self.encode_transformations(out_trans, out_trans_lens, return_last=False)
        out_trans_hids = self.att_parse_proj(out_trans_hids)
        # encode input sentence
        enc_hids, enc_last_hid = self.encode_batch(inputs, None, in_sent_lens)
        # initialize decoder hidden to last encoder hidden
        hn = enc_last_hid.unsqueeze(0).expand(2, bsz, self.d_hid).contiguous()
        cn = torch.zeros(2, 1, self.d_hid, device=device)
        # initialize beams (dictionary of batch_idx: beam params)
        beam_dict = {}
        for b_idx in range(out_trans.size(0)):
            beam_dict[b_idx] = [(0.0, hn, cn, [])]
        nsteps = 0
        while True:
            # set up accumulators for predictions
            # assumption: all examples have same number of beams at each timestep
            prev_words = []
            prev_hs = []
            prev_cs = []
            for b_idx in beam_dict:
                beams = beam_dict[b_idx]
                # loop over everything in the beam
                beam_candidates = []
                for b in beams:
                    curr_prob, prev_h, prev_c, seq = b
                    # start with last word in sequence, if eos end the beam
                    if len(seq) > 0:
                        prev_words.append(seq[-1])
                    else:
                        prev_words = None
                    prev_hs.append(prev_h)
                    prev_cs.append(prev_c)
            # now batch decoder computations
            hs = torch.cat(prev_hs, dim=1)
            cs = torch.cat(prev_cs, dim=1)
            num_examples = hs.size(1)
            if prev_words is not None:
                prev_words = torch.LongTensor(prev_words).to(device)
            # expand out parse states if necessary
            if num_examples != out_trans_hids.size(0):
                d1, d2, d3 = out_trans_hids.size()
                rep_factor = num_examples // d1
                curr_out = out_trans_hids.unsqueeze(1).expand(d1, rep_factor, d2, d3).contiguous().view(-1, d2, d3)
                curr_out_lens = out_trans_lens.unsqueeze(1).expand(d1, rep_factor).contiguous().view(-1)
            else:
                curr_out = out_trans_hids
                curr_out_lens = out_trans_lens
            # expand out inputs and encoder hiddens
            _, in_len, hid_d = enc_hids.size()
            curr_enc_hids = enc_hids.expand(num_examples, in_len, hid_d)
            curr_enc_lens = in_sent_lens.expand(num_examples)
            curr_inputs = inputs.expand(num_examples, in_sent_lens[0])
            # concat prev word emb and prev attn input and feed to decoder lstm
            hn, cn, attn_weights, attn_ctx, p_copy = self.decode_step(nsteps, prev_words, hs, cs, curr_enc_hids, curr_out, curr_enc_lens, curr_out_lens, num_examples, max_len)
            # compute copy attn by scattering attn into vocab space
            vocab_scores = torch.zeros(num_examples, self.len_voc, device=device)
            vocab_scores = vocab_scores.scatter_add_(1, curr_inputs, attn_weights)
            vocab_scores = torch.log(vocab_scores + 1e-20).squeeze()
            # compute prediction over vocab for a single time step
            pred_inp = torch.cat([hn[1], attn_ctx], dim=1)
            preds = self.out_dense_1(pred_inp)
            preds = self.out_dense_2(preds)
            preds = self.out_nonlin(preds).squeeze()
            # Mix generation and copy distributions via the copy gate.
            final_preds = p_copy.unsqueeze(1) * vocab_scores + (1 - p_copy.unsqueeze(1)) * preds
            # now loop over the examples and sort each separately
            for b_idx in beam_dict:
                beam_candidates = []
                # no words previously predicted
                if num_examples == len(beam_dict):
                    ex_hn = hn[:,b_idx,:].unsqueeze(1)
                    ex_cn = cn[:,b_idx,:].unsqueeze(1)
                    preds = final_preds[b_idx]
                    _, top_indices = torch.sort(-preds)
                    # add top n candidates
                    for z in range(beam_size):
                        word_idx = top_indices[z].item()
                        beam_candidates.append((preds[word_idx].item(), ex_hn, ex_cn, [word_idx]))
                    beam_dict[b_idx] = beam_candidates
                else:
                    origin_beams = beam_dict[b_idx]
                    start = b_idx * beam_size
                    end = (b_idx + 1) * beam_size
                    ex_hn = hn[:,start:end,:]
                    ex_cn = cn[:,start:end,:]
                    ex_preds = final_preds[start:end]
                    for o_idx, ob in enumerate(origin_beams):
                        curr_prob, _, _, seq = ob
                        # if one of the beams is already complete, add it to candidates
                        if seq[-1] == eos_idx:
                            beam_candidates.append(ob)
                        preds = ex_preds[o_idx]
                        curr_hn = ex_hn[:,o_idx,:]
                        curr_cn = ex_cn[:,o_idx,:]
                        _, top_indices = torch.sort(-preds)
                        for z in range(beam_size):
                            word_idx = top_indices[z].item()
                            beam_candidates.append((curr_prob + float(preds[word_idx].cpu().item()),curr_hn.unsqueeze(1), curr_cn.unsqueeze(1), seq + [word_idx]))
                    # Keep only the top `beam_size` candidates by score.
                    s_inds = np.argsort([x[0] for x in beam_candidates])[::-1]
                    beam_candidates = [beam_candidates[x] for x in s_inds]
                    beam_dict[b_idx] = beam_candidates[:beam_size]
            nsteps += 1
            if nsteps > max_steps:
                break
        return beam_dict
OpenAttack | OpenAttack-master/OpenAttack/attackers/scpn/__init__.py |
from typing import List, Optional
from ...text_process.tokenizer import Tokenizer, get_default_tokenizer
from ...text_process.constituency_parser import ConstituencyParser, get_default_constituency_parser
from ...utils import check_language
from ...tags import TAG_English, Tag
from ...data_manager import DataManager
from ..classification import ClassificationAttacker, ClassifierGoal, Classifier
import numpy as np
import pickle
import torch
# Top-level constituency-parse templates fed to the SCPN paraphrase model;
# each candidate paraphrase is generated to match one of these skeletons.
# "EOP" marks end-of-parse.
DEFAULT_TEMPLATES = [
    '( ROOT ( S ( NP ) ( VP ) ( . ) ) ) EOP',
    '( ROOT ( S ( VP ) ( . ) ) ) EOP',
    '( ROOT ( NP ( NP ) ( . ) ) ) EOP',
    '( ROOT ( FRAG ( SBAR ) ( . ) ) ) EOP',
    '( ROOT ( S ( S ) ( , ) ( CC ) ( S ) ( . ) ) ) EOP',
    '( ROOT ( S ( LST ) ( VP ) ( . ) ) ) EOP',
    '( ROOT ( SBARQ ( WHADVP ) ( SQ ) ( . ) ) ) EOP',
    '( ROOT ( S ( PP ) ( , ) ( NP ) ( VP ) ( . ) ) ) EOP',
    '( ROOT ( S ( ADVP ) ( NP ) ( VP ) ( . ) ) ) EOP',
    '( ROOT ( S ( SBAR ) ( , ) ( NP ) ( VP ) ( . ) ) ) EOP'
]
def reverse_bpe(sent):
    """Undo byte-pair encoding by merging '@@'-terminated subword units.

    Args:
        sent: iterable of BPE subword tokens; a token ending in '@@' is a
            continuation that joins with the following token(s).

    Returns:
        str: the detokenized sentence, space-joined.
    """
    words = []
    cache = ''
    for piece in sent:
        if piece.endswith('@@'):
            # Continuation piece: strip the marker(s) and keep accumulating.
            cache += piece.replace('@@', '')
        elif cache != '':
            words.append(cache + piece)
            cache = ''
        else:
            words.append(piece)
    # Bug fix: the original silently dropped a trailing '@@' fragment
    # (e.g. ['tail@@'] -> ''); flush any leftover accumulation instead.
    if cache != '':
        words.append(cache)
    return ' '.join(words)
class SCPNAttacker(ClassificationAttacker):
    # Syntactically Controlled Paraphrase Network (SCPN) attacker: rewrites
    # the input into fixed syntactic templates and returns the first
    # paraphrase that satisfies the attack goal.
    @property
    def TAGS(self):
        # Only class predictions (`get_pred`) are required from the victim.
        return { Tag("get_pred", "victim"), self.__lang_tag }
    def __init__(self,
            templates : List[str] = DEFAULT_TEMPLATES,
            device : Optional[torch.device] = None,
            tokenizer : Optional[Tokenizer] = None,
            parser : Optional[ConstituencyParser] = None
        ):
        """
        Adversarial Example Generation with Syntactically Controlled Paraphrase Networks. Mohit Iyyer, John Wieting, Kevin Gimpel, Luke Zettlemoyer. NAACL-HLT 2018.
        `[pdf] <https://www.aclweb.org/anthology/N18-1170.pdf>`__
        `[code] <https://github.com/miyyer/scpn>`__
        Args:
            templates: A list of templates used in SCPNAttacker. **Default:** ten manually selected templates.
            device: The device to load SCPN models (pytorch). **Default:** Use "cpu" if cuda is not available else "cuda".
            tokenizer: A tokenizer that will be used during the attack procedure. Must be an instance of :py:class:`.Tokenizer`
            parser: A constituency parser.
        :Language: english
        :Classifier Capacity: get_pred
        The default templates are:
        .. code-block:: python
            DEFAULT_TEMPLATES = [
                '( ROOT ( S ( NP ) ( VP ) ( . ) ) ) EOP',
                '( ROOT ( S ( VP ) ( . ) ) ) EOP',
                '( ROOT ( NP ( NP ) ( . ) ) ) EOP',
                '( ROOT ( FRAG ( SBAR ) ( . ) ) ) EOP',
                '( ROOT ( S ( S ) ( , ) ( CC ) ( S ) ( . ) ) ) EOP',
                '( ROOT ( S ( LST ) ( VP ) ( . ) ) ) EOP',
                '( ROOT ( SBARQ ( WHADVP ) ( SQ ) ( . ) ) ) EOP',
                '( ROOT ( S ( PP ) ( , ) ( NP ) ( VP ) ( . ) ) ) EOP',
                '( ROOT ( S ( ADVP ) ( NP ) ( VP ) ( . ) ) ) EOP',
                '( ROOT ( S ( SBAR ) ( , ) ( NP ) ( VP ) ( . ) ) ) EOP'
            ]
        """
        from . import models
        from . import subword
        # Select a device automatically unless one was given explicitly.
        if device is None:
            if torch.cuda.is_available():
                self.device = torch.device("cuda")
            else:
                self.device = torch.device("cpu")
        else:
            self.device = torch.device( device )
        self.__lang_tag = TAG_English
        if tokenizer is None:
            self.tokenizer = get_default_tokenizer(self.__lang_tag)
        else:
            self.tokenizer = tokenizer
        if parser is None:
            self.parser = get_default_constituency_parser(self.__lang_tag)
        else:
            self.parser = parser
        check_language([self.parser, self.tokenizer], self.__lang_tag)
        self.templates = templates
        # Use DataManager Here
        model_path = DataManager.load("AttackAssist.SCPN")
        pp_model = torch.load(model_path["scpn.pt"], map_location=self.device)
        parse_model = torch.load(model_path["parse_generator.pt"], map_location=self.device)
        # Paraphrase vocabulary (word -> id) and its reverse mapping.
        pp_vocab, rev_pp_vocab = pickle.load(open(model_path["parse_vocab.pkl"], 'rb'))
        bpe_codes = open(model_path["bpe.codes"], "r", encoding="utf-8")
        bpe_vocab = open(model_path["vocab.txt"], "r", encoding="utf-8")
        self.parse_gen_voc = pickle.load(open(model_path["ptb_tagset.pkl"], "rb"))
        self.pp_vocab = pp_vocab
        self.rev_pp_vocab = rev_pp_vocab
        # Reverse mapping: parse-tag id -> tag string.
        self.rev_label_voc = dict((v,k) for (k,v) in self.parse_gen_voc.items())
        # load paraphrase network
        pp_args = pp_model['config_args']
        self.net = models.SCPN(pp_args.d_word, pp_args.d_hid, pp_args.d_nt, pp_args.d_trans, len(self.pp_vocab), len(self.parse_gen_voc) - 1, pp_args.use_input_parse)
        self.net.load_state_dict(pp_model['state_dict'])
        self.net = self.net.to(self.device).eval()
        # load parse generator network
        parse_args = parse_model['config_args']
        self.parse_net = models.ParseNet(parse_args.d_nt, parse_args.d_hid, len(self.parse_gen_voc))
        self.parse_net.load_state_dict(parse_model['state_dict'])
        self.parse_net = self.parse_net.to(self.device).eval()
        # instantiate BPE segmenter
        bpe_vocab = subword.read_vocabulary(bpe_vocab, 50)
        self.bpe = subword.BPE(bpe_codes, '@@', bpe_vocab, None)
    def gen_paraphrase(self, sent, templates):
        """Generate one paraphrase of ``sent`` per syntactic template in ``templates``.

        Raises:
            KeyError: if a token or parse tag is missing from the model vocabularies.
        """
        # Encode templates as padded id matrices for the parse generator.
        template_lens = [len(x.split()) for x in templates]
        np_templates = np.zeros((len(templates), max(template_lens)), dtype='int32')
        for z, template in enumerate(templates):
            np_templates[z, :template_lens[z]] = [self.parse_gen_voc[w] for w in templates[z].split()]
        tp_templates = torch.from_numpy(np_templates).long().to(self.device)
        tp_template_lens = torch.LongTensor(template_lens).to(self.device)
        # Tokenize, lowercase and BPE-segment the input sentence.
        ssent = ' '.join( self.tokenizer.tokenize(sent, pos_tagging=False) )
        seg_sent = self.bpe.segment(ssent.lower()).split()
        # encode sentence using pp_vocab, leave one word for EOS
        seg_sent = [self.pp_vocab[w] for w in seg_sent if w in self.pp_vocab]
        # add EOS
        seg_sent.append(self.pp_vocab['EOS'])
        torch_sent = torch.LongTensor(seg_sent).to(self.device)
        torch_sent_len = torch.LongTensor([len(seg_sent)]).to(self.device)
        # encode parse using parse vocab
        # Stanford Parser
        parse_tree = self.parser(sent)
        # Flatten the tree to a token sequence of brackets and labels; the
        # loop below blanks out terminal words that follow a label.
        parse_tree = " ".join(parse_tree.replace("\n", " ").split()).replace("(", "( ").replace(")", " )")
        parse_tree = parse_tree.split()
        for i in range(len(parse_tree) - 1):
            if (parse_tree[i] not in "()") and (parse_tree[i + 1] not in "()"):
                parse_tree[i + 1] = ""
        parse_tree = " ".join(parse_tree).split() + ["EOP"]
        torch_parse = torch.LongTensor([self.parse_gen_voc[w] for w in parse_tree]).to(self.device)
        torch_parse_len = torch.LongTensor([len(parse_tree)]).to(self.device)
        # generate full parses from templates
        beam_dict = self.parse_net.batch_beam_search(torch_parse.unsqueeze(0), tp_templates, torch_parse_len[:], tp_template_lens, self.parse_gen_voc['EOP'], beam_size=3, max_steps=150)
        seq_lens = []
        seqs = []
        for b_idx in beam_dict:
            # Keep only the top beam candidate for each template.
            prob,_,_,seq = beam_dict[b_idx][0]
            seq = seq[:-1] # chop off EOP
            seq_lens.append(len(seq))
            seqs.append(seq)
        np_parses = np.zeros((len(seqs), max(seq_lens)), dtype='int32')
        for z, seq in enumerate(seqs):
            np_parses[z, :seq_lens[z]] = seq
        tp_parses = torch.from_numpy(np_parses).long().to(self.device)
        tp_len = torch.LongTensor(seq_lens).to(self.device)
        # generate paraphrases from parses
        ret = []
        beam_dict = self.net.batch_beam_search(torch_sent.unsqueeze(0), tp_parses, torch_sent_len[:], tp_len, self.pp_vocab['EOS'], beam_size=3, max_steps=40)
        for b_idx in beam_dict:
            prob,_,_,seq = beam_dict[b_idx][0]
            gen_parse = ' '.join([self.rev_label_voc[z] for z in seqs[b_idx]])
            gen_sent = ' '.join([self.rev_pp_vocab[w] for w in seq[:-1]])
            # Undo BPE segmentation before returning the paraphrase.
            ret.append(reverse_bpe(gen_sent.split()))
        return ret
    def attack(self, victim: Classifier, sent, goal: ClassifierGoal):
        """Return the first template paraphrase that satisfies ``goal``, or None on failure."""
        try:
            pps = self.gen_paraphrase(sent, self.templates)
        except KeyError as e:
            # A token/tag missing from the model vocabularies makes this
            # sample unattackable; report failure instead of crashing.
            return None
        preds = victim.get_pred(pps)
        for idx, pred in enumerate(preds):
            if goal.check(pps[idx], pred):
                return pps[idx]
        return None
| 8,934 | 39.986239 | 185 | py |
OpenAttack | OpenAttack-master/OpenAttack/victim/classifiers/transformers.py | import numpy as np
from .base import Classifier
from ...utils import language_by_name,get_language, HookCloser
from ...text_process.tokenizer import TransformersTokenizer
from ...attack_assist.word_embedding import WordEmbedding
import transformers
import torch
from ...tags import TAG_English
class TransformersClassifier(Classifier):
    # Victim-model adapter around a Huggingface sequence-classification
    # model.  Exposes predictions, probabilities, input-embedding gradients
    # (captured through a forward hook) and last-layer hidden states.
    @property
    def TAGS(self):
        # Advertise the language tag only when it is known.
        if self.__lang_tag is None:
            return super().TAGS
        return super().TAGS.union({ self.__lang_tag })
    def __init__(self,
            model : transformers.PreTrainedModel,
            tokenizer : transformers.PreTrainedTokenizer,
            embedding_layer,
            device : torch.device = None,
            max_length : int = 128,
            batch_size : int = 8,
            lang = None
        ):
        """
        Args:
            model: Huggingface model for classification.
            tokenizer: Huggingface tokenizer for classification. **Default:** None
            embedding_layer: The module of embedding_layer used in transformers models. For example, ``BertModel.bert.embeddings.word_embeddings``. **Default:** None
            device: Device of pytorch model. **Default:** "cpu" if cuda is not available else "cuda"
            max_len: Max length of input tokens. If input token list is too long, it will be truncated. Uses None for no truncation. **Default:** None
            batch_size: Max batch size of this classifier.
            lang: Language of this classifier. If is `None` then `TransformersClassifier` will intelligently select the language based on other parameters.
        """
        self.model = model
        if lang is not None:
            self.__lang_tag = language_by_name(lang)
            if self.__lang_tag is None:
                raise ValueError("Unknown language `%s`" % lang)
        elif tokenizer is not None:
            # NOTE(review): both remaining branches assign TAG_English, so the
            # "intelligent" language detection promised by the docstring is a
            # no-op here -- confirm intended behavior.
            self.__lang_tag = TAG_English
        else:
            self.__lang_tag = TAG_English
        if device is None:
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.to(device)
        # `curr_embedding` is populated by the forward hook below during each
        # forward pass so that input-embedding gradients can be read back.
        self.curr_embedding = None
        self.hook = embedding_layer.register_forward_hook( HookCloser(self) )
        self.embedding_layer = embedding_layer
        self.word2id = dict()
        for i in range(tokenizer.vocab_size):
            self.word2id[tokenizer.convert_ids_to_tokens(i)] = i
        self.__tokenizer = tokenizer
        # Snapshot of the embedding matrix as a numpy array.
        self.embedding = embedding_layer.weight.detach().cpu().numpy()
        self.token_unk = tokenizer.unk_token
        self.token_unk_id = tokenizer.unk_token_id
        self.max_length = max_length
        self.batch_size = batch_size
    @property
    def tokenizer(self):
        # Wrap the raw Huggingface tokenizer in the OpenAttack interface.
        return TransformersTokenizer(self.__tokenizer, self.__lang_tag)
    def to(self, device : torch.device):
        """
        Args:
            device: Device that moves model to.
        """
        self.device = device
        self.model = self.model.to(device)
        return self
    def get_pred(self, input_):
        # Predicted label = argmax of class probabilities per sentence.
        return self.get_prob(input_).argmax(axis=1)
    def get_prob(self, input_):
        # Probabilities only; dummy labels are passed because `predict`
        # always computes a loss/gradient.
        return self.get_grad([
            self.__tokenizer.tokenize(sent) for sent in input_
        ], [0] * len(input_))[0]
    def get_grad(self, input_, labels):
        v = self.predict(input_, labels)
        return v[0], v[1]
    def predict(self, sen_list, labels=None):
        """Run batched forward/backward passes.

        Returns a tuple ``(probs, embedding_grads, last_hidden_states)`` as
        numpy arrays, where ``embedding_grads`` excludes the [CLS]/[SEP]
        positions.
        """
        # Truncate to leave room for the [CLS] and [SEP] special tokens.
        sen_list = [
            sen[:self.max_length - 2] for sen in sen_list
        ]
        sent_lens = [ len(sen) for sen in sen_list ]
        batch_len = max(sent_lens) + 2
        # Attention mask: 1 for real tokens (+2 specials), 0 for padding.
        attentions = np.array([
            [1] * (len(sen) + 2) + [0] * (batch_len - 2 - len(sen))
            for sen in sen_list
        ], dtype='int64')
        sen_list = [
            self.__tokenizer.convert_tokens_to_ids(sen)
            for sen in sen_list
        ]
        # [CLS] sentence [SEP] padding...
        tokeinzed_sen = np.array([
            [self.__tokenizer.cls_token_id] + sen + [self.__tokenizer.sep_token_id] + ([self.__tokenizer.pad_token_id] * (batch_len - 2 - len(sen)))
            for sen in sen_list
        ], dtype='int64')
        result = None
        result_grad = None
        all_hidden_states = None
        if labels is None:
            labels = [0] * len(sen_list)
        labels = torch.LongTensor(labels).to(self.device)
        # Process the input in chunks of `batch_size` (ceil division).
        for i in range( (len(sen_list) + self.batch_size - 1) // self.batch_size):
            curr_sen = tokeinzed_sen[ i * self.batch_size: (i + 1) * self.batch_size ]
            curr_mask = attentions[ i * self.batch_size: (i + 1) * self.batch_size ]
            xs = torch.from_numpy(curr_sen).long().to(self.device)
            masks = torch.from_numpy(curr_mask).long().to(self.device)
            outputs = self.model(input_ids = xs,attention_mask = masks, output_hidden_states=True, labels=labels[ i * self.batch_size: (i + 1) * self.batch_size ])
            if i == 0:
                # First chunk: initialize the accumulators.
                all_hidden_states = outputs.hidden_states[-1].detach().cpu()
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                # Backprop the negated loss; gradients land on the embedding
                # tensor captured by the forward hook (`curr_embedding`).
                loss = - loss
                loss.backward()
                result_grad = self.curr_embedding.grad.clone().cpu()
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = logits.detach().cpu()
            else:
                # Subsequent chunks: append to the accumulators.
                all_hidden_states = torch.cat((all_hidden_states, outputs.hidden_states[-1].detach().cpu()), dim=0)
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                loss = - loss
                loss.backward()
                result_grad = torch.cat((result_grad, self.curr_embedding.grad.clone().cpu()), dim=0)
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = torch.cat((result, logits.detach().cpu()))
        result = result.numpy()
        all_hidden_states = all_hidden_states.numpy()
        # Drop the first and last positions ([CLS] / final slot) from grads.
        result_grad = result_grad.numpy()[:, 1:-1]
        return result, result_grad, all_hidden_states
    def get_hidden_states(self, input_, labels=None):
        """
        :param list input_: A list of sentences of which we want to get the hidden states in the model.
        :rtype torch.tensor
        """
        return self.predict(input_, labels)[2]
    def get_embedding(self):
        # Expose the cached embedding matrix with its vocabulary mapping.
        return WordEmbedding(self.word2id, self.embedding)
| 6,612 | 37.447674 | 165 | py |
OpenAttack | OpenAttack-master/OpenAttack/attack_assist/substitute/word/embed_based.py | from typing import Dict, Optional
from .base import WordSubstitute
from ....exceptions import WordNotInDictionaryException
import torch
from ....tags import *
DEFAULT_CONFIG = {"cosine": False}
class EmbedBasedSubstitute(WordSubstitute):
    def __init__(self, word2id : Dict[str, int], embedding : torch.Tensor, cosine=False, k = 50, threshold = 0.5, device = None):
        """
        Embedding based word substitute.
        Args:
            word2id: A `dict` maps words to indexes.
            embedding: A word embedding matrix.
            cosine: If `true` then the cosine distance is used, otherwise the Euclidian distance is used.
            threshold: Distance threshold. Default: 0.5
            k: Top-k results to return. If k is `None`, all results will be returned. Default: 50
            device: A pytorch device for computing distances. Default: "cpu"
        """
        if device is None:
            device = "cpu"
        self.word2id = word2id
        self.embedding = embedding
        self.cosine = cosine
        self.k = k
        self.threshold = threshold
        # Inverse vocabulary: index -> word.
        self.id2word = {index: token for token, index in self.word2id.items()}
        if cosine:
            # Pre-normalize rows so cosine distance reduces to 1 - dot product.
            row_norms = self.embedding.norm(dim=1, keepdim=True)
            self.embedding = self.embedding / row_norms
        self.embedding = self.embedding.to(device)
    def __call__(self, word: str, pos: Optional[str] = None):
        # Convenience wrapper; delegates straight to `substitute`.
        return self.substitute(word, pos)
    def substitute(self, word, pos):
        """Return up to `k` (word, distance) pairs whose distance to `word` is below `threshold`."""
        if word not in self.word2id:
            raise WordNotInDictionaryException()
        query_vec = self.embedding[self.word2id[word], :]
        if self.cosine:
            # Rows are unit-normalized, so this is cosine distance.
            dis = 1 - (query_vec * self.embedding).sum(dim=1)
        else:
            dis = (query_vec - self.embedding).norm(dim=1)
        order = dis.argsort()
        if self.k is not None:
            order = order[:self.k]
        # `order` is sorted by distance, so stop at the first candidate
        # that crosses the threshold.
        results = []
        for cand_id in order.tolist():
            if not (dis[cand_id] < self.threshold):
                break
            results.append((self.id2word[cand_id], dis[cand_id].item()))
        return results
| 2,227 | 31.764706 | 129 | py |
OpenAttack | OpenAttack-master/OpenAttack/attack_assist/substitute/word/english_word2vec.py | from .embed_based import EmbedBasedSubstitute
from ....data_manager import DataManager
from ....tags import TAG_English
import torch
class Word2VecSubstitute(EmbedBasedSubstitute):
    TAGS = { TAG_English }
    def __init__(self, cosine = False, k = 50, threshold = 0.5, device = None):
        """
        English word substitute based on word2vec.
        Args:
            cosine: If `true` then the cosine distance is used, otherwise the Euclidian distance is used.
            threshold: Distance threshold. Default: 0.5
            k: Top-k results to return. If k is `None`, all results will be returned. Default: 50
            device: A pytorch device for computing distances. Default: "cpu"
        :Data Requirements: :py:data:`.AttackAssist.Word2Vec`
        :Language: english
        """
        # Load the pretrained word2vec vectors and hand them to the
        # embedding-based base class.
        wordvec = DataManager.load("AttackAssist.Word2Vec")
        super().__init__(
            wordvec.word2id,
            torch.from_numpy(wordvec.embedding),
            cosine = cosine,
            k = k,
            threshold = threshold,
            device = device
        )
| 1,109 | 30.714286 | 105 | py |
OpenAttack | OpenAttack-master/OpenAttack/attack_assist/substitute/word/english_glove.py | import torch
from .embed_based import EmbedBasedSubstitute
from ....data_manager import DataManager
from ....tags import TAG_English
class GloveSubstitute(EmbedBasedSubstitute):
    TAGS = { TAG_English }
    def __init__(self, cosine = False, k = 50, threshold = 0.5, device = None):
        """
        English word substitute based on GloVe word vectors.
        `[pdf] <https://nlp.stanford.edu/pubs/glove.pdf>`__
        Args:
            cosine: Use cosine distance when `True`, Euclidean distance otherwise.
            threshold: Distance threshold. Default: 0.5
            k: Number of nearest candidates to return; `None` returns all. Default: 50
            device: A pytorch device used for the distance computation. Default: "cpu"
        :Data Requirements: :py:data:`.AttackAssist.GloVe`
        :Language: english
        """
        # Fetch the pretrained GloVe vectors and delegate everything else to
        # the embedding-based base class.
        glove = DataManager.load("AttackAssist.GloVe")
        embedding_matrix = torch.from_numpy(glove.embedding)
        super().__init__(
            glove.word2id,
            embedding_matrix,
            cosine = cosine,
            k = k,
            threshold = threshold,
            device = device
        )
| 1,184 | 31.027027 | 105 | py |
OpenAttack | OpenAttack-master/OpenAttack/attack_assist/substitute/word/english_counterfit.py | from .embed_based import EmbedBasedSubstitute
from ....data_manager import DataManager
from ....tags import TAG_English
import torch
class CounterFittedSubstitute(EmbedBasedSubstitute):
    TAGS = { TAG_English }
    def __init__(self, cosine : bool = False, k : int = 50, threshold : float = 0.5, device = None):
        """
        English word substitute based on Counter-fitting word vectors.
        `[pdf] <https://www.aclweb.org/anthology/N16-1018.pdf>`__
        Args:
            cosine: Use cosine distance when `True`, Euclidean distance otherwise.
            threshold: Distance threshold. Default: 0.5
            k: Number of nearest candidates to return; `None` returns all. Default: 50
            device: A pytorch device used for the distance computation. Default: "cpu"
        :Data Requirements: :py:data:`.AttackAssist.CounterFit`
        :Language: english
        """
        # Fetch the counter-fitted vectors and delegate to the
        # embedding-based base class.
        counterfit = DataManager.load("AttackAssist.CounterFit")
        embedding_matrix = torch.from_numpy(counterfit.embedding)
        super().__init__(
            counterfit.word2id,
            embedding_matrix,
            cosine = cosine,
            k = k,
            threshold = threshold,
            device = device
        )
| 1,228 | 33.138889 | 105 | py |
OpenAttack | OpenAttack-master/OpenAttack/attack_assist/substitute/word/chinese_word2vec.py | from typing import Union
from .embed_based import EmbedBasedSubstitute
from ....data_manager import DataManager
from ....tags import TAG_Chinese
import torch
class ChineseWord2VecSubstitute(EmbedBasedSubstitute):
    TAGS = { TAG_Chinese }
    def __init__(self, cosine : bool = False, threshold : float = 0.5, k : int = 50, device : Union[str, torch.device, None] = None):
        """
        Chinese word substitute based on word2vec.
        Args:
            cosine: Use cosine distance when `True`, Euclidean distance otherwise.
            threshold: Distance threshold. Default: 0.5
            k: Number of nearest candidates to return; `None` returns all. Default: 50
            device: A pytorch device used for the distance computation. Default: "cpu"
        :Data Requirements: :py:data:`.AttackAssist.ChineseWord2Vec`
        :Language: chinese
        """
        # Fetch the pretrained Chinese word2vec vectors and delegate to the
        # embedding-based base class.
        wordvec = DataManager.load("AttackAssist.ChineseWord2Vec")
        embedding_matrix = torch.from_numpy(wordvec.embedding)
        super().__init__(
            wordvec.word2id,
            embedding = embedding_matrix,
            cosine = cosine,
            k = k,
            threshold = threshold,
            device = device
        )
| 1,226 | 33.083333 | 133 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_albert_ag.py | """
:type: OpenAttack.utils.AlbertClassifier
:Size: 788.697MB
:Package Requirements:
* transformers
* pytorch
Pretrained ALBERT model on AG-4 dataset. See :py:data:`Dataset.AG` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ALBERT.AG"
URL = "/TAADToolbox/victim/albert_ag.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned ALBERT checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    from OpenAttack.victim.classifiers import TransformersClassifier
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # NOTE(review): the module docstring says AG-4, yet num_labels=5 -- verify against the checkpoint.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=5, output_hidden_states=False)
return TransformersClassifier(model, tokenizer, model.albert.embeddings.word_embeddings) | 749 | 31.608696 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_roberta_imdb.py | """
:type: OpenAttack.utils.RobertaClassifier
:Size: 1.18GB
:Package Requirements:
* transformers
* pytorch
Pretrained ROBERTA model on IMDB dataset. See :py:data:`Dataset.IMDB` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ROBERTA.IMDB"
URL = "/TAADToolbox/victim/roberta_imdb.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned RoBERTa IMDB checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # Binary sentiment labels (positive / negative).
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
return TransformersClassifier(model, tokenizer, model.roberta.embeddings.word_embeddings) | 762 | 30.791667 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_roberta_ag.py | """
:type: OpenAttack.utils.RobertaClassifier
:Size: 1.22GB
:Package Requirements:
* transformers
* pytorch
Pretrained ROBERTA model on AG-4 dataset. See :py:data:`Dataset.AG` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ROBERTA.AG"
URL = "/TAADToolbox/victim/roberta_ag.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned RoBERTa AG checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # NOTE(review): the module docstring says AG-4, yet num_labels=5 -- verify against the checkpoint.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=5, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
return TransformersClassifier(model, tokenizer, model.roberta.embeddings.word_embeddings) | 752 | 30.375 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_roberta_sst.py | """
:type: OpenAttack.utils.RobertaClassifier
:Size: 1.18GB
:Package Requirements:
* transformers
* pytorch
Pretrained ROBERTA model on SST-2 dataset. See :py:data:`Dataset.SST` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ROBERTA.SST"
URL = "/TAADToolbox/victim/roberta_sst.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned RoBERTa SST-2 checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # Binary sentiment labels (SST-2).
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
return TransformersClassifier(model, tokenizer, model.roberta.embeddings.word_embeddings) | 760 | 30.708333 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_albert_imdb.py | """
:type: OpenAttack.utils.AlbertClassifier
:Size: 788.662MB
:Package Requirements:
* transformers
* pytorch
Pretrained ALBERT model on IMDB dataset. See :py:data:`Dataset.IMDB` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ALBERT.IMDB"
URL = "/TAADToolbox/victim/albert_imdb.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned ALBERT IMDB checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # Binary sentiment labels (positive / negative).
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
return TransformersClassifier(model, tokenizer, model.albert.embeddings.word_embeddings) | 760 | 30.708333 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/translation_models.py | """
:type: dict
:Size: 1.22GB
:Package Requirements:
* **pytorch**
Pretrained translation models. See :py:data:`TranslationModels` for detail.
`[code] <https://github.com/OpenNMT/OpenNMT-py>`__
`[page] <https://opennmt.net/>`__
"""
from OpenAttack.utils import make_zip_downloader
import os
NAME = "AttackAssist.TranslationModels"
URL = "/TAADToolbox/translation_models.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Map each pretrained translation-model file name to its full path under ``path``."""
    model_files = (
        "english_french_model_acc_71.05_ppl_3.71_e13.pt",
        "english_portuguese_model_acc_70.75_ppl_4.32_e13.pt",
        "french_english_model_acc_68.51_ppl_4.43_e13.pt",
        "portuguese_english_model_acc_69.93_ppl_5.04_e13.pt",
    )
    paths = {}
    for name in model_files:
        paths[name] = os.path.join(path, name)
    return paths
| 742 | 26.518519 | 117 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_albert_sst.py | """
:type: OpenAttack.utils.AlbertClassifier
:Size: 788.66MB
:Package Requirements:
* transformers
* pytorch
Pretrained ALBERT model on SST-2 dataset. See :py:data:`Dataset.SST` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.ALBERT.SST"
URL = "/TAADToolbox/victim/albert_sst.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Load the fine-tuned ALBERT SST-2 checkpoint at ``path`` and wrap it in a TransformersClassifier."""
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # Binary sentiment labels (SST-2).
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
return TransformersClassifier(model, tokenizer, model.albert.embeddings.word_embeddings) | 757 | 30.583333 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/gan.py | """
:type: tuple
:Size: 55.041MB
:Package Requirements:
* **pytorch**
Pretrained GAN model on SNLI dataset used in :py:class:`.GANAttacker`. See :py:class:`.GANAttacker` for detail.
"""
import os
from OpenAttack.utils import make_zip_downloader
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import json
from torch.autograd import Variable
NAME = "AttackAssist.GAN"
URL = "/TAADToolbox/GNAE.zip"
DOWNLOAD = make_zip_downloader(URL)
try:
    def to_gpu(_, x):
        # No-op device shim: the gpu flag (first argument) is ignored and
        # `x` is returned unchanged, so callers in this module stay on CPU.
        return x
    class MLP_D(nn.Module):
        # MLP discriminator: stacked Linear layers (BatchNorm after every
        # hidden layer except the first, then the given activation) followed
        # by a final Linear; forward() returns the mean as a scalar score.
        def __init__(self, ninput, noutput, layers,
                     activation=nn.LeakyReLU(0.2), gpu=False):
            super(MLP_D, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            # `layers` is a dash-separated hidden-size spec, e.g. "300-300".
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                # No batch normalization after first layer
                if i != 0:
                    bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                    self.layers.append(bn)
                    self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            for i, layer in enumerate(self.layers):
                x = layer(x)
            # Collapse to a single scalar critic value over the batch.
            x = torch.mean(x)
            return x
        def init_weights(self):
            init_std = 0.02
            for layer in self.layers:
                # NOTE(review): the bare try/except is meant to skip modules
                # without weight/bias (activations), but BatchNorm parameters
                # are also normal-initialized here -- confirm intended.
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_G(nn.Module):
        # MLP generator: Linear -> BatchNorm -> activation stacks followed by
        # a final Linear projection to the output (latent code) size.
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_G, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            self.gpu = gpu
            # `layers` is a dash-separated hidden-size spec, e.g. "300-300".
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            # Coerce numpy arrays / raw FloatTensors into Variables.
            # NOTE(review): `.cuda()` is called unconditionally here and
            # ignores self.gpu -- this will fail on CPU-only machines.
            if x.__class__.__name__ == "ndarray":
                x = Variable(torch.FloatTensor(x)).cuda()
            # x = x.cpu()
            if x.__class__.__name__ == "FloatTensor":
                x = Variable(x).cuda()
            for i, layer in enumerate(self.layers):
                x = layer(x)
            return x
        def init_weights(self):
            init_std = 0.02
            for layer in self.layers:
                # Bare except skips parameter-less modules (activations);
                # BatchNorm parameters are also re-initialized here.
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_I(nn.Module):
        # Separate inverter that maps a continuous code back to the noise z.
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_I, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            # `layers` is a dash-separated hidden-size spec, e.g. "300-300".
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            for i, layer in enumerate(self.layers):
                x = layer(x)
            return x
        def init_weights(self):
            init_std = 0.02
            for layer in self.layers:
                # Bare except skips parameter-less modules (activations);
                # BatchNorm parameters are also re-initialized here.
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_I_AE(nn.Module):
        # separate Inverter to map continuous code back to z (mean & std)
        # Variational-style inverter: the MLP trunk feeds two linear heads
        # (mean and log-variance) and forward() returns a reparameterized
        # sample mu + eps * std.
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_I_AE, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            self.gpu = gpu
            noutput_mu = noutput
            noutput_var = noutput
            # `layers` is a dash-separated hidden-size spec, e.g. "300-300".
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.linear_mu = nn.Linear(noutput, noutput_mu)
            self.linear_var = nn.Linear(noutput, noutput_var)
            self.init_weights()
        def forward(self, x):
            for i, layer in enumerate(self.layers):
                x = layer(x)
            mu = self.linear_mu(x)
            logvar = self.linear_var(x)
            # Reparameterization trick: std = exp(0.5 * logvar).
            std = 0.5 * logvar
            std = std.exp_()  # std
            epsilon = Variable(
                std.data.new(std.size()).normal_())  # normal noise with the same type and size as std.data
            if self.gpu:
                epsilon = epsilon.cuda()
            sample = mu + (epsilon * std)
            return sample
        def init_weights(self):
            init_std = 0.02
            for layer in self.layers:
                # Bare except skips parameter-less modules (activations);
                # BatchNorm parameters are also re-initialized here.
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
            self.linear_mu.weight.data.normal_(0, init_std)
            self.linear_mu.bias.data.fill_(0)
            self.linear_var.weight.data.normal_(0, init_std)
            self.linear_var.bias.data.fill_(0)
class Seq2SeqCAE(nn.Module):
# CNN encoder, LSTM decoder
def __init__(self, emsize, nhidden, ntokens, nlayers, conv_windows="5-5-3", conv_strides="2-2-2",
conv_layer="500-700-1000", activation=nn.LeakyReLU(0.2, inplace=True),
noise_radius=0.2, hidden_init=False, dropout=0, gpu=True):
super(Seq2SeqCAE, self).__init__()
self.nhidden = nhidden # size of hidden vector in LSTM
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.arch_conv_filters = conv_layer
self.arch_conv_strides = conv_strides
self.arch_conv_windows = conv_windows
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
conv_layer_sizes = [emsize] + [int(x) for x in conv_layer.split('-')]
conv_strides_sizes = [int(x) for x in conv_strides.split('-')]
conv_windows_sizes = [int(x) for x in conv_windows.split('-')]
self.encoder = nn.Sequential()
for i in range(len(conv_layer_sizes) - 1):
layer = nn.Conv1d(conv_layer_sizes[i], conv_layer_sizes[i + 1], \
conv_windows_sizes[i], stride=conv_strides_sizes[i])
self.encoder.add_module("layer-" + str(i + 1), layer)
bn = nn.BatchNorm1d(conv_layer_sizes[i + 1])
self.encoder.add_module("bn-" + str(i + 1), bn)
self.encoder.add_module("activation-" + str(i + 1), activation)
self.linear = nn.Linear(1000, emsize)
decoder_input_size = emsize + nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
self.linear_dec = nn.Linear(nhidden, ntokens)
# 9-> 7-> 3 -> 1
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices) # training stage
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
output, state = self.decoder(augmented_embeddings, state)
decoded = self.linear_dec(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):
"""Generate through decoder; no backprop"""
if hidden.ndimension() == 1:
hidden = hidden.unsqueeze(0)
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
if not self.gpu:
self.start_symbols = self.start_symbols.cpu()
# <sos>
# self.start_symbols.data.resize_(batch_size, 1)
with torch.no_grad():
self.start_symbols.resize_(batch_size, 1)
# self.start_symbols.data.fill_(1)
with torch.no_grad():
self.start_symbols.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
# unroll
all_indices = []
for i in range(maxlen):
output, state = self.decoder(inputs, state)
overvocab = self.linear_dec(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
probs = F.softmax(overvocab / temp, dim=1)
indices = torch.multinomial(probs, 1)
if indices.ndimension() == 1:
indices = indices.unsqueeze(1)
all_indices.append(indices)
embedding = self.embedding_decoder(indices)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
max_indices = torch.cat(all_indices, 1)
return max_indices
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding.weight.data[0].zero()
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
        def encode(self, indices, lengths, noise):
            """Encode token ids into a unit-L2-norm latent code (batch x nhidden).

            `lengths` is accepted for interface parity with the LSTM encoder
            but is not used by this convolutional path.  When `noise` is true
            and noise_radius > 0, Gaussian noise is added to the code.
            """
            embeddings = self.embedding(indices)
            # Conv1d expects (batch, channels, time).
            embeddings = embeddings.transpose(1, 2)
            c_pre_lin = self.encoder(embeddings)
            # Assumes the conv stack has reduced the time axis to length 1.
            c_pre_lin = c_pre_lin.squeeze(2)
            hidden = self.linear(c_pre_lin)
            # normalize to unit ball (l2 norm of 1) - p=2, dim=1
            norms = torch.norm(hidden, 2, 1)
            if norms.ndimension() == 1:
                norms = norms.unsqueeze(1)
            hidden = torch.div(hidden, norms.expand_as(hidden))
            if noise and self.noise_radius > 0:
                # NOTE(review): a fresh torch.Generator() is constructed on
                # every call -- confirm the intended seeding of this noise.
                gauss_noise = torch.normal(mean=torch.zeros(hidden.size()),
                                           std=self.noise_radius, generator=torch.Generator(), out=None) ###
                if self.gpu:
                    gauss_noise = gauss_noise.cuda()
                hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
            return hidden
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2) # (hidden, cell)
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False, generator=None, inverter=None):
if not generator: # only enc -> dec
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
else: # enc -> inv -> gen -> dec
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
z_hat = inverter(hidden)
c_hat = generator(z_hat)
decoded = self.decode(c_hat, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
class Seq2Seq(nn.Module):
def __init__(self, emsize, nhidden, ntokens, nlayers, noise_radius=0.2,
hidden_init=False, dropout=0, gpu=True):
super(Seq2Seq, self).__init__()
self.nhidden = nhidden
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
# RNN Encoder and Decoder
self.encoder = nn.LSTM(input_size=emsize,
hidden_size=nhidden,
num_layers=nlayers,
dropout=dropout,
batch_first=True)
decoder_input_size = emsize + nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
# Initialize Linear Transformation
self.linear = nn.Linear(nhidden, ntokens)
self.init_weights()
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2)) # (hidden, cell)
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False, generator=None, inverter=None):
if not generator:
batch_size, maxlen = indices.size()
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
else:
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
z_hat = inverter(hidden)
c_hat = generator(z_hat)
decoded = self.decode(c_hat, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
def encode(self, indices, lengths, noise):
embeddings = self.embedding(indices)
packed_embeddings = pack_padded_sequence(input=embeddings,
lengths=lengths,
batch_first=True)
# Encode
packed_output, state = self.encoder(packed_embeddings)
hidden, cell = state
# batch_size x nhidden
hidden = hidden[-1] # get hidden state of last layer of encoder
# normalize to unit ball (l2 norm of 1) - p=2, dim=1
norms = torch.norm(hidden, 2, 1)
if norms.ndimension() == 1:
norms = norms.unsqueeze(1)
hidden = torch.div(hidden, norms.expand_as(hidden))
if noise and self.noise_radius > 0:
gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
std=self.noise_radius)
hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
return hidden
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices)
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
packed_embeddings = pack_padded_sequence(input=augmented_embeddings,
lengths=lengths,
batch_first=True)
packed_output, state = self.decoder(packed_embeddings, state)
output, lengths = pad_packed_sequence(packed_output, batch_first=True)
# reshape to batch_size*maxlen x nhidden before linear over vocab
decoded = self.linear(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):
"""Generate through decoder; no backprop"""
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
# <sos>
self.start_symbols.data.resize_(batch_size, 1)
self.start_symbols.data.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
# unroll
all_indices = []
for i in range(maxlen):
output, state = self.decoder(inputs, state)
overvocab = self.linear(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
probs = F.softmax(overvocab / temp)
indices = torch.multinomial(probs, 1)
if indices.ndimension() == 1:
indices = indices.unsqueeze(1)
all_indices.append(indices)
embedding = self.embedding_decoder(indices)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
max_indices = torch.cat(all_indices, 1)
return max_indices
    def LOAD(path):
        """Load vocab and pretrained GAN components from `path`.

        Expects vocab.json plus state dicts a.pkl (autoencoder), i.pkl
        (inverter), g.pkl (generator) and d.pkl (discriminator).  Returns
        (word2idx, autoencoder, inverter, gan_gen, gan_disc).
        """
        word2idx = json.load(open(os.path.join(path, 'vocab.json'), 'r'))
        ntokens = len(word2idx)
        # Hyper-parameters below must match the shipped checkpoints exactly.
        autoencoder = Seq2SeqCAE(emsize=300,
                                 nhidden=300,
                                 ntokens=ntokens,
                                 nlayers=1,
                                 noise_radius=0.2,
                                 hidden_init=False,
                                 dropout=0.0,
                                 conv_layer='500-700-1000',
                                 conv_windows='3-3-3',
                                 conv_strides='1-2-2',
                                 gpu=False)
        inverter = MLP_I_AE(ninput=300, noutput=100, layers='300-300')
        gan_gen = MLP_G(ninput=100, noutput=300, layers='300-300')
        gan_disc = MLP_D(ninput=300, noutput=1, layers='300-300')
        autoencoder.load_state_dict(torch.load(os.path.join(path, 'a.pkl')))
        inverter.load_state_dict(torch.load(os.path.join(path, 'i.pkl')))
        gan_gen.load_state_dict(torch.load(os.path.join(path, 'g.pkl')))
        gan_disc.load_state_dict(torch.load(os.path.join(path, 'd.pkl')))
        return word2idx, autoencoder, inverter, gan_gen, gan_disc
except ModuleNotFoundError as e:
def LOAD(path):
raise e | 25,710 | 38.253435 | 111 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_xlnet_imdb.py | """
:type: OpenAttack.utils.XlnetClassifier
:Size: 1.25GB
:Package Requirements:
* transformers
* pytorch
Pretrained XLNET model on IMDB dataset. See :py:data:`Dataset.IMDB` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.XLNET.IMDB"  # registry key for this pretrained victim model
URL = "/TAADToolbox/victim/xlnet_imdb.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def LOAD(path):
    """Build the pretrained XLNet IMDB victim classifier from `path`."""
    # Imported lazily so the package imports without transformers installed.
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # num_labels=2: binary sentiment classification.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
    return TransformersClassifier(model, tokenizer, model.transformer.word_embedding) | 746 | 30.125 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_xlnet_sst.py | """
:type: OpenAttack.utils.XlnetClassifier
:Size: 1.25GB
:Package Requirements:
* transformers
* pytorch
Pretrained XLNET model on SST-2 dataset. See :py:data:`Dataset.SST` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.XLNET.SST"  # registry key for this pretrained victim model
URL = "/TAADToolbox/victim/xlnet_sst.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def LOAD(path):
    """Build the pretrained XLNet SST-2 victim classifier from `path`."""
    # Imported lazily so the package imports without transformers installed.
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # num_labels=2: binary sentiment classification.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
    return TransformersClassifier(model, tokenizer, model.transformer.word_embedding)
return TransformersClassifier(model, tokenizer, model.transformer.word_embedding)
| 745 | 28.84 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_bert_amazon_zh.py | """
:type: OpenAttack.utils.BertClassifier
:Size: 992.75 MB
:Package Requirements:
* transformers
* pytorch
Pretrained BERT model on Amazon Reviews (Chinese) dataset.
"""
from OpenAttack.utils import make_zip_downloader
import os
NAME = "Victim.BERT.AMAZON_ZH"  # registry key for this pretrained victim model
URL = "/TAADToolbox/victim/bert_amazon_reviews_zh.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def LOAD(path):
    """Build the pretrained Chinese BERT Amazon-reviews victim from `path`."""
    # Imported lazily so the package imports without transformers installed.
    import transformers
    tokenizer = transformers.BertTokenizer.from_pretrained(path)
    # num_labels=5: one class per review star rating.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=5, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
    return TransformersClassifier(model, tokenizer, model.bert.embeddings.word_embeddings, lang="chinese")
| 771 | 28.692308 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_xlnet_ag.py | """
:type: OpenAttack.utils.XlnetClassifier
:Size: 1.25GB
:Package Requirements:
* transformers
* pytorch
Pretrained XLNET model on AG-4 dataset. See :py:data:`Dataset.AG` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.XLNET.AG"  # registry key for this pretrained victim model
URL = "/TAADToolbox/victim/xlnet_ag.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def LOAD(path):
    """Build the pretrained XLNet AG-News victim classifier from `path`."""
    # Imported lazily so the package imports without transformers installed.
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # num_labels=4: the four AG news topic categories.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=4, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
    return TransformersClassifier(model, tokenizer, model.transformer.word_embedding) | 736 | 29.708333 | 123 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/sgan.py | """
:type: tuple
:Size: 54.854MB
:Package Requirements:
* **pytorch**
Pretrained GAN model on SST-2 dataset used in :py:class:`.GANAttacker`. See :py:class:`.GANAttacker` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "AttackAssist.SGAN"  # registry key for this pretrained GAN bundle
URL = "/TAADToolbox/SGNAE.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def to_gpu(gpu, var):
    """CPU-only placeholder: the `gpu` flag is ignored and `var` returned as-is."""
    return var
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import os, json
    class MLP_D(nn.Module):
        """MLP discriminator/critic over latent codes.

        `layers` is a dash-separated hidden-size spec (e.g. "300-300").
        forward() averages the final output over every element, producing a
        single scalar critic score for the whole batch.

        NOTE(review): submodule names ("layerN"/"bnN"/"activationN") are part
        of the shipped checkpoints' state_dict keys; the last add_module call
        names the output layer after len(self.layers), which is quirky but
        must not be changed without re-exporting the checkpoints.
        """
        def __init__(self, ninput, noutput, layers,
                     activation=nn.LeakyReLU(0.2), gpu=False):
            super(MLP_D, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            # Hidden widths parsed from the dash-separated spec.
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                # No batch normalization after first layer
                if i != 0:
                    bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                    self.layers.append(bn)
                    self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            # Final projection to `noutput`.
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            # Apply every registered linear/BN/activation in sequence.
            for i, layer in enumerate(self.layers):
                x = layer(x)
            # Collapse to one scalar (mean over all elements of the batch).
            x = torch.mean(x)
            return x
        def init_weights(self):
            """N(0, 0.02) weights and zero biases; objects without weight/bias
            (the activations) fall through the bare except."""
            init_std = 0.02
            for layer in self.layers:
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_G(nn.Module):
        """MLP generator: maps a noise vector z to a latent code.

        `layers` is a dash-separated hidden-size spec (e.g. "300-300").
        NOTE(review): submodule names are state_dict keys of the shipped
        checkpoints -- do not rename without re-exporting them.
        """
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_G, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            self.gpu = gpu
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            # Accept raw numpy arrays / FloatTensors as well as Variables.
            # NOTE(review): .cuda() is called unconditionally here despite the
            # self.gpu flag -- confirm behaviour on CPU-only hosts.
            if x.__class__.__name__ == "ndarray":
                x = Variable(torch.FloatTensor(x)).cuda()
            # x = x.cpu()
            if x.__class__.__name__ == "FloatTensor":
                x = Variable(x).cuda()
            for i, layer in enumerate(self.layers):
                x = layer(x)
            return x
        def init_weights(self):
            """N(0, 0.02) weights, zero biases; activations skipped via except."""
            init_std = 0.02
            for layer in self.layers:
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_I(nn.Module):
        # separate Inverter to map continuous code back to z
        """Deterministic inverter MLP (code -> noise vector z).

        NOTE(review): submodule names are state_dict keys of the shipped
        checkpoints -- do not rename without re-exporting them.
        """
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_I, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            self.init_weights()
        def forward(self, x):
            # Plain sequential application of every registered layer.
            for i, layer in enumerate(self.layers):
                x = layer(x)
            return x
        def init_weights(self):
            """N(0, 0.02) weights, zero biases; activations skipped via except."""
            init_std = 0.02
            for layer in self.layers:
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
    class MLP_I_AE(nn.Module):
        # separate Inverter to map continuous code back to z (mean & std)
        """Stochastic (VAE-style) inverter: code -> sampled noise vector z.

        The trunk MLP feeds two heads, linear_mu and linear_var, and the
        output is sampled via the reparameterisation trick:
        z = mu + eps * exp(0.5 * logvar), eps ~ N(0, I).
        NOTE(review): submodule names are state_dict keys of the shipped
        checkpoints -- do not rename without re-exporting them.
        """
        def __init__(self, ninput, noutput, layers,
                     activation=nn.ReLU(), gpu=False):
            super(MLP_I_AE, self).__init__()
            self.ninput = ninput
            self.noutput = noutput
            self.gpu = gpu
            noutput_mu = noutput
            noutput_var = noutput
            layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
            self.layers = []
            for i in range(len(layer_sizes) - 1):
                layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
                self.layers.append(layer)
                self.add_module("layer" + str(i + 1), layer)
                bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn" + str(i + 1), bn)
                self.layers.append(activation)
                self.add_module("activation" + str(i + 1), activation)
            layer = nn.Linear(layer_sizes[-1], noutput)
            self.layers.append(layer)
            self.add_module("layer" + str(len(self.layers)), layer)
            # Gaussian-parameter heads.
            self.linear_mu = nn.Linear(noutput, noutput_mu)
            self.linear_var = nn.Linear(noutput, noutput_var)
            self.init_weights()
        def forward(self, x):
            for i, layer in enumerate(self.layers):
                x = layer(x)
            mu = self.linear_mu(x)
            logvar = self.linear_var(x)
            # Reparameterisation: std = exp(0.5 * logvar).
            std = 0.5 * logvar
            std = std.exp_()  # std
            epsilon = Variable(
                std.data.new(std.size()).normal_())  # normal noise with the same type and size as std.data
            if self.gpu:
                epsilon = epsilon.cuda()
            sample = mu + (epsilon * std)
            return sample
        def init_weights(self):
            """N(0, 0.02) weights, zero biases, for trunk and both heads."""
            init_std = 0.02
            for layer in self.layers:
                try:
                    layer.weight.data.normal_(0, init_std)
                    layer.bias.data.fill_(0)
                except:
                    pass
            self.linear_mu.weight.data.normal_(0, init_std)
            self.linear_mu.bias.data.fill_(0)
            self.linear_var.weight.data.normal_(0, init_std)
            self.linear_var.bias.data.fill_(0)
class Seq2SeqCAE(nn.Module):
# CNN encoder, LSTM decoder
def __init__(self, emsize, nhidden, ntokens, nlayers, conv_windows="5-5-3", conv_strides="2-2-2",
conv_layer="500-700-1000", activation=nn.LeakyReLU(0.2, inplace=True),
noise_radius=0.2, hidden_init=False, dropout=0, gpu=True):
super(Seq2SeqCAE, self).__init__()
self.nhidden = nhidden # size of hidden vector in LSTM
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.arch_conv_filters = conv_layer
self.arch_conv_strides = conv_strides
self.arch_conv_windows = conv_windows
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
conv_layer_sizes = [emsize] + [int(x) for x in conv_layer.split('-')]
conv_strides_sizes = [int(x) for x in conv_strides.split('-')]
conv_windows_sizes = [int(x) for x in conv_windows.split('-')]
self.encoder = nn.Sequential()
for i in range(len(conv_layer_sizes) - 1):
layer = nn.Conv1d(conv_layer_sizes[i], conv_layer_sizes[i + 1], \
conv_windows_sizes[i], stride=conv_strides_sizes[i])
self.encoder.add_module("layer-" + str(i + 1), layer)
bn = nn.BatchNorm1d(conv_layer_sizes[i + 1])
self.encoder.add_module("bn-" + str(i + 1), bn)
self.encoder.add_module("activation-" + str(i + 1), activation)
self.linear = nn.Linear(1000, emsize)
self.linear_cnn = nn.Linear(23, 1)
decoder_input_size = emsize + nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
self.linear_dec = nn.Linear(nhidden, ntokens)
# 9-> 7-> 3 -> 1
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices) # training stage
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
# print(embeddings.size())
# print(all_hidden.size())
# print(augmented_embeddings.size())
# print(state[0].size(), state[1].size())
output, state = self.decoder(augmented_embeddings, state)
decoded = self.linear_dec(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):
"""Generate through decoder; no backprop"""
if hidden.ndimension() == 1:
hidden = hidden.unsqueeze(0)
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
if not self.gpu:
self.start_symbols = self.start_symbols.cpu()
# <sos>
self.start_symbols.resize_(batch_size, 1)
self.start_symbols.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
# unroll
all_indices = []
for i in range(maxlen):
output, state = self.decoder(inputs, state)
overvocab = self.linear_dec(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
probs = F.softmax(overvocab / temp)
indices = torch.multinomial(probs, 1)
if indices.ndimension() == 1:
indices = indices.unsqueeze(1)
all_indices.append(indices)
embedding = self.embedding_decoder(indices)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
max_indices = torch.cat(all_indices, 1)
return max_indices
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding.weight.data[0].zero()
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def encode(self, indices, noise):
embeddings = self.embedding(indices)
embeddings = embeddings.transpose(1, 2)
c_pre_lin = self.encoder(embeddings)
c_pre_lin = self.linear_cnn(c_pre_lin)
# print(c_pre_lin.size())
c_pre_lin = c_pre_lin.squeeze(2)
hidden = self.linear(c_pre_lin)
# normalize to unit ball (l2 norm of 1) - p=2, dim=1
norms = torch.norm(hidden, 2, 1)
if norms.ndimension() == 1:
norms = norms.unsqueeze(1)
hidden = torch.div(hidden, norms.expand_as(hidden))
if noise and self.noise_radius > 0:
gauss_noise = torch.normal(std=self.noise_radius, mean=torch.zeros(hidden.size())
)
if self.gpu:
gauss_noise = gauss_noise.cuda()
hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
return hidden
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2) # (hidden, cell)
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False, generator=None, inverter=None):
if not generator: # only enc -> dec
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
else: # enc -> inv -> gen -> dec
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
z_hat = inverter(hidden)
c_hat = generator(z_hat)
decoded = self.decode(c_hat, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
class Seq2Seq(nn.Module):
def __init__(self, emsize, nhidden, ntokens, nlayers, noise_radius=0.2,
hidden_init=False, dropout=0, gpu=True):
super(Seq2Seq, self).__init__()
self.nhidden = nhidden
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
# RNN Encoder and Decoder
self.encoder = nn.LSTM(input_size=emsize,
hidden_size=nhidden,
num_layers=nlayers,
dropout=dropout,
batch_first=True)
decoder_input_size = emsize + nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
# Initialize Linear Transformation
self.linear = nn.Linear(nhidden, ntokens)
self.init_weights()
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2)) # (hidden, cell)
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False, generator=None, inverter=None):
if not generator:
batch_size, maxlen = indices.size()
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
else:
batch_size, maxlen = indices.size()
self.embedding.weight.data[0].fill_(0)
self.embedding_decoder.weight.data[0].fill_(0)
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
z_hat = inverter(hidden)
c_hat = generator(z_hat)
decoded = self.decode(c_hat, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
def encode(self, indices, lengths, noise):
embeddings = self.embedding(indices)
packed_embeddings = pack_padded_sequence(input=embeddings,
lengths=lengths,
batch_first=True)
# Encode
packed_output, state = self.encoder(packed_embeddings)
hidden, cell = state
# batch_size x nhidden
hidden = hidden[-1] # get hidden state of last layer of encoder
# normalize to unit ball (l2 norm of 1) - p=2, dim=1
norms = torch.norm(hidden, 2, 1)
if norms.ndimension() == 1:
norms = norms.unsqueeze(1)
hidden = torch.div(hidden, norms.expand_as(hidden))
if noise and self.noise_radius > 0:
# gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
# std=self.noise_radius)
gauss_noise = torch.normal(mean=torch.zeros(hidden.size()), std=self.noise_radius)
hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
return hidden
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices)
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
packed_embeddings = pack_padded_sequence(input=augmented_embeddings,
lengths=lengths,
batch_first=True)
packed_output, state = self.decoder(packed_embeddings, state)
output, lengths = pad_packed_sequence(packed_output, batch_first=True)
# reshape to batch_size*maxlen x nhidden before linear over vocab
decoded = self.linear(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):
"""Generate through decoder; no backprop"""
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
# <sos>
self.start_symbols.resize_(batch_size, 1)
self.start_symbols.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
# unroll
all_indices = []
for i in range(maxlen):
output, state = self.decoder(inputs, state)
overvocab = self.linear(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
probs = F.softmax(overvocab / temp)
indices = torch.multinomial(probs, 1)
if indices.ndimension() == 1:
indices = indices.unsqueeze(1)
all_indices.append(indices)
embedding = self.embedding_decoder(indices)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)
max_indices = torch.cat(all_indices, 1)
return max_indices
    def LOAD(path):
        """Load vocab and pretrained GAN components from `path`.

        Expects vocab.json plus state dicts a.pkl (autoencoder), i.pkl
        (inverter), g.pkl (generator) and d.pkl (discriminator).  Returns
        (word2idx, autoencoder, inverter, gan_gen, gan_disc).
        """
        word2idx = json.load(open(os.path.join(path, 'vocab.json'), 'r'))
        ntokens = len(word2idx)
        # Hyper-parameters below must match the shipped checkpoints exactly.
        autoencoder = Seq2SeqCAE(emsize=300,
                                 nhidden=300,
                                 ntokens=ntokens,
                                 nlayers=1,
                                 noise_radius=0.2,
                                 hidden_init=False,
                                 dropout=0.0,
                                 conv_layer='500-700-1000',
                                 conv_windows='3-3-3',
                                 conv_strides='1-2-2',
                                 gpu=False)
        inverter = MLP_I_AE(ninput=300, noutput=100, layers='300-300')
        gan_gen = MLP_G(ninput=100, noutput=300, layers='300-300')
        gan_disc = MLP_D(ninput=300, noutput=1, layers='300-300')
        autoencoder.load_state_dict(torch.load(os.path.join(path, 'a.pkl')))
        inverter.load_state_dict(torch.load(os.path.join(path, 'i.pkl')))
        gan_gen.load_state_dict(torch.load(os.path.join(path, 'g.pkl')))
        gan_disc.load_state_dict(torch.load(os.path.join(path, 'd.pkl')))
        return word2idx, autoencoder, inverter, gan_gen, gan_disc
except ModuleNotFoundError as e:
def LOAD(path):
raise e | 25,824 | 38.128788 | 112 | py |
OpenAttack | OpenAttack-master/OpenAttack/data/victim_bert.py | """
:type: OpenAttack.utils.BertClassifier
:Size: 386.584MB
:Package Requirements:
* transformers
* pytorch
Pretrained BERT model on SST-2 dataset. See :py:data:`Dataset.SST` for detail.
"""
from OpenAttack.utils import make_zip_downloader
NAME = "Victim.BERT.SST"  # registry key for this pretrained victim model
URL = "/TAADToolbox/victim/bert_sst.zip"  # archive path on the data mirror
DOWNLOAD = make_zip_downloader(URL)  # callable that fetches and unpacks the zip
def LOAD(path):
    """Build the pretrained BERT SST-2 victim classifier from `path`."""
    # Imported lazily so the package imports without transformers installed.
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained(path)
    # num_labels=2: binary sentiment classification.
    model = transformers.AutoModelForSequenceClassification.from_pretrained(path, num_labels=2, output_hidden_states=False)
    from OpenAttack.victim.classifiers import TransformersClassifier
    return TransformersClassifier(model, tokenizer, model.bert.embeddings.word_embeddings)
| 749 | 29 | 123 | py |
espnet | espnet-master/setup.py | #!/usr/bin/env python3
"""ESPnet setup script."""
import os
from setuptools import find_packages, setup
requirements = {
"install": [
"setuptools>=38.5.1",
"packaging",
"configargparse>=1.2.1",
"typeguard==2.13.3",
"humanfriendly",
"scipy>=1.4.1",
"filelock",
"librosa==0.9.2",
"jamo==0.4.1", # For kss
"PyYAML>=5.1.2",
"soundfile>=0.10.2",
"h5py>=2.10.0",
"kaldiio>=2.18.0",
"torch>=1.3.0",
"torch_complex",
"nltk>=3.4.5",
# fix CI error due to the use of deprecated aliases
"numpy<1.24",
# https://github.com/espnet/espnet/runs/6646737793?check_suite_focus=true#step:8:7651
"protobuf<=3.20.1",
"hydra-core",
"opt-einsum",
# ASR
"sentencepiece==0.1.97",
"ctc-segmentation>=1.6.6",
# TTS
"pyworld>=0.2.10",
"pypinyin<=0.44.0",
"espnet_tts_frontend",
# ENH
"ci_sdr",
"pytorch_wpe",
"fast-bss-eval==0.1.3",
# UASR
"editdistance",
# fix CI error due to the use of deprecated functions
# https://github.com/espnet/espnet/actions/runs/3174416926/jobs/5171182884#step:8:8419
# https://importlib-metadata.readthedocs.io/en/latest/history.html#v5-0-0
"importlib-metadata<5.0",
],
# train: The modules invoked when training only.
"train": [
"matplotlib",
"pillow>=6.1.0",
"editdistance==0.5.2",
"wandb",
"tensorboard>=1.14",
],
# recipe: The modules actually are not invoked in the main module of espnet,
# but are invoked for the python scripts in each recipe
"recipe": [
"espnet_model_zoo",
"gdown",
"resampy",
"pysptk>=0.1.17",
"morfessor", # for zeroth-korean
"youtube_dl", # for laborotv
"nnmnkwii",
"museval>=0.2.1",
"pystoi>=0.2.2",
"mir-eval>=0.6",
"fastdtw",
"nara_wpe>=0.0.5",
"sacrebleu>=1.5.1",
"praatio>=6,<7", # for librispeech phoneme alignment
"scikit-learn>=1.0.0", # for HuBERT kmeans
],
# all: The modules should be optionally installled due to some reason.
# Please consider moving them to "install" occasionally
# NOTE(kamo): The modules in "train" and "recipe" are appended into "all"
"all": [
# NOTE(kamo): Append modules requiring specific pytorch version or torch>1.3.0
"torchaudio",
"torch_optimizer",
"fairscale",
"transformers",
"gtn==0.0.0",
],
"setup": [
"pytest-runner",
],
"test": [
"pytest>=3.3.0",
"pytest-timeouts>=1.2.1",
"pytest-pythonpath>=0.7.3",
"pytest-cov>=2.7.1",
"hacking>=2.0.0",
"mock>=2.0.0",
"pycodestyle",
"jsondiff<2.0.0,>=1.2.0",
"flake8>=3.7.8",
"flake8-docstrings>=1.3.1",
"black",
"isort",
],
"doc": [
"Jinja2<3.1",
"Sphinx==2.1.2",
"sphinx-rtd-theme>=0.2.4",
"sphinx-argparse>=0.2.5",
"commonmark==0.8.1",
"recommonmark>=0.4.0",
"nbsphinx>=0.4.2",
"sphinx-markdown-tables>=0.0.12",
],
}
# The "all" and "test" extras additionally pull in the training (and, for
# "all", the recipe) requirement lists defined above.
requirements["all"].extend(requirements["train"] + requirements["recipe"])
requirements["test"].extend(requirements["train"])
# Split the requirement table into the keyword arguments setuptools expects.
install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
extras_require = {
    k: v for k, v in requirements.items() if k not in ["install", "setup"]
}
# Single source of truth for the package version: espnet/version.txt.
dirname = os.path.dirname(__file__)
version_file = os.path.join(dirname, "espnet", "version.txt")
with open(version_file, "r") as f:
    version = f.read().strip()
setup(
name="espnet",
version=version,
url="http://github.com/espnet/espnet",
author="Shinji Watanabe",
author_email="shinjiw@ieee.org",
description="ESPnet: end-to-end speech processing toolkit",
long_description=open(os.path.join(dirname, "README.md"), encoding="utf-8").read(),
long_description_content_type="text/markdown",
license="Apache Software License",
packages=find_packages(include=["espnet*"]),
package_data={"espnet": ["version.txt"]},
# #448: "scripts" is inconvenient for developping because they are copied
# scripts=get_all_scripts('espnet/bin'),
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require=extras_require,
python_requires=">=3.7.0",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 5,217 | 30.817073 | 94 | py |
espnet | espnet-master/tools/check_install.py | #!/usr/bin/env python3
"""Script to check whether the installation is done correctly."""
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import importlib
import re
import shutil
import subprocess
import sys
from pathlib import Path
from packaging.version import parse
module_list = [
("torchaudio", None, None),
("torch_optimizer", None, None),
("warprnnt_pytorch", None, "installers/install_warp-transducer.sh"),
("chainer_ctc", None, "installers/install_chainer_ctc.sh"),
("pyopenjtalk", None, "installers/install_pyopenjtalk.sh"),
("tdmelodic_pyopenjtalk", None, "installers/install_tdmelodic_pyopenjtalk.sh"),
("kenlm", None, "installers/install_kenlm.sh"),
("mmseg", None, "installers/install_py3mmseg.sh"),
("espnet", None, None),
("numpy", None, None),
("fairseq", None, "installers/install_fairseq.sh"),
("phonemizer", None, "installers/install_phonemizer.sh"),
("gtn", None, "installers/install_gtn.sh"),
("s3prl", None, "installers/install_s3prl.sh"),
("transformers", None, "installers/install_transformers.sh"),
("speechbrain", None, "installers/install_speechbrain.sh"),
("k2", None, "installers/install_k2.sh"),
("longformer", None, "installers/install_longformer.sh"),
("nlg-eval", None, "installers/install_longformer.sh"),
("datasets", None, "installers/install_longformer.sh"),
("pykeops", None, "installers/install_cauchy_mult.sh"),
("whisper", None, "installers/install_whisper.sh"),
("RawNet3", None, "installers/install_rawnet.sh"),
("reazonspeech", None, "installers/install_reazonspeech.sh"),
]
executable_list = [
("sclite", "installers/install_sctk.sh", None),
("sph2pipe", "installers/install_sph2pipe.sh", None),
("PESQ", "installers/install_pesq.sh", None),
("BeamformIt", "installers/install_beamformit.sh", None),
("spm_train", None, None),
("spm_encode", None, None),
("spm_decode", None, None),
("sox", None, "--version"),
("ffmpeg", None, "-version"),
("flac", None, "--version"),
("cmake", None, "--version"),
]
def main():
    """Check the installation.

    Prints a "[x]"/"[ ]" report for the Python interpreter, the core DL
    frameworks (torch, chainer, cupy), every optional module in
    ``module_list``, every command-line tool in ``executable_list``, and
    Kaldi, then lists installer hints for anything that is missing.
    """
    python_version = sys.version.replace("\n", " ")
    print(f"[x] python={python_version}")
    print()
    print("Python modules:")
    # torch: also report CUDA / cuDNN / NCCL availability.
    try:
        import torch
        print(f"[x] torch={torch.__version__}")
        if torch.cuda.is_available():
            print(f"[x] torch cuda={torch.version.cuda}")
        else:
            print("[ ] torch cuda")
        if torch.backends.cudnn.is_available():
            print(f"[x] torch cudnn={torch.backends.cudnn.version()}")
        else:
            print("[ ] torch cudnn")
        if torch.distributed.is_nccl_available():
            print("[x] torch nccl")
        else:
            print("[ ] torch nccl")
    except ImportError:
        print("[ ] torch")
    # chainer: only version 6.0.0 is supported by espnet.
    try:
        import chainer
        print(f"[x] chainer={chainer.__version__}")
        if parse(chainer.__version__) != parse("6.0.0"):
            print(
                f"Warning! chainer={chainer.__version__} is not supported. "
                "Supported version is 6.0.0"
            )
        if chainer.backends.cuda.available:
            print("[x] chainer cuda")
        else:
            print("[ ] chainer cuda")
        if chainer.backends.cuda.cudnn_enabled:
            print("[x] chainer cudnn")
        else:
            print("[ ] chainer cudnn")
    except ImportError:
        print("[ ] chainer")
    try:
        import cupy
        print(f"[x] cupy={cupy.__version__}")
        try:
            from cupy.cuda import nccl  # NOQA
            print("[x] cupy nccl")
        except ImportError:
            print("[ ] cupy nccl")
    except ImportError:
        print("[ ] cupy")
    to_install = []
    for name, versions, installer in module_list:
        try:
            m = importlib.import_module(name)
            if hasattr(m, "__version__"):
                version = m.__version__
                print(f"[x] {name}={version}")
                if versions is not None and version not in versions:
                    # BUG FIX: "suppoted" typo, and the second literal was not
                    # an f-string, so "{versions}" was printed verbatim.
                    print(
                        f"Warning! {name}={version} is not supported. "
                        f"Supported versions are {versions}"
                    )
            else:
                print(f"[x] {name}")
        except ImportError:
            print(f"[ ] {name}")
            if installer is not None:
                to_install.append(f"Use '{installer}' to install {name}")
    # check muskits install
    if Path("muskits.done").exists():
        print("[x] muskits")
    else:
        print("[ ] muskits")
        to_install.append("Use 'installers/install_muskits.sh' to install muskits")
    print()
    print("Executables:")
    # Matches a dotted version number such as "1.2" or "14.4.2-git".
    pattern = re.compile(r"([0-9]+\.[0-9]+(?:\.[0-9]+[^\s]*)?)\s*")
    for name, installer, version_option in executable_list:
        if shutil.which(name) is not None:
            string = f"[x] {name}"
            if version_option is not None:
                # Some tools print the version to stdout, others to stderr;
                # try stdout first, then fall back to stderr.
                cp = subprocess.run(
                    [name, version_option], capture_output=True, text=True
                )
                if cp.returncode == 0:
                    ma = re.search(pattern, cp.stdout)
                    if ma is not None:
                        string = f"[x] {name}={ma.group(1)}"
                    else:
                        ma = re.search(pattern, cp.stderr)
                        if ma is not None:
                            string = f"[x] {name}={ma.group(1)}"
            print(string)
        else:
            print(f"[ ] {name}")
            if installer is not None:
                to_install.append(f"Use '{installer}' to install {name}")
    # Kaldi: distinguish "not cloned", "cloned but not compiled", "compiled".
    if not Path("kaldi/egs/wsj/s5/utils/parse_options.sh").exists():
        print("[ ] Kaldi")
        to_install.append(
            "Type 'git clone --depth 1 https://github.com/kaldi-asr/kaldi'"
            " and see 'kaldi/tools/INSTALL' to install Kaldi"
        )
    elif not Path("kaldi/src/bin/copy-matrix").exists():
        print("[x] Kaldi (not compiled)")
        to_install.append("See 'kaldi/tools/INSTALL' to install Kaldi")
    else:
        print("[x] Kaldi (compiled)")
    print()
    print("INFO:")
    for m in to_install:
        print(m)
if __name__ == "__main__":
main()
| 6,394 | 31.29798 | 83 | py |
espnet | espnet-master/test/test_e2e_compatibility.py | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import print_function
import importlib
import os
import re
import shutil
import subprocess
import tempfile
from os.path import join
import chainer
import numpy as np
import pytest
import torch
from espnet.asr.asr_utils import chainer_load, get_model_conf, torch_load
def download_zip_from_google_drive(download_dir, file_id):
    """Download a zip archive from Google Drive and extract it.

    Args:
        download_dir: directory the archive is extracted into (created if absent).
        file_id: Google Drive file id taken from the sharing link.

    Returns:
        Path of the first ``model.*.best`` file found under ``download_dir``
        (empty string if none is found).
    """
    # directory check
    os.makedirs(download_dir, exist_ok=True)
    tmpzip = join(download_dir, "tmp.zip")
    # download zip file from google drive via wget
    cmd = [
        "wget",
        "https://drive.google.com/uc?export=download&id=%s" % file_id,
        "-O",
        tmpzip,
    ]
    subprocess.run(cmd, check=True)
    try:
        # unzip downloaded files
        cmd = ["unzip", tmpzip, "-d", download_dir]
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError:
        # sometimes, wget from google drive is failed due to virus check confirmation
        # to avoid it, we need to do some tricky processings
        # see
        # https://stackoverflow.com/questions/20665881/direct-download-from-google-drive-using-google-drive-api
        out = subprocess.check_output(
            "curl -c /tmp/cookies "
            '"https://drive.google.com/uc?export=download&id=%s"' % file_id,
            shell=True,
        )
        out = out.decode("utf-8")
        # BUG FIX: the href scraped from the HTML is entity-escaped, so
        # "&amp;" must be unescaped to "&"; the previous
        # .replace("&", "&") was a no-op and left a broken link.
        dllink = "https://drive.google.com{}".format(
            re.findall(r'<a id="uc-download-link" [^>]* href="([^"]*)">', out)[
                0
            ].replace("&amp;", "&")
        )
        subprocess.call(
            f'curl -L -b /tmp/cookies "{dllink}" > {tmpzip}', shell=True
        )  # NOQA
        cmd = ["unzip", tmpzip, "-d", download_dir]
        subprocess.run(cmd, check=True)
    # get model file path
    cmd = ["find", download_dir, "-name", "model.*.best"]
    cmd_state = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
    return cmd_state.stdout.decode("utf-8").split("\n")[0]
# TODO(kan-bayashi): make it to be compatible with python2
# file id in google drive can be obtain from sharing link
# ref: https://qiita.com/namakemono/items/c963e75e0af3f7eed732
@pytest.mark.skipif(True, reason="Skip due to unstable download")
@pytest.mark.parametrize(
    "module, download_info",
    [
        (
            "espnet.nets.pytorch_backend.e2e_asr",
            ("v.0.3.0 egs/an4/asr1 pytorch", "1zF88bRNbJhw9hNBq3NrDg8vnGGibREmg"),
        ),
        (
            "espnet.nets.chainer_backend.e2e_asr",
            ("v.0.3.0 egs/an4/asr1 chainer", "1m2SZLNxvur3q13T6Zrx6rEVfqEifgPsx"),
        ),
    ],
)
def test_downloaded_asr_model_decodable(module, download_info):
    """Download a pretrained AN4 model and verify it decodes random input."""
    # download model
    print(download_info[0])
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir=".")
    model_path = download_zip_from_google_drive(tmpdir, download_info[1])
    # load trained model parameters
    m = importlib.import_module(module)
    idim, odim, train_args = get_model_conf(model_path)
    model = m.E2E(idim, odim, train_args)
    if "chainer" in module:
        chainer_load(model_path, model)
    else:
        torch_load(model_path, model)
    # decoding only needs to run without error; output is not checked
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(128, idim)
        model.recognize(in_data, train_args, train_args.char_list)  # decodable
    # remove
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
| 3,521 | 30.72973 | 111 | py |
espnet | espnet-master/test/test_e2e_asr_conformer.py | import argparse
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_asr_conformer import E2E
from espnet.nets.pytorch_backend.transformer import plot
def make_arg(**kwargs):
    """Return a default conformer E2E training config as an ``argparse.Namespace``.

    Any keyword argument overrides the corresponding default below.
    """
    base = {
        "adim": 2,
        "aheads": 1,
        "dropout_rate": 0.0,
        "transformer_attn_dropout_rate": None,
        "elayers": 1,
        "eunits": 2,
        "dlayers": 1,
        "dunits": 2,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "transformer_decoder_selfattn_layer_type": "selfattn",
        "transformer_encoder_pos_enc_layer_type": "rel_pos",
        "transformer_encoder_selfattn_layer_type": "rel_selfattn",
        "macaron_style": True,
        "use_cnn_module": True,
        "cnn_module_kernel": 3,
        "transformer_init": "pytorch",
        "transformer_input_layer": "conv2d",
        "transformer_length_normalized_loss": True,
        "report_cer": False,
        "report_wer": False,
        "mtlalpha": 0.0,
        "lsm_weight": 0.001,
        "char_list": ["<blank>", "a", "e", "i", "o", "u"],
        "ctc_type": "builtin",
    }
    return argparse.Namespace(**{**base, **kwargs})
def prepare(args):
    """Build a small conformer E2E model plus a random padded batch for it.

    Returns (model, x, ilens, y, data, uttid_list) where ``data`` mimics the
    json shape metadata consumed by the attention-plot helpers.
    """
    idim, odim = 10, 3
    ilens, olens = [10, 9], [3, 4]
    batchsize = len(ilens)
    n_token = odim - 1

    model = E2E(idim, odim, args)

    x = torch.randn(batchsize, max(ilens), idim)
    y = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    # Mark padding: frames beyond each true length get -1, labels get ignore_id.
    for i, (il, ol) in enumerate(zip(ilens, olens)):
        x[i, il:] = -1
        y[i, ol:] = model.ignore_id

    uttid_list = ["utt%d" % i for i in range(batchsize)]
    data = {
        uid: {
            "input": [{"shape": [ilens[i], idim]}],
            "output": [{"shape": [olens[i]]}],
        }
        for i, uid in enumerate(uttid_list)
    }
    return model, x, torch.tensor(ilens), y, data, uttid_list
conformer_mcnn_args = dict(
transformer_encoder_pos_enc_layer_type="rel_pos",
transformer_encoder_selfattn_layer_type="rel_selfattn",
macaron_style=True,
use_cnn_module=False,
)
conformer_mcnn_mmacaron_args = dict(
transformer_encoder_pos_enc_layer_type="rel_pos",
transformer_encoder_selfattn_layer_type="rel_selfattn",
macaron_style=False,
use_cnn_module=False,
)
conformer_mcnn_mmacaron_mrelattn_args = dict(
transformer_encoder_pos_enc_layer_type="abs_pos",
transformer_encoder_selfattn_layer_type="selfattn",
macaron_style=False,
use_cnn_module=False,
)
conformer_ctc = dict(
transformer_encoder_pos_enc_layer_type="rel_pos",
transformer_encoder_selfattn_layer_type="rel_selfattn",
macaron_style=True,
use_cnn_module=False,
mtlalpha=1.0,
)
conformer_intermediate_ctc = dict(
transformer_encoder_pos_enc_layer_type="rel_pos",
transformer_encoder_selfattn_layer_type="rel_selfattn",
macaron_style=True,
use_cnn_module=False,
mtlalpha=1.0,
elayers=2,
intermediate_ctc_weight=0.3,
intermediate_ctc_layer="1",
stochastic_depth_rate=0.3,
)
conformer_selfconditioned_ctc = dict(
transformer_encoder_pos_enc_layer_type="rel_pos",
transformer_encoder_selfattn_layer_type="rel_selfattn",
macaron_style=True,
use_cnn_module=False,
mtlalpha=1.0,
elayers=2,
intermediate_ctc_weight=0.5,
intermediate_ctc_layer="1",
stochastic_depth_rate=0.0,
self_conditioning=True,
)
def _savefn(*args, **kwargs):
return
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        conformer_mcnn_args,
        conformer_mcnn_mmacaron_args,
        conformer_mcnn_mmacaron_mrelattn_args,
        conformer_ctc,
        conformer_intermediate_ctc,
        conformer_selfconditioned_ctc,
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """One optimizer step, attention/CTC plotting, and greedy decoding must all run."""
    args = make_arg(**model_dict)
    model, x, ilens, y, data, uttid_list = prepare(args)
    # check for pure CTC and pure Attention
    if args.mtlalpha == 1:
        assert model.decoder is None
    elif args.mtlalpha == 0:
        assert model.ctc is None
    # test beam search
    recog_args = argparse.Namespace(
        beam_size=1,
        penalty=0.0,
        ctc_weight=0.0,
        maxlenratio=1.0,
        lm_weight=0,
        minlenratio=0,
        nbest=1,
    )
    # test trainable
    optim = torch.optim.Adam(model.parameters(), 0.01)
    loss = model(x, ilens, y)
    optim.zero_grad()
    loss.backward()
    optim.step()
    # test attention plot
    attn_dict = model.calculate_all_attentions(x[0:1], ilens[0:1], y[0:1])
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
    # test CTC plot
    ctc_probs = model.calculate_all_ctc_probs(x[0:1], ilens[0:1], y[0:1])
    if args.mtlalpha > 0:
        print(ctc_probs.shape)
    else:
        assert ctc_probs is None
    # test decodable
    with torch.no_grad():
        nbest = model.recognize(x[0, : ilens[0]].numpy(), recog_args)
    print(y[0])
    print(nbest[0]["yseq"][1:-1])
| 4,895 | 25.901099 | 83 | py |
espnet | espnet-master/test/test_custom_transducer.py | # coding: utf-8
import argparse
import json
import tempfile
import pytest
import torch
from packaging.version import parse as V
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E
from espnet.nets.pytorch_backend.transducer.blocks import build_blocks
is_torch_1_5_plus = V(torch.__version__) >= V("1.5.0")
def make_train_args(**kwargs):
    """Return a default custom-transducer training config, overridden by kwargs."""
    # Default single-layer transformer block shared by encoder and decoder
    # (separate dict instances so tests may mutate one side independently).
    tiny_transformer = {"type": "transformer", "d_hidden": 2, "d_ff": 2, "heads": 1}
    base = {
        "transformer_init": "pytorch",
        "etype": "custom",
        "custom_enc_input_layer": "conv2d",
        "custom_enc_self_attn_type": "selfattn",
        "custom_enc_positional_encoding_type": "abs_pos",
        "custom_enc_pw_activation_type": "relu",
        "custom_enc_conv_mod_activation_type": "relu",
        "enc_block_arch": [dict(tiny_transformer)],
        "enc_block_repeat": 1,
        "dtype": "custom",
        "custom_dec_input_layer": "embed",
        "dec_block_arch": [dict(tiny_transformer)],
        "dec_block_repeat": 1,
        "custom_dec_pw_activation_type": "relu",
        "dropout_rate_embed_decoder": 0.0,
        "joint_dim": 2,
        "joint_activation_type": "tanh",
        "transducer_loss_weight": 1.0,
        "use_ctc_loss": False,
        "ctc_loss_weight": 0.0,
        "ctc_loss_dropout_rate": 0.0,
        "use_lm_loss": False,
        "lm_loss_weight": 0.0,
        "use_aux_transducer_loss": False,
        "aux_transducer_loss_weight": 0.0,
        "aux_transducer_loss_enc_output_layers": [],
        "use_symm_kl_div_loss": False,
        "symm_kl_div_loss_weight": 0.0,
        "char_list": ["a", "e", "i", "o", "u"],
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "report_cer": False,
        "report_wer": False,
        "search_type": "default",
        "verbose": 0,
        "outdir": None,
        "rnnlm": None,
        "model_module": "espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
    }
    base.update(kwargs)
    return argparse.Namespace(**base)
def make_recog_args(**kwargs):
    """Return default transducer decoding options, overridden by kwargs."""
    base = {
        "batchsize": 0,
        "beam_size": 1,
        "nbest": 1,
        "verbose": 0,
        "search_type": "default",
        "nstep": 1,
        "max_sym_exp": 2,
        "u_max": 5,
        "prefix_alpha": 2,
        "softmax_temperature": 1.0,
        "score_norm_transducer": True,
        "rnnlm": None,
        "lm_weight": 0.1,
    }
    return argparse.Namespace(**{**base, **kwargs})
def get_default_scope_inputs():
    """Return the (batch_size, idim, odim, input_lengths, output_lengths) fixture."""
    batch_size = 2
    input_dim, output_dim = 12, 5
    input_lengths = [15, 11]
    output_lengths = [13, 9]
    return batch_size, input_dim, output_dim, input_lengths, output_lengths
def get_lm():
    """Build a tiny character-level LSTM language model for decoding tests."""
    n_layers = 1
    n_units = 4
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    # Vocabulary size is taken from char_list; weights stay randomly initialized.
    rnnlm = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(char_list), n_layers, n_units, typ="lstm")
    )
    return rnnlm
def get_wordlm():
    """Build a tiny word-level LM wrapped in a look-ahead adapter for tests."""
    n_layers = 1
    n_units = 8
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    word_list = ["<blank>", "<unk>", "ab", "id", "ac", "bd", "<eos>"]
    # token -> index maps used by the look-ahead wrapper
    char_dict = {x: i for i, x in enumerate(char_list)}
    word_dict = {x: i for i, x in enumerate(word_list)}
    word_rnnlm = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(word_list), n_layers, n_units)
    )
    # Re-wrap: LookAheadWordLM exposes the word LM through a character-level API.
    word_rnnlm = lm_pytorch.ClassifierWithState(
        extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict)
    )
    return word_rnnlm
def prepare(args):
    """Build a custom-transducer E2E model plus a random padded batch.

    Returns (model, feats, feats_len, labels, data, uttid_list); ``data``
    mimics the json shape metadata used by the attention-plot helpers.
    """
    bs, idim, odim, ilens, olens = get_default_scope_inputs()
    # Labels are drawn from [0, odim - 1); presumably the remaining id is a
    # reserved symbol (blank) — TODO confirm against E2E's label convention.
    n_token = odim - 1
    model = E2E(idim, odim, args)
    feats = torch.randn(bs, max(ilens), idim)
    labels = (torch.rand(bs, max(olens)) * n_token % n_token).long()
    # Pad past each true length: feature frames with -1, labels with ignore_id.
    for i in range(bs):
        feats[i, ilens[i] :] = -1
        labels[i, olens[i] :] = model.ignore_id
    data = {}
    uttid_list = []
    for i in range(bs):
        data["utt%d" % i] = {
            "input": [{"shape": [ilens[i], idim]}],
            "output": [{"shape": [olens[i]]}],
        }
        uttid_list.append("utt%d" % i)
    return model, feats, torch.tensor(ilens), labels, data, uttid_list
@pytest.mark.parametrize(
"train_dic, recog_dic",
[
({}, {}),
({"enc_block_repeat": 2}, {}),
({"dec_block_repeat": 2}, {}),
(
{
"enc_block_arch": [
{
"type": "conformer",
"d_hidden": 2,
"d_ff": 2,
"heads": 1,
"macaron_style": True,
"use_conv_mod": True,
"conv_mod_kernel": 1,
}
],
"custom_enc_input_layer": "vgg2l",
"custom_enc_self_attn_type": "rel_self_attn",
"custom_enc_positional_encoding_type": "rel_pos",
},
{},
),
(
{
"enc_block_arch": [
{
"type": "conformer",
"d_hidden": 2,
"d_ff": 2,
"heads": 2,
"macaron_style": False,
"use_conv_mod": True,
"conv_mod_kernel": 1,
}
],
},
{"custom_dec_pw_activation_type": "swish"},
),
(
{
"custom_enc_input_layer": "linear",
"custom_enc_positional_encoding_type": "abs_pos",
"enc_block_arch": [
{
"type": "transformer",
"d_hidden": 32,
"d_ff": 4,
"heads": 1,
},
{
"type": "conv1d",
"idim": 32,
"odim": 16,
"kernel_size": 3,
"dilation": 2,
"stride": 2,
"dropout-rate": 0.3,
"use-relu": True,
"use-batch-norm": True,
},
{
"type": "transformer",
"d_hidden": 16,
"d_ff": 4,
"heads": 1,
"dropout-rate": 0.3,
"att-dropout-rate": 0.2,
"pos-dropout-rate": 0.1,
},
],
},
{},
),
(
{
"enc_block_arch": [
{
"type": "conv1d",
"idim": 8,
"odim": 8,
"kernel_size": 2,
"dilation": 2,
"stride": 1,
"dropout-rate": 0.3,
"use-relu": True,
"use-batch-norm": True,
},
{
"type": "conformer",
"d_hidden": 8,
"d_ff": 4,
"heads": 1,
"macaron_style": False,
"use_conv_mod": False,
},
],
"custom_enc_self_attn_type": "rel_self_attn",
"custom_enc_positional_encoding_type": "rel_pos",
},
{},
),
(
{
"enc_block_arch": [
{
"type": "conv1d",
"idim": 2,
"odim": 2,
"kernel_size": 2,
"dilation": 1,
"stride": 1,
}
]
},
{},
),
(
{
"dec_block_arch": [
{
"type": "causal-conv1d",
"idim": 8,
"odim": 8,
"kernel_size": 3,
"dropout-rate": 0.3,
"use-relu": True,
"use-batch-norm": True,
},
{"type": "transformer", "d_hidden": 8, "d_ff": 4, "heads": 1},
]
},
{},
),
({"custom_enc_pw_activation_type": "swish"}, {}),
({"custom_enc_pw_activation_type": "hardtanh"}, {}),
({"custom_dec_pw_activation_type": "swish"}, {}),
({"custom_dec_pw_activation_type": "hardtanh"}, {}),
({"custom_enc_positional_encoding_type": "scaled_abs_pos"}, {}),
({"joint_activation_type": "relu"}, {}),
({"joint_activation_type": "swish"}, {}),
({"custom_enc_input_layer": "vgg2l"}, {}),
({"custom_enc_input_layer": "linear"}, {}),
({"report_cer": True, "report_wer": True}, {}),
({"report_cer": True, "beam_size": 2}, {}),
({}, {"beam_size": 2}),
({}, {"beam_size": 2, "nbest": 2, "score_norm_transducer": False}),
({}, {"beam_size": 2, "search_type": "nsc", "nstep": 3, "prefix_alpha": 1}),
({}, {"beam_size": 2, "search_type": "tsd", "max_sym_exp": 3}),
({}, {"beam_size": 2, "search_type": "alsd"}),
({}, {"beam_size": 2, "search_type": "alsd", "u_max": 10}),
({}, {"beam_size": 2, "search_type": "maes", "nstep": 3, "prefix_alpha": 1}),
({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_lm()}),
({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_wordlm()}),
({}, {"beam_size": 2, "search_type": "maes", "nstep": 4, "rnnlm": get_lm()}),
({}, {"beam_size": 2, "search_type": "maes", "rnnlm": get_wordlm()}),
({}, {"beam_size": 2, "softmax_temperature": 2.0, "rnnlm": get_wordlm()}),
({}, {"beam_size": 2, "search_type": "nsc", "softmax_temperature": 5.0}),
],
)
def test_custom_transducer_trainable_and_decodable(train_dic, recog_dic):
train_args = make_train_args(**train_dic)
recog_args = make_recog_args(**recog_dic)
model, feats, feats_len, labels, data, uttid_list = prepare(train_args)
optim = torch.optim.Adam(model.parameters(), 0.01)
loss = model(feats, feats_len, labels)
optim.zero_grad()
loss.backward()
optim.step()
beam_search = BeamSearchTransducer(
decoder=model.decoder,
joint_network=model.transducer_tasks.joint_network,
beam_size=recog_args.beam_size,
lm=recog_args.rnnlm,
lm_weight=recog_args.lm_weight,
search_type=recog_args.search_type,
max_sym_exp=recog_args.max_sym_exp,
u_max=recog_args.u_max,
nstep=recog_args.nstep,
prefix_alpha=recog_args.prefix_alpha,
score_norm=recog_args.score_norm_transducer,
softmax_temperature=recog_args.softmax_temperature,
)
with torch.no_grad():
nbest = model.recognize(feats[0, : feats_len[0]].numpy(), beam_search)
print(nbest[0]["yseq"][1:-1])
@pytest.mark.execution_timeout(4)
def test_calculate_plot_attention():
    """Attention maps must be computable and renderable for a one-utterance batch."""
    from espnet.nets.pytorch_backend.transformer import plot
    train_args = make_train_args(report_cer=True)
    model, feats, feats_len, labels, data, uttid_list = prepare(train_args)
    # attribute access only — verifies the plot class is exposed on the model
    model.attention_plot_class
    attn_dict = model.calculate_all_attentions(feats[0:1], feats_len[0:1], labels[0:1])
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "/tmp/espnet-test")
@pytest.mark.parametrize(
"train_dic",
[
{
"enc_block_repeat": 2,
"use_aux_transducer_loss": True,
"aux_transducer_loss_enc_output_layers": [0],
},
{
"enc_block_arch": [
{
"type": "conformer",
"d_hidden": 2,
"d_ff": 2,
"heads": 1,
"macaron_style": True,
"use_conv_mod": True,
"conv_mod_kernel": 1,
}
],
"custom_enc_input_layer": "vgg2l",
"custom_enc_self_attn_type": "rel_self_attn",
"custom_enc_positional_encoding_type": "rel_pos",
"enc_block_repeat": 3,
"use_aux_transducer_loss": True,
"aux_transducer_loss_enc_output_layers": [0, 1],
},
{"aux_ctc": True, "aux_ctc_weight": 0.5},
{"aux_cross_entropy": True, "aux_cross_entropy_weight": 0.5},
],
)
def test_auxiliary_task(train_dic):
train_args = make_train_args(**train_dic)
recog_args = make_recog_args()
model, feats, feats_len, labels, data, uttid_list = prepare(train_args)
optim = torch.optim.Adam(model.parameters(), 0.01)
loss = model(feats, feats_len, labels)
optim.zero_grad()
loss.backward()
optim.step()
beam_search = BeamSearchTransducer(
decoder=model.decoder,
joint_network=model.transducer_tasks.joint_network,
beam_size=recog_args.beam_size,
lm=recog_args.rnnlm,
lm_weight=recog_args.lm_weight,
search_type=recog_args.search_type,
max_sym_exp=recog_args.max_sym_exp,
u_max=recog_args.u_max,
nstep=recog_args.nstep,
prefix_alpha=recog_args.prefix_alpha,
score_norm=recog_args.score_norm_transducer,
)
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", "wb") as f:
f.write(
json.dumps(
(12, 5, vars(train_args)),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
with torch.no_grad():
model, _ = load_trained_model(tmpdir + "/model.dummy.best", training=False)
nbest = model.recognize(feats[0, : feats_len[0]].numpy(), beam_search)
print(nbest[0]["yseq"][1:-1])
def test_no_block_arch():
    """E2E must reject configs where an encoder or decoder architecture is missing."""
    _, idim, odim, _, _ = get_default_scope_inputs()
    args = make_train_args(enc_block_arch=None)
    with pytest.raises(ValueError):
        E2E(idim, odim, args)
    args = make_train_args(dec_block_arch=None)
    with pytest.raises(ValueError):
        E2E(idim, odim, args)
def test_invalid_input_layer_type():
    """build_blocks must reject an unknown input-layer name ("foo")."""
    architecture = [
        {
            "type": "transformer",
            "d_hidden": 2,
            "d_ff": 2,
            "heads": 1,
        },
    ]
    with pytest.raises(NotImplementedError):
        _, _, _ = build_blocks("encoder", 4, "foo", architecture)
def test_invalid_architecture_layer_type():
    """build_blocks must reject an unknown block "type" value."""
    with pytest.raises(NotImplementedError):
        _, _, _ = build_blocks("encoder", 4, "linear", [{"type": "foo"}])
def test_invalid_block():
    """build_blocks must reject a block definition with no "type" key at all."""
    with pytest.raises(ValueError):
        _, _, _ = build_blocks("encoder", 4, "linear", [{"foo": "foo"}])
def test_invalid_block_arguments():
with pytest.raises(ValueError):
_, _, _ = build_blocks("encoder", 4, "linear", [{"type": "transformer"}])
with pytest.raises(ValueError):
_, _, _ = build_blocks("encoder", 4, "linear", [{"type": "conformer"}])
with pytest.raises(ValueError):
_, _, _ = build_blocks(
"encoder",
4,
"linear",
[
{
"type": "conformer",
"d_hidden": 4,
"d_ff": 8,
"heads": 1,
"macaron_style": False,
"use_conv_mod": True,
}
],
)
with pytest.raises(ValueError):
_, _, _ = build_blocks("decoder", 4, "embed", [{"type": "conformer"}])
with pytest.raises(ValueError):
_, _, _ = build_blocks("encoder", 4, "embed", [{"type": "causal-conv1d"}])
with pytest.raises(ValueError):
_, _, _ = build_blocks("decoder", 4, "embed", [{"type": "conv1d"}])
with pytest.raises(ValueError):
_, _, _ = build_blocks(
"encoder",
4,
"embed",
[
{
"type": "transformer",
"d_hidden": 2,
"d_ff": 8,
"heads": 1,
},
],
positional_encoding_type="rel_pos",
self_attn_type="self_attn",
)
def test_invalid_block_io():
with pytest.raises(ValueError):
_, _, _ = build_blocks(
"encoder",
4,
"linear",
[
{
"type": "transformer",
"d_hidden": 2,
"d_ff": 8,
"heads": 1,
},
{
"type": "transformer",
"d_hidden": 4,
"d_ff": 8,
"heads": 1,
},
],
)
@pytest.mark.parametrize(
"train_dic",
[
{},
{
"enc_block_arch": [
{
"type": "conformer",
"d_hidden": 2,
"d_ff": 2,
"heads": 1,
"macaron_style": True,
"use_conv_mod": True,
"conv_mod_kernel": 1,
}
],
"custom_enc_input_layer": "vgg2l",
"custom_enc_self_attn_type": "rel_self_attn",
"custom_enc_positional_encoding_type": "rel_pos",
},
{
"enc_block_arch": [
{
"type": "conv1d",
"idim": 2,
"odim": 2,
"kernel_size": 2,
"dilation": 1,
"stride": 1,
"dropout-rate": 0.3,
"use-relu": True,
"use-batch-norm": True,
},
{
"type": "transformer",
"d_hidden": 2,
"d_ff": 2,
"heads": 1,
"macaron_style": False,
"use_conv_mod": False,
},
],
"custom_enc_input_layer": "linear",
},
{
"dec_block_arch": [
{"type": "causal-conv1d", "idim": 2, "odim": 2, "kernel_size": 1},
{"type": "transformer", "d_hidden": 2, "d_ff": 2, "heads": 1},
]
},
],
)
@pytest.mark.parametrize(
"recog_dic",
[
{},
{"beam_size": 2, "search_type": "default"},
{"beam_size": 2, "search_type": "alsd"},
{"beam_size": 2, "search_type": "tsd"},
{"beam_size": 2, "search_type": "nsc"},
{"beam_size": 2, "search_type": "maes"},
],
)
@pytest.mark.parametrize(
"quantize_dic",
[
{"mod": {torch.nn.Linear}, "dtype": torch.qint8},
{"mod": {torch.nn.Linear}, "dtype": torch.float16},
{"mod": {torch.nn.LSTM}, "dtype": torch.qint8},
{"mod": {torch.nn.LSTM}, "dtype": torch.float16},
{"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.qint8},
{"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.float16},
],
)
@pytest.mark.execution_timeout(4)
def test_dynamic_quantization(train_dic, recog_dic, quantize_dic):
train_args = make_train_args(**train_dic)
recog_args = make_recog_args(**recog_dic)
model, feats, feats_len, _, _, _ = prepare(train_args)
if not is_torch_1_5_plus and (
torch.nn.Linear in quantize_dic["mod"]
and quantize_dic["dtype"] == torch.float16
):
# In recognize(...) from asr.py we raise ValueError however
# AssertionError is originaly raised by torch.
with pytest.raises(AssertionError):
model = torch.quantization.quantize_dynamic(
model,
quantize_dic["mod"],
dtype=quantize_dic["dtype"],
)
pytest.skip("Skip rest of the test after checking AssertionError")
else:
model = torch.quantization.quantize_dynamic(
model,
quantize_dic["mod"],
dtype=quantize_dic["dtype"],
)
beam_search = BeamSearchTransducer(
decoder=model.decoder,
joint_network=model.transducer_tasks.joint_network,
beam_size=recog_args.beam_size,
lm=recog_args.rnnlm,
lm_weight=recog_args.lm_weight,
search_type=recog_args.search_type,
max_sym_exp=recog_args.max_sym_exp,
u_max=recog_args.u_max,
nstep=recog_args.nstep,
prefix_alpha=recog_args.prefix_alpha,
score_norm=recog_args.score_norm_transducer,
quantization=True,
)
with torch.no_grad():
model.recognize(feats[0, : feats_len[0]].numpy(), beam_search)
@pytest.mark.parametrize(
    "train_dic, subsample",
    [
        # conv2d (default) and vgg2l front-ends subsample by 4; linear does not.
        ({}, 4),
        ({"custom_enc_input_layer": "vgg2l"}, 4),
        ({"custom_enc_input_layer": "linear"}, 1),
    ],
)
def test_subsampling(train_dic, subsample):
    """The model must report the subsampling factor implied by its input layer."""
    train_args = make_train_args(**train_dic)
    model, feats, feats_len, _, _, _ = prepare(train_args)
    assert model.get_total_subsampling_factor() == subsample
| 21,623 | 30.33913 | 87 | py |
espnet | espnet-master/test/test_batch_beam_search.py | import os
from argparse import Namespace
from test.test_beam_search import prepare, transformer_args
import numpy
import pytest
import torch
from espnet.nets.batch_beam_search import BatchBeamSearch, BeamSearch
from espnet.nets.beam_search import Hypothesis
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.nets.scorers.ngram import NgramFullScorer
def test_batchfy_hyp():
    """Check that batchfy/unbatchfy round-trips hypotheses, padding with eos."""
    n_vocab = 5
    eos = -1
    # simplest possible beam search: two length-bonus scorers, equal weights
    searcher = BatchBeamSearch(
        beam_size=3,
        vocab_size=n_vocab,
        weights={"a": 0.5, "b": 0.5},
        scorers={"a": LengthBonus(n_vocab), "b": LengthBonus(n_vocab)},
        pre_beam_score_key="a",
        sos=eos,
        eos=eos,
    )
    hyps = [
        Hypothesis(
            yseq=torch.tensor([0, 1, 2]),
            score=torch.tensor(0.15),
            scores={"a": torch.tensor(0.1), "b": torch.tensor(0.2)},
            states={"a": 1, "b": 2},
        ),
        Hypothesis(
            yseq=torch.tensor([0, 1]),
            score=torch.tensor(0.1),
            scores={"a": torch.tensor(0.0), "b": torch.tensor(0.2)},
            states={"a": 3, "b": 4},
        ),
    ]
    batched = searcher.batchfy(hyps)
    # the shorter hypothesis must be right-padded with eos
    assert torch.all(batched.yseq == torch.tensor([[0, 1, 2], [0, 1, eos]]))
    assert torch.all(batched.score == torch.tensor([0.15, 0.1]))
    assert torch.all(batched.scores["a"] == torch.tensor([0.1, 0.0]))
    assert torch.all(batched.scores["b"] == torch.tensor([0.2, 0.2]))
    assert batched.states["a"] == [1, 3]
    assert batched.states["b"] == [2, 4]
    # unbatchfy must restore the original per-hypothesis fields exactly
    for original, restored in zip(hyps, searcher.unbatchfy(batched)):
        assert restored.yseq.tolist() == original.yseq.tolist()
        assert restored.score == original.score
        assert restored.scores == original.scores
        assert restored.states == original.states
# Tiny language-model configurations shared by the parametrized tests below.
lstm_lm = Namespace(dropout_rate=0.0, layer=1, type="lstm", unit=2)
gru_lm = Namespace(dropout_rate=0.0, layer=1, type="gru", unit=2)
transformer_lm = Namespace(
    att_unit=2,
    dropout_rate=0.0,
    embed_unit=2,
    head=1,
    layer=1,
    pos_enc="none",
    unit=2,
)
@pytest.mark.parametrize(
    "model_class, args, ctc_weight, lm_nn, lm_args, lm_weight, ngram_weight, \
    bonus, device, dtype",
    [
        (nn, args, ctc, lm_nn, lm_args, lm, ngram, bonus, device, dtype)
        for device in ("cpu", "cuda")
        # (("rnn", rnn_args),)
        for nn, args in (("transformer", transformer_args),)
        for ctc in (0.0, 0.5, 1.0)
        for lm_nn, lm_args in (
            ("default", lstm_lm),
            ("default", gru_lm),
            ("transformer", transformer_lm),
        )
        for lm in (0.5,)
        for ngram in (0.5,)
        for bonus in (0.1,)
        for dtype in ("float32", "float64")  # TODO(karita): float16
    ],
)
def test_batch_beam_search_equal(
    model_class,
    args,
    ctc_weight,
    lm_nn,
    lm_args,
    lm_weight,
    ngram_weight,
    bonus,
    device,
    dtype,
):
    """Check BatchBeamSearch returns the same n-best results as BeamSearch.

    Runs the legacy (non-batched) and the batched beam search on the same
    encoded utterance with identical scorers/weights and asserts matching
    token sequences and (numerically close) scores.
    """
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    if device == "cpu" and dtype == "float16":
        pytest.skip("cpu float16 implementation is not available in pytorch yet")
    # seed setting
    torch.manual_seed(123)
    torch.backends.cudnn.deterministic = True
    # https://github.com/pytorch/pytorch/issues/6351
    torch.backends.cudnn.benchmark = False
    dtype = getattr(torch, dtype)
    model, x, ilens, y, data, train_args = prepare(
        model_class, args, mtlalpha=ctc_weight
    )
    model.eval()
    char_list = train_args.char_list
    lm = dynamic_import_lm(lm_nn, backend="pytorch")(len(char_list), lm_args)
    lm.eval()
    root = os.path.dirname(os.path.abspath(__file__))
    ngram = NgramFullScorer(os.path.join(root, "beam_search_test.arpa"), args.char_list)
    # test previous beam search
    args = Namespace(
        beam_size=3,
        penalty=bonus,
        ctc_weight=ctc_weight,
        maxlenratio=0,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        minlenratio=0,
        nbest=5,
    )
    # new beam search: assemble the same scorer/weight sets for both searches
    scorers = model.scorers()
    if lm_weight != 0:
        scorers["lm"] = lm
    if ngram_weight != 0:
        scorers["ngram"] = ngram
    scorers["length_bonus"] = LengthBonus(len(char_list))
    weights = dict(
        decoder=1.0 - ctc_weight,
        ctc=ctc_weight,
        lm=args.lm_weight,
        ngram=args.ngram_weight,
        length_bonus=args.penalty,
    )
    model.to(device, dtype=dtype)
    model.eval()
    with torch.no_grad():
        enc = model.encode(x[0, : ilens[0]].to(device, dtype=dtype))
    legacy_beam = BeamSearch(
        beam_size=args.beam_size,
        vocab_size=len(char_list),
        weights=weights,
        scorers=scorers,
        token_list=train_args.char_list,
        sos=model.sos,
        eos=model.eos,
        pre_beam_score_key=None if ctc_weight == 1.0 else "full",
    )
    legacy_beam.to(device, dtype=dtype)
    legacy_beam.eval()
    beam = BatchBeamSearch(
        beam_size=args.beam_size,
        vocab_size=len(char_list),
        weights=weights,
        scorers=scorers,
        token_list=train_args.char_list,
        sos=model.sos,
        eos=model.eos,
        pre_beam_score_key=None if ctc_weight == 1.0 else "full",
    )
    beam.to(device, dtype=dtype)
    beam.eval()
    with torch.no_grad():
        legacy_nbest_bs = legacy_beam(
            x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio
        )
        nbest_bs = beam(
            x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio
        )
    # both searches must agree on every n-best hypothesis
    for i, (expected, actual) in enumerate(zip(legacy_nbest_bs, nbest_bs)):
        assert expected.yseq.tolist() == actual.yseq.tolist()
        numpy.testing.assert_allclose(
            expected.score.cpu(), actual.score.cpu(), rtol=1e-6
        )
| 5,863 | 30.026455 | 88 | py |
espnet | espnet-master/test/test_e2e_mt.py | # coding: utf-8
# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import importlib
import os
import tempfile
from test.utils_test import make_dummy_json_mt
import chainer
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.utils.training.batchfy import make_batchset
def make_arg(**kwargs):
    """Return the default E2E-MT config as a Namespace; ``kwargs`` override it."""
    defaults = {
        "elayers": 1,
        "subsample": "2_2",
        "etype": "blstm",
        "eunits": 16,
        "eprojs": 16,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 16,
        "atype": "add",
        "aheads": 2,
        "mtlalpha": 0.5,
        "lsm_type": "",
        "lsm_weight": 0.0,
        "sampling_probability": 0.0,
        "adim": 16,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "nbest": 5,
        "beam_size": 3,
        "penalty": 0.5,
        "maxlenratio": 1.0,
        "minlenratio": 0.0,
        "ctc_weight": 0.0,  # dummy
        "ctc_window_margin": 0,  # dummy
        "verbose": 2,
        "char_list": ["あ", "い", "う", "え", "お"],
        "outdir": None,
        "report_bleu": False,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "sortagrad": 0,
        "context_residual": False,
        "tie_src_tgt_embedding": False,
        "tie_classifier": False,
        "multilingual": False,
        "replace_sos": False,
        "tgt_lang": False,
    }
    return argparse.Namespace(**{**defaults, **kwargs})
def prepare_inputs(mode, ilens=(20, 10), olens=(4, 3), is_cuda=False):
    """Create a random padded batch for the given backend.

    NOTE: the defaults were mutable lists (``[20, 10]``/``[4, 3]``); they are
    now tuples to avoid the shared-mutable-default pitfall (behavior is
    unchanged since they are only iterated).

    Args:
        mode: backend name; only "pytorch" is implemented.
        ilens: per-utterance input lengths.
        olens: per-utterance output (token) lengths.
        is_cuda: if True, move the returned tensors to GPU.

    Returns:
        Tuple of (xs_pad, ilens, ys_pad) torch tensors.

    Raises:
        NotImplementedError: if ``mode == "chainer"``.
        ValueError: for any other unknown ``mode``.
    """
    np.random.seed(1)  # deterministic batches across tests
    assert len(ilens) == len(olens)
    xs = [np.random.randint(0, 5, ilen).astype(np.int32) for ilen in ilens]
    ys = [np.random.randint(0, 5, olen).astype(np.int32) for olen in olens]
    ilens = np.array([x.shape[0] for x in xs], dtype=np.int32)
    if mode == "chainer":
        raise NotImplementedError
    elif mode == "pytorch":
        ilens = torch.from_numpy(ilens).long()
        xs_pad = pad_list([torch.from_numpy(x).long() for x in xs], 0)
        ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], -1)
        if is_cuda:
            xs_pad = xs_pad.cuda()
            ilens = ilens.cuda()
            ys_pad = ys_pad.cuda()
        return xs_pad, ilens, ys_pad
    else:
        raise ValueError("Invalid mode")
def convert_batch(batch, backend="pytorch", is_cuda=False, idim=5, odim=5):
    """Turn a minibatch of (uttid, info) metadata into random padded tensors.

    Lengths are read from each sample's ``info["output"]`` shape entries and
    the actual token values are drawn at random.
    """
    ilens = np.array([sample[1]["output"][1]["shape"][0] for sample in batch])
    olens = np.array([sample[1]["output"][0]["shape"][0] for sample in batch])
    xs = [np.random.randint(0, idim, n).astype(np.int32) for n in ilens]
    ys = [np.random.randint(0, odim, n).astype(np.int32) for n in olens]
    if backend != "pytorch":
        raise NotImplementedError
    xs = pad_list([torch.from_numpy(x).long() for x in xs], 0)
    ilens = torch.from_numpy(ilens).long()
    ys = pad_list([torch.from_numpy(y).long() for y in ys], -1)
    if is_cuda:
        xs = xs.cuda()
        ilens = ilens.cuda()
        ys = ys.cuda()
    return xs, ilens, ys
@pytest.mark.parametrize(
    "module, model_dict",
    [
        ("espnet.nets.pytorch_backend.e2e_mt", {}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"atype": "noatt"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"atype": "dot"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"atype": "coverage"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"atype": "multi_head_dot"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"atype": "multi_head_add"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "grup"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "lstmp"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "bgrup"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "blstmp"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "bgru"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"etype": "blstm"}),
        ("espnet.nets.pytorch_backend.e2e_mt", {"context_residual": True}),
    ],
)
def test_model_trainable_and_decodable(module, model_dict):
    """Run one backward pass and single/batch decoding for each MT config."""
    args = make_arg(**model_dict)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
    else:
        raise NotImplementedError
    m = importlib.import_module(module)
    model = m.E2E(6, 5, args)
    loss = model(*batch)
    if isinstance(loss, tuple):
        # chainer return several values as tuple
        loss[0].backward()  # trainable
    else:
        loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randint(0, 5, (1, 10))
        model.translate(in_data, args, args.char_list)  # decodable
        if "pytorch" in module:
            batch_in_data = np.random.randint(0, 5, (2, 10))
            model.translate_batch(
                batch_in_data, args, args.char_list
            )  # batch decodable
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable(module):
    """Train over a shortest-first (sortagrad) batchset, then decode once."""
    args = make_arg(sortagrad=1)
    dummy_json = make_dummy_json_mt(4, [10, 20], [10, 20], idim=6, odim=5)
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_mt as m
    else:
        import espnet.nets.chainer_backend.e2e_mt as m
    batchset = make_batchset(
        dummy_json, 2, 2**10, 2**10, shortest_first=True, mt=True, iaxis=1, oaxis=0
    )
    model = m.E2E(6, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=6, odim=5))
        if isinstance(loss, tuple):
            # chainer return several values as tuple
            loss[0].backward()  # trainable
        else:
            loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randint(0, 5, (1, 100))
        model.translate(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable_with_batch_bins(module):
    """Train with bin-based batching; each batch must respect the bin budget."""
    args = make_arg(sortagrad=1)
    idim = 6
    odim = 5
    dummy_json = make_dummy_json_mt(4, [10, 20], [10, 20], idim=idim, odim=odim)
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_mt as m
    else:
        raise NotImplementedError
    batch_elems = 2000
    batchset = make_batchset(
        dummy_json,
        batch_bins=batch_elems,
        shortest_first=True,
        mt=True,
        iaxis=1,
        oaxis=0,
    )
    for batch in batchset:
        n = 0
        for uttid, info in batch:
            ilen = int(info["output"][1]["shape"][0])
            olen = int(info["output"][0]["shape"][0])
            n += ilen * idim + olen * odim
        # NOTE(review): this only checks the last utterance's olen, and the
        # accumulated bin count ``n`` is never asserted — possibly intended
        # to be ``assert n <= batch_elems``; confirm before changing.
        assert olen < batch_elems
    model = m.E2E(6, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=6, odim=5))
        if isinstance(loss, tuple):
            # chainer return several values as tuple
            loss[0].backward()  # trainable
        else:
            loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randint(0, 5, (1, 100))
        model.translate(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable_with_batch_frames(module):
    """Train with frame-count batching; batches must respect the frame limits."""
    args = make_arg(sortagrad=1)
    idim = 6
    odim = 5
    dummy_json = make_dummy_json_mt(4, [10, 20], [10, 20], idim=idim, odim=odim)
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_mt as m
    else:
        raise NotImplementedError
    batch_frames_in = 20
    batch_frames_out = 20
    batchset = make_batchset(
        dummy_json,
        batch_frames_in=batch_frames_in,
        batch_frames_out=batch_frames_out,
        shortest_first=True,
        mt=True,
        iaxis=1,
        oaxis=0,
    )
    for batch in batchset:
        i = 0
        o = 0
        for uttid, info in batch:
            i += int(info["output"][1]["shape"][0])
            o += int(info["output"][0]["shape"][0])
        assert i <= batch_frames_in
        assert o <= batch_frames_out
    model = m.E2E(6, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=6, odim=5))
        loss.backward()
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randint(0, 5, (1, 100))
        model.translate(in_data, args, args.char_list)
def init_torch_weight_const(m, val):
    """Fill every multi-dimensional parameter of ``m`` (weights) with ``val``."""
    for param in m.parameters():
        if param.dim() <= 1:
            continue  # leave 1-d parameters (e.g. biases) untouched
        param.data.fill_(val)
@pytest.mark.parametrize("etype", ["blstm"])
def test_loss(etype):
    """Run forward + backward of the MT loss on a constant-initialized model."""
    # ch = importlib.import_module('espnet.nets.chainer_backend.e2e_mt')
    th = importlib.import_module("espnet.nets.pytorch_backend.e2e_mt")
    args = make_arg(etype=etype)
    th_model = th.E2E(6, 5, args)
    const = 1e-4
    init_torch_weight_const(th_model, const)
    th_batch = prepare_inputs("pytorch")
    th_model(*th_batch)
    th_att = th_model.loss
    # NOTE(review): the forward pass is run twice (before and after
    # zero_grad) and th_att is re-assigned — presumably intentional left-over
    # from a chainer comparison; confirm before simplifying.
    th_model.zero_grad()
    th_model(*th_batch)
    th_att = th_model.loss
    th_att.backward()
@pytest.mark.parametrize("etype", ["blstm"])
def test_zero_length_target(etype):
    """Ensure the model's forward pass tolerates a zero-length target."""
    th = importlib.import_module("espnet.nets.pytorch_backend.e2e_mt")
    args = make_arg(etype=etype)
    th_model = th.E2E(6, 5, args)
    th_batch = prepare_inputs("pytorch", olens=[4, 0])
    th_model(*th_batch)
    # NOTE: We ignore all zero length case because chainer also fails.
    # Have a nice data-prep!
    # out_data = ""
    # data = [
    #     ("aaa",
    #      dict(feat=np.random.randint(0, 5, (1, 200)).astype(np.float32), tokenid="")),
    #     ("bbb",
    #      dict(feat=np.random.randint(0, 5, (1, 100)).astype(np.float32), tokenid="")),
    #     ("cc",
    #      dict(feat=np.random.randint(0, 5, (1, 100)).astype(np.float32), tokenid=""))
    # ]
    # th_ctc, th_att, th_acc = th_model(data)
@pytest.mark.parametrize(
    "module, atype",
    [
        ("espnet.nets.pytorch_backend.e2e_mt", "noatt"),
        ("espnet.nets.pytorch_backend.e2e_mt", "dot"),
        ("espnet.nets.pytorch_backend.e2e_mt", "add"),
        ("espnet.nets.pytorch_backend.e2e_mt", "coverage"),
        ("espnet.nets.pytorch_backend.e2e_mt", "multi_head_dot"),
        ("espnet.nets.pytorch_backend.e2e_mt", "multi_head_add"),
    ],
)
def test_calculate_all_attentions(module, atype):
    """Smoke-test attention-weight extraction for each attention type."""
    m = importlib.import_module(module)
    args = make_arg(atype=atype)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
    else:
        raise NotImplementedError
    model = m.E2E(6, 5, args)
    with chainer.no_backprop_mode():
        if "pytorch" in module:
            att_ws = model.calculate_all_attentions(*batch)[0]
        else:
            raise NotImplementedError
        print(att_ws.shape)
def test_torch_save_and_load():
    """Check that torch_save/torch_load round-trips all model parameters.

    Fixes: ``tempfile.mktemp()`` is deprecated and racy (the path may be
    claimed by another process between name generation and creation); use
    ``tempfile.mkstemp()`` which atomically creates the file. Cleanup is
    moved to ``finally`` so the temp file is removed even on failure.
    """
    m = importlib.import_module("espnet.nets.pytorch_backend.e2e_mt")
    utils = importlib.import_module("espnet.asr.asr_utils")
    args = make_arg()
    model = m.E2E(6, 5, args)
    # initialize randomly
    for p in model.parameters():
        p.data.uniform_()
    if not os.path.exists(".pytest_cache"):
        os.makedirs(".pytest_cache")
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)  # torch_save reopens the path itself
    try:
        utils.torch_save(tmppath, model)
        p_saved = [p.data.numpy() for p in model.parameters()]
        # set constant value
        for p in model.parameters():
            p.data.zero_()
        utils.torch_load(tmppath, model)
        # loading must restore every parameter exactly
        for p1, p2 in zip(p_saved, model.parameters()):
            np.testing.assert_array_equal(p1, p2.data.numpy())
    finally:
        if os.path.exists(tmppath):
            os.remove(tmppath)
@pytest.mark.skipif(
    not torch.cuda.is_available() and not chainer.cuda.available, reason="gpu required"
)
@pytest.mark.parametrize("module", ["espnet.nets.pytorch_backend.e2e_mt"])
def test_gpu_trainable(module):
    """Run one forward/backward pass on a single GPU."""
    m = importlib.import_module(module)
    args = make_arg()
    model = m.E2E(6, 5, args)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
    else:
        raise NotImplementedError
    loss = model(*batch)
    if isinstance(loss, tuple):
        # chainer return several values as tuple
        loss[0].backward()  # trainable
    else:
        loss.backward()  # trainable
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize("module", ["espnet.nets.pytorch_backend.e2e_mt"])
def test_multi_gpu_trainable(module):
    """Run one forward/backward pass with DataParallel over two GPUs."""
    m = importlib.import_module(module)
    ngpu = 2
    device_ids = list(range(ngpu))
    args = make_arg()
    model = m.E2E(6, 5, args)
    if "pytorch" in module:
        model = torch.nn.DataParallel(model, device_ids)
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
        # average the per-GPU losses before the backward pass
        loss = 1.0 / ngpu * model(*batch)
        loss.backward(loss.new_ones(ngpu))  # trainable
    else:
        raise NotImplementedError
| 12,854 | 31.298995 | 88 | py |
espnet | espnet-master/test/test_multi_spkrs.py | # coding: utf-8
# Copyright 2018 Hiroshi Seki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import importlib
import re
import numpy
import pytest
import torch
def make_arg(**kwargs):
    """Return the default multi-speaker ASR config as a Namespace.

    Any keyword argument overrides the corresponding default below.
    """
    defaults = dict(
        aconv_chans=2,
        aconv_filts=20,
        adim=20,
        aheads=4,
        apply_uttmvn=False,
        atype="location",
        awin=5,
        badim=20,
        batch_bins=0,
        batch_count="auto",
        batch_frames_in=0,
        batch_frames_inout=0,
        batch_frames_out=0,
        batch_size=2,
        bdropout_rate=0.0,
        beam_size=3,
        blayers=2,
        bnmask=3,
        bprojs=10,
        btype="blstmp",
        bunits=10,
        char_list=["a", "i", "u", "e", "o"],
        context_residual=False,
        ctc_type="builtin",
        ctc_weight=0.2,
        dlayers=1,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        dtype="lstm",
        dunits=10,
        elayers_sd=1,
        elayers=2,
        etype="vggblstmp",
        eprojs=10,
        eunits=10,
        fbank_fmax=None,
        fbank_fmin=0.0,
        fbank_fs=16000,
        mtlalpha=0.5,
        lsm_type="",
        lsm_weight=0.0,
        sampling_probability=0.0,
        nbest=5,
        maxlenratio=1.0,
        minlenratio=0.0,
        n_mels=80,
        num_spkrs=1,
        outdir=None,
        penalty=0.5,
        ref_channel=0,
        replace_sos=False,
        report_cer=False,
        report_wer=False,
        sortagrad=0,
        spa=False,
        stats_file=None,
        subsample="1_2_2_1_1",
        sym_blank="<blank>",
        sym_space="<space>",
        tgt_lang=False,
        use_beamformer=False,
        use_dnn_mask_for_wpe=False,
        use_frontend=False,
        use_wpe=False,
        uttmvn_norm_means=False,
        uttmvn_norm_vars=False,
        verbose=2,
        wdropout_rate=0.0,
        weight_decay=0.0,
        wlayers=2,
        wpe_delay=3,
        wpe_taps=5,
        wprojs=10,
        wtype="blstmp",
        wunits=10,
    )
    defaults.update(kwargs)
    return argparse.Namespace(**defaults)
def init_torch_weight_const(m, val):
    """Overwrite every parameter of the torch module ``m`` with ``val``."""
    for param in m.parameters():
        param.data.fill_(val)
def init_chainer_weight_const(m, val):
    """Overwrite every parameter of the chainer link ``m`` with ``val``."""
    for param in m.params():
        param.data[:] = val
@pytest.mark.parametrize(
    ("etype", "dtype", "num_spkrs", "spa", "m_str", "text_idx1"),
    [
        ("vggblstmp", "lstm", 2, True, "espnet.nets.pytorch_backend.e2e_asr_mix", 0),
        ("vggbgrup", "gru", 2, True, "espnet.nets.pytorch_backend.e2e_asr_mix", 1),
    ],
)
def test_recognition_results_multi_outputs(
    etype, dtype, num_spkrs, spa, m_str, text_idx1
):
    """Decode a random mixture and check each speaker's hypothesis is valid."""
    const = 1e-4
    numpy.random.seed(1)
    # ctc_weight: 0.5 (hybrid CTC/attention), cannot be 0.0 (attention) or 1.0 (CTC)
    for text_idx2, ctc_weight in enumerate([0.5]):
        args = make_arg(
            etype=etype, ctc_weight=ctc_weight, num_spkrs=num_spkrs, spa=spa
        )
        m = importlib.import_module(m_str)
        model = m.E2E(40, 5, args)
        if "pytorch" in m_str:
            init_torch_weight_const(model, const)
        else:
            init_chainer_weight_const(model, const)
        data = [
            (
                "aaa",
                dict(
                    feat=numpy.random.randn(100, 40).astype(numpy.float32),
                    token=["", ""],
                ),
            )
        ]
        in_data = data[0][1]["feat"]
        nbest_hyps = model.recognize(in_data, args, args.char_list)
        # each speaker's best hypothesis must be composed of vocabulary chars
        for i in range(num_spkrs):
            y_hat = nbest_hyps[i][0]["yseq"][1:]
            seq_hat = [args.char_list[int(idx)] for idx in y_hat]
            seq_hat_text = "".join(seq_hat).replace("<space>", " ")
            assert re.match(r"[aiueo]+", seq_hat_text)
@pytest.mark.parametrize(
    ("etype", "dtype", "num_spkrs", "m_str", "data_idx"),
    [("vggblstmp", "lstm", 2, "espnet.nets.pytorch_backend.e2e_asr_mix", 0)],
)
def test_pit_process(etype, dtype, num_spkrs, m_str, data_idx):
    """Check permutation-invariant training picks min-loss speaker permutations.

    Builds a loss matrix where exactly one of the 4 permutation columns is 0
    per sample, so the expected min loss is 0.5 (mean over the 2 speakers)
    and the expected permutation cycles through the 4 choices.
    """
    bs = 10
    m = importlib.import_module(m_str)
    losses_2 = torch.ones([bs, 4], dtype=torch.float32)
    for i in range(bs):
        losses_2[i][i % 4] = 0
    true_losses_2 = torch.ones(bs, dtype=torch.float32) / 2
    perm_choices_2 = [[0, 1], [1, 0], [1, 0], [0, 1]]
    true_perm_2 = []
    for i in range(bs):
        true_perm_2.append(perm_choices_2[i % 4])
    true_perm_2 = torch.tensor(true_perm_2).long()
    losses = [losses_2]
    true_losses = [torch.mean(true_losses_2)]
    true_perm = [true_perm_2]
    args = make_arg(etype=etype, num_spkrs=num_spkrs)
    model = m.E2E(40, 5, args)
    min_loss, min_perm = model.pit.pit_process(losses[data_idx])
    assert min_loss == true_losses[data_idx]
    assert torch.equal(min_perm, true_perm[data_idx])
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize(
    ("use_frontend", "use_beamformer", "bnmask", "num_spkrs", "m_str"),
    [(True, True, 3, 2, "espnet.nets.pytorch_backend.e2e_asr_mix")],
)
def test_dnn_beamformer(use_frontend, use_beamformer, bnmask, num_spkrs, m_str):
    """Compare the DNN beamformer module output against manual MVDR steps."""
    bs = 4
    m = importlib.import_module(m_str)
    const = 1e-4
    numpy.random.seed(1)
    args = make_arg(
        use_frontend=use_frontend,
        use_beamformer=use_beamformer,
        bnmask=bnmask,
        num_spkrs=num_spkrs,
    )
    model = m.E2E(257, 5, args)
    beamformer = model.frontend.beamformer
    mask_estimator = beamformer.mask
    if "pytorch" in m_str:
        init_torch_weight_const(model, const)
    else:
        init_chainer_weight_const(model, const)
    # STFT feature
    feat_real = torch.from_numpy(numpy.random.uniform(size=(bs, 100, 2, 257))).float()
    feat_imag = torch.from_numpy(numpy.random.uniform(size=(bs, 100, 2, 257))).float()
    feat = m.to_torch_tensor({"real": feat_real, "imag": feat_imag})
    ilens = torch.tensor([100] * bs).long()
    # dnn_beamformer
    enhanced, ilens, mask_speeches = beamformer(feat, ilens)
    # bnmask masks include one noise mask, so bnmask - 1 speech outputs
    assert (bnmask - 1) == len(mask_speeches)
    assert (bnmask - 1) == len(enhanced)
    # beamforming by hand: estimate masks, build PSD matrices, apply MVDR
    feat = feat.permute(0, 3, 2, 1)
    masks, _ = mask_estimator(feat, ilens)
    mask_speech1, mask_speech2, mask_noise = masks
    b = importlib.import_module("espnet.nets.pytorch_backend.frontends.beamformer")
    psd_speech1 = b.get_power_spectral_density_matrix(feat, mask_speech1)
    psd_speech2 = b.get_power_spectral_density_matrix(feat, mask_speech2)
    psd_noise = b.get_power_spectral_density_matrix(feat, mask_noise)
    u1 = torch.zeros(*(feat.size()[:-3] + (feat.size(-2),)), device=feat.device)
    u1[..., args.ref_channel].fill_(1)
    u2 = torch.zeros(*(feat.size()[:-3] + (feat.size(-2),)), device=feat.device)
    u2[..., args.ref_channel].fill_(1)
    ws1 = b.get_mvdr_vector(psd_speech1, psd_speech2 + psd_noise, u1)
    ws2 = b.get_mvdr_vector(psd_speech2, psd_speech1 + psd_noise, u2)
    enhanced1 = b.apply_beamforming_vector(ws1, feat).transpose(-1, -2)
    enhanced2 = b.apply_beamforming_vector(ws2, feat).transpose(-1, -2)
    # manual MVDR output must match the module output exactly
    assert torch.equal(enhanced1.real, enhanced[0].real)
    assert torch.equal(enhanced2.real, enhanced[1].real)
    assert torch.equal(enhanced1.imag, enhanced[0].imag)
    assert torch.equal(enhanced2.imag, enhanced[1].imag)
| 7,289 | 28.51417 | 86 | py |
espnet | espnet-master/test/test_positional_encoding.py | import pytest
import torch
from espnet.nets.pytorch_backend.transformer.embedding import (
LearnableFourierPosEnc,
PositionalEncoding,
ScaledPositionalEncoding,
)
@pytest.mark.parametrize(
    "dtype, device",
    [(dt, dv) for dt in ("float32", "float64") for dv in ("cpu", "cuda")],
)
def test_pe_extendable(dtype, device):
    """Check PositionalEncoding cache reuse/extension and empty state_dict."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    dtype = getattr(torch, dtype)
    dim = 2
    pe = PositionalEncoding(dim, 0.0, 3).to(dtype=dtype, device=device)
    x = torch.rand(2, 3, dim, dtype=dtype, device=device)
    y = pe(x)
    init_cache = pe.pe
    # test not extended from init
    x = torch.rand(2, 3, dim, dtype=dtype, device=device)
    y = pe(x)
    assert pe.pe is init_cache
    # a longer input forces the cached table to be extended
    x = torch.rand(2, 5, dim, dtype=dtype, device=device)
    y = pe(x)
    sd = pe.state_dict()
    assert len(sd) == 0, "PositionalEncoding should save nothing"
    pe2 = PositionalEncoding(dim, 0.0, 3).to(dtype=dtype, device=device)
    pe2.load_state_dict(sd)
    y2 = pe2(x)
    assert torch.allclose(y, y2)
@pytest.mark.parametrize(
    "dtype, device, apply_scaling, hidden_dim",
    [
        (dt, dv, scal, hd)
        for dt in ("float32", "float64")
        for dv in ("cpu", "cuda")
        for scal in [True, False]
        for hd in [None, 12]
    ],
)
def test_learnedFourierPe_extendable(dtype, device, apply_scaling, hidden_dim):
    """Smoke-test LearnableFourierPosEnc on inputs of increasing length."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    dtype = getattr(torch, dtype)
    dim = 2
    pe = LearnableFourierPosEnc(
        dim, apply_scaling=apply_scaling, hidden_dim=hidden_dim
    ).to(dtype=dtype, device=device)
    x = torch.rand(2, 3, dim, dtype=dtype, device=device)
    pe(x)
    # a longer input must also be handled
    x = torch.rand(2, 5, dim, dtype=dtype, device=device)
    pe(x)
@pytest.mark.parametrize(
    "dtype, device",
    [(dt, dv) for dt in ("float32", "float64") for dv in ("cpu", "cuda")],
)
def test_scaled_pe_extendable(dtype, device):
    """Check ScaledPositionalEncoding cache behavior and alpha-only state."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    dtype = getattr(torch, dtype)
    dim = 2
    pe = ScaledPositionalEncoding(dim, 0.0, 3).to(dtype=dtype, device=device)
    x = torch.rand(2, 3, dim, dtype=dtype, device=device)
    y = pe(x)
    init_cache = pe.pe
    # test not extended from init
    x = torch.rand(2, 3, dim, dtype=dtype, device=device)
    y = pe(x)
    assert pe.pe is init_cache
    # a longer input forces the cached table to be extended
    x = torch.rand(2, 5, dim, dtype=dtype, device=device)
    y = pe(x)
    sd = pe.state_dict()
    assert sd == {"alpha": pe.alpha}, "ScaledPositionalEncoding should save only alpha"
    pe2 = ScaledPositionalEncoding(dim, 0.0, 3).to(dtype=dtype, device=device)
    pe2.load_state_dict(sd)
    y2 = pe2(x)
    assert torch.allclose(y, y2)
class LegacyPositionalEncoding(torch.nn.Module):
    """Sinusoidal positional encoding as implemented until v.0.5.2.

    Kept behaviorally identical as a reference for the regression test
    against the current PositionalEncoding module.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        import math

        super().__init__()
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len
        self.xscale = math.sqrt(d_model)
        # Precompute the sinusoid table once, in log space.
        position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * div_term)
        table[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", table.unsqueeze(0))

    def forward(self, x):
        """Scale the input by sqrt(d_model) and add the positional encoding."""
        return self.dropout(x * self.xscale + self.pe[:, : x.size(1)])
class LegacyScaledPositionalEncoding(LegacyPositionalEncoding):
    """Scaled positional encoding as implemented until v.0.5.2.

    Adds a single learnable scale ``alpha`` applied to the sinusoid table
    instead of scaling the input.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))

    def forward(self, x):
        """Add the alpha-scaled positional encoding to the input."""
        return self.dropout(x + self.alpha * self.pe[:, : x.size(1)])
def test_compatibility():
    """Regression test for #1121.

    The current (Scaled)PositionalEncoding must load a legacy state_dict and
    produce identical outputs to the legacy implementation.
    """
    x = torch.rand(2, 3, 4)
    legacy_net = torch.nn.Sequential(
        LegacyPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net = torch.nn.Sequential(PositionalEncoding(4, 0.0), torch.nn.Linear(4, 2))
    latest_net.load_state_dict(legacy_net.state_dict())
    legacy = legacy_net(x)
    latest = latest_net(x)
    assert torch.allclose(legacy, latest)
    # same check for the scaled variant (learnable alpha must round-trip)
    legacy_net = torch.nn.Sequential(
        LegacyScaledPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net = torch.nn.Sequential(
        ScaledPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net.load_state_dict(legacy_net.state_dict())
    legacy = legacy_net(x)
    latest = latest_net(x)
    assert torch.allclose(legacy, latest)
| 5,074 | 30.32716 | 87 | py |
espnet | espnet-master/test/test_asr_init.py | # coding: utf-8
import argparse
import json
import os
import tempfile
import numpy as np
import pytest
import torch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_utils import torch_save
from espnet.asr.pytorch_backend.asr_init import freeze_modules, load_trained_modules
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.nets_utils import pad_list
def get_rnn_args(**kwargs):
    """Return default RNN (attention/CTC) training args; kwargs override them."""
    train_defaults = dict(
        elayers=1,
        subsample="1_2_2_1_1",
        etype="vggblstm",
        eunits=2,
        eprojs=2,
        dtype="lstm",
        dlayers=1,
        dunits=2,
        atype="location",
        aheads=1,
        awin=2,
        aconv_chans=1,
        aconv_filts=2,
        mtlalpha=1.0,
        lsm_type="",
        lsm_weight=0.0,
        sampling_probability=0.0,
        adim=2,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        nbest=1,
        beam_size=1,
        penalty=0.5,
        maxlenratio=1.0,
        minlenratio=0.0,
        ctc_weight=0.2,
        lm_weight=0.0,
        rnnlm=None,
        verbose=2,
        char_list=["a", "e", "i", "o", "u"],
        outdir=None,
        ctc_type="builtin",
        report_cer=False,
        report_wer=False,
        sym_space="<space>",
        sym_blank="<blank>",
        replace_sos=False,
        tgt_lang=False,
        enc_init=None,
        enc_init_mods="enc.",
        dec_init=None,
        dec_init_mods="dec.",
        freeze_mods=None,
        model_module="espnet.nets.pytorch_backend.e2e_asr:E2E",
    )
    train_defaults.update(kwargs)
    return argparse.Namespace(**train_defaults)
def get_rnnt_args(**kwargs):
    """Return default RNN-transducer training args; kwargs override them."""
    train_defaults = dict(
        etype="vggblstm",
        elayers=1,
        subsample="1_2_2_1_1",
        eunits=2,
        eprojs=2,
        dtype="lstm",
        dlayers=1,
        dunits=4,
        dec_embed_dim=4,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        dropout_rate_embed_decoder=0.0,
        joint_dim=2,
        joint_activation_type="tanh",
        aux_task_type=None,
        rnnt_mode="rnnt",
        trans_type="warp-transducer",
        char_list=["a", "b", "c", "d"],
        sym_space="<space>",
        sym_blank="<blank>",
        report_cer=False,
        report_wer=False,
        beam_size=1,
        nbest=1,
        verbose=0,
        outdir=None,
        rnnlm=None,
        enc_init=None,
        enc_init_mods="enc.",
        dec_init=None,
        dec_init_mods="dec.",
        freeze_mods=None,
        model_module="espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
    )
    train_defaults.update(kwargs)
    return argparse.Namespace(**train_defaults)
def get_default_scope_inputs():
    """Return the default (idim, odim, ilens, olens) used across these tests."""
    return 10, 5, [10, 6], [4, 3]
def get_lm(n_layers, n_units, char_list):
    """Build a small LSTM LM over ``char_list`` plus <blank>/<eos> symbols."""
    vocab = ["<blank>"] + char_list + ["<eos>"]
    return lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(vocab), n_layers, n_units, typ="lstm")
    )
def pytorch_prepare_inputs(idim, odim, ilens, olens, is_cuda=False):
    """Create a random padded (xs_pad, ilens, ys_pad) batch of torch tensors.

    Targets are drawn from [1, odim) so index 0 stays free; padding values
    are 0 for inputs and -1 (ignore index) for targets.
    """
    np.random.seed(1)  # reproducible batches across tests
    xs = [np.random.randn(n, idim).astype(np.float32) for n in ilens]
    ys = [np.random.randint(1, odim, n).astype(np.int32) for n in olens]
    lengths = np.array([x.shape[0] for x in xs], dtype=np.int32)
    xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0)
    ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], -1)
    ilens = torch.from_numpy(lengths).long()
    if is_cuda:
        return xs_pad.cuda(), ilens.cuda(), ys_pad.cuda()
    return xs_pad, ilens, ys_pad
@pytest.mark.parametrize(
    "main_model_type, pt_model_type, finetune_dic",
    [
        (
            "rnn",
            "rnn",
            {
                "enc_init": None,
                "dec_init": True,
                "dec_init_mods": ["dec.", "att."],
                "mtlalpha": 0.5,
                "use_lm": None,
            },
        ),
        (
            "rnnt",
            "rnn",
            {
                "enc_init": True,
                "enc_init_mods": ["enc."],
                "dec_init": None,
                "mtlalpha": 1.0,
                "use_lm": None,
            },
        ),
        (
            "rnnt",
            "lm",
            {
                "enc_init": None,
                "dec_init": True,
                "dec_init_mods": ["dec.decoder."],
                "use_lm": True,
            },
        ),
    ],
)
def test_pytorch_trainable_and_transferable(
    main_model_type, pt_model_type, finetune_dic
):
    """Pre-train a source model, transfer its modules, then train and decode.

    A source model (RNN ASR, transducer, or LM) is trained for one step and
    saved; a main model is then built via load_trained_modules with the
    requested enc/dec initialization and must remain trainable and decodable.
    """
    idim, odim, ilens, olens = get_default_scope_inputs()
    batch = pytorch_prepare_inputs(idim, odim, ilens, olens)
    if pt_model_type == "lm":
        pt_args = get_rnnt_args() if main_model_type == "rnnt" else get_rnn_args()
        pt_model = get_lm(pt_args.dlayers, pt_args.dunits, pt_args.char_list)
        prefix_tmppath = "_rnnlm"
    else:
        if pt_model_type == "rnn":
            from espnet.nets.pytorch_backend.e2e_asr import E2E
            pt_args = get_rnn_args()
        else:
            from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E
            pt_args = get_rnnt_args()
        pt_model = E2E(idim, odim, pt_args)
        prefix_tmppath = ""
    loss = pt_model(*batch)
    loss.backward()
    if not os.path.exists(".pytest_cache"):
        os.makedirs(".pytest_cache")
    # NOTE(review): tempfile.mktemp() is deprecated and racy — consider
    # tempfile.mkstemp() here as well.
    tmppath = tempfile.mktemp() + prefix_tmppath
    torch_save(tmppath, pt_model)
    # create dummy model.json for saved model to go through
    # get_model_conf(...) called in load_trained_modules method.
    model_conf = os.path.dirname(tmppath) + "/model.json"
    with open(model_conf, "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, vars(pt_args)),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    # point the requested init entries at the saved snapshot
    if finetune_dic["enc_init"] is not None:
        finetune_dic["enc_init"] = tmppath
    if finetune_dic["dec_init"] is not None:
        finetune_dic["dec_init"] = tmppath
    if main_model_type == "rnn":
        main_args = get_rnn_args(**finetune_dic)
    else:
        main_args = get_rnnt_args(**finetune_dic)
    main_model = load_trained_modules(idim, odim, main_args)
    loss = main_model(*batch)
    loss.backward()
    if main_model_type == "rnnt":
        beam_search = BeamSearchTransducer(
            decoder=main_model.dec,
            joint_network=main_model.transducer_tasks.joint_network,
            beam_size=1,
            lm=None,
            lm_weight=0.0,
            search_type="default",
            max_sym_exp=2,
            u_max=10,
            nstep=1,
            prefix_alpha=1,
            score_norm=False,
        )
        with torch.no_grad():
            in_data = np.random.randn(10, idim)
            main_model.recognize(in_data, beam_search)
    else:
        with torch.no_grad():
            in_data = np.random.randn(10, idim)
            main_model.recognize(in_data, main_args, main_args.char_list)
# todo (b-flo): add test for frozen layers
def test_pytorch_freezable():
    """Smoke-test freeze_modules on the encoder's first layer prefix."""
    from espnet.nets.pytorch_backend.e2e_asr import E2E
    idim, odim, ilens, olens = get_default_scope_inputs()
    args = get_rnn_args(freeze_mods="enc.enc.0.")
    model = E2E(idim, odim, args)
    model, model_params = freeze_modules(model, args.freeze_mods)
    model.train()
| 7,583 | 25.989324 | 84 | py |
espnet | espnet-master/test/test_e2e_mt_transformer.py | # coding: utf-8
# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_mt_transformer import E2E
from espnet.nets.pytorch_backend.transformer import plot
def make_arg(**kwargs):
    """Return the default MT-transformer argument namespace, with overrides.

    Any keyword argument replaces the corresponding default value.
    """
    args = dict(
        adim=2,
        aheads=1,
        dropout_rate=0.0,
        transformer_attn_dropout_rate=None,
        elayers=2,
        eunits=2,
        dlayers=2,
        dunits=2,
        sym_space="<space>",
        sym_blank="<blank>",
        transformer_decoder_selfattn_layer_type="selfattn",
        transformer_encoder_selfattn_layer_type="selfattn",
        transformer_init="pytorch",
        transformer_input_layer="linear",
        transformer_length_normalized_loss=True,
        report_bleu=False,
        lsm_weight=0.001,
        char_list=["<blank>", "a", "e", "i", "o", "u"],
        tie_src_tgt_embedding=False,
        tie_classifier=False,
        multilingual=False,
        replace_sos=False,
    )
    args.update(kwargs)
    return argparse.Namespace(**args)
def prepare(args):
    """Build a tiny E2E MT model plus a random toy batch and plot metadata.

    Returns (model, y_src, ilens, y_tgt, data, uttid_list) where y_src/y_tgt
    are padded token-id tensors and `data` mimics the espnet json layout.
    """
    idim = 5
    odim = 5
    model = E2E(idim, odim, args)
    batchsize = 2
    n_token = odim - 1
    # +1 keeps every sampled token id strictly positive (idx 0 is reserved)
    y_src = (torch.randn(batchsize, 4) * n_token % n_token).long() + 1
    y_tgt = (torch.randn(batchsize, 4) * n_token % n_token).long() + 1
    ilens = [3, 4]
    olens = [4, 3]
    for i in range(batchsize):
        y_src[i, ilens[i]:] = model.pad
        y_tgt[i, olens[i]:] = model.ignore_id
    data = {}
    uttid_list = []
    for i in range(batchsize):
        uttid = "utt%d" % i
        data[uttid] = {
            "input": [{"shape": [ilens[i]]}],
            "output": [{"shape": [olens[i]]}],
        }
        uttid_list.append(uttid)
    return model, y_src, torch.tensor(ilens), y_tgt, data, uttid_list
# Lightweight / dynamic convolution replacements for the self-attention
# layers, used to parametrize the MT transformer tests below.
ldconv_lconv_args = dict(
    transformer_decoder_selfattn_layer_type="lightconv",
    transformer_encoder_selfattn_layer_type="lightconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_dconv_args = dict(
    transformer_decoder_selfattn_layer_type="dynamicconv",
    transformer_encoder_selfattn_layer_type="dynamicconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
def _savefn(*args, **kwargs):
return
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        ldconv_lconv_args,
        ldconv_dconv_args,
        {"report_bleu": True},
        {"tie_src_tgt_embedding": True},
        {"tie_classifier": True},
        {"tie_src_tgt_embedding": True, "tie_classifier": True},
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """One optimisation step, attention plotting and beam search must all run."""
    args = make_arg(**model_dict)
    model, y_src, ilens, y_tgt, data, uttid_list = prepare(args)
    # greedy beam-search configuration
    trans_args = argparse.Namespace(
        beam_size=1,
        penalty=0.0,
        ctc_weight=0.0,
        maxlenratio=1.0,
        lm_weight=0,
        minlenratio=0,
        nbest=1,
        tgt_lang=False,
    )
    # a single optimisation step proves the graph is trainable
    optim = torch.optim.Adam(model.parameters(), 0.01)
    loss = model(y_src, ilens, y_tgt)
    optim.zero_grad()
    loss.backward()
    optim.step()
    # attention plotting should not raise
    attn_dict = model.calculate_all_attentions(y_src[0:1], ilens[0:1], y_tgt[0:1])
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
    # decoding should produce an n-best list
    with torch.no_grad():
        nbest = model.translate(
            [y_src[0, : ilens[0]].numpy()], trans_args, args.char_list
        )
        print(y_tgt[0])
        print(nbest[0]["yseq"][1:-1])
| 3,754 | 26.014388 | 83 | py |
espnet | espnet-master/test/test_e2e_asr_transformer.py | import argparse
import chainer
import numpy
import pytest
import torch
import espnet.nets.chainer_backend.e2e_asr_transformer as ch
import espnet.nets.pytorch_backend.e2e_asr_transformer as th
from espnet.nets.pytorch_backend.nets_utils import rename_state_dict
from espnet.nets.pytorch_backend.transformer import plot
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask, target_mask
def test_sequential():
    """MultiSequential must pass multiple arguments through every layer."""

    class Masked(torch.nn.Module):
        def forward(self, x, m):
            return x, m

    from espnet.nets.pytorch_backend.transformer.repeat import MultiSequential

    seq = MultiSequential(Masked(), Masked())
    x = torch.randn(2, 3)
    m = torch.randn(2, 3) > 0
    assert len(seq(x, m)) == 2
    if torch.cuda.is_available():
        seq = torch.nn.DataParallel(seq)
        seq.cuda()
        assert len(seq(x.cuda(), m.cuda())) == 2
def ref_subsequent_mask(size):
    """Reference causal mask of shape (1, size, size).

    True entries mark positions a step may attend to (itself and earlier).
    Ported from http://nlp.seas.harvard.edu/2018/04/03/attention.html
    """
    upper = numpy.triu(numpy.ones((1, size, size)), k=1).astype("uint8")
    # the strictly-upper triangle is "future"; invert it to get the mask
    return torch.from_numpy(upper) == 0
def test_mask():
    """espnet's subsequent_mask must match the reference implementation."""
    got = subsequent_mask(3)
    assert (got.unsqueeze(0) == ref_subsequent_mask(3)).all()
def make_arg(**kwargs):
    """Return the default ASR-transformer argument namespace, with overrides."""
    args = dict(
        adim=2,
        aheads=1,
        dropout_rate=0.0,
        transformer_attn_dropout_rate=None,
        elayers=1,
        eunits=2,
        dlayers=1,
        dunits=2,
        sym_space="<space>",
        sym_blank="<blank>",
        transformer_decoder_selfattn_layer_type="selfattn",
        transformer_encoder_selfattn_layer_type="selfattn",
        transformer_init="pytorch",
        transformer_input_layer="conv2d",
        transformer_length_normalized_loss=True,
        report_cer=False,
        report_wer=False,
        mtlalpha=0.0,
        lsm_weight=0.001,
        char_list=["<blank>", "a", "e", "i", "o", "u"],
        ctc_type="builtin",
    )
    args.update(kwargs)
    return argparse.Namespace(**args)
def prepare(backend, args):
    """Build a toy E2E ASR model plus random padded inputs for *backend*.

    backend is "pytorch" or "chainer"; the return layout matches what the
    parametrized tests below unpack.
    """
    idim = 10
    odim = 3
    batchsize = 2
    ilens = [30, 20]
    olens = [5, 4]
    n_token = odim - 1
    if backend == "pytorch":
        model = th.E2E(idim, odim, args)
        x = torch.randn(batchsize, max(ilens), idim)
        y = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    else:
        model = ch.E2E(idim, odim, args)
        x = numpy.random.randn(batchsize, max(ilens), idim).astype(numpy.float32)
        y = (numpy.random.rand(batchsize, max(olens)) * n_token % n_token).astype(
            numpy.int32
        )
    # mark padded frames and labels
    for i in range(batchsize):
        x[i, ilens[i]:] = -1
        y[i, olens[i]:] = model.ignore_id
    data = {}
    uttid_list = []
    for i in range(batchsize):
        uttid = "utt%d" % i
        data[uttid] = {
            "input": [{"shape": [ilens[i], idim]}],
            "output": [{"shape": [olens[i]]}],
        }
        uttid_list.append(uttid)
    if backend == "pytorch":
        return model, x, torch.tensor(ilens), y, data, uttid_list
    return model, x, ilens, y, data, uttid_list
def test_transformer_mask():
    """NaN frames behind the target mask must not leak into attention."""
    args = make_arg()
    model, x, ilens, y, data, uttid_list = prepare("pytorch", args)
    yi, yo = add_sos_eos(y, model.sos, model.eos, model.ignore_id)
    y_mask = target_mask(yi, model.ignore_id)
    emb = model.decoder.embed(yi)
    # poison the tail of the first sequence; the mask must hide it
    emb[0, 3:] = float("nan")
    attn = model.decoder.decoders[0].self_attn
    attn(emb, emb, emb, y_mask)
    assert not numpy.isnan(attn.attn[0, :, :3, :3].detach().numpy()).any()
# Lightweight / dynamic convolution replacements for the self-attention
# layers, used to parametrize the transformer tests below.
ldconv_lconv_args = dict(
    transformer_decoder_selfattn_layer_type="lightconv",
    transformer_encoder_selfattn_layer_type="lightconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_dconv_args = dict(
    transformer_decoder_selfattn_layer_type="dynamicconv",
    transformer_encoder_selfattn_layer_type="dynamicconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_lconv2d_args = dict(
    transformer_decoder_selfattn_layer_type="lightconv2d",
    transformer_encoder_selfattn_layer_type="lightconv2d",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_dconv2d_args = dict(
    transformer_decoder_selfattn_layer_type="dynamicconv2d",
    transformer_encoder_selfattn_layer_type="dynamicconv2d",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
# intermediate-CTC and self-conditioned-CTC training configurations
interctc_args = dict(
    mtlalpha=1.0,
    elayers=2,
    intermediate_ctc_weight=0.3,
    intermediate_ctc_layer="1",
    stochastic_depth_rate=0.3,
)
selfconditionedctc_args = dict(
    mtlalpha=1.0,
    elayers=2,
    intermediate_ctc_weight=0.3,
    intermediate_ctc_layer="1",
    stochastic_depth_rate=0.0,
    self_conditioning=True,
)
def _savefn(*args, **kwargs):
return
@pytest.mark.parametrize(
    "module, model_dict",
    [
        ("pytorch", {}),
        ("pytorch", ldconv_lconv_args),
        ("pytorch", ldconv_dconv_args),
        ("pytorch", ldconv_lconv2d_args),
        ("pytorch", ldconv_dconv2d_args),
        ("pytorch", {"report_cer": True}),
        ("pytorch", {"report_wer": True}),
        ("pytorch", {"report_cer": True, "report_wer": True}),
        ("pytorch", {"report_cer": True, "report_wer": True, "mtlalpha": 0.0}),
        ("pytorch", {"report_cer": True, "report_wer": True, "mtlalpha": 1.0}),
        ("pytorch", interctc_args),
        ("pytorch", selfconditionedctc_args),
        ("chainer", {}),
    ],
)
def test_transformer_trainable_and_decodable(module, model_dict):
    """One training step, attention/CTC plotting and decoding for each backend."""
    args = make_arg(**model_dict)
    model, x, ilens, y, data, uttid_list = prepare(module, args)
    # pure CTC (mtlalpha=1) has no decoder; pure attention (mtlalpha=0) no CTC
    if args.mtlalpha == 1:
        assert model.decoder is None
    elif args.mtlalpha == 0:
        assert model.ctc is None
    recog_args = argparse.Namespace(
        beam_size=1,
        penalty=0.0,
        ctc_weight=0.0,
        maxlenratio=1.0,
        lm_weight=0,
        minlenratio=0,
        nbest=1,
    )
    if module == "pytorch":
        # one optimisation step proves the graph is trainable
        optim = torch.optim.Adam(model.parameters(), 0.01)
        loss = model(x, ilens, y)
        optim.zero_grad()
        loss.backward()
        optim.step()
        # attention plotting should not raise
        attn_dict = model.calculate_all_attentions(x[0:1], ilens[0:1], y[0:1])
        plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
        # CTC posteriors exist only when CTC is part of the objective
        ctc_probs = model.calculate_all_ctc_probs(x[0:1], ilens[0:1], y[0:1])
        if args.mtlalpha > 0:
            print(ctc_probs.shape)
        else:
            assert ctc_probs is None
        # decoding should produce an n-best list
        with torch.no_grad():
            nbest = model.recognize(x[0, : ilens[0]].numpy(), recog_args)
            print(y[0])
            print(nbest[0]["yseq"][1:-1])
    else:
        # chainer backend: same three checks
        optim = chainer.optimizers.Adam(0.01)
        optim.setup(model)
        loss, loss_ctc, loss_att, acc = model(x, ilens, y)
        model.cleargrads()
        loss.backward()
        optim.update()
        attn_dict = model.calculate_all_attentions(x[0:1], ilens[0:1], y[0:1])
        plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
        with chainer.no_backprop_mode():
            nbest = model.recognize(x[0, : ilens[0]], recog_args)
            print(y[0])
            print(nbest[0]["yseq"][1:-1])
# regression test for https://github.com/espnet/espnet/issues/1750
def test_v0_3_transformer_input_compatibility():
    """State dicts using v0.3.x layer names must still load."""
    args = make_arg()
    model, x, ilens, y, data, uttid_list = prepare("pytorch", args)
    state_dict = model.state_dict()
    # these old module names were used in v0.3.x
    enc = "encoder."
    rename_state_dict(enc + "embed.", enc + "input_layer.", state_dict)
    rename_state_dict(enc + "after_norm.", enc + "norm.", state_dict)
    dec = "decoder."
    rename_state_dict(dec + "after_norm.", dec + "output_norm.", state_dict)
    model.load_state_dict(state_dict)
| 8,385 | 29.717949 | 87 | py |
espnet | espnet-master/test/test_beam_search_timesync.py | from argparse import Namespace
import pytest
import torch
from espnet.nets.asr_interface import dynamic_import_asr
from espnet.nets.beam_search_timesync import BeamSearchTimeSync
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorers.length_bonus import LengthBonus
# Default hyper-parameter namespaces shared by the beam-search tests below.
rnn_args = Namespace(
    elayers=1,
    subsample=None,
    etype="vgglstm",
    eunits=2,
    eprojs=2,
    dtype="lstm",
    dlayers=1,
    dunits=2,
    atype="dot",
    aheads=2,
    awin=2,
    aconv_chans=2,
    aconv_filts=2,
    lsm_type="",
    lsm_weight=0.0,
    sampling_probability=0.0,
    adim=2,
    dropout_rate=0.0,
    dropout_rate_decoder=0.0,
    nbest=3,
    beam_size=2,
    penalty=0.5,
    maxlenratio=1.0,
    minlenratio=0.0,
    ctc_weight=0.2,
    lm_weight=0.0,
    rnnlm=None,
    streaming_min_blank_dur=10,
    streaming_onset_margin=2,
    streaming_offset_margin=2,
    verbose=2,
    outdir=None,
    ctc_type="builtin",
    report_cer=False,
    report_wer=False,
    sym_space="<space>",
    sym_blank="<blank>",
    sortagrad=0,
    grad_noise=False,
    context_residual=False,
    use_frontend=False,
    replace_sos=False,
    tgt_lang=False,
)
transformer_args = Namespace(
    adim=4,
    aheads=2,
    dropout_rate=0.0,
    transformer_attn_dropout_rate=None,
    elayers=1,
    eunits=2,
    dlayers=1,
    dunits=2,
    sym_space="<space>",
    sym_blank="<blank>",
    transformer_init="pytorch",
    transformer_input_layer="conv2d",
    transformer_length_normalized_loss=True,
    report_cer=False,
    report_wer=False,
    ctc_type="builtin",
    lsm_weight=0.001,
)
# transformer defaults plus lightweight-convolution self-attention layers
ldconv_args = Namespace(
    **vars(transformer_args),
    transformer_decoder_selfattn_layer_type="lightconv",
    transformer_encoder_selfattn_layer_type="lightconv",
    wshare=2,
    ldconv_encoder_kernel_length="31_31",
    ldconv_decoder_kernel_length="11_11",
    ldconv_usebias=False,
)
# adapted from test.test_e2e_asr_transformer.prepare
def prepare(E2E, args, mtlalpha=0.0):
    """Instantiate the requested ASR model with random padded toy inputs."""
    args.mtlalpha = mtlalpha
    args.char_list = ["a", "e", "i", "o", "u"]
    idim = 8
    odim = len(args.char_list)
    model = dynamic_import_asr(E2E, "pytorch")(idim, odim, args)
    batchsize = 1
    x = torch.randn(batchsize, 20, idim)
    ilens = [20, 15]
    n_token = odim - 1
    # +1 keeps label 0 free for the CTC blank symbol
    y = (torch.rand(batchsize, 10) * n_token % (n_token - 1)).long() + 1
    olens = [10, 2]
    for i in range(batchsize):
        x[i, ilens[i]:] = -1
        y[i, olens[i]:] = -1
    data = []
    for i in range(batchsize):
        data.append(
            (
                "utt%d" % i,
                {
                    "input": [{"shape": [ilens[i], idim]}],
                    "output": [{"shape": [olens[i]]}],
                },
            )
        )
    return model, x, torch.tensor(ilens), y, data, args
@pytest.mark.parametrize(
    "model_class, args, mtlalpha, ctc_weight, lm_weight, bonus, device, dtype",
    [
        (nn, args, ctc_train, ctc_recog, lm, bonus, device, dtype)
        for device in ("cpu", "cuda")
        for nn, args in (
            ("transformer", transformer_args),
            ("transformer", ldconv_args),
            ("rnn", rnn_args),
        )
        for ctc_train in (0.0, 0.5, 1.0)
        for ctc_recog in (0.0, 0.5, 1.0)
        for lm in (0.5,)
        for bonus in (0.1,)
        for dtype in ("float16", "float32", "float64")
    ],
)
def test_beam_search_equal(
    model_class, args, mtlalpha, ctc_weight, lm_weight, bonus, device, dtype
):
    """Time-synchronous beam search must run for every valid CTC/attention mix."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    if device == "cpu" and dtype == "float16":
        pytest.skip("cpu float16 implementation is not available in pytorch yet")
    if mtlalpha == 0.0 or ctc_weight == 0:
        pytest.skip("no CTC.")
    if mtlalpha == 1.0 and ctc_weight < 1.0:
        pytest.skip("pure CTC + attention decoding")
    # fix all seeds so decoding is reproducible
    torch.manual_seed(123)
    torch.backends.cudnn.deterministic = True
    # https://github.com/pytorch/pytorch/issues/6351
    torch.backends.cudnn.benchmark = False
    dtype = getattr(torch, dtype)
    model, x, ilens, y, data, train_args = prepare(model_class, args, mtlalpha=mtlalpha)
    model.eval()
    char_list = train_args.char_list
    lm_args = Namespace(type="lstm", layer=1, unit=2, embed_unit=2, dropout_rate=0.0)
    lm = dynamic_import_lm("default", backend="pytorch")(len(char_list), lm_args)
    lm.eval()
    # decoding configuration
    args = Namespace(
        beam_size=3,
        penalty=bonus,
        ctc_weight=ctc_weight,
        maxlenratio=0,
        lm_weight=lm_weight,
        minlenratio=0,
        nbest=3,
    )
    feat = x[0, : ilens[0]].numpy()
    # assemble scorers and weights for the time-synchronous search
    scorers = model.scorers()
    scorers["ctc"] = model.ctc
    if lm_weight != 0:
        scorers["lm"] = lm
    scorers["length_bonus"] = LengthBonus(len(char_list))
    weights = dict(
        decoder=1.0 - ctc_weight,
        ctc=ctc_weight,
        lm=args.lm_weight,
        length_bonus=args.penalty,
    )
    model.to(device, dtype=dtype)
    model.eval()
    beam = BeamSearchTimeSync(
        beam_size=args.beam_size,
        weights=weights,
        scorers=scorers,
        sos=model.sos,
        token_list=train_args.char_list,
    )
    beam.to(device, dtype=dtype)
    beam.eval()
    with torch.no_grad():
        enc = model.encode(torch.as_tensor(feat).to(device, dtype=dtype))
        beam(x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio)
    # reaching this point means decoding completed without error
    return
| 5,619 | 26.149758 | 88 | py |
espnet | espnet-master/test/test_e2e_tts_transformer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from argparse import Namespace
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer, subsequent_mask
from espnet.nets.pytorch_backend.nets_utils import pad_list
def make_transformer_args(**kwargs):
    """Return the default TTS-transformer hyper-parameter dict, with overrides."""
    args = dict(
        embed_dim=32,
        spk_embed_dim=None,
        eprenet_conv_layers=2,
        eprenet_conv_filts=5,
        eprenet_conv_chans=32,
        dprenet_layers=2,
        dprenet_units=32,
        adim=32,
        aheads=4,
        elayers=2,
        eunits=32,
        dlayers=2,
        dunits=32,
        postnet_layers=2,
        postnet_filts=5,
        postnet_chans=32,
        eprenet_dropout_rate=0.1,
        dprenet_dropout_rate=0.5,
        postnet_dropout_rate=0.1,
        transformer_enc_dropout_rate=0.1,
        transformer_enc_positional_dropout_rate=0.1,
        transformer_enc_attn_dropout_rate=0.0,
        transformer_dec_dropout_rate=0.1,
        transformer_dec_positional_dropout_rate=0.1,
        transformer_dec_attn_dropout_rate=0.3,
        transformer_enc_dec_attn_dropout_rate=0.0,
        spk_embed_integration_type="add",
        use_masking=True,
        use_weighted_masking=False,
        bce_pos_weight=1.0,
        use_batch_norm=True,
        use_scaled_pos_enc=True,
        encoder_normalize_before=True,
        decoder_normalize_before=True,
        encoder_concat_after=False,
        decoder_concat_after=False,
        transformer_init="pytorch",
        initial_encoder_alpha=1.0,
        initial_decoder_alpha=1.0,
        reduction_factor=1,
        loss_type="L1",
        use_guided_attn_loss=False,
        num_heads_applied_guided_attn=2,
        num_layers_applied_guided_attn=2,
        guided_attn_loss_sigma=0.4,
        guided_attn_loss_lambda=1.0,
        modules_applied_guided_attn=["encoder", "decoder", "encoder-decoder"],
    )
    args.update(kwargs)
    return args
def make_inference_args(**kwargs):
    """Return the default TTS inference settings, with overrides."""
    args = dict(threshold=0.5, maxlenratio=5.0, minlenratio=0.0)
    args.update(kwargs)
    return args
def prepare_inputs(
    idim, odim, ilens, olens, spk_embed_dim=None, device=torch.device("cpu")
):
    """Build a random padded TTS batch (token ids, target feats, stop labels)."""
    xs = [np.random.randint(0, idim, lg) for lg in ilens]
    ys = [np.random.randn(lg, odim) for lg in olens]
    ilens = torch.LongTensor(ilens).to(device)
    olens = torch.LongTensor(olens).to(device)
    xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
    ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
    # stop-token targets: 1 from the final frame of each utterance onwards
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for i, l in enumerate(olens):
        labels[i, l - 1:] = 1
    batch = {
        "xs": xs,
        "ilens": ilens,
        "ys": ys,
        "labels": labels,
        "olens": olens,
    }
    if spk_embed_dim is not None:
        # optional random speaker embeddings
        batch["spembs"] = torch.FloatTensor(
            np.random.randn(len(ilens), spk_embed_dim)
        ).to(device)
    return batch
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        {"use_masking": False},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "add"},
        {"use_scaled_pos_enc": False},
        {"bce_pos_weight": 10.0},
        {"reduction_factor": 2},
        {"reduction_factor": 3},
        {"encoder_normalize_before": False},
        {"decoder_normalize_before": False},
        {"encoder_normalize_before": False, "decoder_normalize_before": False},
        {"encoder_concat_after": True},
        {"decoder_concat_after": True},
        {"encoder_concat_after": True, "decoder_concat_after": True},
        {"loss_type": "L1"},
        {"loss_type": "L2"},
        {"loss_type": "L1+L2"},
        {"use_masking": False},
        {"use_masking": False, "use_weighted_masking": True},
        {"use_guided_attn_loss": True},
        {"use_guided_attn_loss": True, "reduction_factor": 3},
        {
            "use_guided_attn_loss": True,
            "modules_applied_guided_attn": ["encoder-decoder"],
        },
        {
            "use_guided_attn_loss": True,
            "modules_applied_guided_attn": ["encoder", "decoder"],
        },
        {"use_guided_attn_loss": True, "num_heads_applied_guided_attn": -1},
        {"use_guided_attn_loss": True, "num_layers_applied_guided_attn": -1},
        {
            "use_guided_attn_loss": True,
            "modules_applied_guided_attn": ["encoder"],
            "elayers": 2,
            "dlayers": 3,
        },
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """One optimisation step, inference and attention calculation must all run."""
    model_args = make_transformer_args(**model_dict)
    inference_args = make_inference_args()
    # toy batch
    idim = 5
    odim = 10
    ilens = [10, 5]
    olens = [20, 15]
    batch = prepare_inputs(idim, odim, ilens, olens, model_args["spk_embed_dim"])
    model = Transformer(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # a single optimisation step proves the graph is trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # ScaledPositionalEncoding alphas must receive gradients
    if model.use_scaled_pos_enc:
        assert model.encoder.embed[1].alpha.grad is not None
        assert model.decoder.embed[1].alpha.grad is not None
    # inference and attention calculation must not raise
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]],
            Namespace(**inference_args),
            spemb=spemb,
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "add"},
        {"use_masking": False},
        {"use_scaled_pos_enc": False},
        {"bce_pos_weight": 10.0},
        {"encoder_normalize_before": False},
        {"decoder_normalize_before": False},
        {"encoder_normalize_before": False, "decoder_normalize_before": False},
        {"decoder_concat_after": True},
        {"encoder_concat_after": True, "decoder_concat_after": True},
        {"use_masking": False},
        {"use_masking": False, "use_weighted_masking": True},
    ],
)
def test_transformer_gpu_trainable_and_decodable(model_dict):
    """Same trainability/decodability checks as the CPU test, on a single GPU."""
    model_args = make_transformer_args(**model_dict)
    inference_args = make_inference_args()
    # toy batch on the GPU
    idim = 5
    odim = 10
    ilens = [10, 5]
    olens = [20, 15]
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device
    )
    model = Transformer(idim, odim, Namespace(**model_args))
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    # a single optimisation step proves the graph is trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # ScaledPositionalEncoding alphas must receive gradients
    if model.use_scaled_pos_enc:
        assert model.encoder.embed[1].alpha.grad is not None
        assert model.decoder.embed[1].alpha.grad is not None
    # inference and attention calculation must not raise
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]],
            Namespace(**inference_args),
            spemb=spemb,
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"},
        {"spk_embed_dim": 16, "spk_embed_integration_type": "add"},
        {"use_masking": False},
        {"use_scaled_pos_enc": False},
        {"bce_pos_weight": 10.0},
        {"encoder_normalize_before": False},
        {"decoder_normalize_before": False},
        {"encoder_normalize_before": False, "decoder_normalize_before": False},
        {"decoder_concat_after": True},
        {"encoder_concat_after": True, "decoder_concat_after": True},
        {"use_masking": False},
        {"use_masking": False, "use_weighted_masking": True},
    ],
)
def test_transformer_multi_gpu_trainable(model_dict):
    """Training one step must work when the model is wrapped in DataParallel."""
    model_args = make_transformer_args(**model_dict)
    # toy batch on the GPU
    idim = 5
    odim = 10
    ilens = [10, 5]
    olens = [20, 15]
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device
    )
    # replicate the model across two devices
    ngpu = 2
    device_ids = list(range(ngpu))
    model = Transformer(idim, odim, Namespace(**model_args))
    model = torch.nn.DataParallel(model, device_ids)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    # a single optimisation step proves the graph is trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # ScaledPositionalEncoding alphas must receive gradients
    if model.module.use_scaled_pos_enc:
        assert model.module.encoder.embed[1].alpha.grad is not None
        assert model.module.decoder.embed[1].alpha.grad is not None
@pytest.mark.parametrize("model_dict", [({})])
def test_attention_masking(model_dict):
    """Padded positions must receive exactly zero attention weight."""
    model_args = make_transformer_args(**model_dict)
    # toy batch
    idim = 5
    odim = 10
    ilens = [10, 5]
    olens = [20, 15]
    batch = prepare_inputs(idim, odim, ilens, olens)
    model = Transformer(idim, odim, Namespace(**model_args))
    # --- encoder self-attention ---
    xs = model.encoder.embed(batch["xs"])
    xs[1, ilens[1]:] = float("nan")  # poison the padding; masks must hide it
    x_masks = model._source_mask(batch["ilens"])
    attn = model.encoder.encoders[0].self_attn
    attn(xs, xs, xs, x_masks)
    aws = attn.attn.detach().numpy()
    for aw, ilen in zip(aws, batch["ilens"]):
        assert not np.isnan(aw[:, :ilen, :ilen]).any()
        np.testing.assert_almost_equal(
            aw[:, :ilen, :ilen].sum(), float(aw.shape[0] * ilen), decimal=4
        )
        assert aw[:, ilen:, ilen:].sum() == 0.0
    # --- encoder-decoder attention ---
    ys = model.decoder.embed(batch["ys"])
    ys[1, olens[1]:] = float("nan")
    xy_masks = x_masks
    attn = model.decoder.decoders[0].src_attn
    attn(ys, xs, xs, xy_masks)
    aws = attn.attn.detach().numpy()
    for aw, ilen, olen in zip(aws, batch["ilens"], batch["olens"]):
        assert not np.isnan(aw[:, :olen, :ilen]).any()
        np.testing.assert_almost_equal(
            aw[:, :olen, :ilen].sum(), float(aw.shape[0] * olen), decimal=4
        )
        assert aw[:, olen:, ilen:].sum() == 0.0
    # --- decoder self-attention ---
    y_masks = model._target_mask(batch["olens"])
    attn = model.decoder.decoders[0].self_attn
    attn(ys, ys, ys, y_masks)
    aws = attn.attn.detach().numpy()
    for aw, olen in zip(aws, batch["olens"]):
        assert not np.isnan(aw[:, :olen, :olen]).any()
        np.testing.assert_almost_equal(
            aw[:, :olen, :olen].sum(), float(aw.shape[0] * olen), decimal=4
        )
        assert aw[:, olen:, olen:].sum() == 0.0
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        {"reduction_factor": 3},
        {"reduction_factor": 4},
        {"decoder_normalize_before": False},
        {"encoder_normalize_before": False, "decoder_normalize_before": False},
        {"decoder_concat_after": True},
        {"encoder_concat_after": True, "decoder_concat_after": True},
    ],
)
def test_forward_and_inference_are_equal(model_dict):
    """Teacher-forced forward and step-by-step inference must produce the same output."""
    # dropout in the decoder prenet must be off, otherwise the two paths differ
    model_args = make_transformer_args(dprenet_dropout_rate=0.0, **model_dict)
    # toy batch of a single utterance
    idim = 5
    odim = 10
    ilens = [10]
    olens = [20]
    batch = prepare_inputs(idim, odim, ilens, olens)
    xs = batch["xs"]
    ilens = batch["ilens"]
    ys = batch["ys"]
    olens = batch["olens"]
    model = Transformer(idim, odim, Namespace(**model_args))
    model.eval()
    # TODO(kan-bayashi): update following ugly part
    with torch.no_grad():
        # --------- teacher-forced forward calculation ---------
        x_masks = model._source_mask(ilens)
        hs_fp, h_masks = model.encoder(xs, x_masks)
        if model.reduction_factor > 1:
            # keep every r-th frame as the decoder input
            ys_in = ys[:, model.reduction_factor - 1 :: model.reduction_factor]
            olens_in = olens.new([olen // model.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens
        ys_in = model._add_first_frame_and_remove_last_frame(ys_in)
        y_masks = model._target_mask(olens_in)
        zs, _ = model.decoder(ys_in, y_masks, hs_fp, h_masks)
        before_outs = model.feat_out(zs).view(zs.size(0), -1, model.odim)
        logits = model.prob_out(zs).view(zs.size(0), -1)
        after_outs = before_outs + model.postnet(before_outs.transpose(1, 2)).transpose(
            1, 2
        )
        # --------- autoregressive inference calculation ---------
        hs_ir, _ = model.encoder(xs, None)
        maxlen = ys_in.shape[1]
        minlen = ys_in.shape[1]
        idx = 0
        # inference loop, but fed with groundtruth frames to compare behaviour
        ys_in_ = ys_in[0, idx].view(1, 1, model.odim)
        # the first decoder input must be the all-zero frame
        np.testing.assert_array_equal(
            ys_in_.new_zeros(1, 1, model.odim).detach().cpu().numpy(),
            ys_in_.detach().cpu().numpy(),
        )
        outs, probs = [], []
        while True:
            idx += 1
            y_masks = subsequent_mask(idx).unsqueeze(0)
            z = model.decoder.forward_one_step(ys_in_, y_masks, hs_ir)[
                0
            ]  # (B, idx, adim)
            outs += [model.feat_out(z).view(1, -1, model.odim)]  # [(1, r, odim), ...]
            probs += [torch.sigmoid(model.prob_out(z))[0]]  # [(r), ...]
            if idx >= maxlen:
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=1).transpose(
                    1, 2
                )  # (1, L, odim) -> (1, odim, L)
                if model.postnet is not None:
                    outs = outs + model.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                break
            ys_in_ = torch.cat(
                (ys_in_, ys_in[0, idx].view(1, 1, model.odim)), dim=1
            )  # (1, idx + 1, odim)
        # --------- both paths must agree ---------
        np.testing.assert_array_almost_equal(
            hs_fp.detach().cpu().numpy(),
            hs_ir.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            after_outs.squeeze(0).detach().cpu().numpy(),
            outs.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            torch.sigmoid(logits.squeeze(0)).detach().cpu().numpy(),
            probs.detach().cpu().numpy(),
        )
| 15,609 | 32.354701 | 88 | py |
espnet | espnet-master/test/test_asr_interface.py | import pytest
from espnet.nets.asr_interface import dynamic_import_asr
@pytest.mark.parametrize(
    "name, backend",
    [(nn, backend) for nn in ("transformer", "rnn") for backend in ("pytorch",)],
)
def test_asr_build(name, backend):
    """ASRInterface.build must forward keyword overrides such as mtlalpha."""
    model = dynamic_import_asr(name, backend).build(
        10, 10, mtlalpha=0.123, adim=4, eunits=3, dunits=3, elayers=2, dlayers=2
    )
    assert model.mtlalpha == 0.123
| 415 | 26.733333 | 81 | py |
espnet | espnet-master/test/test_e2e_asr.py | # coding: utf-8
# Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import importlib
import os
import tempfile
from test.utils_test import make_dummy_json
import chainer
import numpy as np
import pytest
import torch
import espnet.nets.chainer_backend.e2e_asr as ch_asr
import espnet.nets.pytorch_backend.e2e_asr as th_asr
from espnet.asr import asr_utils
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E
from espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E
from espnet.utils.training.batchfy import make_batchset
def make_arg(**kwargs):
    """Return the default hybrid CTC/attention E2E ASR namespace, with overrides."""
    args = dict(
        elayers=1,
        subsample="1_2_2_1_1",
        etype="vggblstm",
        eunits=2,
        eprojs=2,
        dtype="lstm",
        dlayers=1,
        dunits=2,
        atype="location",
        aheads=1,
        awin=3,
        aconv_chans=1,
        aconv_filts=1,
        mtlalpha=0.5,
        lsm_type="",
        lsm_weight=0.0,
        sampling_probability=0.0,
        adim=2,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        nbest=3,
        beam_size=2,
        penalty=0.5,
        maxlenratio=1.0,
        minlenratio=0.0,
        ctc_weight=0.2,
        ctc_window_margin=0,
        lm_weight=0.0,
        rnnlm=None,
        streaming_min_blank_dur=10,
        streaming_onset_margin=2,
        streaming_offset_margin=2,
        verbose=2,
        char_list=["あ", "い", "う", "え", "お"],
        outdir=None,
        ctc_type="builtin",
        report_cer=False,
        report_wer=False,
        sym_space="<space>",
        sym_blank="<blank>",
        sortagrad=0,
        grad_noise=False,
        context_residual=False,
        use_frontend=False,
    )
    args.update(kwargs)
    return argparse.Namespace(**args)
def prepare_inputs(mode, ilens=(14, 13), olens=(4, 3), is_cuda=False):
    """Build random padded ASR inputs for the chainer or pytorch backend.

    Args:
        mode: "chainer" or "pytorch" — selects the tensor/Variable layout.
        ilens: per-utterance input lengths (frames).
        olens: per-utterance output lengths (labels).
        is_cuda: move the data to the GPU (cupy / .cuda()).

    Returns:
        (xs, ilens, ys) in the backend's native format.

    Raises:
        ValueError: if *mode* is neither "chainer" nor "pytorch".
    """
    # NOTE: the defaults were mutable lists; tuples avoid the shared-mutable
    # default-argument pitfall and behave identically for iteration/len().
    np.random.seed(1)
    assert len(ilens) == len(olens)
    xs = [np.random.randn(ilen, 10).astype(np.float32) for ilen in ilens]
    ys = [np.random.randint(1, 5, olen).astype(np.int32) for olen in olens]
    ilens = np.array([x.shape[0] for x in xs], dtype=np.int32)
    if mode == "chainer":
        if is_cuda:
            xp = importlib.import_module("cupy")
            xs = [chainer.Variable(xp.array(x)) for x in xs]
            ys = [chainer.Variable(xp.array(y)) for y in ys]
            ilens = xp.array(ilens)
        else:
            xs = [chainer.Variable(x) for x in xs]
            ys = [chainer.Variable(y) for y in ys]
        return xs, ilens, ys
    elif mode == "pytorch":
        ilens = torch.from_numpy(ilens).long()
        xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0)
        ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], -1)
        if is_cuda:
            xs_pad = xs_pad.cuda()
            ilens = ilens.cuda()
            ys_pad = ys_pad.cuda()
        return xs_pad, ilens, ys_pad
    else:
        raise ValueError("Invalid mode")
def convert_batch(batch, backend="pytorch", is_cuda=False, idim=10, odim=5):
    """Convert a dummy-json minibatch into random backend tensors.

    Shapes are taken from the json entries; the contents are random.
    """
    ilens = np.array([x[1]["input"][0]["shape"][0] for x in batch])
    olens = np.array([x[1]["output"][0]["shape"][0] for x in batch])
    xs = [np.random.randn(ilen, idim).astype(np.float32) for ilen in ilens]
    ys = [np.random.randint(1, odim, olen).astype(np.int32) for olen in olens]
    if backend == "pytorch":
        xs = pad_list([torch.from_numpy(x).float() for x in xs], 0)
        ilens = torch.from_numpy(ilens).long()
        ys = pad_list([torch.from_numpy(y).long() for y in ys], -1)
        if is_cuda:
            xs = xs.cuda()
            ilens = ilens.cuda()
            ys = ys.cuda()
    elif is_cuda:
        xp = importlib.import_module("cupy")
        xs = [chainer.Variable(xp.array(x)) for x in xs]
        ys = [chainer.Variable(xp.array(y)) for y in ys]
        ilens = xp.array(ilens)
    else:
        xs = [chainer.Variable(x) for x in xs]
        ys = [chainer.Variable(y) for y in ys]
    return xs, ilens, ys
@pytest.mark.parametrize(
"module, model_dict",
[
("espnet.nets.chainer_backend.e2e_asr", {}),
("espnet.nets.chainer_backend.e2e_asr", {"elayers": 2, "dlayers": 2}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggblstmp"}),
(
"espnet.nets.chainer_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "noatt"},
),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggblstmp", "atype": "dot"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "grup"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "lstmp"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "bgrup"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "blstmp"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "bgru"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "blstm"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vgggru"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggbgrup"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vgglstm"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vgglstmp"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggbgru"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggbgrup"}),
("espnet.nets.chainer_backend.e2e_asr", {"etype": "vggblstmp", "dtype": "gru"}),
("espnet.nets.chainer_backend.e2e_asr", {"mtlalpha": 0.0}),
("espnet.nets.chainer_backend.e2e_asr", {"mtlalpha": 1.0}),
("espnet.nets.chainer_backend.e2e_asr", {"sampling_probability": 0.5}),
("espnet.nets.chainer_backend.e2e_asr", {"ctc_type": "builtin"}),
("espnet.nets.chainer_backend.e2e_asr", {"ctc_weight": 0.0}),
("espnet.nets.chainer_backend.e2e_asr", {"ctc_weight": 1.0}),
("espnet.nets.chainer_backend.e2e_asr", {"report_cer": True}),
("espnet.nets.chainer_backend.e2e_asr", {"report_wer": True}),
(
"espnet.nets.chainer_backend.e2e_asr",
{"report_cer": True, "report_wer": True},
),
(
"espnet.nets.chainer_backend.e2e_asr",
{"report_cer": True, "report_wer": True, "mtlalpha": 0.0},
),
(
"espnet.nets.chainer_backend.e2e_asr",
{"report_cer": True, "report_wer": True, "mtlalpha": 1.0},
),
("espnet.nets.pytorch_backend.e2e_asr", {}),
("espnet.nets.pytorch_backend.e2e_asr", {"elayers": 2, "dlayers": 2}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "grup"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "lstmp"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "bgrup"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "blstmp"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "bgru"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "blstm"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vgggru"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vgggrup"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vgglstm"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vgglstmp"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vggbgru"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vggbgrup"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vggblstmp", "dtype": "gru"}),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "noatt"},
),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vggblstmp", "atype": "add"}),
("espnet.nets.pytorch_backend.e2e_asr", {"etype": "vggblstmp", "atype": "dot"}),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "coverage"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "coverage_location"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "location2d"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "location_recurrent"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "multi_head_dot"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "multi_head_add"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "multi_head_loc"},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"etype": "vggblstmp", "atype": "multi_head_multi_res_loc"},
),
("espnet.nets.pytorch_backend.e2e_asr", {"mtlalpha": 0.0}),
("espnet.nets.pytorch_backend.e2e_asr", {"mtlalpha": 1.0}),
("espnet.nets.pytorch_backend.e2e_asr", {"sampling_probability": 0.5}),
("espnet.nets.pytorch_backend.e2e_asr", {"ctc_type": "builtin"}),
("espnet.nets.pytorch_backend.e2e_asr", {"ctc_weight": 0.0}),
("espnet.nets.pytorch_backend.e2e_asr", {"ctc_weight": 1.0}),
("espnet.nets.pytorch_backend.e2e_asr", {"context_residual": True}),
("espnet.nets.pytorch_backend.e2e_asr", {"grad_noise": True}),
("espnet.nets.pytorch_backend.e2e_asr", {"report_cer": True}),
("espnet.nets.pytorch_backend.e2e_asr", {"report_wer": True}),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"report_cer": True, "report_wer": True},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"report_cer": True, "report_wer": True, "mtlalpha": 0.0},
),
(
"espnet.nets.pytorch_backend.e2e_asr",
{"report_cer": True, "report_wer": True, "mtlalpha": 1.0},
),
],
)
def test_model_trainable_and_decodable(module, model_dict):
    """Smoke test: the E2E model can run one backward pass and decode.

    ``module`` selects the backend (chainer vs pytorch); ``model_dict``
    overrides ``make_arg`` defaults for the configuration under test.
    """
    args = make_arg(**model_dict)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
        m = th_asr
    else:
        batch = prepare_inputs("chainer")
        m = ch_asr
    # idim=10, odim=5 matches the shapes produced by prepare_inputs
    model = m.E2E(10, 5, args)
    loss = model(*batch)
    if isinstance(loss, tuple):
        # chainer return several values as tuple
        loss[0].backward()  # trainable
    else:
        loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(10, 10)
        model.recognize(in_data, args, args.char_list)  # decodable
        if "pytorch" in module:
            # batch decoding is only implemented for the pytorch backend
            batch_in_data = [np.random.randn(10, 10), np.random.randn(5, 10)]
            model.recognize_batch(
                batch_in_data, args, args.char_list
            )  # batch decodable
def test_window_streaming_e2e_encoder_and_ctc_with_offline_attention():
    """Feed the same window twice and run offline attention decoding each time."""
    args = make_arg()
    streaming_asr = WindowStreamingE2E(th_asr.E2E(10, 5, args), args)
    chunk = np.random.randn(100, 10)
    for _ in range(2):
        streaming_asr.accept_input(chunk)
        streaming_asr.decode_with_attention_offline()
def test_segment_streaming_e2e():
    """Stream a 50-frame utterance chunk-by-chunk, with batchsize 0 and then 1."""
    args = make_arg(etype="vgglstm")  # uni-directional encoder
    args.batchsize = 0
    model = th_asr.E2E(10, 5, args)
    asr = SegmentStreamingE2E(model, args)
    in_data = np.random.randn(50, 10)
    # chunk size follows the model's subsampling factor
    step = np.prod(model.subsample)
    for batchsize in (0, 1):
        args.batchsize = batchsize
        for start in range(0, 50, step):
            asr.accept_input(in_data[start : start + step])
@pytest.mark.parametrize("module", ["pytorch"])
def test_gradient_noise_injection(module):
args = make_arg(grad_noise=True)
args_org = make_arg()
dummy_json = make_dummy_json(2, [3, 4], [3, 4], idim=10, odim=5)
if module == "pytorch":
import espnet.nets.pytorch_backend.e2e_asr as m
else:
import espnet.nets.chainer_backend.e2e_asr as m
batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
model = m.E2E(10, 5, args)
model_org = m.E2E(10, 5, args_org)
for batch in batchset:
loss = model(*convert_batch(batch, module, idim=10, odim=5))
loss_org = model_org(*convert_batch(batch, module, idim=10, odim=5))
loss.backward()
grad = [param.grad for param in model.parameters()][10]
loss_org.backward()
grad_org = [param.grad for param in model_org.parameters()][10]
assert grad[0] != grad_org[0]
@pytest.mark.parametrize("module", ["pytorch", "chainer"])
def test_sortagrad_trainable(module):
args = make_arg(sortagrad=1)
idim = 10
odim = 5
dummy_json = make_dummy_json(2, [3, 5], [3, 5], idim=idim, odim=odim)
if module == "pytorch":
import espnet.nets.pytorch_backend.e2e_asr as m
else:
import espnet.nets.chainer_backend.e2e_asr as m
batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
model = m.E2E(idim, odim, args)
for batch in batchset:
loss = model(*convert_batch(batch, module, idim=idim, odim=odim))
if isinstance(loss, tuple):
# chainer return several values as tuple
loss[0].backward() # trainable
else:
loss.backward() # trainable
with torch.no_grad(), chainer.no_backprop_mode():
in_data = np.random.randn(10, idim)
model.recognize(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch", "chainer"])
def test_sortagrad_trainable_with_batch_bins(module):
args = make_arg(sortagrad=1)
idim = 10
odim = 5
dummy_json = make_dummy_json(2, [3, 5], [3, 5], idim=idim, odim=odim)
if module == "pytorch":
import espnet.nets.pytorch_backend.e2e_asr as m
else:
import espnet.nets.chainer_backend.e2e_asr as m
batch_elems = 2000
batchset = make_batchset(dummy_json, batch_bins=batch_elems, shortest_first=True)
for batch in batchset:
n = 0
for uttid, info in batch:
ilen = int(info["input"][0]["shape"][0])
olen = int(info["output"][0]["shape"][0])
n += ilen * idim + olen * odim
assert olen < batch_elems
model = m.E2E(idim, odim, args)
for batch in batchset:
loss = model(*convert_batch(batch, module, idim=idim, odim=odim))
if isinstance(loss, tuple):
# chainer return several values as tuple
loss[0].backward() # trainable
else:
loss.backward() # trainable
with torch.no_grad(), chainer.no_backprop_mode():
in_data = np.random.randn(10, idim)
model.recognize(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch", "chainer"])
def test_sortagrad_trainable_with_batch_frames(module):
args = make_arg(sortagrad=1)
idim = 10
odim = 5
dummy_json = make_dummy_json(2, [3, 5], [3, 5], idim=idim, odim=odim)
if module == "pytorch":
import espnet.nets.pytorch_backend.e2e_asr as m
else:
import espnet.nets.chainer_backend.e2e_asr as m
batch_frames_in = 50
batch_frames_out = 50
batchset = make_batchset(
dummy_json,
batch_frames_in=batch_frames_in,
batch_frames_out=batch_frames_out,
shortest_first=True,
)
for batch in batchset:
i = 0
o = 0
for uttid, info in batch:
i += int(info["input"][0]["shape"][0])
o += int(info["output"][0]["shape"][0])
assert i <= batch_frames_in
assert o <= batch_frames_out
model = m.E2E(idim, odim, args)
for batch in batchset:
loss = model(*convert_batch(batch, module, idim=idim, odim=odim))
if isinstance(loss, tuple):
# chainer return several values as tuple
loss[0].backward() # trainable
else:
loss.backward() # trainable
with torch.no_grad(), chainer.no_backprop_mode():
in_data = np.random.randn(10, idim)
model.recognize(in_data, args, args.char_list)
def init_torch_weight_const(m, val):
    """Fill every weight tensor (dim > 1) of torch module *m* with *val*.

    One-dimensional parameters (biases) are left untouched.
    """
    weights = (p for p in m.parameters() if p.dim() > 1)
    for w in weights:
        w.data.fill_(val)
def init_chainer_weight_const(m, val):
    """Set every weight array (ndim > 1) of chainer model *m* to *val*.

    One-dimensional parameters (biases) are left untouched.
    """
    for param in m.params():
        arr = param.data
        if arr.ndim > 1:
            arr[:] = val
def test_loss_and_ctc_grad():
    """Chainer and pytorch backends must agree on losses and gradients.

    Both models are initialized to the same constant weights so their
    forward losses and CTC / cross-entropy gradients can be compared.
    """
    args = make_arg(etype="vggblstmp")
    ch_model = ch_asr.E2E(10, 5, args)
    ch_model.cleargrads()
    th_model = th_asr.E2E(10, 5, args)
    const = 1e-4
    init_torch_weight_const(th_model, const)
    init_chainer_weight_const(ch_model, const)
    ch_batch = prepare_inputs("chainer")
    th_batch = prepare_inputs("pytorch")
    _, ch_ctc, ch_att, ch_acc = ch_model(*ch_batch)
    th_model(*th_batch)
    th_ctc, th_att = th_model.loss_ctc, th_model.loss_att
    # test masking
    ch_ench = ch_model.att.pre_compute_enc_h.data
    th_ench = th_model.att[0].pre_compute_enc_h.detach().numpy()
    np.testing.assert_equal(ch_ench == 0.0, th_ench == 0.0)
    # test loss with constant weights (1.0) and bias (0.0) except for forget-bias (1.0)
    np.testing.assert_allclose(ch_ctc.data, th_ctc.detach().numpy())
    np.testing.assert_allclose(ch_att.data, th_att.detach().numpy())
    # test ctc grads
    ch_ctc.backward()
    th_ctc.backward()
    np.testing.assert_allclose(
        ch_model.ctc.ctc_lo.W.grad,
        th_model.ctc.ctc_lo.weight.grad.data.numpy(),
        1e-7,
        1e-8,
    )
    np.testing.assert_allclose(
        ch_model.ctc.ctc_lo.b.grad,
        th_model.ctc.ctc_lo.bias.grad.data.numpy(),
        1e-5,
        1e-6,
    )
    # test cross-entropy grads
    # re-run the forward pass after clearing grads so the attention loss
    # can be backpropagated independently of the CTC loss
    ch_model.cleargrads()
    th_model.zero_grad()
    _, ch_ctc, ch_att, ch_acc = ch_model(*ch_batch)
    th_model(*th_batch)
    th_ctc, th_att = th_model.loss_ctc, th_model.loss_att
    ch_att.backward()
    th_att.backward()
    np.testing.assert_allclose(
        ch_model.dec.output.W.grad,
        th_model.dec.output.weight.grad.data.numpy(),
        1e-7,
        1e-8,
    )
    np.testing.assert_allclose(
        ch_model.dec.output.b.grad,
        th_model.dec.output.bias.grad.data.numpy(),
        1e-5,
        1e-6,
    )
@pytest.mark.parametrize("etype", ["blstmp", "vggblstmp"])
def test_mtl_loss(etype):
args = make_arg(etype=etype)
ch_model = ch_asr.E2E(10, 5, args)
th_model = th_asr.E2E(10, 5, args)
const = 1e-4
init_torch_weight_const(th_model, const)
init_chainer_weight_const(ch_model, const)
ch_batch = prepare_inputs("chainer")
th_batch = prepare_inputs("pytorch")
_, ch_ctc, ch_att, ch_acc = ch_model(*ch_batch)
th_model(*th_batch)
th_ctc, th_att = th_model.loss_ctc, th_model.loss_att
# test masking
ch_ench = ch_model.att.pre_compute_enc_h.data
th_ench = th_model.att[0].pre_compute_enc_h.detach().numpy()
np.testing.assert_equal(ch_ench == 0.0, th_ench == 0.0)
# test loss with constant weights (1.0) and bias (0.0) except for foget-bias (1.0)
np.testing.assert_allclose(ch_ctc.data, th_ctc.detach().numpy())
np.testing.assert_allclose(ch_att.data, th_att.detach().numpy())
# test grads in mtl mode
ch_loss = ch_ctc * 0.5 + ch_att * 0.5
th_loss = th_ctc * 0.5 + th_att * 0.5
ch_model.cleargrads()
th_model.zero_grad()
ch_loss.backward()
th_loss.backward()
np.testing.assert_allclose(
ch_model.ctc.ctc_lo.W.grad,
th_model.ctc.ctc_lo.weight.grad.data.numpy(),
1e-7,
1e-8,
)
np.testing.assert_allclose(
ch_model.ctc.ctc_lo.b.grad,
th_model.ctc.ctc_lo.bias.grad.data.numpy(),
1e-5,
1e-6,
)
np.testing.assert_allclose(
ch_model.dec.output.W.grad,
th_model.dec.output.weight.grad.data.numpy(),
1e-7,
1e-8,
)
np.testing.assert_allclose(
ch_model.dec.output.b.grad,
th_model.dec.output.bias.grad.data.numpy(),
1e-5,
1e-6,
)
@pytest.mark.parametrize("etype", ["blstmp", "vggblstmp"])
def test_zero_length_target(etype):
args = make_arg(etype=etype)
ch_model = ch_asr.E2E(10, 5, args)
ch_model.cleargrads()
th_model = th_asr.E2E(10, 5, args)
ch_batch = prepare_inputs("chainer", olens=[4, 0])
th_batch = prepare_inputs("pytorch", olens=[4, 0])
ch_model(*ch_batch)
th_model(*th_batch)
# NOTE: We ignore all zero length case because chainer also fails.
# Have a nice data-prep!
# out_data = ""
# data = [
# ("aaa", dict(feat=np.random.randn(200, 10).astype(np.float32), tokenid="")),
# ("bbb", dict(feat=np.random.randn(100, 10).astype(np.float32), tokenid="")),
# ("cc", dict(feat=np.random.randn(100, 10).astype(np.float32), tokenid=""))
# ]
# ch_ctc, ch_att, ch_acc = ch_model(data)
# th_ctc, th_att, th_acc = th_model(data)
@pytest.mark.parametrize(
"module, atype",
[
("espnet.nets.chainer_backend.e2e_asr", "noatt"),
("espnet.nets.chainer_backend.e2e_asr", "dot"),
("espnet.nets.chainer_backend.e2e_asr", "location"),
("espnet.nets.pytorch_backend.e2e_asr", "noatt"),
("espnet.nets.pytorch_backend.e2e_asr", "dot"),
("espnet.nets.pytorch_backend.e2e_asr", "add"),
("espnet.nets.pytorch_backend.e2e_asr", "location"),
("espnet.nets.pytorch_backend.e2e_asr", "coverage"),
("espnet.nets.pytorch_backend.e2e_asr", "coverage_location"),
("espnet.nets.pytorch_backend.e2e_asr", "location2d"),
("espnet.nets.pytorch_backend.e2e_asr", "location_recurrent"),
("espnet.nets.pytorch_backend.e2e_asr", "multi_head_dot"),
("espnet.nets.pytorch_backend.e2e_asr", "multi_head_add"),
("espnet.nets.pytorch_backend.e2e_asr", "multi_head_loc"),
("espnet.nets.pytorch_backend.e2e_asr", "multi_head_multi_res_loc"),
],
)
def test_calculate_all_attentions(module, atype):
    """calculate_all_attentions must run for every attention type/backend."""
    m = importlib.import_module(module)
    args = make_arg(atype=atype)
    is_pytorch = "pytorch" in module
    batch = prepare_inputs("pytorch" if is_pytorch else "chainer")
    model = m.E2E(10, 5, args)
    with chainer.no_backprop_mode():
        att_ws = model.calculate_all_attentions(*batch)
        if is_pytorch:
            # pytorch backend wraps the weights in an extra list
            att_ws = att_ws[0]
    print(att_ws.shape)
@pytest.mark.parametrize("mtlalpha", [0.0, 0.5, 1.0])
def test_calculate_all_ctc_probs(mtlalpha):
args = make_arg(mtlalpha=mtlalpha)
batch = prepare_inputs("pytorch")
model = th_asr.E2E(10, 5, args)
ctc_probs = model.calculate_all_ctc_probs(*batch)
if mtlalpha > 0:
print(ctc_probs.shape)
else:
assert ctc_probs is None
def test_chainer_save_and_load():
    """Round-trip: saved chainer parameters must be restored exactly."""
    args = make_arg()
    model = ch_asr.E2E(10, 5, args)
    # initialize randomly
    for p in model.params():
        p.data = np.random.randn(*p.data.shape)
    # BUG FIX: tempfile.mktemp() is deprecated and race-prone; mkstemp()
    # atomically creates the file and returns an open fd we close right away.
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    try:
        chainer.serializers.save_npz(tmppath, model)
        p_saved = [p.data for p in model.params()]
        # clobber all parameters so the load is observable
        for p in model.params():
            p.data = np.zeros_like(p.data)
        asr_utils.chainer_load(tmppath, model)
        for p1, p2 in zip(p_saved, model.params()):
            np.testing.assert_array_equal(p1, p2.data)
    finally:
        # clean up even if an assertion above fails
        if os.path.exists(tmppath):
            os.remove(tmppath)
def test_torch_save_and_load():
    """Round-trip: saved torch parameters must be restored exactly."""
    args = make_arg()
    model = th_asr.E2E(10, 5, args)
    # initialize randomly
    for p in model.parameters():
        p.data.uniform_()
    # NOTE(review): the original also created ".pytest_cache" here, but the
    # snapshot goes to the system temp dir, so that mkdir had no effect.
    # BUG FIX: tempfile.mktemp() is deprecated and race-prone; mkstemp()
    # atomically creates the file and returns an open fd we close right away.
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    try:
        asr_utils.torch_save(tmppath, model)
        p_saved = [p.data.numpy() for p in model.parameters()]
        # clobber all parameters so the load is observable
        for p in model.parameters():
            p.data.zero_()
        asr_utils.torch_load(tmppath, model)
        for p1, p2 in zip(p_saved, model.parameters()):
            np.testing.assert_array_equal(p1, p2.data.numpy())
    finally:
        # clean up even if an assertion above fails
        if os.path.exists(tmppath):
            os.remove(tmppath)
@pytest.mark.skipif(
not torch.cuda.is_available() and not chainer.cuda.available, reason="gpu required"
)
@pytest.mark.parametrize(
"module",
["espnet.nets.chainer_backend.e2e_asr", "espnet.nets.pytorch_backend.e2e_asr"],
)
def test_gpu_trainable(module):
    """One forward/backward step on a single GPU for each backend."""
    m = importlib.import_module(module)
    args = make_arg()
    model = m.E2E(10, 5, args)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
    else:
        batch = prepare_inputs("chainer", is_cuda=True)
        model.to_gpu()
    loss = model(*batch)
    # chainer returns a tuple of values; the first entry is the loss
    (loss[0] if isinstance(loss, tuple) else loss).backward()
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"module",
["espnet.nets.chainer_backend.e2e_asr", "espnet.nets.pytorch_backend.e2e_asr"],
)
def test_multi_gpu_trainable(module):
    """One forward/backward step across two GPUs for each backend."""
    m = importlib.import_module(module)
    ngpu = 2
    device_ids = list(range(ngpu))
    args = make_arg()
    model = m.E2E(10, 5, args)
    if "pytorch" in module:
        model = torch.nn.DataParallel(model, device_ids)
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
        # DataParallel returns one loss per replica; average and backprop
        # with per-replica unit gradients
        loss = 1.0 / ngpu * model(*batch)
        loss.backward(loss.new_ones(ngpu))  # trainable
    else:
        import copy
        import cupy
        # chainer has no DataParallel equivalent here: replicate the model
        # per device by hand and accumulate the per-device losses
        losses = []
        for device in device_ids:
            with cupy.cuda.Device(device):
                batch = prepare_inputs("chainer", is_cuda=True)
                _model = copy.deepcopy(
                    model
                )  # Transcribed from training.updaters.ParallelUpdater
                _model.to_gpu()
                loss = 1.0 / ngpu * _model(*batch)[0]
                losses.append(loss)
        for loss in losses:
            loss.backward()  # trainable
| 26,215 | 34.331536 | 88 | py |
espnet | espnet-master/test/test_asr_quantize.py | # Copyright 2021 Gaopeng Xu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import pytest
import torch
from espnet.nets.asr_interface import dynamic_import_asr
@pytest.mark.parametrize(
    "name, backend",
    [(nn, backend) for nn in ("transformer", "rnn") for backend in ("pytorch",)],
)
def test_asr_quantize(name, backend):
    """Dynamic int8 quantization of Linear layers must yield a usable state dict."""
    builder = dynamic_import_asr(name, backend)
    model = builder.build(
        10, 10, mtlalpha=0.123, adim=4, eunits=2, dunits=2, elayers=1, dlayers=1
    )
    quantized_model = torch.quantization.quantize_dynamic(
        model, {torch.nn.Linear}, dtype=torch.qint8
    )
    assert quantized_model.state_dict()
| 642 | 28.227273 | 81 | py |
espnet | espnet-master/test/test_optimizer.py | # coding: utf-8
# Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import chainer
import numpy
import pytest
import torch
from espnet.optimizer.factory import dynamic_import_optimizer
from espnet.optimizer.pytorch import OPTIMIZER_FACTORY_DICT
class ChModel(chainer.Chain):
    """Minimal chainer model: one 3 -> 1 linear layer with summed output."""

    def __init__(self):
        super(ChModel, self).__init__()
        with self.init_scope():
            self.a = chainer.links.Linear(3, 1)

    def __call__(self, x):
        # reduce to a scalar so the output can be used directly as a loss
        return chainer.functions.sum(self.a(x))
class ThModel(torch.nn.Module):
    """Minimal torch model: one 3 -> 1 linear layer with summed output."""

    def __init__(self):
        super(ThModel, self).__init__()
        self.a = torch.nn.Linear(3, 1)

    def forward(self, x):
        # reduce to a scalar so the output can be used directly as a loss
        out = self.a(x)
        return out.sum()
@pytest.mark.parametrize("name", OPTIMIZER_FACTORY_DICT.keys())
def test_optimizer_backend_compatible(name):
torch.set_grad_enabled(True)
# model construction
ch_model = ChModel()
th_model = ThModel()
# copy params
th_model.a.weight.data = torch.from_numpy(numpy.copy(ch_model.a.W.data))
th_model.a.bias.data = torch.from_numpy(numpy.copy(ch_model.a.b.data))
# optimizer setup
th_opt = dynamic_import_optimizer(name, "pytorch").build(th_model.parameters())
ch_opt = dynamic_import_optimizer(name, "chainer").build(ch_model)
# forward
ch_model.cleargrads()
data = numpy.random.randn(2, 3).astype(numpy.float32)
ch_loss = ch_model(data)
th_loss = th_model(torch.from_numpy(data))
chainer.functions.sum(ch_loss).backward()
th_loss.backward()
numpy.testing.assert_allclose(ch_loss.data, th_loss.item(), rtol=1e-6)
ch_opt.update()
th_opt.step()
numpy.testing.assert_allclose(
ch_model.a.W.data, th_model.a.weight.data.numpy(), rtol=1e-6
)
numpy.testing.assert_allclose(
ch_model.a.b.data, th_model.a.bias.data.numpy(), rtol=1e-6
)
def test_pytorch_optimizer_factory():
    """Keyword args given to the factory must land in every param group."""
    model = torch.nn.Linear(2, 1)
    # (optimizer name, hyperparameter key checked in param_groups)
    for name, key in (("adam", "lr"), ("sgd", "lr"), ("adadelta", "rho")):
        opt_class = dynamic_import_optimizer(name, "pytorch")
        optimizer = opt_class.build(model.parameters(), **{key: 0.9})
        assert all(group[key] == 0.9 for group in optimizer.param_groups)
def test_chainer_optimizer_factory():
    """Keyword args given to the factory must reach the chainer optimizer.

    Note the chainer optimizers expose different attribute names: Adam's
    learning rate is ``alpha``, SGD's is ``lr`` and Adadelta uses ``rho``.
    """
    model = chainer.links.Linear(2, 1)
    cases = (
        ("adam", {"lr": 0.9}, "alpha"),
        ("sgd", {"lr": 0.9}, "lr"),
        ("adadelta", {"rho": 0.9}, "rho"),
    )
    for name, kwargs, attr in cases:
        optimizer = dynamic_import_optimizer(name, "chainer").build(model, **kwargs)
        assert getattr(optimizer, attr) == 0.9
| 3,023 | 29.857143 | 83 | py |
espnet | espnet-master/test/test_loss.py | # Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import chainer.functions as F
import numpy
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc", "cudnnctc"])
@pytest.mark.parametrize(
"in_length,out_length", [([11, 17, 15], [4, 2, 3]), ([4], [1])]
)
def test_ctc_loss(in_length, out_length, ctc_type):
if ctc_type == "builtin" or ctc_type == "cudnnctc":
_ctcloss_sum = torch.nn.CTCLoss(reduction="sum")
def torch_ctcloss(th_pred, th_target, th_ilen, th_olen):
th_pred = th_pred.log_softmax(2)
loss = _ctcloss_sum(th_pred, th_target, th_ilen, th_olen)
# Batch-size average
loss = loss / th_pred.size(1)
return loss
elif ctc_type == "gtnctc":
pytest.importorskip("gtn")
from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
_ctcloss_sum = GTNCTCLossFunction.apply
def torch_ctcloss(th_pred, th_target, th_ilen, th_olen):
targets = [t.tolist() for t in th_target]
log_probs = torch.nn.functional.log_softmax(th_pred, dim=2)
loss = _ctcloss_sum(log_probs, targets, th_ilen, 0, "none")
return loss
n_out = 7
input_length = numpy.array(in_length, dtype=numpy.int32)
label_length = numpy.array(out_length, dtype=numpy.int32)
np_pred = [
numpy.random.rand(il, n_out).astype(numpy.float32) for il in input_length
]
np_target = [
numpy.random.randint(0, n_out, size=ol, dtype=numpy.int32)
for ol in label_length
]
# NOTE: np_pred[i] seems to be transposed and used axis=-1 in e2e_asr.py
ch_pred = F.separate(F.pad_sequence(np_pred), axis=-2)
ch_target = F.pad_sequence(np_target, padding=-1)
ch_loss = F.connectionist_temporal_classification(
ch_pred, ch_target, 0, input_length, label_length
).data
th_pred = pad_list([torch.from_numpy(x) for x in np_pred], 0.0).transpose(0, 1)
if ctc_type == "gtnctc":
# gtn implementation expects targets as list
th_target = np_target
# keep as B x T x H for gtn
th_pred = th_pred.transpose(0, 1)
else:
th_target = torch.from_numpy(numpy.concatenate(np_target))
th_ilen = torch.from_numpy(input_length)
th_olen = torch.from_numpy(label_length)
th_loss = torch_ctcloss(th_pred, th_target, th_ilen, th_olen).numpy()
numpy.testing.assert_allclose(th_loss, ch_loss, 0.05)
def test_attn_loss():
    """Torch cross-entropy must match chainer softmax_cross_entropy on padded labels."""
    n_out = 7
    _eos = n_out - 1
    n_batch = 3
    label_length = numpy.array([4, 2, 3], dtype=numpy.int32)
    np_pred = numpy.random.rand(n_batch, max(label_length) + 1, n_out).astype(
        numpy.float32
    )
    # NOTE: 0 is only used for CTC, never appeared in attn target
    np_target = [
        numpy.random.randint(1, n_out - 1, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]
    eos = numpy.array([_eos], "i")
    # append <eos> to every target, as the attention decoder does
    ys_out = [F.concat([y, eos], axis=0) for y in np_target]
    # padding for ys with -1
    # pys: utt x olen
    # NOTE: -1 is default ignore index for chainer
    pad_ys_out = F.pad_sequence(ys_out, padding=-1)
    y_all = F.reshape(np_pred, (n_batch * (max(label_length) + 1), n_out))
    ch_loss = F.softmax_cross_entropy(y_all, F.concat(pad_ys_out, axis=0))
    # NOTE: this index 0 is only for CTC not attn. so it can be ignored
    # unfortunately, torch cross_entropy does not accept out-of-bound ids
    th_ignore = 0
    th_pred = torch.from_numpy(y_all.data)
    th_target = pad_list([torch.from_numpy(t.data).long() for t in ys_out], th_ignore)
    th_loss = torch.nn.functional.cross_entropy(
        th_pred,
        th_target.view(-1),
        ignore_index=th_ignore,
        reduction="mean",
    )
    print(ch_loss)
    print(th_loss)
    # NOTE: chainer's default setting are normalized by batch-size
    loss_data = float(th_loss)
    numpy.testing.assert_allclose(loss_data, ch_loss.data, 0.05)
def test_train_acc():
    """th_accuracy must agree with chainer's F.accuracy on padded labels."""
    n_out = 7
    _eos = n_out - 1
    n_batch = 3
    label_length = numpy.array([4, 2, 3], dtype=numpy.int32)
    np_pred = numpy.random.rand(n_batch, max(label_length) + 1, n_out).astype(
        numpy.float32
    )
    # NOTE: 0 is only used for CTC, never appeared in attn target
    np_target = [
        numpy.random.randint(1, n_out - 1, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]
    eos = numpy.array([_eos], "i")
    # append <eos> to every target, as the attention decoder does
    ys_out = [F.concat([y, eos], axis=0) for y in np_target]
    # padding for ys with -1
    # pys: utt x olen
    # NOTE: -1 is default ignore index for chainer
    pad_ys_out = F.pad_sequence(ys_out, padding=-1)
    y_all = F.reshape(np_pred, (n_batch * (max(label_length) + 1), n_out))
    ch_acc = F.accuracy(y_all, F.concat(pad_ys_out, axis=0), ignore_label=-1)
    # NOTE: this index 0 is only for CTC not attn. so it can be ignored
    # unfortunately, torch cross_entropy does not accept out-of-bound ids
    th_ignore = 0
    th_pred = torch.from_numpy(y_all.data)
    th_ys = [torch.from_numpy(numpy.append(t, eos)).long() for t in np_target]
    th_target = pad_list(th_ys, th_ignore)
    th_acc = th_accuracy(th_pred, th_target, th_ignore)
    numpy.testing.assert_allclose(ch_acc.data, th_acc)
| 5,405 | 35.527027 | 86 | py |
espnet | espnet-master/test/test_beam_search.py | from argparse import Namespace
import numpy
import pytest
import torch
from espnet.nets.asr_interface import dynamic_import_asr
from espnet.nets.beam_search import BeamSearch
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorers.length_bonus import LengthBonus
# Default training/decoding configuration for the RNN-based E2E model
# used throughout these beam-search tests.
rnn_args = Namespace(
    elayers=1,
    subsample=None,
    etype="vgglstm",
    eunits=2,
    eprojs=2,
    dtype="lstm",
    dlayers=1,
    dunits=2,
    atype="dot",
    aheads=2,
    awin=2,
    aconv_chans=2,
    aconv_filts=2,
    lsm_type="",
    lsm_weight=0.0,
    sampling_probability=0.0,
    adim=2,
    dropout_rate=0.0,
    dropout_rate_decoder=0.0,
    nbest=3,
    beam_size=2,
    penalty=0.5,
    maxlenratio=1.0,
    minlenratio=0.0,
    ctc_weight=0.2,
    lm_weight=0.0,
    rnnlm=None,
    streaming_min_blank_dur=10,
    streaming_onset_margin=2,
    streaming_offset_margin=2,
    verbose=2,
    outdir=None,
    ctc_type="builtin",
    report_cer=False,
    report_wer=False,
    sym_space="<space>",
    sym_blank="<blank>",
    sortagrad=0,
    grad_noise=False,
    context_residual=False,
    use_frontend=False,
    replace_sos=False,
    tgt_lang=False,
)
# Default configuration for the transformer-based E2E model.
transformer_args = Namespace(
    adim=4,
    aheads=2,
    dropout_rate=0.0,
    transformer_attn_dropout_rate=None,
    elayers=1,
    eunits=2,
    dlayers=1,
    dunits=2,
    sym_space="<space>",
    sym_blank="<blank>",
    transformer_init="pytorch",
    transformer_input_layer="conv2d",
    transformer_length_normalized_loss=True,
    report_cer=False,
    report_wer=False,
    ctc_type="builtin",
    lsm_weight=0.001,
)
# Transformer variant using lightweight-convolution self-attention layers;
# inherits every transformer_args field and overrides the layer types.
ldconv_args = Namespace(
    **vars(transformer_args),
    transformer_decoder_selfattn_layer_type="lightconv",
    transformer_encoder_selfattn_layer_type="lightconv",
    wshare=2,
    ldconv_encoder_kernel_length="31_31",
    ldconv_decoder_kernel_length="11_11",
    ldconv_usebias=False,
)
# from test.test_e2e_asr_transformer import prepare
def prepare(E2E, args, mtlalpha=0.0):
    """Build a small pytorch model plus a random two-utterance batch.

    Returns ``(model, x, ilens, y, data, args)`` where *data* is the
    json-style metadata used by batching utilities.
    """
    args.mtlalpha = mtlalpha
    args.char_list = ["a", "e", "i", "o", "u"]
    idim = 8
    odim = len(args.char_list)
    model = dynamic_import_asr(E2E, "pytorch")(idim, odim, args)
    ilens = [20, 15]
    olens = [10, 2]
    batchsize = len(ilens)
    x = torch.randn(batchsize, 20, idim)
    n_token = odim - 1
    # avoid 0 for eps in ctc
    y = (torch.rand(batchsize, 10) * n_token % (n_token - 1)).long() + 1
    # mark the padded tail of every utterance with -1
    for i, (ilen, olen) in enumerate(zip(ilens, olens)):
        x[i, ilen:] = -1
        y[i, olen:] = -1
    data = [
        (
            "utt%d" % i,
            {
                "input": [{"shape": [ilens[i], idim]}],
                "output": [{"shape": [olens[i]]}],
            },
        )
        for i in range(batchsize)
    ]
    return model, x, torch.tensor(ilens), y, data, args
@pytest.mark.parametrize(
"model_class, args, mtlalpha, ctc_weight, lm_weight, bonus, device, dtype",
[
(nn, args, ctc_train, ctc_recog, lm, bonus, device, dtype)
for device in ("cpu", "cuda")
for nn, args in (
("transformer", transformer_args),
("transformer", ldconv_args),
("rnn", rnn_args),
)
for ctc_train in (0.0, 0.5, 1.0)
for ctc_recog in (0.0, 0.5, 1.0)
for lm in (0.5,)
for bonus in (0.1,)
for dtype in ("float16", "float32", "float64")
],
)
def test_beam_search_equal(
    model_class, args, mtlalpha, ctc_weight, lm_weight, bonus, device, dtype
):
    """The generic BeamSearch must reproduce the legacy recognize() n-best list."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    if device == "cpu" and dtype == "float16":
        pytest.skip("cpu float16 implementation is not available in pytorch yet")
    if mtlalpha == 0.0 and ctc_weight > 0.0:
        pytest.skip("no CTC + CTC decoding.")
    if mtlalpha == 1.0 and ctc_weight < 1.0:
        pytest.skip("pure CTC + attention decoding")
    # TODO(hirofumi0810): pure CTC beam search is not implemented
    if ctc_weight == 1.0 and model_class == "transformer":
        pytest.skip("pure CTC beam search is not implemented")
    # seed setting
    torch.manual_seed(123)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = (
        False  # https://github.com/pytorch/pytorch/issues/6351
    )
    dtype = getattr(torch, dtype)
    model, x, ilens, y, data, train_args = prepare(model_class, args, mtlalpha=mtlalpha)
    model.eval()
    char_list = train_args.char_list
    lm_args = Namespace(type="lstm", layer=1, unit=2, embed_unit=2, dropout_rate=0.0)
    lm = dynamic_import_lm("default", backend="pytorch")(len(char_list), lm_args)
    lm.eval()
    # test previous beam search
    args = Namespace(
        beam_size=3,
        penalty=bonus,
        ctc_weight=ctc_weight,
        maxlenratio=0,
        lm_weight=lm_weight,
        minlenratio=0,
        nbest=3,
    )
    # single utterance, trimmed to its true length
    feat = x[0, : ilens[0]].numpy()
    # legacy beam search
    with torch.no_grad():
        nbest = model.recognize(feat, args, char_list, lm.model)
    # new beam search
    scorers = model.scorers()
    if lm_weight != 0:
        scorers["lm"] = lm
    scorers["length_bonus"] = LengthBonus(len(char_list))
    weights = dict(
        decoder=1.0 - ctc_weight,
        ctc=ctc_weight,
        lm=args.lm_weight,
        length_bonus=args.penalty,
    )
    model.to(device, dtype=dtype)
    model.eval()
    beam = BeamSearch(
        beam_size=args.beam_size,
        vocab_size=len(char_list),
        weights=weights,
        scorers=scorers,
        token_list=train_args.char_list,
        sos=model.sos,
        eos=model.eos,
        pre_beam_score_key=None if ctc_weight == 1.0 else "decoder",
    )
    beam.to(device, dtype=dtype)
    beam.eval()
    with torch.no_grad():
        enc = model.encode(torch.as_tensor(feat).to(device, dtype=dtype))
        nbest_bs = beam(
            x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio
        )
    if dtype == torch.float16:
        # skip because results are different. just checking it is decodable
        return
    # the two implementations must agree hypothesis-by-hypothesis
    for i, (expected, actual) in enumerate(zip(nbest, nbest_bs)):
        actual = actual.asdict()
        assert expected["yseq"] == actual["yseq"]
        numpy.testing.assert_allclose(expected["score"], actual["score"], rtol=1e-6)
| 6,366 | 27.55157 | 88 | py |
espnet | espnet-master/test/test_lm.py | from test.test_beam_search import prepare, rnn_args
import chainer
import numpy
import pytest
import torch
import espnet.lm.chainer_backend.lm as lm_chainer
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.beam_search import beam_search
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorers.length_bonus import LengthBonus
def transfer_lstm(ch_lstm, th_lstm):
    """Copy the parameters of a chainer LSTM link into a torch LSTM cell.

    The input-side weight and bias are first forced to all-ones on the
    chainer side so both frameworks end up with identical values despite
    their different gate layouts; only the lateral (hidden-to-hidden)
    weight can be transferred directly.
    """
    upward = ch_lstm.upward
    upward.W.data[:] = 1
    th_lstm.weight_ih.data[:] = torch.from_numpy(upward.W.data)
    upward.b.data[:] = 1
    th_lstm.bias_hh.data[:] = torch.from_numpy(upward.b.data)
    # Only the lateral weight maps one-to-one between the frameworks;
    # the remaining weights and biases use quite different placements.
    th_lstm.weight_hh.data[:] = torch.from_numpy(ch_lstm.lateral.W.data)
    th_lstm.bias_ih.data.zero_()
def transfer_lm(ch_rnnlm, th_rnnlm):
    """Copy every parameter of a chainer RNNLM into a torch RNNLM.

    Only the LSTM variant is supported, because the per-layer copy relies
    on ``transfer_lstm``.
    """
    assert isinstance(ch_rnnlm, lm_chainer.RNNLM)
    assert isinstance(th_rnnlm, lm_pytorch.RNNLM)
    # embedding table transfers directly
    th_rnnlm.embed.weight.data = torch.from_numpy(ch_rnnlm.embed.W.data)
    assert th_rnnlm.typ == "lstm"  # gru etc. are not handled here
    for layer in range(ch_rnnlm.n_layers):
        transfer_lstm(ch_rnnlm.rnn[layer], th_rnnlm.rnn[layer])
    # output projection transfers directly
    th_rnnlm.lo.weight.data = torch.from_numpy(ch_rnnlm.lo.W.data)
    th_rnnlm.lo.bias.data = torch.from_numpy(ch_rnnlm.lo.b.data)
def test_lm():
    """Check that the chainer and pytorch RNNLM implementations agree.

    Builds one model per backend, copies the chainer parameters into the
    pytorch model, feeds both the same random batch, and compares hidden
    states and output logits element-wise.
    """
    n_vocab = 3
    n_layers = 2
    n_units = 2
    batchsize = 5
    for typ in ["lstm"]:  # TODO(anyone) gru
        rnnlm_ch = lm_chainer.ClassifierWithState(
            lm_chainer.RNNLM(n_vocab, n_layers, n_units, typ=typ)
        )
        rnnlm_th = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(n_vocab, n_layers, n_units, typ=typ)
        )
        transfer_lm(rnnlm_ch.predictor, rnnlm_th.predictor)
        # test prediction equality on one random token batch
        x = torch.from_numpy(numpy.random.randint(n_vocab, size=batchsize)).long()
        with torch.no_grad(), chainer.no_backprop_mode(), chainer.using_config(
            "train", False
        ):
            rnnlm_th.predictor.eval()
            state_th, y_th = rnnlm_th.predictor(None, x.long())
            state_ch, y_ch = rnnlm_ch.predictor(None, x.data.numpy())
            # compare every per-layer hidden state of both backends
            for k in state_ch.keys():
                for n in range(len(state_th[k])):
                    print(k, n)
                    print(state_th[k][n].data.numpy())
                    print(state_ch[k][n].data)
                    numpy.testing.assert_allclose(
                        state_th[k][n].data.numpy(), state_ch[k][n].data, 1e-5
                    )
            numpy.testing.assert_allclose(y_th.data.numpy(), y_ch.data, 1e-5)
# Cartesian product of LM architecture configs x device x dtype: every LM
# implementation must be trainable and usable as a beam-search scorer under
# each combination.
@pytest.mark.parametrize(
    "lm_name, lm_args, device, dtype",
    [
        (nn, args, device, dtype)
        for nn, args in (
            (
                "default",
                dict(
                    type="lstm", layer=2, unit=2, dropout_rate=0.5, emb_dropout_rate=0.3
                ),
            ),
            (
                "default",
                dict(type="lstm", layer=2, unit=2, dropout_rate=0.5, tie_weights=True),
            ),
            ("default", dict(type="lstm", layer=2, unit=2, dropout_rate=0.5)),
            ("default", dict(type="gru", layer=2, unit=2, dropout_rate=0.5)),
            ("seq_rnn", dict(type="lstm", layer=2, unit=2, dropout_rate=0.5)),
            ("seq_rnn", dict(type="gru", layer=2, unit=2, dropout_rate=0.5)),
            (
                "transformer",
                dict(
                    layer=2,
                    unit=2,
                    att_unit=2,
                    head=2,
                    dropout_rate=0.5,
                    embed_unit=2,
                    tie_weights=True,
                ),
            ),
            (
                "transformer",
                dict(
                    layer=2,
                    unit=2,
                    att_unit=2,
                    head=2,
                    dropout_rate=0.5,
                    embed_unit=3,
                    emb_dropout_rate=0.3,
                ),
            ),
            (
                "transformer",
                dict(
                    layer=2,
                    unit=2,
                    att_unit=2,
                    head=2,
                    dropout_rate=0.5,
                    embed_unit=3,
                    att_dropout_rate=0.3,
                ),
            ),
            (
                "transformer",
                dict(
                    layer=2, unit=2, att_unit=2, head=2, dropout_rate=0.5, embed_unit=3
                ),
            ),
            (
                "transformer",
                dict(
                    layer=2,
                    unit=2,
                    att_unit=2,
                    head=2,
                    dropout_rate=0.5,
                    pos_enc="none",
                    embed_unit=3,
                ),
            ),
        )
        for device in ("cpu", "cuda")
        for dtype in ("float16", "float32", "float64")
    ],
)
def test_lm_trainable_and_decodable(lm_name, lm_args, device, dtype):
    """Run one training step and one beam-search decode with the given LM."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    if device == "cpu" and dtype == "float16":
        pytest.skip("cpu float16 implementation is not available in pytorch yet")
    dtype = getattr(torch, dtype)
    model, x, ilens, y, data, train_args = prepare("rnn", rnn_args)
    char_list = train_args.char_list
    n_vocab = len(char_list)
    lm = dynamic_import_lm(lm_name, backend="pytorch").build(n_vocab, **lm_args)
    lm.to(device=device, dtype=dtype)
    # test trainable: one forward/backward pass must produce a gradient for
    # every parameter
    a = torch.randint(1, n_vocab, (3, 2), device=device)
    b = torch.randint(1, n_vocab, (3, 2), device=device)
    loss, logp, count = lm(a, b)
    loss.backward()
    for p in lm.parameters():
        assert p.grad is not None
    # test decodable: plug the LM into beam search as an additional scorer
    model.to(device=device, dtype=dtype).eval()
    lm.eval()
    scorers = model.scorers()
    scorers["lm"] = lm
    scorers["length_bonus"] = LengthBonus(len(char_list))
    weights = dict(decoder=1.0, lm=1.0, length_bonus=1.0)
    with torch.no_grad():
        feat = x[0, : ilens[0]].to(device=device, dtype=dtype)
        enc = model.encode(feat)
        beam_size = 3
        result = beam_search(
            x=enc,
            sos=model.sos,
            eos=model.eos,
            beam_size=beam_size,
            vocab_size=len(train_args.char_list),
            weights=weights,
            scorers=scorers,
            token_list=train_args.char_list,
        )
    assert len(result) >= beam_size
| 6,647 | 33.268041 | 88 | py |
espnet | espnet-master/test/test_e2e_tts_fastspeech.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import json
import os
import shutil
import tempfile
from argparse import Namespace
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_tts_fastspeech import FeedForwardTransformer
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import Tacotron2
from espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer
from espnet.nets.pytorch_backend.fastspeech.duration_calculator import ( # noqa: H301
DurationCalculator,
)
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import pad_list
def prepare_inputs(
    idim, odim, ilens, olens, spk_embed_dim=None, device=torch.device("cpu")
):
    """Create a random padded (text, spectrogram) batch for TTS tests.

    Returns a dict with padded token ids ``xs``, target features ``ys``,
    stop-token ``labels`` (1 from the last valid frame onward, 0 before),
    and the corresponding lengths; a random speaker-embedding tensor
    ``spembs`` is added when ``spk_embed_dim`` is given.
    """
    token_seqs = [np.random.randint(0, idim, lg) for lg in ilens]
    feat_seqs = [np.random.randn(lg, odim) for lg in olens]
    ilens = torch.LongTensor(ilens).to(device)
    olens = torch.LongTensor(olens).to(device)
    xs = pad_list([torch.from_numpy(seq).long() for seq in token_seqs], 0).to(device)
    ys = pad_list([torch.from_numpy(seq).float() for seq in feat_seqs], 0).to(device)
    # stop-token targets: zero everywhere except from the final valid frame on
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for row, length in enumerate(olens):
        labels[row, length - 1 :] = 1
    batch = {
        "xs": xs,
        "ilens": ilens,
        "ys": ys,
        "labels": labels,
        "olens": olens,
    }
    if spk_embed_dim is not None:
        spembs = np.random.randn(len(ilens), spk_embed_dim)
        batch["spembs"] = torch.FloatTensor(spembs).to(device)
    return batch
def make_taco2_args(**kwargs):
    """Return a dict of default Tacotron2 hyperparameters.

    Any keyword argument overrides (or extends) the defaults.
    """
    cfg = {
        "model_module": "espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2",
        "use_speaker_embedding": False,
        "spk_embed_dim": None,
        "embed_dim": 32,
        "elayers": 1,
        "eunits": 32,
        "econv_layers": 2,
        "econv_filts": 5,
        "econv_chans": 32,
        "dlayers": 2,
        "dunits": 32,
        "prenet_layers": 2,
        "prenet_units": 32,
        "postnet_layers": 2,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "output_activation": None,
        "atype": "location",
        "adim": 32,
        "aconv_chans": 16,
        "aconv_filts": 5,
        "cumulate_att_w": True,
        "use_batch_norm": True,
        "use_concate": True,
        "use_residual": False,
        "dropout_rate": 0.5,
        "zoneout_rate": 0.1,
        "reduction_factor": 1,
        "threshold": 0.5,
        "maxlenratio": 5.0,
        "minlenratio": 0.0,
        "use_cbhg": False,
        "spc_dim": None,
        "cbhg_conv_bank_layers": 4,
        "cbhg_conv_bank_chans": 32,
        "cbhg_conv_proj_filts": 3,
        "cbhg_conv_proj_chans": 32,
        "cbhg_highway_layers": 4,
        "cbhg_highway_units": 32,
        "cbhg_gru_units": 32,
        "use_masking": True,
        "use_weighted_masking": False,
        "bce_pos_weight": 1.0,
        "use_guided_attn_loss": False,
        "guided_attn_loss_sigma": 0.4,
        "guided_attn_loss_lambda": 1.0,
    }
    cfg.update(kwargs)
    return cfg
def make_transformer_args(**kwargs):
    """Return a dict of default TTS-Transformer hyperparameters.

    Any keyword argument overrides (or extends) the defaults.
    """
    cfg = {
        "model_module": "espnet.nets.pytorch_backend.e2e_tts_transformer:Transformer",
        "embed_dim": 0,
        "spk_embed_dim": None,
        "eprenet_conv_layers": 0,
        "eprenet_conv_filts": 0,
        "eprenet_conv_chans": 0,
        "dprenet_layers": 2,
        "dprenet_units": 64,
        "adim": 32,
        "aheads": 4,
        "elayers": 2,
        "eunits": 32,
        "dlayers": 2,
        "dunits": 32,
        "positionwise_layer_type": "linear",
        "positionwise_conv_kernel_size": 1,
        "postnet_layers": 2,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "eprenet_dropout_rate": 0.1,
        "dprenet_dropout_rate": 0.5,
        "postnet_dropout_rate": 0.1,
        "transformer_enc_dropout_rate": 0.1,
        "transformer_enc_positional_dropout_rate": 0.1,
        "transformer_enc_attn_dropout_rate": 0.0,
        "transformer_dec_dropout_rate": 0.1,
        "transformer_dec_positional_dropout_rate": 0.1,
        "transformer_dec_attn_dropout_rate": 0.3,
        "transformer_enc_dec_attn_dropout_rate": 0.0,
        "spk_embed_integration_type": "add",
        "use_masking": True,
        "use_weighted_masking": False,
        "bce_pos_weight": 1.0,
        "use_batch_norm": True,
        "use_scaled_pos_enc": True,
        "encoder_normalize_before": True,
        "decoder_normalize_before": True,
        "encoder_concat_after": False,
        "decoder_concat_after": False,
        "transformer_init": "pytorch",
        "initial_encoder_alpha": 1.0,
        "initial_decoder_alpha": 1.0,
        "reduction_factor": 1,
        "loss_type": "L1",
        "use_guided_attn_loss": False,
        "num_heads_applied_guided_attn": 2,
        "num_layers_applied_guided_attn": 2,
        "guided_attn_loss_sigma": 0.4,
        "modules_applied_guided_attn": ["encoder", "decoder", "encoder-decoder"],
    }
    cfg.update(kwargs)
    return cfg
def make_feedforward_transformer_args(**kwargs):
    """Return a dict of default FastSpeech (feed-forward Transformer) args.

    Any keyword argument overrides (or extends) the defaults.
    """
    cfg = {
        "spk_embed_dim": None,
        "adim": 32,
        "aheads": 4,
        "elayers": 2,
        "eunits": 32,
        "dlayers": 2,
        "dunits": 32,
        "duration_predictor_layers": 2,
        "duration_predictor_chans": 64,
        "duration_predictor_kernel_size": 3,
        "duration_predictor_dropout_rate": 0.1,
        "positionwise_layer_type": "linear",
        "positionwise_conv_kernel_size": 1,
        "postnet_layers": 0,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "transformer_enc_dropout_rate": 0.1,
        "transformer_enc_positional_dropout_rate": 0.1,
        "transformer_enc_attn_dropout_rate": 0.0,
        "transformer_dec_dropout_rate": 0.1,
        "transformer_dec_positional_dropout_rate": 0.1,
        "transformer_dec_attn_dropout_rate": 0.3,
        "transformer_enc_dec_attn_dropout_rate": 0.0,
        "spk_embed_integration_type": "add",
        "use_masking": True,
        "use_weighted_masking": False,
        "use_scaled_pos_enc": True,
        "encoder_normalize_before": True,
        "decoder_normalize_before": True,
        "encoder_concat_after": False,
        "decoder_concat_after": False,
        "transformer_init": "pytorch",
        "initial_encoder_alpha": 1.0,
        "initial_decoder_alpha": 1.0,
        "transfer_encoder_from_teacher": False,
        "transferred_encoder_module": "all",
        "reduction_factor": 1,
        "teacher_model": None,
    }
    cfg.update(kwargs)
    return cfg
# Every FastSpeech configuration must train one step and decode against
# both kinds of teacher (autoregressive Transformer and Tacotron2).
@pytest.mark.parametrize(
    "teacher_type, model_dict",
    [
        ("transformer", {}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
        ("transformer", {"use_masking": False}),
        ("transformer", {"use_scaled_pos_enc": False}),
        (
            "transformer",
            {"positionwise_layer_type": "conv1d", "positionwise_conv_kernel_size": 3},
        ),
        (
            "transformer",
            {
                "positionwise_layer_type": "conv1d-linear",
                "positionwise_conv_kernel_size": 3,
            },
        ),
        ("transformer", {"encoder_normalize_before": False}),
        ("transformer", {"decoder_normalize_before": False}),
        (
            "transformer",
            {"encoder_normalize_before": False, "decoder_normalize_before": False},
        ),
        ("transformer", {"encoder_concat_after": True}),
        ("transformer", {"decoder_concat_after": True}),
        ("transformer", {"encoder_concat_after": True, "decoder_concat_after": True}),
        ("transformer", {"transfer_encoder_from_teacher": True}),
        (
            "transformer",
            {
                "transfer_encoder_from_teacher": True,
                "transferred_encoder_module": "embed",
            },
        ),
        ("transformer", {"use_masking": False}),
        ("transformer", {"use_masking": False, "use_weighted_masking": True}),
        ("transformer", {"postnet_layers": 2}),
        ("transformer", {"reduction_factor": 2}),
        ("transformer", {"reduction_factor": 3}),
        ("transformer", {"reduction_factor": 4}),
        ("transformer", {"reduction_factor": 5}),
        ("tacotron2", {}),
        ("tacotron2", {"spk_embed_dim": 16}),
        ("tacotron2", {"reduction_factor": 2}),
        ("tacotron2", {"reduction_factor": 3}),
        ("tacotron2", {"reduction_factor": 4}),
        ("tacotron2", {"reduction_factor": 5}),
    ],
)
def test_fastspeech_trainable_and_decodable(teacher_type, model_dict):
    """FastSpeech must run one optimizer step and inference per teacher."""
    # make args
    idim, odim = 10, 25
    model_args = make_feedforward_transformer_args(**model_dict)
    # setup batch
    ilens = [10, 5]
    olens = [20, 15]
    batch = prepare_inputs(idim, odim, ilens, olens, model_args["spk_embed_dim"])
    # define teacher model and save it to disk so the student can load it
    if teacher_type == "transformer":
        teacher_model_args = make_transformer_args(**model_dict)
        teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
    elif teacher_type == "tacotron2":
        teacher_model_args = make_taco2_args(**model_dict)
        teacher_model = Tacotron2(idim, odim, Namespace(**teacher_model_args))
    else:
        raise ValueError()
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, teacher_model_args),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    # define model (student distilled from the saved teacher checkpoint)
    model_args["teacher_model"] = tmpdir + "/model.dummy.best"
    model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        if model_args["spk_embed_dim"] is None:
            spemb = None
        else:
            spemb = batch["spembs"][0]
        model.inference(batch["xs"][0][: batch["ilens"][0]], None, spemb=spemb)
        model.calculate_all_attentions(**batch)
    # remove tmpdir
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
# Single-GPU variant of the trainable/decodable check above.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
    "teacher_type, model_dict",
    [
        ("transformer", {}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
        ("transformer", {"use_masking": False}),
        ("transformer", {"use_masking": False, "use_weighted_masking": True}),
        ("transformer", {"use_scaled_pos_enc": False}),
        ("transformer", {"encoder_normalize_before": False}),
        ("transformer", {"decoder_normalize_before": False}),
        (
            "transformer",
            {"encoder_normalize_before": False, "decoder_normalize_before": False},
        ),
        ("transformer", {"encoder_concat_after": True}),
        ("transformer", {"decoder_concat_after": True}),
        ("transformer", {"encoder_concat_after": True, "decoder_concat_after": True}),
        ("transformer", {"transfer_encoder_from_teacher": True}),
        (
            "transformer",
            {
                "transfer_encoder_from_teacher": True,
                "transferred_encoder_module": "embed",
            },
        ),
        ("tacotron2", {}),
        ("tacotron2", {"spk_embed_dim": 16}),
    ],
)
def test_fastspeech_gpu_trainable_and_decodable(teacher_type, model_dict):
    """FastSpeech on CUDA must run one optimizer step and inference."""
    # make args
    idim, odim = 10, 25
    model_args = make_feedforward_transformer_args(**model_dict)
    # setup batch directly on the GPU
    ilens = [10, 5]
    olens = [20, 15]
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device
    )
    # define teacher model and save it so the student can load it
    if teacher_type == "transformer":
        teacher_model_args = make_transformer_args(**model_dict)
        teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
    elif teacher_type == "tacotron2":
        teacher_model_args = make_taco2_args(**model_dict)
        teacher_model = Tacotron2(idim, odim, Namespace(**teacher_model_args))
    else:
        raise ValueError()
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, teacher_model_args),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    # define model
    model_args["teacher_model"] = tmpdir + "/model.dummy.best"
    model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        if model_args["spk_embed_dim"] is None:
            spemb = None
        else:
            spemb = batch["spembs"][0]
        model.inference(batch["xs"][0][: batch["ilens"][0]], None, spemb=spemb)
        model.calculate_all_attentions(**batch)
    # remove tmpdir
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
# Multi-GPU variant: only checks that one training step completes under
# DataParallel (no decoding).
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "teacher_type, model_dict",
    [
        ("transformer", {}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
        ("transformer", {"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
        ("transformer", {"use_masking": False}),
        ("transformer", {"use_masking": False, "use_weighted_masking": True}),
        ("transformer", {"use_scaled_pos_enc": False}),
        ("transformer", {"encoder_normalize_before": False}),
        ("transformer", {"decoder_normalize_before": False}),
        (
            "transformer",
            {"encoder_normalize_before": False, "decoder_normalize_before": False},
        ),
        ("transformer", {"encoder_concat_after": True}),
        ("transformer", {"decoder_concat_after": True}),
        ("transformer", {"encoder_concat_after": True, "decoder_concat_after": True}),
        ("transformer", {"transfer_encoder_from_teacher": True}),
        (
            "transformer",
            {
                "transfer_encoder_from_teacher": True,
                "transferred_encoder_module": "embed",
            },
        ),
        ("tacotron2", {}),
        ("tacotron2", {"spk_embed_dim": 16}),
    ],
)
def test_fastspeech_multi_gpu_trainable(teacher_type, model_dict):
    """FastSpeech wrapped in DataParallel must complete one training step."""
    # make args
    idim, odim = 10, 25
    model_args = make_feedforward_transformer_args(**model_dict)
    # setup batch
    ilens = [10, 5]
    olens = [20, 15]
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device
    )
    # define teacher model and save it so the student can load it
    if teacher_type == "transformer":
        teacher_model_args = make_transformer_args(**model_dict)
        teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
    elif teacher_type == "tacotron2":
        teacher_model_args = make_taco2_args(**model_dict)
        teacher_model = Tacotron2(idim, odim, Namespace(**teacher_model_args))
    else:
        raise ValueError()
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, teacher_model_args),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    # define model replicated over two GPUs
    ngpu = 2
    device_ids = list(range(ngpu))
    model_args["teacher_model"] = tmpdir + "/model.dummy.best"
    model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
    model = torch.nn.DataParallel(model, device_ids)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # remove tmpdir
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
# Verify the teacher-to-student encoder transfer for every relevant
# encoder/decoder configuration.
@pytest.mark.parametrize(
    "model_dict",
    [
        ({"transfer_encoder_from_teacher": True}),
        (
            {
                "transfer_encoder_from_teacher": True,
                "transferred_encoder_module": "embed",
            }
        ),
        ({"transfer_encoder_from_teacher": True, "use_scaled_pos_enc": False}),
        ({"transfer_encoder_from_teacher": True, "encoder_normalize_before": False}),
        ({"transfer_encoder_from_teacher": True, "decoder_normalize_before": False}),
        (
            {
                "transfer_encoder_from_teacher": True,
                "encoder_normalize_before": False,
                "decoder_normalize_before": False,
            }
        ),
        ({"transfer_encoder_from_teacher": True, "encoder_concat_after": True}),
        ({"transfer_encoder_from_teacher": True, "decoder_concat_after": True}),
        (
            {
                "transfer_encoder_from_teacher": True,
                "encoder_concat_after": True,
                "decoder_concat_after": True,
            }
        ),
    ],
)
def test_initialization(model_dict):
    """Student encoder parameters must be copied from the saved teacher."""
    # make args
    idim, odim = 10, 25
    teacher_model_args = make_transformer_args(**model_dict)
    model_args = make_feedforward_transformer_args(**model_dict)
    # define teacher model and save it
    teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, teacher_model_args),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    # define model
    model_args["teacher_model"] = tmpdir + "/model.dummy.best"
    model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
    # check initialization: depending on the transfer mode, either the whole
    # encoder or just the embedding must equal the teacher's exactly
    if model_args["transferred_encoder_module"] == "all":
        for p1, p2 in zip(
            model.encoder.parameters(), model.teacher.encoder.parameters()
        ):
            np.testing.assert_array_equal(p1.data.cpu().numpy(), p2.data.cpu().numpy())
    else:
        np.testing.assert_array_equal(
            model.encoder.embed[0].weight.data.cpu().numpy(),
            model.teacher.encoder.embed[0].weight.data.cpu().numpy(),
        )
    # remove tmpdir
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
def test_length_regulator():
    """LengthRegulator must expand inputs to the summed duration length."""
    feat_dim = 5
    lengths = [10, 5, 3]
    xs = pad_list([torch.randn((lg, feat_dim)) for lg in lengths], 0.0)
    ds = pad_list([torch.arange(lg) for lg in lengths], 0)
    regulator = LengthRegulator()
    # non-zero durations
    expanded = regulator(xs, ds)
    assert int(expanded.shape[1]) == int(ds.sum(dim=-1).max())
    # durations containing a zero entry must be handled as well
    ds[:, 2] = 0
    expanded = regulator(xs, ds)
    assert int(expanded.shape[1]) == int(ds.sum(dim=-1).max())
def test_duration_calculator():
    """Durations extracted from the teacher must sum to the output lengths."""
    idim, odim = 10, 25
    # define duration calculator around a freshly built teacher
    teacher_model_args = make_transformer_args()
    teacher = Transformer(idim, odim, Namespace(**teacher_model_args))
    duration_calculator = DurationCalculator(teacher)
    # setup batch
    batch = prepare_inputs(idim, odim, [10, 5, 3], [20, 15, 10])
    # calculate durations and check they account for every output frame
    ds = duration_calculator(batch["xs"], batch["ilens"], batch["ys"], batch["olens"])
    np.testing.assert_array_equal(
        ds.sum(dim=-1).cpu().numpy(), batch["olens"].cpu().numpy()
    )
@pytest.mark.parametrize(
    "alpha",
    [(1.0), (0.5), (2.0)],
)
def test_fastspeech_inference(alpha):
    """FastSpeech inference must run for several speed-control factors."""
    idim, odim = 10, 25
    model_args = make_feedforward_transformer_args()
    # setup batch
    batch = prepare_inputs(idim, odim, [10, 5], [20, 15], model_args["spk_embed_dim"])
    # define model
    model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
    # test inference with the requested duration-scaling factor
    inference_args = Namespace(fastspeech_alpha=alpha)
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]],
            inference_args,
            spemb=spemb,
        )
| 21,140 | 32.398104 | 87 | py |
espnet | espnet-master/test/test_distributed_launch.py | # coding: utf-8
#
# SPDX-FileCopyrightText:
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import itertools
import os
import sys
from multiprocessing import Queue
import pytest
from espnet.distributed.pytorch_backend.launch import WorkerError, launch
@pytest.mark.parametrize("nprocs", [1, 2])
@pytest.mark.execution_timeout(4.0)
def test_simple_function_ok(nprocs):
    """launch() must run a trivial worker in every process without error."""

    def noop_worker(args):
        # the worker succeeds immediately
        return 0

    launch(noop_worker, None, nprocs)
@pytest.mark.parametrize("nprocs", [1, 2])
@pytest.mark.execution_timeout(4.0)
def test_simple_function_ok_with_args(nprocs):
    """Worker processes must receive the args namespace unchanged."""
    args = argparse.Namespace(**{f"param{v}": v for v in range(10)})

    def check_args_worker(args):
        # every paramN attribute must survive the trip into the worker
        args_dict = vars(args)
        for v in range(10):
            key = f"param{v}"
            assert key in args_dict
            assert args_dict[key] == v
        return 0

    launch(check_args_worker, args, nprocs)
@pytest.mark.parametrize("nprocs", [1, 2])
@pytest.mark.execution_timeout(4.0)
def test_simple_function_ok_with_right_envvar(nprocs):
    """Each worker must see correct WORLD_SIZE/RANK/LOCAL_RANK variables."""
    queue = Queue()

    def simple_func(queue):
        # report the distributed environment observed inside the worker
        worldsize = os.environ.get("WORLD_SIZE", None)
        rank = os.environ.get("RANK", None)
        localrank = os.environ.get("LOCAL_RANK", None)
        assert worldsize is not None
        assert rank is not None
        assert localrank is not None
        queue.put(
            {
                "worldsize": int(worldsize),
                "rank": int(rank),
                "localrank": int(localrank),
            }
        )
        return 0

    launch(simple_func, queue, nprocs)
    results = [queue.get() for _ in range(nprocs)]
    # every rank in [0, nprocs) must be reported exactly once
    pids = set(range(nprocs))
    for r in results:
        worldsize = r["worldsize"]
        rank = r["rank"]
        localrank = r["localrank"]
        assert worldsize == nprocs
        assert rank in pids
        assert localrank in pids
        pids.remove(rank)
    assert len(pids) == 0
    assert queue.empty()
@pytest.mark.parametrize(
    "nprocs, exitcode",
    [
        (1, 1),
        (2, 1),
        (1, 2),
        (2, 2),
    ],
)
@pytest.mark.execution_timeout(10.0)
def test_worker_exits_nonzero_code_ng(nprocs, exitcode):
    """launch() must surface a worker's non-zero exit code as WorkerError."""
    # try every activation pattern where either exactly one worker or all
    # workers exit with the non-zero code
    for combination in itertools.product(range(2), repeat=nprocs):
        n_activated = sum(combination)
        if n_activated != 1 and n_activated != nprocs:
            # skip.
            continue
        if n_activated == 1:
            exit_idx = combination.index(1)
        else:
            exit_idx = None
        args = None

        def simple_func(args):
            # NOP
            rank = os.environ.get("RANK", None)
            assert rank is not None
            rank = int(rank)
            if n_activated == 1 and rank != exit_idx:
                return
            sys.exit(exitcode)

        with pytest.raises(WorkerError) as excinfo:
            launch(simple_func, args, nprocs)
        assert excinfo.value.exitcode == exitcode
        if n_activated == 1:
            # the failing worker id must be identified correctly
            assert excinfo.value.worker_id == exit_idx
@pytest.mark.parametrize("nprocs", [1, 2])
@pytest.mark.execution_timeout(10.0)
def test_worker_raises_exception_ng(nprocs):
    """An uncaught worker exception must become WorkerError with exitcode 1."""
    # try every activation pattern where either exactly one worker or all
    # workers raise
    for combination in itertools.product(range(2), repeat=nprocs):
        n_activated = sum(combination)
        if n_activated != 1 and n_activated != nprocs:
            # skip.
            continue
        if n_activated == 1:
            exit_idx = combination.index(1)
        else:
            exit_idx = None
        args = None

        def simple_func(args):
            # NOP
            rank = os.environ.get("RANK", None)
            assert rank is not None
            rank = int(rank)
            if n_activated == 1 and rank != exit_idx:
                return
            raise RuntimeError("error")

        with pytest.raises(WorkerError) as excinfo:
            launch(simple_func, args, nprocs)
        # an uncaught exception terminates the worker with exit code 1
        assert excinfo.value.exitcode == 1
        if n_activated == 1:
            assert excinfo.value.worker_id == exit_idx
| 4,110 | 26.225166 | 76 | py |
espnet | espnet-master/test/test_e2e_st_transformer.py | # coding: utf-8
# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_st_transformer import E2E
from espnet.nets.pytorch_backend.transformer import plot
def make_arg(**kwargs):
    """Return a default E2E-ST transformer config, overridden by ``kwargs``.

    The result is an ``argparse.Namespace`` as expected by the E2E model.
    """
    cfg = {
        "adim": 2,
        "aheads": 1,
        "dropout_rate": 0.0,
        "transformer_attn_dropout_rate": None,
        "elayers": 1,
        "eunits": 2,
        "dlayers": 1,
        "dunits": 2,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "transformer_init": "pytorch",
        "transformer_input_layer": "conv2d",
        "transformer_length_normalized_loss": True,
        "report_bleu": False,
        "report_cer": False,
        "report_wer": False,
        "mtlalpha": 0.0,  # for CTC-ASR
        "lsm_weight": 0.001,
        "char_list": ["<blank>", "a", "e", "i", "o", "u"],
        "ctc_type": "builtin",
        "asr_weight": 0.0,
        "mt_weight": 0.0,
    }
    cfg.update(kwargs)
    return argparse.Namespace(**cfg)
def prepare(args):
    """Build an E2E-ST model plus a matching random padded batch.

    Returns ``(model, x, ilens, y_tgt, y_src, data, uttid_list)``; the
    padded region of ``x`` is filled with -1 and padded label positions
    with the model's ignore index.
    """
    idim, odim = 10, 5
    model = E2E(idim, odim, args)
    batchsize = 2
    ilens = [10, 9]
    olens = [3, 4]
    n_token = odim - 1
    x = torch.randn(batchsize, max(ilens), idim)
    y_src = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    y_tgt = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    # mask out the padded tail of every utterance
    for b in range(batchsize):
        x[b, ilens[b] :] = -1
        y_tgt[b, olens[b] :] = model.ignore_id
        y_src[b, olens[b] :] = model.ignore_id
    data = {}
    uttid_list = []
    for b in range(batchsize):
        uttid = "utt%d" % b
        data[uttid] = {
            "input": [{"shape": [ilens[b], idim]}],
            "output": [{"shape": [olens[b]]}],
        }
        uttid_list.append(uttid)
    return model, x, torch.tensor(ilens), y_tgt, y_src, data, uttid_list
# Lightweight/dynamic-convolution configurations used as extra parametrize
# cases below; the keys select the self-attention layer type for both the
# encoder and decoder.
ldconv_lconv_args = dict(
    transformer_decoder_selfattn_layer_type="lightconv",
    transformer_encoder_selfattn_layer_type="lightconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_dconv_args = dict(
    transformer_decoder_selfattn_layer_type="dynamicconv",
    transformer_encoder_selfattn_layer_type="dynamicconv",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_lconv2d_args = dict(
    transformer_decoder_selfattn_layer_type="lightconv2d",
    transformer_encoder_selfattn_layer_type="lightconv2d",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
ldconv_dconv2d_args = dict(
    transformer_decoder_selfattn_layer_type="dynamicconv2d",
    transformer_encoder_selfattn_layer_type="dynamicconv2d",
    wshare=2,
    ldconv_encoder_kernel_length="5_7_11",
    ldconv_decoder_kernel_length="3_7",
    ldconv_usebias=False,
)
def _savefn(*args, **kwargs):
return
# Every multi-task weighting of the ST transformer must train, plot
# attentions / CTC posteriors, and translate.
@pytest.mark.parametrize(
    "model_dict",
    [
        {"asr_weight": 0.0, "mt_weight": 0.0},  # pure E2E-ST
        ldconv_lconv_args,
        ldconv_dconv_args,
        ldconv_lconv2d_args,
        ldconv_dconv2d_args,
        # MTL w/ attention ASR
        {"asr_weight": 0.1, "mtlalpha": 0.0, "mt_weight": 0.0},
        # MTL w/ attention ASR + MT
        {"asr_weight": 0.1, "mtlalpha": 0.0, "mt_weight": 0.1},
        # MTL w/ CTC ASR
        {"asr_weight": 0.1, "mtlalpha": 1.0, "mt_weight": 0.0},
        {"asr_weight": 0.1, "mtlalpha": 1.0, "ctc_type": "builtin"},
        {"asr_weight": 0.1, "mtlalpha": 1.0, "report_cer": True},
        {"asr_weight": 0.1, "mtlalpha": 1.0, "report_wer": True},
        {"asr_weight": 0.1, "mtlalpha": 1.0, "report_cer": True, "report_wer": True},
        # MTL w/ CTC ASR + MT
        {"asr_weight": 0.1, "mtlalpha": 1.0, "mt_weight": 0.1},
        # MTL w/ attention ASR + CTC ASR
        {"asr_weight": 0.1, "mtlalpha": 0.5, "mt_weight": 0.0},
        # MTL w/ attention ASR + CTC ASR + MT
        {"asr_weight": 0.1, "mtlalpha": 0.5, "mt_weight": 0.1},
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """Train one step, plot attentions/CTC, and beam-search translate."""
    args = make_arg(**model_dict)
    model, x, ilens, y_tgt, y_src, data, uttid_list = prepare(args)
    # test beam search
    trans_args = argparse.Namespace(
        beam_size=1,
        penalty=0.0,
        ctc_weight=0.0,
        maxlenratio=1.0,
        lm_weight=0,
        minlenratio=0,
        nbest=1,
        tgt_lang=False,
    )
    # test trainable
    optim = torch.optim.Adam(model.parameters(), 0.01)
    loss = model(x, ilens, y_tgt, y_src)
    optim.zero_grad()
    loss.backward()
    optim.step()
    # test attention plot
    attn_dict = model.calculate_all_attentions(
        x[0:1], ilens[0:1], y_tgt[0:1], y_src[0:1]
    )
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
    # test CTC plot: posteriors only exist when the ASR CTC branch is active
    ctc_probs = model.calculate_all_ctc_probs(
        x[0:1], ilens[0:1], y_tgt[0:1], y_src[0:1]
    )
    if args.asr_weight > 0 and args.mtlalpha > 0:
        print(ctc_probs.shape)
    else:
        assert ctc_probs is None
    # test decodable
    with torch.no_grad():
        nbest = model.translate(x[0, : ilens[0]].numpy(), trans_args, args.char_list)
        print(y_tgt[0])
        print(nbest[0]["yseq"][1:-1])
| 5,352 | 28.738889 | 85 | py |
espnet | espnet-master/test/test_torch.py | # Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
def test_pad_list():
    """pad_list must right-pad shorter sequences with the fill value."""
    seqs = [torch.LongTensor(s) for s in ([1, 2, 3], [1, 2], [1, 2, 3, 4])]
    padded = pad_list(seqs, -1)
    expected = [[1, 2, 3, -1], [1, 2, -1, -1], [1, 2, 3, 4]]
    assert padded.data.tolist() == expected
def test_bmm_attention():
    """Batched-matmul attention must equal the naive weighted sum."""
    import numpy

    batch, frames, hidden = 3, 2, 5
    enc_h = torch.randn(batch, frames, hidden)
    w = torch.randn(batch, frames)
    naive = torch.sum(enc_h * w.view(batch, frames, 1), dim=1)
    # (b, 1, t) x (b, t, h) -> (b, 1, h)
    fast = torch.matmul(w.unsqueeze(1), enc_h).squeeze(1)
    numpy.testing.assert_allclose(naive.numpy(), fast.numpy(), 1e-6, 1e-6)
def test_eye_bool_dtype():
    """torch.eye must honor an explicit bool dtype."""
    eye = torch.eye(2, dtype=torch.bool)
    assert eye.dtype == torch.bool
| 852 | 26.516129 | 74 | py |
espnet | espnet-master/test/test_initialization.py | # Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import os
import random
import numpy
import torch
# Shared E2E-ASR hyperparameter namespace used by both initialization tests
# below (etype/atype select a VGG-BLSTMP encoder with location attention).
args = argparse.Namespace(
    elayers=4,
    subsample="1_2_2_1_1",
    etype="vggblstmp",
    eunits=320,
    eprojs=320,
    dtype="lstm",
    dlayers=2,
    dunits=300,
    atype="location",
    aconv_chans=10,
    aconv_filts=100,
    mtlalpha=0.5,
    lsm_type="",
    lsm_weight=0.0,
    sampling_probability=0.0,
    adim=320,
    dropout_rate=0.0,
    dropout_rate_decoder=0.0,
    beam_size=3,
    penalty=0.5,
    maxlenratio=1.0,
    minlenratio=0.0,
    ctc_weight=0.2,
    verbose=True,
    char_list=["あ", "い", "う", "え", "お"],
    outdir=None,
    seed=1,
    ctc_type="builtin",
    report_cer=False,
    report_wer=False,
    sym_space="<space>",
    sym_blank="<blank>",
    context_residual=False,
    use_frontend=False,
    replace_sos=False,
    tgt_lang=False,
)
def test_lecun_init_torch():
    """Pytorch E2E-ASR parameters must follow LeCun-style initialization.

    All RNG sources are seeded before the model is built; the statistical
    checks below depend on the parameter draws happening in this order.
    """
    nseed = args.seed
    random.seed(nseed)
    torch.manual_seed(nseed)
    numpy.random.seed(nseed)
    os.environ["CHAINER_SEED"] = str(nseed)
    import espnet.nets.pytorch_backend.e2e_asr as m

    model = m.E2E(40, 5, args)
    # CTC output layer: zero bias, weight variance ~ 1/fan_in
    b = model.ctc.ctc_lo.bias.data.numpy()
    assert numpy.all(b == 0.0)
    w = model.ctc.ctc_lo.weight.data.numpy()
    numpy.testing.assert_allclose(w.mean(), 0.0, 1e-2, 1e-2)
    numpy.testing.assert_allclose(w.var(), 1.0 / w.shape[1], 1e-2, 1e-2)
    for name, p in model.named_parameters():
        print(name)
        data = p.data.numpy()
        if "embed" in name:
            # embeddings: zero mean, unit variance
            numpy.testing.assert_allclose(data.mean(), 0.0, 5e-2, 5e-2)
            numpy.testing.assert_allclose(data.var(), 1.0, 5e-2, 5e-2)
        elif "dec.decoder.0.bias_ih" in name:
            # one of the four LSTM gate-bias blocks is all ones
            # (sum == size / 4)
            assert data.sum() == data.size // 4
        elif "dec.decoder.1.bias_ih" in name:
            assert data.sum() == data.size // 4
        elif data.ndim == 1:
            # all other biases are zero
            assert numpy.all(data == 0.0)
        else:
            # weights: zero mean, variance ~ 1/fan_in
            numpy.testing.assert_allclose(data.mean(), 0.0, 5e-2, 5e-2)
            numpy.testing.assert_allclose(
                data.var(), 1.0 / numpy.prod(data.shape[1:]), 5e-2, 5e-2
            )
def test_lecun_init_chainer():
    """Chainer E2E-ASR parameters must follow LeCun-style initialization.

    Mirrors test_lecun_init_torch for the chainer backend; RNG sources are
    seeded before the model is built, and the checks depend on that order.
    """
    nseed = args.seed
    random.seed(nseed)
    numpy.random.seed(nseed)
    os.environ["CHAINER_SEED"] = str(nseed)
    import espnet.nets.chainer_backend.e2e_asr as m

    model = m.E2E(40, 5, args)
    # CTC output layer: zero bias, weight variance ~ 1/fan_in
    b = model.ctc.ctc_lo.b.data
    assert numpy.all(b == 0.0)
    w = model.ctc.ctc_lo.W.data
    numpy.testing.assert_allclose(w.mean(), 0.0, 1e-2, 1e-2)
    numpy.testing.assert_allclose(w.var(), 1.0 / w.shape[1], 1e-2, 1e-2)
    for name, p in model.namedparams():
        print(name)
        data = p.data
        if "rnn0/upward/b" in name:
            # one of the four LSTM gate-bias blocks is all ones
            # (sum == size / 4)
            assert data.sum() == data.size // 4
        elif "rnn1/upward/b" in name:
            assert data.sum() == data.size // 4
        elif "embed" in name:
            # embeddings: zero mean, unit variance
            numpy.testing.assert_allclose(data.mean(), 0.0, 5e-2, 5e-2)
            numpy.testing.assert_allclose(data.var(), 1.0, 5e-2, 5e-2)
        elif data.ndim == 1:
            # all other biases are zero
            assert numpy.all(data == 0.0)
        else:
            # weights: zero mean, variance ~ 1/fan_in
            numpy.testing.assert_allclose(data.mean(), 0.0, 5e-2, 5e-2)
            numpy.testing.assert_allclose(
                data.var(), 1.0 / numpy.prod(data.shape[1:]), 5e-2, 5e-2
            )
| 3,380 | 28.4 | 72 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.