code
stringlengths
17
6.64M
def _download_lsun(out_dir, category, set_name, tag):
    """Fetch one LSUN lmdb zip archive into out_dir via curl.

    The test split shares a single archive ('test_lmdb.zip'); every other
    split is named '<category>_<set_name>_lmdb.zip'.
    """
    url = 'http://lsun.cs.princeton.edu/htbin/download.cgi?tag={tag}&category={category}&set={set_name}'.format(
        tag=tag, category=category, set_name=set_name)
    print(url)
    if set_name == 'test':
        out_name = 'test_lmdb.zip'
    else:
        out_name = '{category}_{set_name}_lmdb.zip'.format(category=category, set_name=set_name)
    out_path = os.path.join(out_dir, out_name)
    print('Downloading', category, set_name, 'set')
    subprocess.call(['curl', url, '-o', out_path])
def download_lsun(dirpath):
    """Download the LSUN bedroom train/val archives plus the shared test set.

    Skips entirely when <dirpath>/lsun already exists.
    """
    data_dir = os.path.join(dirpath, 'lsun')
    if os.path.exists(data_dir):
        print('Found LSUN - skip')
        return
    os.mkdir(data_dir)
    tag = 'latest'
    for category in ['bedroom']:
        _download_lsun(data_dir, category, 'train', tag)
        _download_lsun(data_dir, category, 'val', tag)
    # The test archive is category-independent.
    _download_lsun(data_dir, '', 'test', tag)
def download_mnist(dirpath):
    """Fetch the four MNIST idx archives into <dirpath>/mnist and gunzip them.

    Does nothing when the target directory already exists.
    """
    data_dir = os.path.join(dirpath, 'mnist')
    if os.path.exists(data_dir):
        print('Found MNIST - skip')
        return
    os.mkdir(data_dir)
    url_base = 'http://yann.lecun.com/exdb/mnist/'
    file_names = ['train-images-idx3-ubyte.gz',
                  'train-labels-idx1-ubyte.gz',
                  't10k-images-idx3-ubyte.gz',
                  't10k-labels-idx1-ubyte.gz']
    for file_name in file_names:
        url = url_base + file_name
        print(url)
        out_path = os.path.join(data_dir, file_name)
        print('Downloading ', file_name)
        subprocess.call(['curl', url, '-o', out_path])
        print('Decompressing ', file_name)
        subprocess.call(['gzip', '-d', out_path])
def prepare_data_dir(path='./data'):
    """Ensure the data directory exists.

    Uses os.makedirs(..., exist_ok=True) instead of an exists()+mkdir pair:
    this removes the check-then-act race and also creates missing parent
    directories, which plain os.mkdir would fail on.
    """
    os.makedirs(path, exist_ok=True)
def main(_):
    """tf.app entry point: derive defaulted FLAGS, build a DCGAN inside a
    memory-growing GPU session, then train or visualize from a checkpoint.

    `_` is the leftover argv list tf.app.run passes; it is unused.
    """
    pp.pprint(flags.FLAGS.__flags)
    # Default to square images: width falls back to the configured height.
    if (FLAGS.input_width is None):
        FLAGS.input_width = FLAGS.input_height
    if (FLAGS.output_width is None):
        FLAGS.output_width = FLAGS.output_height
    if (not os.path.exists(FLAGS.checkpoint_dir)):
        os.makedirs(FLAGS.checkpoint_dir)
    if (not os.path.exists(FLAGS.sample_dir)):
        os.makedirs(FLAGS.sample_dir)
    run_config = tf.ConfigProto()
    # Grab GPU memory on demand rather than all at once.
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(sess, input_width=FLAGS.input_width, input_height=FLAGS.input_height, output_width=FLAGS.output_width, output_height=FLAGS.output_height, batch_size=FLAGS.batch_size, sample_num=FLAGS.batch_size, dataset_name=FLAGS.dataset, input_fname_pattern=FLAGS.input_fname_pattern, crop=FLAGS.crop, checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir)
        show_all_variables()
        if FLAGS.train:
            dcgan.train(FLAGS)
        # In test mode a trained checkpoint must already exist.
        elif (not dcgan.load(FLAGS.checkpoint_dir)[0]):
            raise Exception('[!] Train a model first, then run test mode')
        OPTION = FLAGS.options
        visualize(sess, dcgan, FLAGS, OPTION, FLAGS.gan_set)
class batch_norm(object):
    """Callable wrapper around tf.contrib.layers.batch_norm that remembers
    epsilon/momentum and routes every call through one named scope."""

    def __init__(self, epsilon=1e-05, momentum=0.9, name='batch_norm'):
        # The variable_scope here only reserves the name; the actual
        # variables are created lazily on the first __call__.
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x, train=True):
        # updates_collections=None applies moving-average updates in place
        # instead of deferring them to a collection the caller must run.
        return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=train, scope=self.name)
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    x_shape = x.get_shape()
    y_shape = y.get_shape()
    # Broadcast y across the spatial grid of x before concatenating channels.
    tiled_y = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    return concat([x, tiled_y], 3)
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d'):
    """Strided 2-D convolution with bias and SAME padding.

    Creates variables 'w' (truncated-normal init) and 'biases' (zeros)
    under the scope `name`.
    """
    with tf.variable_scope(name):
        in_channels = input_.get_shape()[-1]
        w = tf.get_variable('w', [k_h, k_w, in_channels, output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        # Reshape restores the static shape lost by bias_add.
        return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv2d', with_w=False):
    """Transposed 2-D convolution with bias.

    Filter shape is [k_h, k_w, out_channels, in_channels]. When with_w is
    true, also returns the filter and bias variables.
    """
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        strides = [1, d_h, d_w, 1]
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=strides)
        except AttributeError:
            # Very old TensorFlow releases named this op deconv2d.
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=strides)
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        return (deconv, w, biases) if with_w else deconv
def lrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU: identity for positive x, slope `leak` otherwise.

    `name` is accepted for signature compatibility but unused.
    """
    scaled = leak * x
    return tf.maximum(scaled, x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    """Fully connected layer: input_ @ Matrix + bias.

    Variables 'Matrix' and 'bias' live in `scope` (default 'Linear');
    with_w additionally returns the variables themselves.
    """
    in_dim = input_.get_shape().as_list()[1]
    with tf.variable_scope(scope or 'Linear'):
        matrix = tf.get_variable('Matrix', [in_dim, output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable('bias', [output_size],
                               initializer=tf.constant_initializer(bias_start))
        out = tf.matmul(input_, matrix) + bias
        return (out, matrix, bias) if with_w else out
def create_dataset(file_path):
    """Split a CUHK-03 MATLAB (HDF5) file into a new 'cuhk-03.h5'.

    Output layout: groups a/b (camera views) x train/validation/test, one
    dataset per identity, each a stack of 60x160 RGB images scaled to
    [0, 1]. Identities listed in f['testsets'][0][0] / [0][1] become the
    validation / test splits; everything else is training.

    NOTE(review): heavy per-image work; prints (i, k) as a progress trace.
    """
    with h5py.File(file_path, 'r') as f, h5py.File('cuhk-03.h5') as fw:
        # MATLAB indices are 1-based; shift to 0-based [pair, identity] lists.
        val_index = (f[f['testsets'][0][0]][:].T - 1).tolist()
        tes_index = (f[f['testsets'][0][1]][:].T - 1).tolist()
        fwa = fw.create_group('a')
        fwb = fw.create_group('b')
        fwat = fwa.create_group('train')
        fwav = fwa.create_group('validation')
        fwae = fwa.create_group('test')
        fwbt = fwb.create_group('train')
        fwbv = fwb.create_group('validation')
        fwbe = fwb.create_group('test')
        temp = []
        count_t = 0
        count_v = 0
        count_e = 0
        # NOTE(review): only the first 3 camera pairs of 'labeled' are used
        # — confirm this matches the intended CUHK-03 protocol.
        for i in range(3):
            for k in range(f[f['labeled'][0][i]][0].size):
                print(i, k)
                if ([i, k] in val_index):
                    # Views 0-4 belong to camera a; only 3-D (H,W,C) entries
                    # are real images, the rest are empty placeholders.
                    for j in range(5):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwav.create_dataset(str(count_v), data=np.array(temp))
                    temp = []
                    # Views 5-9 belong to camera b.
                    for j in range(5, 10):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwbv.create_dataset(str(count_v), data=np.array(temp))
                    temp = []
                    count_v += 1
                if ([i, k] in tes_index):
                    for j in range(5):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwae.create_dataset(str(count_e), data=np.array(temp))
                    temp = []
                    for j in range(5, 10):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwbe.create_dataset(str(count_e), data=np.array(temp))
                    temp = []
                    count_e += 1
                # Anything not in the val/test lists goes to training.
                if (([i, k] not in val_index) and ([i, k] not in tes_index)):
                    for j in range(5):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwat.create_dataset(str(count_t), data=np.array(temp))
                    temp = []
                    for j in range(5, 10):
                        if (len(f[f[f['labeled'][0][i]][j][k]].shape) == 3):
                            temp.append((np.array(Image.fromarray(f[f[f['labeled'][0][i]][j][k]][:].transpose(2, 1, 0)).resize((60, 160))) / 255.0))
                    fwbt.create_dataset(str(count_t), data=np.array(temp))
                    temp = []
                    count_t += 1
class DataGenerator(Dataset):
    """Dataset over '<folder>_<file>' samples found under image_dir.

    Folders are named by integer class id. The special folder 'gen_0000'
    (only scanned when root == 'train_new') holds GAN-generated images,
    marked with flag == 1; real images carry flag == 0.
    """

    def __init__(self, root, data_transform=None, image_dir=None, target_transform=None):
        super(DataGenerator, self).__init__()
        assert (image_dir is not None)
        self.image_dir = image_dir
        self.samples = []      # '<folder>_<file>' keys, decoded in __getitem__
        self.img_label = []    # int class id per sample
        self.img_flag = []     # 1 for generated images, 0 for real
        self.data_transform = data_transform
        self.target_transform = target_transform
        self.train_val = root
        if (root == 'train_new'):
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                if (folder == 'gen_0000'):
                    for files in os.listdir(fdir):
                        temp = ((folder + '_') + files)
                        # Last 4 chars of 'gen_0000' -> label 0 for generated data.
                        self.img_label.append(int(folder[(- 4):]))
                        self.img_flag.append(1)
                        self.samples.append(temp)
                else:
                    for files in os.listdir(fdir):
                        temp = ((folder + '_') + files)
                        self.img_label.append(int(folder))
                        self.img_flag.append(0)
                        self.samples.append(temp)
        else:
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                for files in os.listdir(fdir):
                    temp = ((folder + '_') + files)
                    self.img_label.append(int(folder))
                    self.img_flag.append(0)
                    self.samples.append(temp)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        temp = self.samples[idx]
        if (self.img_flag[idx] == 1):
            # 'gen_0000_' prefix is 9 chars long.
            foldername = 'gen_0000'
            filename = temp[9:]
        else:
            # Real folders are assumed to be 4-char ids — TODO confirm.
            foldername = temp[:4]
            filename = temp[5:]
        img = default_loader(((((self.image_dir + '/') + foldername) + '/') + filename))
        # NOTE(review): both branches build the identical dict; the split is
        # presumably kept so different transforms could be applied later.
        if (self.train_val == 'train_new'):
            result = {'img': self.data_transform(img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        else:
            result = {'img': self.data_transform(img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        return result
class Dataset():
    """Filesystem-layout helper for person re-identification datasets.

    Knows the directory conventions and split sizes for market1501, duke,
    cuhk03 and viper under a common root. Raises ValueError for any
    unsupported dataset name.

    Fixes over the previous revision:
    - gallery_path() called the nonexistent self.testset() (AttributeError);
      it now delegates to test_path().
    - test_num()/query_num() used the broken format "'... % dataset' % str"
      which raised TypeError instead of the intended ValueError.
    """

    _KNOWN = ('market1501', 'duke', 'cuhk03', 'viper')

    def __init__(self, root='/home/paul/datasets', dataset='market1501'):
        self.dataset = dataset
        self.root = root

    def train_path(self):
        """Directory holding the training bounding boxes."""
        if self.dataset in self._KNOWN:
            return os.path.join(self.root, self.dataset, 'bounding_box_train')
        raise ValueError('Unknown train set for %s' % self.dataset)

    def test_path(self):
        """Directory holding the test (gallery) bounding boxes."""
        if self.dataset in self._KNOWN:
            return os.path.join(self.root, self.dataset, 'bounding_box_test')
        raise ValueError('Unknown test set for %s' % self.dataset)

    def gallery_path(self):
        """Alias for test_path(); the gallery is the test set."""
        return self.test_path()

    def query_path(self):
        """Directory holding the query images."""
        if self.dataset in self._KNOWN:
            return os.path.join(self.root, self.dataset, 'query')
        raise ValueError('Unknown query set for %s' % self.dataset)

    def gan_path(self):
        """Directory of GAN-generated images for this dataset."""
        return os.path.join('/home/paul/generated', self.dataset)

    def dataset_path(self):
        """Root directory of this dataset."""
        return os.path.join(self.root, self.dataset)

    def n_classe(self):
        """Number of training identities."""
        counts = {'market1501': 751, 'duke': 702, 'cuhk03': 767, 'viper': 316}
        if self.dataset in counts:
            return counts[self.dataset]
        raise ValueError('Unknown n_classe set for %s' % self.dataset)

    def root_path(self):
        return self.root

    def gt_set(self):
        """Hand-drawn ground-truth boxes (market1501 only)."""
        if self.dataset == 'market1501':
            return os.path.join(self.root, self.dataset, 'gt_bbox')
        raise ValueError('Unknown hand-drawn bounding boxes for %s' % self.dataset)

    def train_list(self):
        """Path of the train.list file; raises if it does not exist on disk."""
        if self.dataset in self._KNOWN:
            train_list = os.path.join(self.root, self.dataset, 'train.list')
        else:
            raise ValueError('Unknown train bounding boxes for %s' % self.dataset)
        if not os.path.exists(train_list):
            raise FileNotFoundError('%s not found' % train_list)
        return train_list

    def cluster_path(self):
        """Directory with the clustering output for this dataset."""
        if self.dataset in self._KNOWN:
            return os.path.join('/home/paul', 'clustering', self.dataset)
        raise ValueError('Unknown cluster path for %s' % self.dataset)

    def n_training_set(self):
        """Training-set size; market1501 is counted (and sanity-checked) on disk."""
        if self.dataset == 'market1501':
            n = len(glob.glob(os.path.join(self.train_path(), '*.jpg')))
            assert n == 12936
        elif self.dataset == 'duke':
            n = 16522
        else:
            raise ValueError('Unknow training set size for %s' % self.dataset)
        return n

    def n_gan_set(self):
        """Number of generated images available on disk (market1501 only)."""
        if self.dataset == 'market1501':
            return len(glob.glob(os.path.join(self.gan_path(), '*.jpg')))
        raise ValueError('Unknow generated set size for %s' % self.dataset)

    def test_num(self):
        """Gallery size."""
        sizes = {'market1501': 19732, 'duke': 17661, 'cuhk03': 6751, 'viper': 316}
        if self.dataset in sizes:
            return sizes[self.dataset]
        raise ValueError('Unknown test num for %s dataset' % self.dataset)

    def query_num(self):
        """Query-set size."""
        sizes = {'market1501': 3368, 'duke': 2228, 'cuhk03': 6751, 'viper': 316}
        if self.dataset in sizes:
            return sizes[self.dataset]
        raise ValueError('Unknown query num for %s dataset' % self.dataset)
def read_image(img_path):
    """Keep reading image until succeed.

    Raises IOError immediately when img_path does not exist; otherwise
    retries forever on IOError, which guards against transient failures
    under heavy I/O load.
    """
    if not osp.exists(img_path):
        raise IOError('{} does not exist'.format(img_path))
    while True:
        try:
            return Image.open(img_path).convert('RGB')
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
class ImageDataset(Dataset):
    """Image Person ReID Dataset: yields (image, person_id, camera_id)."""

    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        # Robust loader that retries on transient IOError.
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid
class VideoDataset(Dataset):
    'Video Person ReID Dataset.\n    Note batch data has shape (batch, seq_len, channel, height, width).\n    '
    # Valid values for the `sample` constructor argument.
    sample_methods = ['evenly', 'random', 'all']

    def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):
        self.dataset = dataset
        self.seq_len = seq_len
        self.sample = sample
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # One tracklet: a list of frame paths plus its person/camera ids.
        (img_paths, pid, camid) = self.dataset[index]
        num = len(img_paths)
        if (self.sample == 'random'):
            '\n Randomly sample seq_len items from num items,\n if num is smaller than seq_len, then replicate items\n '
            indices = np.arange(num)
            # Sample with replacement only when the tracklet is too short.
            replace = (False if (num >= self.seq_len) else True)
            indices = np.random.choice(indices, size=self.seq_len, replace=replace)
            indices = np.sort(indices)
        elif (self.sample == 'evenly'):
            'Evenly sample seq_len items from num items.'
            if (num >= self.seq_len):
                # Truncate so num divides evenly by seq_len.
                num -= (num % self.seq_len)
                # NOTE(review): under Python 3, num / self.seq_len is a float,
                # so arange yields float indices that will fail when indexing
                # img_paths below — this looks written for Python 2; confirm.
                indices = np.arange(0, num, (num / self.seq_len))
            else:
                # Pad a short tracklet by repeating its last frame.
                indices = np.arange(0, num)
                num_pads = (self.seq_len - num)
                indices = np.concatenate([indices, (np.ones(num_pads).astype(np.int32) * (num - 1))])
            assert (len(indices) == self.seq_len)
        elif (self.sample == 'all'):
            '\n Sample all items, seq_len is useless now and batch_size needs\n to be set to 1.\n '
            indices = np.arange(num)
        else:
            raise KeyError('Unknown sample method: {}. Expected one of {}'.format(self.sample, self.sample_methods))
        imgs = []
        for index in indices:
            img_path = img_paths[index]
            img = read_image(img_path)
            if (self.transform is not None):
                img = self.transform(img)
            # Add a leading frame axis so frames can be concatenated below.
            img = img.unsqueeze(0)
            imgs.append(img)
        imgs = torch.cat(imgs, dim=0)
        return (imgs, pid, camid)
def evaluate(qf, ql, qc, gf, gl, gc):
    """Score one query (feature qf, label ql, camera qc) against the gallery.

    Gallery entries sharing both the query's label and camera are junk
    (same-camera hits), as are distractors labeled -1. Returns the
    (ap, cmc) pair from compute_mAP.
    """
    scores = np.dot(gf, qf)
    # Rank gallery by descending similarity.
    order = np.argsort(scores)[::-1]
    same_id = np.argwhere(gl == ql)
    same_cam = np.argwhere(gc == qc)
    good_index = np.setdiff1d(same_id, same_cam, assume_unique=True)
    distractors = np.argwhere(gl == (-1))
    junk_index = np.append(np.intersect1d(same_id, same_cam), distractors)
    return compute_mAP(order, good_index, junk_index)
def compute_mAP(index, good_index, junk_index):
    """Average precision and CMC curve for one ranked query.

    index: gallery indices sorted best-first; good_index: indices counted
    as correct matches; junk_index: indices ignored entirely.
    Returns (ap, cmc) where cmc is a torch.IntTensor over ranks
    (cmc[0] == -1 flags a query with no valid ground truth).

    Fixes: boolean mask compared with `== True` (anti-idiom) and the
    deprecated np.in1d replaced by np.isin.
    """
    ap = 0
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size == 0:
        cmc[0] = -1
        return (ap, cmc)
    # Remove junk entries from the ranking before scoring.
    keep = np.isin(index, junk_index, invert=True)
    index = index[keep]
    ngood = len(good_index)
    rows_good = np.argwhere(np.isin(index, good_index)).flatten()
    # CMC is 1 from the first correct hit onwards.
    cmc[rows_good[0]:] = 1
    for i in range(ngood):
        d_recall = 1.0 / ngood
        precision = (i + 1) * 1.0 / (rows_good[i] + 1)
        if rows_good[i] != 0:
            old_precision = i * 1.0 / rows_good[i]
        else:
            old_precision = 1.0
        # Trapezoidal area under the precision-recall curve.
        ap = ap + (d_recall * (old_precision + precision)) / 2
    return (ap, cmc)
def weights_init_kaiming(m):
    """Kaiming-normal init for Conv/Linear weights; N(1, 0.02) weights and
    zero bias for BatchNorm1d. Intended for use with nn.Module.apply."""
    cls = m.__class__.__name__
    if 'Conv' in cls:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in cls:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm1d' in cls:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_classifier(m):
    """Init for the final classifier: Linear weights ~ N(0, 0.001), zero bias."""
    cls = m.__class__.__name__
    if 'Linear' in cls:
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)
class ClassBlock(nn.Module):
    """Bottleneck + classifier head: Linear -> BN [-> LeakyReLU] [-> Dropout]
    followed by a Linear layer onto class_num logits."""

    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        layers = [nn.Linear(input_dim, num_bottleneck), nn.BatchNorm1d(num_bottleneck)]
        if relu:
            layers.append(nn.LeakyReLU(0.1))
        if dropout:
            layers.append(nn.Dropout(p=0.5))
        add_block = nn.Sequential(*layers)
        add_block.apply(weights_init_kaiming)
        classifier = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        classifier.apply(weights_init_classifier)
        self.add_block = add_block
        self.classifier = classifier

    def forward(self, x):
        return self.classifier(self.add_block(x))
class ft_net(nn.Module):
    """ImageNet-pretrained ResNet-50 backbone with a ClassBlock head."""

    def __init__(self, class_num):
        super(ft_net, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Adaptive pooling lets any input resolution collapse to 1x1.
        backbone.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = backbone
        self.classifier = ClassBlock(2048, class_num)

    def forward(self, x):
        m = self.model
        x = m.conv1(x)
        x = m.bn1(x)
        x = m.relu(x)
        x = m.maxpool(x)
        x = m.layer1(x)
        x = m.layer2(x)
        x = m.layer3(x)
        x = m.layer4(x)
        x = m.avgpool(x)
        x = torch.squeeze(x)
        return self.classifier(x)
class ft_net_dense(nn.Module):
    """ImageNet-pretrained DenseNet-121 backbone with a ClassBlock head."""

    def __init__(self, class_num):
        super().__init__()
        backbone = models.densenet121(pretrained=True)
        backbone.features.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Drop the original fc so only the feature extractor remains in use.
        backbone.fc = nn.Sequential()
        self.model = backbone
        self.classifier = ClassBlock(1024, class_num)

    def forward(self, x):
        feats = self.model.features(x)
        feats = torch.squeeze(feats)
        return self.classifier(feats)
class ft_net_middle(nn.Module):
    """ResNet-50 backbone that classifies on concatenated layer3 + layer4
    pooled features (1024 + 2048 dims)."""

    def __init__(self, class_num):
        super(ft_net_middle, self).__init__()
        backbone = models.resnet50(pretrained=True)
        backbone.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = backbone
        self.classifier = ClassBlock((2048 + 1024), class_num)

    def forward(self, x):
        m = self.model
        x = m.conv1(x)
        x = m.bn1(x)
        x = m.relu(x)
        x = m.maxpool(x)
        x = m.layer1(x)
        x = m.layer2(x)
        x = m.layer3(x)
        # Mid-level (layer3) and high-level (layer4) pooled features.
        x0 = m.avgpool(x)
        x = m.layer4(x)
        x1 = m.avgpool(x)
        combined = torch.cat((x0, x1), 1)
        combined = torch.squeeze(combined)
        return self.classifier(combined)
def generate_labels_for_gan():
    """For each cluster directory, write a gan<i>.list file mapping the
    identities present in that cluster to their train.list labels.

    Relies on module-level `N_CLUSTER` and `dataset` (a Dataset instance).
    """
    image_labels = {}
    # Map each identity prefix (text before the first '_') to its label,
    # recording only the first image of every new label run.
    f = open('/home/paul/datasets/viper/train.list', 'r')
    old_lbl = (- 1)
    for line in f:
        line = line.strip()
        (img, lbl) = line.split()
        lbl = int(lbl)
        if (lbl != old_lbl):
            splt = img.split('_')
            image_labels[splt[0]] = int(lbl)
            old_lbl = lbl
    f.close()
    for n_cluster in range(N_CLUSTER):
        cluster_path = os.path.join(dataset.cluster_path(), ('cluster_%s' % n_cluster))
        cluster_labels = {}
        cluster_imgs = glob.glob(os.path.join(cluster_path, '*.jpg'))
        cluster_imgs = sorted(cluster_imgs)
        # Count how many images each identity contributes to this cluster.
        for img in cluster_imgs:
            img = os.path.basename(img)
            splt = img.split('_')
            try:
                cluster_labels[splt[0]] += 1
            except KeyError:
                cluster_labels[splt[0]] = 1
        f = open(os.path.join(dataset.cluster_path(), ('gan%s.list' % n_cluster)), 'w')
        for i in cluster_labels:
            print(i)
            f.write(('%s\n' % image_labels[i]))
        f.close()
    # NOTE(review): this trailing print uses the leaked loop variable `i`
    # from the last cluster — looks like leftover debug output; confirm.
    print(image_labels[i])
def load_gan(gan_path, n_gan_images):
    """Load n_gan_images generated images grouped by identity.

    NOTE(review): `gan_path` is ignored — get_gan_data reads its paths from
    the module-level `dataset` object instead; confirm this is intentional.
    """
    return get_gan_data(n_gan_images)
def get_gan_data(n_gan_images):
    """Pick n_gan_images generated images spread over the clusters and build
    per-cluster multi-hot label vectors.

    Returns (images, labels): images maps a cluster id (as str) to the list
    of filenames drawn from it; labels is an (N_CLUSTER, n_classe) 0/1
    matrix read from the gan<i>.list files.
    Relies on module-level `N_CLUSTER` and `dataset`.
    """
    images = dict()
    labels = []
    # One multi-hot class vector per cluster, from its gan<i>.list file.
    for i in range(N_CLUSTER):
        f = open(os.path.join(dataset.cluster_path(), ('gan%s.list' % i)), 'r')
        tmp_labels = np.zeros(shape=dataset.n_classe(), dtype=np.int32)
        for line in f:
            lbl = line.strip()
            tmp_labels[int(lbl)] = 1
        f.close()
        labels.append(tmp_labels)
    labels = np.array(labels)
    # Oversample each cluster slightly (+1) so the union covers n_gan_images.
    n_gan = int(np.floor(((n_gan_images / N_CLUSTER) + 1)))
    data_list = None
    for i in range(N_CLUSTER):
        gan_list = glob.glob(os.path.join(dataset.gan_path(), ('gan_%s*.jpg' % i)))
        gan_list = gan_list[:n_gan]
        if (data_list is None):
            data_list = gan_list
        else:
            data_list = np.concatenate((data_list, gan_list), axis=0)
    data_list = np.unique(data_list)
    np.random.shuffle(data_list)
    data_list = data_list[:n_gan_images]
    assert (len(data_list) == n_gan_images)
    # Filenames look like gan_<cluster>_...jpg; group by that cluster id.
    for (i, filename) in enumerate(data_list):
        img_name = os.path.basename(filename)
        lbl = int(img_name.split('_')[1])
        try:
            images[str(lbl)].append(img_name)
        except KeyError:
            images[str(lbl)] = list()
            images[str(lbl)].append(img_name)
    labels = np.array(labels)
    assert (np.sum([len(images[i]) for i in images]) == n_gan_images)
    print(images)
    # NOTE(review): this only holds when every cluster contributed at least
    # one image to the final sample — confirm.
    assert (len(images) == labels.shape[0])
    assert (labels.shape[1] == dataset.n_classe())
    return (images, labels)
def copyfolder(src, dst):
    """Copy every entry of directory `src` into `dst`, creating `dst` first
    when it does not exist (non-recursive)."""
    entries = os.listdir(src)
    if not os.path.isdir(dst):
        os.mkdir(dst)
    for entry in entries:
        copyfile(src + '/' + entry, dst + '/' + entry)
class dcganDataset(Dataset):
    """Dataset mixing real class folders with GAN samples for LSRO training.

    Real images get a one-hot label vector and flag 0; generated images
    (pre-listed in the module-level `generated_images` triple) get flag 1.
    Relies on module-level `opt`, `n_classes`, `generated_images`,
    `data_transforms` and `default_loader`.
    """

    def __init__(self, root, transform=None, targte_transform=None):
        super(dcganDataset, self).__init__()
        self.image_dir = os.path.join(opt.data_dir, root)
        self.samples = []     # '<folder>_<file>' keys decoded in __getitem__
        self.img_label = []   # one-hot (or soft) label vector per sample
        self.img_flag = []    # 1 for generated, 0 for real
        self.transform = transform
        self.targte_transform = targte_transform
        self.train_val = root
        if (root == 'train_new'):
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                if (folder == 'gen_0000'):
                    # Generated samples come pre-built from get_gan_data().
                    (samples, img_labels, flags) = generated_images
                    self.samples = (self.samples + samples)
                    self.img_label = (self.img_label + img_labels)
                    self.img_flag = (self.img_flag + flags)
                else:
                    for files in os.listdir(fdir):
                        temp = ((folder + '_') + files)
                        lbl = int(folder)
                        label_vec = np.zeros(shape=n_classes)
                        label_vec[lbl] = 1
                        self.img_label.append(label_vec)
                        self.img_flag.append(0)
                        self.samples.append(temp)
        else:
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                for files in os.listdir(fdir):
                    temp = ((folder + '_') + files)
                    lbl = int(folder)
                    label_vec = np.zeros(shape=n_classes)
                    label_vec[lbl] = 1
                    self.img_label.append(label_vec)
                    self.img_flag.append(0)
                    self.samples.append(temp)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        temp = self.samples[idx]
        if (self.img_flag[idx] == 1):
            # 'gen_0000_' prefix is 9 chars long.
            foldername = 'gen_0000'
            filename = temp[9:]
        else:
            # Real folders are assumed to be 4-char ids — TODO confirm.
            foldername = temp[:4]
            filename = temp[5:]
        img = default_loader(((((self.image_dir + '/') + foldername) + '/') + filename))
        # Train split gets augmentation transforms; everything else val ones.
        if (self.train_val == 'train_new'):
            result = {'img': data_transforms['train'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        else:
            result = {'img': data_transforms['val'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        return result
class SLSloss(nn.Module):
    """Label-smoothing-style loss that treats flagged (generated) samples
    with a uniform-label term (LSRO-like) and real samples with the usual
    soft-target cross entropy.

    forward(input, target, flg): `target` is a per-class weight vector,
    `flg` is 1 for generated samples and 0 for real ones.
    """

    def __init__(self):
        super(SLSloss, self).__init__()

    def forward(self, input, target, flg):
        # Flatten any trailing spatial dims into the batch axis.
        if (input.dim() > 2):
            input = input.view(input.size(0), input.size(1), (- 1))
            input = input.transpose(1, 2)
            input = input.contiguous().view((- 1), input.size(2))
        # Subtract the row max in place for numerical stability.
        # NOTE(review): mutating input.data bypasses autograd for this shift
        # (log_softmax is shift-invariant, so the value is unchanged).
        (maxRow, _) = torch.max(input.data, 1)
        maxRow = maxRow.unsqueeze(1)
        input.data = (input.data - maxRow)
        flg = flg.view((- 1), 1)
        # Uniform-label term: mean log-probability over all classes.
        flos = F.log_softmax(input)
        flos = (torch.sum(flos, 1) / flos.size(1))
        # Soft-target cross-entropy term.
        logpt = F.log_softmax(input)
        logpt = torch.mul(logpt, target)
        logpt = torch.sum(logpt, 1, True)
        logpt = logpt.view((- 1))
        flg = flg.view((- 1))
        # NOTE(review): hard requirement on CUDA tensors here.
        flg = flg.type(torch.cuda.FloatTensor)
        # Real samples (flg=0) use -logpt; generated ones (flg=1) use -flos.
        loss = ((((- 1) * logpt) * (1 - flg)) - (flos * flg))
        return loss.mean()
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train/val loop for the SLS setup: batches carry img/label/flag,
    flagged (generated) samples are excluded from the accuracy count.

    Returns the model loaded with the best-val-accuracy weights.
    Relies on module-level `dataloaders`, `dataset_sizes`, `use_gpu`,
    `generated_image_size`, `y_loss`, `y_err` and `save_network`.
    """
    since = time.time()
    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                inputs = data['img']
                labels = data['label']
                flags = data['flag']
                # Labels are soft vectors; the criterion expects floats.
                labels = labels.type(torch.cuda.FloatTensor)
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                    flags = Variable(flags.cuda())
                else:
                    (inputs, labels, flags) = (Variable(inputs), Variable(labels), Variable(flags))
                optimizer.zero_grad()
                outputs = model(inputs)
                (_, preds) = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels, flags)
                if (phase == 'train'):
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
                print('Loss {} '.format(loss.item()))
                # Generated samples can never be "correct": force a miss.
                for temp in range(flags.size()[0]):
                    if (flags.data[temp] == 1):
                        preds[temp] = (- 1)
                indices = torch.argmax(labels, dim=1)
                running_corrects += torch.sum((preds == indices.data))
            epoch_loss = (running_loss / dataset_sizes[phase])
            if (phase == 'train'):
                # Accuracy denominator excludes the generated images.
                epoch_acc = (running_corrects / (dataset_sizes[phase] - generated_image_size))
            else:
                epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                if (epoch_acc > best_acc):
                    best_acc = epoch_acc
                    best_model_wts = model.state_dict()
                # Periodic checkpoints only once training has settled.
                if (epoch >= 40):
                    save_network(model, epoch)
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    print('Best val Acc: {:4f}'.format(best_acc))
    model.load_state_dict(best_model_wts)
    save_network(model, 'best')
    return model
def save_network(network, epoch_label):
    """Save network's state_dict (as CPU tensors) to
    ./model/<name>/net_<epoch_label>.pth, then move the model back onto the
    first GPU when CUDA is available.

    Relies on module-level `name` and `gpu_ids`.
    """
    save_filename = 'net_%s.pth' % epoch_label
    save_path = os.path.join('./model', name, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    # BUG FIX: `if torch.cuda.is_available:` tested the function object
    # (always truthy) and crashed on CPU-only machines; call it instead.
    if torch.cuda.is_available():
        network.cuda(gpu_ids[0])
def load_network(network):
    """Restore ./model/<name>/net_<opt.which_epoch>.pth into `network` and
    return it. Relies on module-level `name` and `opt`."""
    save_path = os.path.join('./model', name, ('net_%s.pth' % opt.which_epoch))
    network.load_state_dict(torch.load(save_path))
    return network
def fliplr(img):
    """flip horizontal

    Reverses the last (width) axis of an (N, C, H, W) batch.
    """
    reversed_cols = torch.arange(img.size(3) - 1, -1, -1).long()
    return img.index_select(3, reversed_cols)
def extract_feature(model, dataloaders):
    """Extract L2-normalized features for every batch, summing the features
    of the original and horizontally flipped images.

    Feature width depends on module-level `opt`: 1024 for DenseNet,
    2048 otherwise, (2048, 6) parts for PCB (flattened after norm).
    Returns one (total, dim) FloatTensor on CPU.
    """
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        (img, label) = data
        (n, c, h, w) = img.size()
        count += n
        print(count)
        if opt.use_dense:
            ff = torch.FloatTensor(n, 1024).zero_()
        else:
            ff = torch.FloatTensor(n, 2048).zero_()
        # PCB overrides either of the above with per-part features.
        if opt.PCB:
            ff = torch.FloatTensor(n, 2048, 6).zero_()
        # Pass 0: original image; pass 1: horizontal flip. Features summed.
        for i in range(2):
            if (i == 1):
                img = fliplr(img)
            input_img = Variable(img.cuda())
            outputs = model(input_img)
            f = outputs.data.cpu()
            ff = (ff + f)
        # L2-normalize per sample; PCB flattens the parts afterwards.
        if opt.PCB:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), (- 1))
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff), 0)
    return features
def get_id(img_path):
    """Parse Market-1501-style filenames into parallel id lists.

    img_path: iterable of (path, _) pairs whose filenames look like
    '0001_c1s1_000151_00.jpg' — first 4 chars are the person label
    ('-1' marks distractors), the digit after 'c' is the camera id.
    Returns (camera_ids, labels).

    Uses os.path.basename instead of path.split('/') so Windows paths work
    too (identical behavior for '/'-separated paths).
    """
    camera_id = []
    labels = []
    for (path, v) in img_path:
        filename = os.path.basename(path)
        label = filename[0:4]
        camera = filename.split('c')[1]
        if label[0:2] == '-1':
            labels.append(-1)
        else:
            labels.append(int(label))
        camera_id.append(int(camera[0]))
    return (camera_id, labels)
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Extract query/gallery features, build the squared-Euclidean distance
    matrix and dump everything (features, ids, cameras) to ./result.mat.

    NOTE(review): the mutable default `ranks` is never used in this body.
    Relies on module-level AverageMeter, Variable-free torch.no_grad flow,
    numpy and scipy.io.
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        (qf, q_pids, q_camids) = ([], [], [])
        for (batch_idx, (imgs, pids, camids)) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update((time.time() - end))
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
        (gf, g_pids, g_camids) = ([], [], [])
        end = time.time()
        for (batch_idx, (imgs, pids, camids)) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update((time.time() - end))
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
        print('==> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, 32))
        (m, n) = (qf.size(0), gf.size(0))
        # dist(q, g)^2 = ||q||^2 + ||g||^2 - 2 q.g, computed in-place.
        distmat = (torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t())
        distmat.addmm_(1, (- 2), qf, gf.t())
        distmat = distmat.numpy()
        result = {'distmat': distmat, 'q_pids': q_pids, 'g_pids': g_pids, 'q_camids': q_camids, 'g_camids': g_camids, 'query_feature': qf.numpy(), 'gallery_feature': gf.numpy()}
        print(qf.numpy())
        print(gf.numpy())
        scipy.io.savemat('./result.mat', result)
def load_network(network):
    """Load weights from the path in module-level `opt.model_path` into
    `network` and return it."""
    # os.path.join with a single argument is a no-op kept from the original.
    save_path = os.path.join(opt.model_path)
    network.load_state_dict(torch.load(save_path))
    return network
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Standard supervised train/val loop with hard integer labels.

    Saves a checkpoint after every val phase and returns the model loaded
    with the LAST epoch's weights. NOTE(review): best_acc/best_model_wts
    are tracked but never restored — the best checkpoint is discarded.
    Relies on module-level `dataloaders`, `dataset_sizes`, `use_gpu`,
    `y_loss`, `y_err` and `save_network`.
    """
    since = time.time()
    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                (inputs, labels) = data
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    (inputs, labels) = (Variable(inputs), Variable(labels))
                optimizer.zero_grad()
                outputs = model(inputs)
                (_, preds) = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                if (phase == 'train'):
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
                print('Current Loss {}'.format(loss.item()))
                running_corrects += torch.sum((preds == labels.data))
            epoch_loss = (running_loss / dataset_sizes[phase])
            epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                last_model_wts = model.state_dict()
                save_network(model, epoch)
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model
def save_network(network, epoch_label):
    """Save network's state_dict (as CPU tensors) to
    ./model/<name>/net_<epoch_label>.pth, then move the model back onto the
    first GPU when CUDA is available.

    Relies on module-level `name` and `gpu_ids`.
    """
    save_filename = 'net_%s.pth' % epoch_label
    save_path = os.path.join('./model', name, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    # BUG FIX: `if torch.cuda.is_available:` tested the function object
    # (always truthy) and crashed on CPU-only machines; call it instead.
    if torch.cuda.is_available():
        network.cuda(gpu_ids[0])
def get_gan_data(generated_size, n_clusters=3, generated_dir=None):
    """Sample `generated_size` GAN images spread over `n_clusters` clusters.

    Per-cluster soft labels are read from /home/paul/clustering/gan<i>.list
    and normalized to sum to 1. Returns (images, img_labels, flags) where
    images are 'gen_0000_<file>' keys, img_labels the per-image soft label
    vector, and flags are all 1 (generated).

    Relies on module-level `n_classes`.
    BUG FIX: dtype=np.float was removed in NumPy 1.24 and crashed at
    runtime; np.float64 is the equivalent concrete dtype.
    """
    assert generated_dir is not None
    labels = []
    for i in range(n_clusters):
        with open(os.path.join('/home/paul/clustering', 'gan%s.list' % i), 'r') as f:
            tmp_labels = np.zeros(shape=n_classes, dtype=np.float64)
            for line in f:
                tmp_labels[int(line.strip())] = 1.0
        # Normalize to a distribution over the classes in this cluster.
        tmp_labels = tmp_labels / np.sum(tmp_labels)
        labels.append(tmp_labels)
    labels = np.array(labels)
    # Oversample each cluster slightly (+1) so the union covers the request.
    n_gan = int(np.floor((generated_size / n_clusters) + 1))
    data_list = None
    for i in range(n_clusters):
        gan_list = glob.glob(os.path.join(generated_dir, 'gan_%s*.jpg' % i))
        gan_list = gan_list[:n_gan]
        if data_list is None:
            data_list = gan_list
        else:
            data_list = np.concatenate((data_list, gan_list), axis=0)
    data_list = np.unique(data_list)
    np.random.shuffle(data_list)
    assert data_list.shape[0] >= generated_size
    data_list = data_list[:generated_size]
    img_labels = []
    images = []
    flags = []
    # Filenames look like gan_<cluster>_...jpg; each image inherits its
    # cluster's soft label vector.
    for (i, filename) in enumerate(data_list):
        img_name = os.path.basename(filename)
        lbl = int(img_name.split('_')[1])
        img_labels.append(labels[lbl])
        images.append('gen_0000' + '_' + img_name)
        flags.append(1)
    assert len(images) == generated_size
    assert len(images) == len(img_labels) == len(flags)
    return (images, img_labels, flags)
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric.

    Adapted from the PyTorch ImageNet example
    (https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def read_json(fpath):
    """Load and return the JSON document stored at `fpath`."""
    with open(fpath, 'r') as fh:
        return json.load(fh)
def mkdir_if_missing(directory):
    """Create `directory` (and any missing parents) if it does not exist.

    Fixes: uses os.makedirs(..., exist_ok=True) instead of an exists() check,
    which avoids the check-then-create race when several processes start
    concurrently; also no-ops on an empty path (os.path.exists('') is False,
    so the original would call makedirs('') and raise FileNotFoundError).
    """
    if directory:
        os.makedirs(directory, exist_ok=True)
def write_json(obj, fpath):
    """Serialize `obj` as pretty-printed JSON to `fpath`.

    Creates the parent directory if needed. Fix: os.path.dirname() returns
    '' for a bare filename, and makedirs('') raises FileNotFoundError, so
    the directory is created only when the path actually contains one.
    """
    dirname = os.path.dirname(fpath)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(fpath, 'w') as f:
        json.dump(obj, f, indent=4, separators=(',', ': '))
def main(args=None):
    """Entry point: parse arguments, seed RNGs, configure wandb, run Trainer.

    Args:
        args: optional dict of argument overrides; when None, arguments are
            read from the command line.
    """
    if (args is None):
        parser = argparse.ArgumentParser()
        parser.add_argument('--game', type=str)
        parser.add_argument('--config', type=str, default='default')
        parser.add_argument('--seed', type=int, default=None)
        parser.add_argument('--device', type=str, default='cuda')
        parser.add_argument('--buffer_device', type=str, default=None)
        parser.add_argument('--cpu_p', type=float, default=0.5)
        parser.add_argument('--wandb', type=str, default='disabled')
        parser.add_argument('--project', type=str, default=None)
        parser.add_argument('--group', type=str, default=None)
        parser.add_argument('--save', action='store_true', default=False)
        args = parser.parse_args()
    else:
        # Programmatic invocation: wrap the dict so attribute access works.
        args = argparse.Namespace(**args)
    if (args.seed is not None):
        # Seed all three RNG sources for reproducibility.
        seed = args.seed
        random.seed(seed)
        torch.manual_seed(seed)
        np.random.seed(seed)
    torch.backends.cudnn.benchmark = True
    if __debug__:
        print('Running in debug mode, consider using the -O python flag to improve performance')
    wandb.require(experiment='service')
    # The replay buffer may live on a different device than the model.
    buffer_device = (args.buffer_device if (args.buffer_device is not None) else args.device)
    config = deepcopy(CONFIGS[args.config])
    config.update({'game': args.game, 'seed': args.seed, 'model_device': args.device, 'buffer_device': buffer_device, 'cpu_p': args.cpu_p, 'save': args.save})
    wandb.init(config=config, project=args.project, group=args.group, mode=args.wandb)
    # Read the config back from wandb so sweep overrides take effect.
    config = dict(wandb.config)
    trainer = Trainer(config)
    trainer.print_stats()
    try:
        trainer.run()
    finally:
        # Ensure resources are released even if training raises.
        trainer.close()
def update_metrics(metrics, new_metrics, prefix=None):
    """Merge `new_metrics` into `metrics` (in place), optionally key-prefixed.

    Plain ints/floats pass through unchanged; tensor values must be detached
    scalars (0-dim or shape (1,)) and are cloned so later in-place operations
    cannot mutate the stored copy. Returns `metrics` for chaining.
    """
    def _sanitize(key, value):
        if isinstance(value, (int, float)):
            return value
        assert torch.is_tensor(value), key
        assert not value.requires_grad, key
        assert value.ndim == 0 or value.shape == (1,), key
        return value.clone()

    rename = (lambda k: k) if prefix is None else (lambda k: f'{prefix}{k}')
    for key, value in new_metrics.items():
        metrics[rename(key)] = _sanitize(key, value)
    return metrics
def combine_metrics(metrics, prefix=None):
    """Merge a sequence of metric dicts into a single new dict.

    When `prefix` is given it must be a parallel sequence of key prefixes,
    one per metrics dict.
    """
    combined = {}
    if prefix is None:
        for entry in metrics:
            update_metrics(combined, entry)
        return combined
    for entry, pre in zip(metrics, prefix):
        update_metrics(combined, entry, pre)
    return combined
def mean_metrics(metrics_history, except_keys=None):
    """Average a list of metric dicts key-wise.

    Keys listed in `except_keys`, and wandb media objects (WBValue), are not
    averaged — the last occurrence wins instead.
    """
    if not metrics_history:
        return {}
    if len(metrics_history) == 1:
        return metrics_history[0]
    skip = set(except_keys) if except_keys is not None else set()
    result = {}
    history = collections.defaultdict(list)
    for metrics in metrics_history:
        for key, value in metrics.items():
            if key in skip or isinstance(value, WBValue):
                result[key] = value
            else:
                history[key].append(value)
    for key, values in history.items():
        result[key] = compute_mean(values)
    return result
class MetricsSummarizer():
    """Accumulates per-step metric dicts and averages them on demand."""

    def __init__(self, except_keys=None):
        # Keys whose values are passed through rather than averaged.
        self.except_keys = set(except_keys) if except_keys is not None else set()
        self.metrics_history = []

    def append(self, metrics):
        """Queue one metrics dict for the next summary."""
        self.metrics_history.append(metrics)

    def summarize(self):
        """Average everything queued so far, then reset the queue."""
        summary = mean_metrics(self.metrics_history, except_keys=self.except_keys)
        self.metrics_history = []
        return summary
def compute_mean(values):
    """Return the float mean of a tensor, or of a sequence of scalars/tensors."""
    if torch.is_tensor(values):
        return values.float().mean()
    if isinstance(values, (tuple, list)):
        stacked = torch.stack([torch.as_tensor(v).detach() for v in values])
        return stacked.float().mean()
    raise ValueError()
def random_choice(n, num_samples, replacement=False, device=None):
    """Sample `num_samples` indices from range(n), with or without replacement."""
    if replacement:
        return torch.randint(0, n, (num_samples,), device=device)
    # Uniform multinomial without replacement == random permutation prefix.
    uniform = torch.ones(n, device=device)
    return torch.multinomial(uniform, num_samples, replacement=False)
def windows(x, window_size, window_stride=1):
    """Slide a window of `window_size` (stride `window_stride`) over dim 1.

    Returns a view shaped (batch, num_windows, window_size, *rest): the
    window axis produced by unfold is moved next to the time axis.
    """
    unfolded = x.unfold(1, window_size, window_stride)
    order = list(range(unfolded.ndim - 1))
    order.insert(2, unfolded.ndim - 1)
    return unfolded.permute(order)
def same_batch_shape(tensors, ndim=2):
    """True when every tensor shares the first `ndim` dims of tensors[0]."""
    assert all(t.ndim >= ndim for t in tensors)
    reference = tensors[0].shape[:ndim]
    return all(t.shape[:ndim] == reference for t in tensors[1:])
def same_batch_shape_time_offset(a, b, offset):
    """True when a and b share batch size and a's time dim equals b's plus `offset`."""
    assert a.ndim >= 2 and b.ndim >= 2
    expected = (b.shape[0], b.shape[1] + offset)
    return a.shape[:2] == expected
def check_no_grad(*tensors):
    """True when every given tensor is None or does not require grad."""
    for t in tensors:
        if t is not None and t.requires_grad:
            return False
    return True
class AdamOptim():
    """Thin Adam wrapper bundling zero_grad/backward/clip/step into one call."""

    def __init__(self, parameters, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, grad_clip=0):
        # Materialize the generator: the list is reused for gradient clipping.
        self.parameters = list(parameters)
        self.grad_clip = grad_clip
        self.optimizer = optim.Adam(self.parameters, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)

    def step(self, loss):
        """Backpropagate `loss`, optionally clip grad norm, update parameters."""
        self.optimizer.zero_grad()
        loss.backward()
        if self.grad_clip > 0:
            nn.utils.clip_grad_norm_(self.parameters, self.grad_clip)
        self.optimizer.step()
def create_reward_transform(transform_type):
    """Build a reward-shaping function.

    Supported types: 'tanh' (squash), 'clip' (to [-1, 1]), and 'none'/None
    (identity). Each returned transform handles both torch tensors and
    plain scalars/arrays.
    """
    if transform_type == 'tanh':
        def transform(r):
            return torch.tanh(r) if torch.is_tensor(r) else math.tanh(r)
    elif transform_type == 'clip':
        def transform(r):
            if torch.is_tensor(r):
                return torch.clip(r, -1, 1)
            return np.clip(r, -1, 1)
    elif transform_type == 'none' or transform_type is None:
        def transform(r):
            return r
    else:
        raise ValueError(transform_type)
    return transform
def preprocess_atari_obs(obs, device=None):
    """Convert a (possibly lazy) uint8 Atari observation to float in [0, 1]."""
    if isinstance(obs, gym.wrappers.LazyFrames):
        # LazyFrames must be materialized before tensor conversion.
        obs = np.array(obs)
    tensor = torch.as_tensor(obs, device=device)
    return tensor.float() / 255.0
def create_atari_env(game, noop_max=30, frame_skip=4, frame_stack=4, frame_size=84, episodic_lives=True, grayscale=True, time_limit=27000):
    """Build an Atari env with the standard DQN preprocessing stack.

    Args:
        game: human-readable ROM name (converted via rom_name_to_id).
        noop_max: max random no-ops at episode start.
        frame_skip: action repeat handled by AtariPreprocessing.
        frame_stack: number of stacked observation frames.
        frame_size: square resize of the grayscale/RGB frames.
        episodic_lives: treat life loss as episode end during training.
        grayscale: grayscale observations if True.
        time_limit: hard cap on env steps per episode (27000 = 30 min at 15Hz).

    Returns:
        The fully wrapped gym environment.
    """
    # Raw ALE env: frameskip/sticky-actions disabled here because
    # AtariPreprocessing applies frame skipping itself.
    env = AtariEnv(rom_name_to_id(game), frameskip=1, repeat_action_probability=0.0)
    env.spec = gym.spec((game + 'NoFrameskip-v4'))
    has_fire_action = (env.get_action_meanings()[1] == 'FIRE')
    # When the game needs FIRE to start, the no-ops are applied by NoopStart
    # below (after FireAfterLifeLoss) instead of by AtariPreprocessing.
    env = gym.wrappers.AtariPreprocessing(env, noop_max=(0 if has_fire_action else noop_max), frame_skip=frame_skip, screen_size=frame_size, terminal_on_life_loss=False, grayscale_obs=grayscale)
    if has_fire_action:
        env = FireAfterLifeLoss(env)
        # NOTE(review): NoopStart nested under has_fire_action — consistent
        # with noop_max=0 passed to AtariPreprocessing above; confirm intent.
        if (noop_max > 0):
            env = NoopStart(env, noop_max)
    if episodic_lives:
        env = EpisodicLives(env)
    env = gym.wrappers.FrameStack(env, frame_stack)
    env = gym.wrappers.TimeLimit(env, max_episode_steps=time_limit)
    return env
def create_vector_env(num_envs, env_fn):
    """Vectorize `env_fn`: synchronous for a single env, async subprocesses otherwise."""
    if num_envs == 1:
        return gym.vector.SyncVectorEnv([env_fn])
    return gym.vector.AsyncVectorEnv([env_fn] * num_envs)
def compute_atari_hns(game, agent_score):
    """Human-normalized score (%): 0 = random play, 100 = human baseline.

    Baselines come from the module-level `atari_random_scores` /
    `atari_human_scores` tables.
    """
    baseline = atari_random_scores[game]
    human = atari_human_scores[game]
    return ((agent_score - baseline) / (human - baseline)) * 100.0
class EpisodicLives(gym.Wrapper):
    """Report life loss as episode termination, but only truly reset the ALE
    when the real game is over (standard DQN episodic-life training)."""

    def __init__(self, env):
        super().__init__(env)
        self.ale = env.unwrapped.ale
        self.lives = 0
        self.was_real_done = True

    def reset(self, seed=None, options=None):
        """Full reset after a real game over (or when forced); otherwise take
        a single no-op step so play continues from the current life."""
        force = options is not None and options.get('force', False)
        if self.was_real_done or force:
            obs, info = self.env.reset(seed=seed, options=options)
        else:
            obs, _, _, _, info = self.env.step(0)
        self.lives = self.ale.lives()
        return obs, info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.was_real_done = terminated or truncated
        lives = self.ale.lives()
        # Signal termination on a life loss, but keep lives == 0 for the
        # wrapped env's own game-over handling.
        if 0 < lives < self.lives:
            terminated = True
        self.lives = lives
        return obs, reward, terminated, truncated, info
class NoAutoReset(gym.Wrapper):
    """Hand back the cached terminal transition on reset instead of actually
    restarting, unless a forced reset is requested via options."""

    def __init__(self, env):
        super().__init__(env)
        self.final_observation = None
        self.final_info = None

    def reset(self, seed=None, options=None):
        force = options is not None and options.get('force', False)
        if self.final_observation is None or force:
            return self.env.reset(seed=seed, options=options)
        return self.final_observation, self.final_info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        if terminated or truncated:
            # Cache the terminal transition so reset() can replay it.
            self.final_observation = obs
            self.final_info = info
        return obs, reward, terminated, truncated, info
class FireAfterLifeLoss(gym.Wrapper):
    """For games that require FIRE to start: press FIRE (and the next action)
    on reset, and press FIRE again automatically after each life loss."""

    def __init__(self, env):
        super().__init__(env)
        unwrapped = env.unwrapped
        action_meanings = unwrapped.get_action_meanings()
        # The wrapper assumes action 1 is FIRE and at least 3 actions exist.
        assert (action_meanings[1] == 'FIRE')
        assert (len(action_meanings) >= 3)
        self.ale = unwrapped.ale
        self.lives = 0

    def reset(self, seed=None, options=None):
        """Reset, then take actions 1 (FIRE) and 2 to actually start the game,
        retrying the reset if either start action ends the episode."""
        (obs, info) = self.env.reset(seed=seed, options=options)
        (obs, _, terminated, truncated, _) = self.env.step(1)
        if (terminated or truncated):
            (obs, info) = self.env.reset(seed=seed, options=options)
        (obs, _, terminated, truncated, _) = self.env.step(2)
        if (terminated or truncated):
            (obs, info) = self.env.reset(seed=seed, options=options)
        self.lives = self.ale.lives()
        return (obs, info)

    def step(self, action):
        (obs, reward, terminated, truncated, info) = self.env.step(action)
        lives = self.ale.lives()
        if (lives < self.lives):
            # A life was lost: press FIRE to resume play, folding the extra
            # transition's reward/termination flags into this step's result.
            (obs, reward2, terminated2, truncated2, info2) = self.env.step(1)
            reward += reward2
            terminated = (terminated or terminated2)
            truncated = (truncated or truncated2)
            info.update(info2)
        self.lives = lives
        return (obs, reward, terminated, truncated, info)
class NoopStart(gym.Wrapper):
    """Randomize the start state with 1..noop_max no-op actions after reset."""

    def __init__(self, env, noop_max):
        super().__init__(env)
        self.noop_max = noop_max

    def reset(self, seed=None, options=None):
        obs, reset_info = self.env.reset(seed=seed, options=options)
        if self.noop_max > 0:
            noops = self.env.unwrapped.np_random.integers(1, self.noop_max + 1)
        else:
            noops = 0
        for _ in range(noops):
            obs, _, terminated, truncated, step_info = self.env.step(0)
            reset_info.update(step_info)
            if terminated or truncated:
                # Episode ended during the no-ops: start over.
                obs, reset_info = self.env.reset(seed=seed, options=options)
        return obs, reset_info
@torch.no_grad()
def make_grid(tensor, nrow, padding, pad_value=0):
    """Tile a batch of images (N, C, H, W) into a single (C, H', W') grid.

    `padding` is a (y_pad, x_pad) pair inserted before each cell (plus one
    extra border on the bottom/right edge); `pad_value` fills the gaps.
    """
    num_images = tensor.size(0)
    cols = min(nrow, num_images)
    rows = int(math.ceil(float(num_images) / cols))
    cell_h = int(tensor.size(2) + padding[0])
    cell_w = int(tensor.size(3) + padding[1])
    channels = tensor.size(1)
    grid = tensor.new_full(
        (channels, cell_h * rows + padding[0], cell_w * cols + padding[1]),
        pad_value)
    idx = 0
    for row in range(rows):
        for col in range(cols):
            if idx >= num_images:
                break
            # Copy image `idx` into its padded cell.
            cell = grid.narrow(1, row * cell_h + padding[0], cell_h - padding[0])
            cell = cell.narrow(2, col * cell_w + padding[1], cell_w - padding[1])
            cell.copy_(tensor[idx])
            idx += 1
    return grid
def to_image(tensor):
    """Convert a float CHW tensor in [0, 1] to a PIL RGB image."""
    from PIL import Image
    # Scale to [0, 255], round via +0.5 then clamp, and lay out as HWC uint8.
    pixels = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8)
    if pixels.shape[2] == 1:
        # Single-channel images must be 2-D for PIL.
        pixels = pixels.squeeze(2)
    return Image.fromarray(pixels.numpy()).convert('RGB')
class NetworkConfig(object):
    """Hyper-parameters for the RL network and its training schedule."""
    # All *_step values below scale with this factor.
    scale = 100
    # Total number of training steps.
    max_step = (1000 * scale)
    initial_learning_rate = 0.0001
    # Exponential LR decay applied every `learning_rate_decay_step` steps.
    learning_rate_decay_rate = 0.96
    learning_rate_decay_step = (5 * scale)
    moving_average_decay = 0.9999
    # Weight of the entropy bonus in the policy loss.
    entropy_weight = 0.1
    # Checkpoint every `save_step` steps, keeping up to `max_to_keep` files.
    save_step = (10 * scale)
    max_to_keep = 1000
    # Layer widths for the Conv/Dense model.
    Conv2D_out = 128
    Dense_out = 128
    optimizer = 'RMSprop'
    # Clip on the policy logits; presumably for numerical stability —
    # confirm against the Network implementation.
    logit_clipping = 10
class Config(NetworkConfig):
    """Experiment configuration for the CFR-RL traffic-engineering runs."""
    version = 'TE_v2'
    project_name = 'CFR-RL'
    # Training algorithm: 'actor_critic' or 'pure_policy'.
    method = 'actor_critic'
    model_type = 'Conv'
    # Input files under ./data/: topology and traffic-matrix traces.
    topology_file = 'Abilene'
    traffic_file = 'TM'
    test_traffic_file = 'TM2'
    # Number of historical TMs fed to the model as state.
    tm_history = 1
    # Number of critical flows the agent may reroute per traffic matrix.
    max_moves = 10
    # Baseline for the pure_policy advantage: 'avg' or 'best'.
    baseline = 'avg'
def get_config(FLAGS):
    """Overlay absl FLAGS onto the Config class and return it.

    Any flag whose name matches an existing Config attribute overrides it.
    Note this mutates the Config class object itself, not an instance.
    """
    config = Config
    for key, flag in FLAGS.__flags.items():
        if hasattr(config, key):
            setattr(config, key, flag.value)
    return config
class Topology(object):
    """Parses a network topology file and precomputes all-pairs shortest paths.

    Shortest paths are cached in `<topology_file>_shortest_paths`; the cache
    is loaded when present and computed (via networkx) and written otherwise.
    """

    def __init__(self, config, data_dir='./data/'):
        self.topology_file = (data_dir + config.topology_file)
        self.shortest_paths_file = (self.topology_file + '_shortest_paths')
        self.DG = nx.DiGraph()
        self.load_topology()
        self.calculate_paths()

    def load_topology(self):
        """Read node/link counts and the per-link (src, dst, weight, capacity)
        table, populating the index maps and the weighted digraph."""
        print('[*] Loading topology...', self.topology_file)
        f = open(self.topology_file, 'r')
        # Header format: counts after ':' separators, e.g. "nodes: N\tlinks: M".
        header = f.readline()
        self.num_nodes = int(header[(header.find(':') + 2):header.find('\t')])
        self.num_links = int(header[(header.find(':', 10) + 2):])
        f.readline()  # skip the column-title line
        self.link_idx_to_sd = {}
        self.link_sd_to_idx = {}
        self.link_capacities = np.empty(self.num_links)
        self.link_weights = np.empty(self.num_links)
        # Each line: index, source, destination, weight, capacity (tab-separated).
        for line in f:
            link = line.split('\t')
            (i, s, d, w, c) = link
            self.link_idx_to_sd[int(i)] = (int(s), int(d))
            self.link_sd_to_idx[(int(s), int(d))] = int(i)
            self.link_capacities[int(i)] = float(c)
            self.link_weights[int(i)] = int(w)
            self.DG.add_weighted_edges_from([(int(s), int(d), int(w))])
        assert ((len(self.DG.nodes()) == self.num_nodes) and (len(self.DG.edges()) == self.num_links))
        f.close()

    def calculate_paths(self):
        """Build pair indices and all shortest paths for every ordered (s, d)
        pair, using the cache file when it exists."""
        self.pair_idx_to_sd = []
        self.pair_sd_to_idx = {}
        self.shortest_paths = []
        if os.path.exists(self.shortest_paths_file):
            print('[*] Loading shortest paths...', self.shortest_paths_file)
            f = open(self.shortest_paths_file, 'r')
            self.num_pairs = 0
            # Each cached line: "s->d: [n0,n1,...][n0,n2,...]...".
            for line in f:
                sd = line[:line.find(':')]
                s = int(sd[:sd.find('-')])
                d = int(sd[(sd.find('>') + 1):])
                self.pair_idx_to_sd.append((s, d))
                self.pair_sd_to_idx[(s, d)] = self.num_pairs
                self.num_pairs += 1
                self.shortest_paths.append([])
                # Strip the surrounding list brackets, then consume one
                # "[...]" node list at a time.
                paths = line[(line.find(':') + 1):].strip()[1:(- 1)]
                while (paths != ''):
                    idx = paths.find(']')
                    path = paths[1:idx]
                    node_path = np.array(path.split(',')).astype(np.int16)
                    # Sanity check: a shortest path never revisits a node.
                    assert (node_path.size == np.unique(node_path).size)
                    self.shortest_paths[(- 1)].append(node_path)
                    paths = paths[(idx + 3):]
        else:
            print('[!] Calculating shortest paths...')
            f = open(self.shortest_paths_file, 'w+')
            self.num_pairs = 0
            for s in range(self.num_nodes):
                for d in range(self.num_nodes):
                    if (s != d):
                        self.pair_idx_to_sd.append((s, d))
                        self.pair_sd_to_idx[(s, d)] = self.num_pairs
                        self.num_pairs += 1
                        self.shortest_paths.append(list(nx.all_shortest_paths(self.DG, s, d, weight='weight')))
                        line = ((((str(s) + '->') + str(d)) + ': ') + str(self.shortest_paths[(- 1)]))
                        f.writelines((line + '\n'))
        assert (self.num_pairs == (self.num_nodes * (self.num_nodes - 1)))
        f.close()
        print(('pairs: %d, nodes: %d, links: %d\n' % (self.num_pairs, self.num_nodes, self.num_links)))
class Traffic(object):
    """Loads traffic matrices (one flattened NxN matrix per line) from disk."""

    def __init__(self, config, num_nodes, data_dir='./data/', is_training=False):
        # Training and test runs read different traffic traces.
        if is_training:
            self.traffic_file = ((data_dir + config.topology_file) + config.traffic_file)
        else:
            self.traffic_file = ((data_dir + config.topology_file) + config.test_traffic_file)
        self.num_nodes = num_nodes
        self.load_traffic(config)

    def load_traffic(self, config):
        """Parse the trace file into self.traffic_matrices (tm_cnt, N, N)."""
        assert os.path.exists(self.traffic_file)
        print('[*] Loading traffic matrices...', self.traffic_file)
        f = open(self.traffic_file, 'r')
        traffic_matrices = []
        for line in f:
            # Each line holds num_nodes^2 space-separated volumes (row-major).
            volumes = line.strip().split(' ')
            total_volume_cnt = len(volumes)
            assert (total_volume_cnt == (self.num_nodes * self.num_nodes))
            matrix = np.zeros((self.num_nodes, self.num_nodes))
            for v in range(total_volume_cnt):
                i = int((v / self.num_nodes))
                j = (v % self.num_nodes)
                # Self-traffic (diagonal) is discarded.
                if (i != j):
                    matrix[i][j] = float(volumes[v])
            traffic_matrices.append(matrix)
        f.close()
        self.traffic_matrices = np.array(traffic_matrices)
        tms_shape = self.traffic_matrices.shape
        self.tm_cnt = tms_shape[0]
        print(('Traffic matrices dims: [%d, %d, %d]\n' % (tms_shape[0], tms_shape[1], tms_shape[2])))
class Environment(object):
    """Bundles topology and traffic data and precomputes link-level paths."""

    def __init__(self, config, is_training=False):
        self.data_dir = './data/'
        self.topology = Topology(config, self.data_dir)
        self.traffic = Traffic(config, self.topology.num_nodes, self.data_dir, is_training=is_training)
        # Unit conversion of the raw volumes; presumably bytes over a 300 s
        # interval scaled (x100, x8 bits, /1000) into Mbps — confirm against
        # the trace documentation.
        self.traffic_matrices = ((((self.traffic.traffic_matrices * 100) * 8) / 300) / 1000)
        self.tm_cnt = self.traffic.tm_cnt
        self.traffic_file = self.traffic.traffic_file
        # Mirror topology fields for convenient flat access.
        self.num_pairs = self.topology.num_pairs
        self.pair_idx_to_sd = self.topology.pair_idx_to_sd
        self.pair_sd_to_idx = self.topology.pair_sd_to_idx
        self.num_nodes = self.topology.num_nodes
        self.num_links = self.topology.num_links
        self.link_idx_to_sd = self.topology.link_idx_to_sd
        self.link_sd_to_idx = self.topology.link_sd_to_idx
        self.link_capacities = self.topology.link_capacities
        self.link_weights = self.topology.link_weights
        self.shortest_paths_node = self.topology.shortest_paths
        self.shortest_paths_link = self.convert_to_edge_path(self.shortest_paths_node)

    def convert_to_edge_path(self, node_paths):
        """Translate node-sequence paths into lists of link indices.

        node_paths[i][j] is the j-th shortest path (node ids) for pair i; the
        result keeps the same nesting with link indices instead of nodes.
        """
        edge_paths = []
        num_pairs = len(node_paths)
        for i in range(num_pairs):
            edge_paths.append([])
            num_paths = len(node_paths[i])
            for j in range(num_paths):
                edge_paths[i].append([])
                path_len = len(node_paths[i][j])
                for n in range((path_len - 1)):
                    # Map each consecutive node pair to its link index.
                    e = self.link_sd_to_idx[(node_paths[i][j][n], node_paths[i][j][(n + 1)])]
                    assert ((e >= 0) and (e < self.num_links))
                    edge_paths[i][j].append(e)
        return edge_paths
def sim(config, network, game):
    """Evaluate the trained policy on every traffic matrix in the test set."""
    use_actor_critic = (config.method == 'actor_critic')
    for tm_idx in game.tm_indexes:
        state = game.get_state(tm_idx)
        batch = np.expand_dims(state, 0)
        if use_actor_critic:
            policy = network.actor_predict(batch).numpy()[0]
        elif (config.method == 'pure_policy'):
            policy = network.policy_predict(batch).numpy()[0]
        # Greedily pick the max_moves highest-probability flows.
        actions = policy.argsort()[-game.max_moves:]
        game.evaluate(tm_idx, actions, eval_delay=FLAGS.eval_delay)
def main(_):
    """Test-mode entry point: restore a checkpoint and simulate the policy."""
    # Force CPU execution and quieter TF logging for evaluation.
    tf.config.experimental.set_visible_devices([], 'GPU')
    tf.get_logger().setLevel('INFO')
    config = (get_config(FLAGS) or FLAGS)
    env = Environment(config, is_training=False)
    game = CFRRL_Game(config, env)
    network = Network(config, game.state_dims, game.action_dim, game.max_moves)
    step = network.restore_ckpt(FLAGS.ckpt)
    # Report the learning rate the restored optimizer had reached.
    if (config.method == 'actor_critic'):
        learning_rate = network.lr_schedule(network.actor_optimizer.iterations.numpy()).numpy()
    elif (config.method == 'pure_policy'):
        learning_rate = network.lr_schedule(network.optimizer.iterations.numpy()).numpy()
    print(('\nstep %d, learning rate: %f\n' % (step, learning_rate)))
    sim(config, network, game)
def central_agent(config, game, model_weights_queues, experience_queues):
    """Central learner process for the A3C-style training loop.

    Each step: broadcast the current model weights to every worker through
    `model_weights_queues`, gather one batch of experience per worker from
    `experience_queues`, run a training update, and periodically checkpoint
    and log summaries. Uses module globals FLAGS, GRADIENTS_CHECK, Network.
    """
    network = Network(config, game.state_dims, game.action_dim, game.max_moves, master=True)
    network.save_hyperparams(config)
    start_step = network.restore_ckpt()
    for step in tqdm(range(start_step, config.max_step), ncols=70, initial=start_step):
        network.ckpt.step.assign_add(1)
        model_weights = network.model.get_weights()
        # Broadcast weights so all workers act with the same policy.
        for i in range(FLAGS.num_agents):
            model_weights_queues[i].put(model_weights)
        if (config.method == 'actor_critic'):
            s_batch = []
            a_batch = []
            r_batch = []
            # Blocking gather: one experience batch per worker.
            for i in range(FLAGS.num_agents):
                (s_batch_agent, a_batch_agent, r_batch_agent) = experience_queues[i].get()
                assert (len(s_batch_agent) == FLAGS.num_iter), (len(s_batch_agent), len(a_batch_agent), len(r_batch_agent))
                s_batch += s_batch_agent
                a_batch += a_batch_agent
                r_batch += r_batch_agent
            assert ((len(s_batch) * game.max_moves) == len(a_batch))
            # One-hot encode the chosen actions.
            actions = np.eye(game.action_dim, dtype=np.float32)[np.array(a_batch)]
            (value_loss, entropy, actor_gradients, critic_gradients) = network.actor_critic_train(np.array(s_batch), actions, np.array(r_batch).astype(np.float32), config.entropy_weight)
            if GRADIENTS_CHECK:
                # Abort early (with context) if training produced NaNs.
                for g in range(len(actor_gradients)):
                    assert (np.any(np.isnan(actor_gradients[g])) == False), ('actor_gradients', s_batch, a_batch, r_batch, entropy)
                for g in range(len(critic_gradients)):
                    assert (np.any(np.isnan(critic_gradients[g])) == False), ('critic_gradients', s_batch, a_batch, r_batch)
            # Periodic checkpoint plus summary logging.
            if ((step % config.save_step) == (config.save_step - 1)):
                network.save_ckpt(_print=True)
                actor_learning_rate = network.lr_schedule(network.actor_optimizer.iterations.numpy()).numpy()
                avg_value_loss = np.mean(value_loss)
                avg_reward = np.mean(r_batch)
                avg_entropy = np.mean(entropy)
                network.inject_summaries({'learning rate': actor_learning_rate, 'value loss': avg_value_loss, 'avg reward': avg_reward, 'avg entropy': avg_entropy}, step)
                print(('lr:%f, value loss:%f, avg reward:%f, avg entropy:%f' % (actor_learning_rate, avg_value_loss, avg_reward, avg_entropy)))
        elif (config.method == 'pure_policy'):
            s_batch = []
            a_batch = []
            r_batch = []
            ad_batch = []  # advantages computed by workers
            for i in range(FLAGS.num_agents):
                (s_batch_agent, a_batch_agent, r_batch_agent, ad_batch_agent) = experience_queues[i].get()
                assert (len(s_batch_agent) == FLAGS.num_iter), (len(s_batch_agent), len(a_batch_agent), len(r_batch_agent), len(ad_batch_agent))
                s_batch += s_batch_agent
                a_batch += a_batch_agent
                r_batch += r_batch_agent
                ad_batch += ad_batch_agent
            assert ((len(s_batch) * game.max_moves) == len(a_batch))
            actions = np.eye(game.action_dim, dtype=np.float32)[np.array(a_batch)]
            (entropy, gradients) = network.policy_train(np.array(s_batch), actions, np.vstack(ad_batch).astype(np.float32), config.entropy_weight)
            if GRADIENTS_CHECK:
                for g in range(len(gradients)):
                    assert (np.any(np.isnan(gradients[g])) == False), (s_batch, a_batch, r_batch)
            if ((step % config.save_step) == (config.save_step - 1)):
                network.save_ckpt(_print=True)
                learning_rate = network.lr_schedule(network.optimizer.iterations.numpy()).numpy()
                avg_reward = np.mean(r_batch)
                avg_advantage = np.mean(ad_batch)
                avg_entropy = np.mean(entropy)
                network.inject_summaries({'learning rate': learning_rate, 'avg reward': avg_reward, 'avg advantage': avg_advantage, 'avg entropy': avg_entropy}, step)
                print(('lr:%f, avg reward:%f, avg advantage:%f, avg entropy:%f' % (learning_rate, avg_reward, avg_advantage, avg_entropy)))
def agent(agent_id, config, game, tm_subset, model_weights_queue, experience_queue):
    """Worker process: collect experience on its TM subset and ship batches.

    Repeatedly: pull the latest model weights, roll the policy on traffic
    matrices from `tm_subset`, and after FLAGS.num_iter steps push the
    collected (state, action, reward[, advantage]) batch to the learner.
    Runs forever; terminated with the parent process.
    """
    # Per-worker RNG so different agents explore differently but reproducibly.
    random_state = np.random.RandomState(seed=agent_id)
    network = Network(config, game.state_dims, game.action_dim, game.max_moves, master=False)
    # Initial weight sync before acting.
    model_weights = model_weights_queue.get()
    network.model.set_weights(model_weights)
    idx = 0
    s_batch = []
    a_batch = []
    r_batch = []
    if (config.method == 'pure_policy'):
        ad_batch = []
    run_iteration_idx = 0
    num_tms = len(tm_subset)
    random_state.shuffle(tm_subset)
    run_iterations = FLAGS.num_iter
    while True:
        tm_idx = tm_subset[idx]
        state = game.get_state(tm_idx)
        s_batch.append(state)
        if (config.method == 'actor_critic'):
            policy = network.actor_predict(np.expand_dims(state, 0)).numpy()[0]
        elif (config.method == 'pure_policy'):
            policy = network.policy_predict(np.expand_dims(state, 0)).numpy()[0]
        # Need at least max_moves flows with non-zero probability to sample.
        assert (np.count_nonzero(policy) >= game.max_moves), (policy, state)
        # Sample max_moves distinct flows according to the policy.
        actions = random_state.choice(game.action_dim, game.max_moves, p=policy, replace=False)
        for a in actions:
            a_batch.append(a)
        reward = game.reward(tm_idx, actions)
        r_batch.append(reward)
        if (config.method == 'pure_policy'):
            if (config.baseline == 'avg'):
                # Advantage against the running average reward for this TM.
                ad_batch.append(game.advantage(tm_idx, reward))
                game.update_baseline(tm_idx, reward)
            elif (config.baseline == 'best'):
                # Advantage against the greedy (top-probability) action set.
                best_actions = policy.argsort()[(- game.max_moves):]
                best_reward = game.reward(tm_idx, best_actions)
                ad_batch.append((reward - best_reward))
        run_iteration_idx += 1
        if (run_iteration_idx >= run_iterations):
            # Ship the batch, resync weights, and reset the buffers.
            if (config.method == 'actor_critic'):
                experience_queue.put([s_batch, a_batch, r_batch])
            elif (config.method == 'pure_policy'):
                experience_queue.put([s_batch, a_batch, r_batch, ad_batch])
            model_weights = model_weights_queue.get()
            network.model.set_weights(model_weights)
            del s_batch[:]
            del a_batch[:]
            del r_batch[:]
            if (config.method == 'pure_policy'):
                del ad_batch[:]
            run_iteration_idx = 0
        idx += 1
        # Re-shuffle after each full pass over the subset.
        if (idx == num_tms):
            random_state.shuffle(tm_subset)
            idx = 0
def main(_):
    """Training entry point: spawn one central learner plus N worker agents
    connected by per-agent weight/experience queues."""
    tf.config.experimental.set_visible_devices([], 'GPU')
    tf.get_logger().setLevel('INFO')
    config = (get_config(FLAGS) or FLAGS)
    env = Environment(config, is_training=True)
    game = CFRRL_Game(config, env)
    model_weights_queues = []
    experience_queues = []
    # Default: one worker per CPU core, leaving one core for the learner.
    if ((FLAGS.num_agents == 0) or (FLAGS.num_agents >= mp.cpu_count())):
        FLAGS.num_agents = (mp.cpu_count() - 1)
    print(('Agent num: %d, iter num: %d\n' % ((FLAGS.num_agents + 1), FLAGS.num_iter)))
    for _ in range(FLAGS.num_agents):
        # maxsize=1 keeps each worker and the learner in lock-step.
        model_weights_queues.append(mp.Queue(1))
        experience_queues.append(mp.Queue(1))
    # Partition the traffic matrices evenly across workers.
    tm_subsets = np.array_split(game.tm_indexes, FLAGS.num_agents)
    coordinator = mp.Process(target=central_agent, args=(config, game, model_weights_queues, experience_queues))
    coordinator.start()
    agents = []
    for i in range(FLAGS.num_agents):
        agents.append(mp.Process(target=agent, args=(i, config, game, tm_subsets[i], model_weights_queues[i], experience_queues[i])))
    for i in range(FLAGS.num_agents):
        agents[i].start()
    coordinator.join()
class SiamRPN(nn.Module):
    """SiamRPN tracker: AlexNet-like feature extractor plus region-proposal
    heads whose correlation kernels come from the template branch.

    Usage: call `temple(z)` once with the exemplar crop to set the kernels,
    then `forward(x)` per search crop to get (regression, classification)
    response maps.
    """

    def __init__(self, size=2, feature_out=512, anchor=5):
        # Channel plan; `size` widens every layer except the 3-channel input.
        configs = [3, 96, 256, 384, 384, 256]
        configs = list(map((lambda x: (3 if (x == 3) else (x * size))), configs))
        feat_in = configs[(- 1)]
        super(SiamRPN, self).__init__()
        self.featureExtract = nn.Sequential(nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2), nn.BatchNorm2d(configs[1]), nn.MaxPool2d(kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.Conv2d(configs[1], configs[2], kernel_size=5), nn.BatchNorm2d(configs[2]), nn.MaxPool2d(kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.Conv2d(configs[2], configs[3], kernel_size=3), nn.BatchNorm2d(configs[3]), nn.ReLU(inplace=True), nn.Conv2d(configs[3], configs[4], kernel_size=3), nn.BatchNorm2d(configs[4]), nn.ReLU(inplace=True), nn.Conv2d(configs[4], configs[5], kernel_size=3), nn.BatchNorm2d(configs[5]))
        self.anchor = anchor
        self.feature_out = feature_out
        # Template-branch convs produce the correlation kernels (conv_r1 for
        # the 4*anchor box regression, conv_cls1 for the 2*anchor scores);
        # conv_r2/conv_cls2 embed the search branch before correlation.
        self.conv_r1 = nn.Conv2d(feat_in, ((feature_out * 4) * anchor), 3)
        self.conv_r2 = nn.Conv2d(feat_in, feature_out, 3)
        self.conv_cls1 = nn.Conv2d(feat_in, ((feature_out * 2) * anchor), 3)
        self.conv_cls2 = nn.Conv2d(feat_in, feature_out, 3)
        self.regress_adjust = nn.Conv2d((4 * anchor), (4 * anchor), 1)
        # Filled in by temple(); empty until a template has been set.
        self.r1_kernel = []
        self.cls1_kernel = []
        # Per-variant tracking hyper-parameters (set by subclasses).
        self.cfg = {}

    def forward(self, x):
        """Search branch: correlate features with the stored template kernels.

        Returns (box-regression map, classification score map).
        """
        x_f = self.featureExtract(x)
        temp = F.conv2d(self.conv_r2(x_f), self.r1_kernel)
        return (self.regress_adjust(temp), F.conv2d(self.conv_cls2(x_f), self.cls1_kernel))

    def temple(self, z):
        """Template branch: derive the correlation kernels from exemplar `z`."""
        z_f = self.featureExtract(z)
        r1_kernel_raw = self.conv_r1(z_f)
        cls1_kernel_raw = self.conv_cls1(z_f)
        kernel_size = r1_kernel_raw.data.size()[(- 1)]
        # Reshape to (out_channels, feature_out, k, k) conv weights.
        self.r1_kernel = r1_kernel_raw.view((self.anchor * 4), self.feature_out, kernel_size, kernel_size)
        self.cls1_kernel = cls1_kernel_raw.view((self.anchor * 2), self.feature_out, kernel_size, kernel_size)
class SiamRPNBIG(SiamRPN):
    """Large (size=2) SiamRPN variant with its tuned tracking hyper-parameters."""

    def __init__(self):
        super(SiamRPNBIG, self).__init__(size=2)
        # Tracker settings: update rate, cosine-window weight, size-penalty,
        # search-region size, and adaptive instance sizing.
        self.cfg = dict(lr=0.295, window_influence=0.42, penalty_k=0.055,
                        instance_size=271, adaptive=True)
class SiamRPNvot(SiamRPN):
    """Small (size=1) SiamRPN variant tuned for the VOT benchmark."""

    def __init__(self):
        super(SiamRPNvot, self).__init__(size=1, feature_out=256)
        # Tracker settings: update rate, cosine-window weight, size-penalty,
        # search-region size, and adaptive instance sizing.
        self.cfg = dict(lr=0.45, window_influence=0.44, penalty_k=0.04,
                        instance_size=271, adaptive=False)
class SiamRPNotb(SiamRPN):
    """Small (size=1) SiamRPN variant tuned for the OTB benchmark."""

    def __init__(self):
        super(SiamRPNotb, self).__init__(size=1, feature_out=256)
        # Tracker settings: update rate, cosine-window weight, size-penalty,
        # search-region size, and adaptive instance sizing.
        self.cfg = dict(lr=0.3, window_influence=0.4, penalty_k=0.22,
                        instance_size=271, adaptive=False)
def track_video(model, video):
    """Track one video with SiamRPN and write per-frame rectangles to
    test/<dataset>/SiamRPN_AlexNet_OTB2015/<name>.txt.

    Uses module globals `args` (CLI options) and `v_id` (video index).
    Returns the tracking speed in frames per second.
    """
    (toc, regions) = (0, [])
    (image_files, gt) = (video['image_files'], video['gt'])
    for (f, image_file) in enumerate(image_files):
        im = cv2.imread(image_file)
        tic = cv2.getTickCount()
        if (f == 0):
            # Initialize the tracker from the first ground-truth box.
            (target_pos, target_sz) = rect_2_cxy_wh(gt[f])
            state = SiamRPN_init(im, target_pos, target_sz, model)
            location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
            regions.append(gt[f])
        elif (f > 0):
            state = SiamRPN_track(state, im)
            # NOTE(review): target_pos is offset by +1 when converting back to
            # a rectangle — confirm this matches the dataset's 1-based coords.
            location = cxy_wh_2_rect((state['target_pos'] + 1), state['target_sz'])
            regions.append(location)
        toc += (cv2.getTickCount() - tic)
        if (args.visualization and (f >= 0)):
            if (f == 0):
                cv2.destroyAllWindows()
            # Ground truth in green: polygon for 8-value, rectangle for 4-value.
            if (len(gt[f]) == 8):
                # NOTE(review): np.int was removed in NumPy 1.24; use int here.
                cv2.polylines(im, [np.array(gt[f], np.int).reshape(((- 1), 1, 2))], True, (0, 255, 0), 3)
            else:
                cv2.rectangle(im, (gt[(f, 0)], gt[(f, 1)]), ((gt[(f, 0)] + gt[(f, 2)]), (gt[(f, 1)] + gt[(f, 3)])), (0, 255, 0), 3)
            # Tracker output in yellow.
            if (len(location) == 8):
                cv2.polylines(im, [location.reshape(((- 1), 1, 2))], True, (0, 255, 255), 3)
            else:
                location = [int(l) for l in location]
                cv2.rectangle(im, (location[0], location[1]), ((location[0] + location[2]), (location[1] + location[3])), (0, 255, 255), 3)
            cv2.putText(im, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
            cv2.imshow(video['name'], im)
            cv2.waitKey(1)
    toc /= cv2.getTickFrequency()
    video_path = join('test', args.dataset, 'SiamRPN_AlexNet_OTB2015')
    if (not isdir(video_path)):
        makedirs(video_path)
    result_path = join(video_path, '{:s}.txt'.format(video['name']))
    # Write one comma-separated rectangle per frame.
    with open(result_path, 'w') as fin:
        for x in regions:
            fin.write((','.join([str(i) for i in x]) + '\n'))
    print('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(v_id, video['name'], toc, (f / toc)))
    return (f / toc)
def load_dataset(dataset):
    """Load the OTB-style index `<data>/<dataset>.json`, resolving absolute
    image paths and converting ground truth to 0-based coordinates."""
    root = join(realpath(dirname(__file__)), 'data', dataset)
    if not exists(root):
        print('Please download OTB dataset into `data` folder!')
        exit()
    meta_path = join(realpath(dirname(__file__)), 'data', (dataset + '.json'))
    info = json.load(open(meta_path, 'r'))
    for video in info.keys():
        seq_name = info[video]['name']
        info[video]['image_files'] = [join(root, seq_name, 'img', frame) for frame in info[video]['image_files']]
        # Convert 1-based (x, y, w, h) annotations to 0-based pixel coords.
        info[video]['gt'] = (np.array(info[video]['gt_rect']) - [1, 1, 0, 0])
        info[video]['name'] = video
    return info
def main():
    """Run the OTB benchmark: load the SiamRPN model and track every video."""
    global args, v_id
    args = parser.parse_args()
    net = SiamRPNotb()
    # Model weights are expected next to this script.
    net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
    net.eval().cuda()
    dataset = load_dataset(args.dataset)
    fps_list = []
    for (v_id, video) in enumerate(dataset.keys()):
        fps_list.append(track_video(net, dataset[video]))
    print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
def track_video(model, video):
    """Track one video with the adversarial (attack) SiamRPN variant; write
    per-frame rectangles to test/<dataset>/DaSiamRPN_attack/<name>.txt.

    Uses module globals `args` and `v_id`. Returns frames per second.
    """
    image_save = 0  # forwarded to SiamRPN_track; 0 = don't dump debug images
    (toc, regions) = (0, [])
    (image_files, gt) = (video['image_files'], video['gt'])
    for (f, image_file) in enumerate(image_files):
        im = cv2.imread(image_file)
        tic = cv2.getTickCount()
        if (f == 0):
            # Initialize the tracker from the first ground-truth box.
            (target_pos, target_sz) = rect_2_cxy_wh(gt[f])
            state = SiamRPN_init(im, target_pos, target_sz, model)
            location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
            regions.append(gt[f])
            # Running attack/defense perturbation accumulators.
            att_per = 0
            def_per = 0
        elif (f > 0):
            if ((f % 30) == 1):
                # Reset the perturbations every 30 frames and use more
                # optimization iterations on the reset frame.
                att_per = 0
                def_per = 0
                (state, att_per, def_per) = SiamRPN_track(state, im, f, regions[(f - 1)], att_per, def_per, image_save, iter=10)
                location = cxy_wh_2_rect((state['target_pos'] + 1), state['target_sz'])
                regions.append(location)
            else:
                (state, att_per, def_per) = SiamRPN_track(state, im, f, regions[(f - 1)], att_per, def_per, image_save, iter=5)
                # NOTE(review): target_pos offset by +1 on conversion — confirm
                # this matches the dataset's 1-based coordinate convention.
                location = cxy_wh_2_rect((state['target_pos'] + 1), state['target_sz'])
                regions.append(location)
        toc += (cv2.getTickCount() - tic)
        if (args.visualization and (f >= 0)):
            if (f == 0):
                cv2.destroyAllWindows()
            # Ground truth in green: polygon for 8-value, rectangle for 4-value.
            if (len(gt[f]) == 8):
                # NOTE(review): np.int was removed in NumPy 1.24; use int here.
                cv2.polylines(im, [np.array(gt[f], np.int).reshape(((- 1), 1, 2))], True, (0, 255, 0), 2)
            else:
                cv2.rectangle(im, (gt[(f, 0)], gt[(f, 1)]), ((gt[(f, 0)] + gt[(f, 2)]), (gt[(f, 1)] + gt[(f, 3)])), (0, 255, 0), 2)
            # Tracker output in yellow.
            if (len(location) == 8):
                cv2.polylines(im, [location.reshape(((- 1), 1, 2))], True, (0, 255, 255), 2)
            else:
                location = [int(l) for l in location]
                cv2.rectangle(im, (location[0], location[1]), ((location[0] + location[2]), (location[1] + location[3])), (0, 255, 255), 2)
            cv2.putText(im, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
            cv2.imshow(video['name'], im)
            cv2.waitKey(1)
    toc /= cv2.getTickFrequency()
    video_path = join('test', args.dataset, 'DaSiamRPN_attack')
    if (not isdir(video_path)):
        makedirs(video_path)
    result_path = join(video_path, '{:s}.txt'.format(video['name']))
    # Write one comma-separated rectangle per frame.
    with open(result_path, 'w') as fin:
        for x in regions:
            fin.write((','.join([str(i) for i in x]) + '\n'))
    print('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(v_id, video['name'], toc, (f / toc)))
    return (f / toc)
def load_dataset(dataset):
    """Read the dataset index JSON under `data/` and normalize each entry:
    absolute image paths, 0-based ground-truth boxes, and the video key as
    its name."""
    script_dir = realpath(dirname(__file__))
    base_path = join(script_dir, 'data', dataset)
    if not exists(base_path):
        print('Please download OTB dataset into `data` folder!')
        exit()
    info = json.load(open(join(script_dir, 'data', (dataset + '.json')), 'r'))
    for key in info.keys():
        entry = info[key]
        entry['image_files'] = [join(base_path, entry['name'], 'img', im_f) for im_f in entry['image_files']]
        # Shift 1-based (x, y) annotation origins to 0-based.
        entry['gt'] = (np.array(entry['gt_rect']) - [1, 1, 0, 0])
        entry['name'] = key
    return info
def main():
    """Run the benchmark with the attack/defense tracker over all videos."""
    global args, v_id
    args = parser.parse_args()
    net = SiamRPNotb()
    # Model weights are expected next to this script.
    net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
    net.eval().cuda()
    dataset = load_dataset(args.dataset)
    fps_list = []
    for (v_id, video) in enumerate(dataset.keys()):
        # NOTE(review): v_id > -1 is always true; looks like a leftover
        # filter for resuming from a particular video index.
        if (v_id > (- 1)):
            fps_list.append(track_video(net, dataset[video]))
    print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
def track_video(model, video):
    """Run the SiamRPN tracker (defense variant) over one video sequence.

    model: loaded SiamRPN network, handed to SiamRPN_init.
    video: dict with 'image_files' (frame paths), 'gt' (per-frame
           ground-truth rects) and 'name', as produced by load_dataset.
    Writes one result rect per frame to
    test/<dataset>/DaSiamRPN_defense/<name>.txt and returns the tracking
    speed in frames per second. Reads module-level `args` and `v_id`.
    """
    image_save = 0  # flag forwarded to SiamRPN_track; 0 here, so no frame dumps
    (toc, regions) = (0, [])  # accumulated tick count and per-frame results
    (image_files, gt) = (video['image_files'], video['gt'])
    for (f, image_file) in enumerate(image_files):
        im = cv2.imread(image_file)
        tic = cv2.getTickCount()
        if (f == 0):
            # First frame: initialise tracker state from the ground-truth box.
            (target_pos, target_sz) = rect_2_cxy_wh(gt[f])
            state = SiamRPN_init(im, target_pos, target_sz, model)
            location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
            regions.append(gt[f])
            att_per = 0  # attack perturbation carried across frames
            def_per = 0  # defense perturbation carried across frames
        elif (f > 0):
            if ((f % 30) == 1):
                # Every 30 frames: reset both perturbations and rebuild them
                # with more optimisation iterations (iter=10).
                att_per = 0
                def_per = 0
                (state, att_per, def_per) = SiamRPN_track(state, im, f, regions[(f - 1)], att_per, def_per, image_save, iter=10)
                # NOTE(review): the '+ 1' presumably converts to 1-based OTB
                # coordinates -- confirm against cxy_wh_2_rect's convention.
                location = cxy_wh_2_rect((state['target_pos'] + 1), state['target_sz'])
                regions.append(location)
            else:
                # Intermediate frames reuse the carried perturbations with a
                # cheaper update (iter=5).
                (state, att_per, def_per) = SiamRPN_track(state, im, f, regions[(f - 1)], att_per, def_per, image_save, iter=5)
                location = cxy_wh_2_rect((state['target_pos'] + 1), state['target_sz'])
                regions.append(location)
        # Timing covers tracking only; imread and visualisation are excluded.
        toc += (cv2.getTickCount() - tic)
        if (args.visualization and (f >= 0)):
            if (f == 0):
                cv2.destroyAllWindows()
            # Ground truth in green: 8 numbers = polygon, 4 numbers = rect.
            if (len(gt[f]) == 8):
                # NOTE(review): np.int is removed in NumPy >= 1.24; this only
                # runs on older NumPy versions.
                cv2.polylines(im, [np.array(gt[f], np.int).reshape(((- 1), 1, 2))], True, (0, 255, 0), 2)
            else:
                cv2.rectangle(im, (gt[(f, 0)], gt[(f, 1)]), ((gt[(f, 0)] + gt[(f, 2)]), (gt[(f, 1)] + gt[(f, 3)])), (0, 255, 0), 2)
            # Tracker output in yellow.
            if (len(location) == 8):
                cv2.polylines(im, [location.reshape(((- 1), 1, 2))], True, (0, 255, 255), 2)
            else:
                location = [int(l) for l in location]
                cv2.rectangle(im, (location[0], location[1]), ((location[0] + location[2]), (location[1] + location[3])), (0, 255, 255), 2)
            cv2.putText(im, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
            cv2.imshow(video['name'], im)
            cv2.waitKey(1)
    toc /= cv2.getTickFrequency()  # ticks -> seconds
    video_path = join('test', args.dataset, 'DaSiamRPN_defense')
    if (not isdir(video_path)):
        makedirs(video_path)
    result_path = join(video_path, '{:s}.txt'.format(video['name']))
    with open(result_path, 'w') as fin:
        for x in regions:
            fin.write((','.join([str(i) for i in x]) + '\n'))
    print('({:d}) \nVideo: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(v_id, video['name'], toc, (f / toc)))
    return (f / toc)
def load_dataset(dataset):
    """Load OTB-style annotations for `dataset` from data/<dataset>.json.

    Returns a dict keyed by video name; each entry gains absolute
    'image_files' paths, a 'gt' array shifted by [1, 1, 0, 0] (the JSON
    rects are 1-based) and its own 'name'.
    Exits the process if the data folder is missing.
    """
    base_path = join(realpath(dirname(__file__)), 'data', dataset)
    if (not exists(base_path)):
        print('Please download OTB dataset into `data` folder!')
        exit()
    json_path = join(realpath(dirname(__file__)), 'data', (dataset + '.json'))
    # Fix: the original used json.load(open(...)), leaking the file handle.
    with open(json_path, 'r') as f:
        info = json.load(f)
    for v in info.keys():
        path_name = info[v]['name']
        info[v]['image_files'] = [join(base_path, path_name, 'img', im_f) for im_f in info[v]['image_files']]
        info[v]['gt'] = (np.array(info[v]['gt_rect']) - [1, 1, 0, 0])
        info[v]['name'] = v
    return info
def main():
    """Entry point: load the SiamRPN model, track every video in the chosen
    dataset, and print the mean tracking speed.

    Parses CLI options via the module-level `parser`; sets globals `args`
    and `v_id`, both of which are read by track_video.
    """
    global args, v_id
    args = parser.parse_args()
    net = SiamRPNotb()
    model_path = join(realpath(dirname(__file__)), 'SiamRPNOTB.model')
    net.load_state_dict(torch.load(model_path))
    net.eval().cuda()
    dataset = load_dataset(args.dataset)
    fps_list = []
    for v_id, video in enumerate(dataset.keys()):
        fps_list.append(track_video(net, dataset[video]))
    print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
class MyDistributedDataParallel(LightningDistributedDataParallel):
    """DDP wrapper that overrides scatter() so the wrapped module receives
    the batch tensor positionally and the batch index as a keyword
    argument."""

    def scatter(self, inputs, kwargs, device_ids):
        # inputs is expected to be (batch_tensor, batch_idx); the batch index
        # is moved into kwargs under 'batch_idx' instead of being scattered.
        kwargs['batch_idx'] = inputs[1]
        # Wrap as one (kwargs,) / (inputs,) tuple per replica; only a single
        # device (device_ids[0]) is populated, and the batch tensor is moved
        # onto that CUDA device.
        kwargs = (kwargs,)
        inputs = ((inputs[0].to(torch.device('cuda:{}'.format(device_ids[0]))),),)
        return (inputs, kwargs)
class MyDDP(DDPPlugin):
    """Lightning DDP plugin that installs MyDistributedDataParallel so its
    custom scatter() is used instead of the default one."""

    def configure_ddp(self):
        # find_unused_parameters=True: tolerate parameters that do not take
        # part in every forward/backward pass.
        self.model = MyDistributedDataParallel(self.model, device_ids=self.determine_ddp_device_ids(), find_unused_parameters=True)
class GeometricGraphDataset(Dataset):
    """Dataset of random geometric graphs (radius 0.5).

    The node count is drawn uniformly from [n_min, n_max) on every access,
    so each index yields a freshly generated graph; samples_per_epoch only
    fixes the nominal epoch length.
    """

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return random_geometric_graph(n=num_nodes, radius=0.5)
class RegularGraphDataset(Dataset):
    """Dataset of random 4-regular graphs.

    The node count is drawn uniformly from [n_min, n_max) on every access,
    so each index yields a freshly generated graph; samples_per_epoch only
    fixes the nominal epoch length.
    """

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return random_regular_graph(n=num_nodes, d=4)
class BarabasiAlbertGraphDataset(Dataset):
    """Dataset of Barabasi-Albert graphs with node count n drawn from
    [n_min, n_max) and attachment parameter m from [m_min, m_max),
    resampled on every access.

    When a bound pair is equal the fixed value is used directly, since
    np.random.randint rejects low == high.
    """

    def __init__(self, n_min=12, n_max=20, m_min=1, m_max=5, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.m_min = m_min
        self.m_max = m_max
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        # Nominal epoch length; graphs are generated on the fly.
        return self.samples_per_epoch

    def __getitem__(self, idx):
        # Bug fix: the original assigned n = self.m_min (copy-paste error)
        # when n_min == n_max; the fixed node count must be n_min.
        if (self.n_min == self.n_max):
            n = self.n_min
        else:
            n = np.random.randint(low=self.n_min, high=self.n_max)
        if (self.m_min == self.m_max):
            m = self.m_min
        else:
            m = np.random.randint(low=self.m_min, high=self.m_max)
        g = barabasi_albert_graph(n, m)
        return g
class BinomialGraphDataset(Dataset):
    """Dataset of G(n, p) binomial (Erdos-Renyi) graphs.

    n is drawn uniformly from [n_min, n_max) and p uniformly from
    [p_min, p_max) on every access. With pyg=True each graph is converted
    to a PyTorch Geometric object via from_networkx.
    """

    def __init__(self, n_min=12, n_max=20, p_min=0.4, p_max=0.6, samples_per_epoch=100000, pyg=False, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.p_min = p_min
        self.p_max = p_max
        self.samples_per_epoch = samples_per_epoch
        self.pyg = pyg

    def __len__(self):
        # Nominal epoch length; graphs are generated on the fly.
        return self.samples_per_epoch

    def get_largest_subgraph(self, g):
        # Keep only the largest connected component, relabelled 0..k-1.
        g = g.subgraph(sorted(nx.connected_components(g), key=len, reverse=True)[0])
        g = nx.convert_node_labels_to_integers(g, first_label=0)
        return g

    def __getitem__(self, idx):
        n = np.random.randint(low=self.n_min, high=self.n_max)
        if (self.p_min == self.p_max):
            p = self.p_min
        else:
            # Bug fix: the original first called np.random.randint on the
            # float bounds (ValueError for e.g. 0.4/0.6 after truncation, and
            # dead code anyway since p was immediately overwritten); only the
            # uniform draw is kept.
            p = np.random.uniform(low=self.p_min, high=self.p_max)
        g = binomial_graph(n, p)
        if self.pyg:
            g = from_networkx(g)
        return g
class RandomGraphDataset(Dataset):
    """Dataset that delegates graph creation to a GraphGenerator instance,
    drawing the node count uniformly from [n_min, n_max) on every access."""

    def __init__(self, n_min=12, n_max=20, samples_per_epoch=100000, **kwargs):
        super().__init__()
        self.n_min = n_min
        self.n_max = n_max
        self.samples_per_epoch = samples_per_epoch
        self.graph_generator = GraphGenerator()

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        num_nodes = np.random.randint(low=self.n_min, high=self.n_max)
        return self.graph_generator(num_nodes)