code
stringlengths
17
6.64M
class AttnFeatTransAPP(nn.Module): '\n Feature Transforming Layer for K-hop caten 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n This will encode the k-hop into the channel of the nodex\n Args:\n in_channels (i...
class AttnFeatTransAPPNP(nn.Module): '\n Feature Transforming Layer for K-hop caten 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n This will encode the k-hop into the channel of the nodex\n Args:\n in_channels ...
class LGL(nn.Module): def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0, 0]): super(LGL, self).__init__() c = [1, 4, hidden[1]] f = [feat_len, int((hidden[0] / c[1])), 1] self.feat1 = FeatTrans1d(in_channels=c[0], in_features=f[0], out_channels=c[1], out_features...
class AFGN(nn.Module): def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0, 0]): super(AFGN, self).__init__() c = [1, 4, hidden[1]] f = [feat_len, int((hidden[0] / c[1])), 1] self.feat1 = AttnFeatTrans1dSoft(in_channels=c[0], in_features=f[0], out_channels=c[1], ou...
class KCAT(nn.Module): "\n TODO the locals or the __dict__ cause some issue for net.to(device), the weight of feat didn't load to cuda\n Concate the k level in the last classifier layer\n " def __init__(self, feat_len, num_class, k=1, device='cuda:0'): super(KCAT, self).__init__() se...
class KLGL(nn.Module): def __init__(self, feat_len, num_class, k=1): super(KLGL, self).__init__() c = [1, 4, 32] f = [feat_len, 16, 1] self.feat1 = FeatTransKhop(in_channels=c[0], in_features=f[0], out_channels=c[1], out_features=f[1], khop=k) self.acvt1 = nn.Sequential(nn...
class LifelongRehearsal(nn.Module): def __init__(self, args, BackBone, feat_len, num_class, k=None, hidden=[64, 32], drop=[0, 0]): super(LifelongRehearsal, self).__init__() self.args = args if (not k): self.backbone = BackBone(feat_len, num_class, hidden=hidden, dropout=drop) ...
class Net(nn.Module): def __init__(self, args, feat_len, num_class, k=2, hidden=2): super(Net, self).__init__() self.args = args self.feat1 = FeatBrd1d(in_channels=1, out_channels=hidden) self.acvt1 = nn.Sequential(nn.BatchNorm1d(hidden), nn.Softsign()) self.feat2 = FeatBr...
class PlainNet(nn.Module): '\n Net without memory\n ' def __init__(self, feat_len, num_class, hidden=[10, 10], dropout=[0, 0]): super(PlainNet, self).__init__() self.feat1 = FeatBrd1d(in_channels=1, out_channels=hidden[0]) self.acvt1 = nn.Sequential(nn.BatchNorm1d(hidden[0]), nn...
class AttnPlainNet(nn.Module): '\n With attention\n ' def __init__(self, feat_len, num_class, hidden=[10, 10], dropout=[0, 0]): super(AttnPlainNet, self).__init__() self.feat1 = FeatBrd1d(in_channels=1, out_channels=hidden[0]) self.acvt1 = nn.Sequential(nn.BatchNorm1d(hidden[0])...
class SAGE(nn.Module): '\n GraphSAGE: Inductive Representation Learning on Large Graphs, NIPS 2017\n https://arxiv.org/pdf/1706.02216.pdf\n ' def __init__(self, feat_len, num_class, hidden=[128, 128], dropout=[0, 0], aggr='gcn', k=1): super().__init__() aggrs = {'pool': PoolAggregato...
class GCNAggregator(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.tran = nn.Linear(in_features, out_features, False) def forward(self, x, neighbor): f = torch.cat([n.mean(dim=0, keepdim=True) for n in neighbor]) x = self.tran((x + f)) ...
class MeanAggregator(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.tranx = nn.Linear(in_features, out_features, False) self.trann = nn.Linear(in_features, out_features, False) def forward(self, x, neighbor): f = torch.cat([n.mean(dim=0, ke...
class PoolAggregator(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.tran = nn.Linear(in_features, out_features, True) def forward(self, x, neighbor): f = [self.tran(torch.cat([x[i:(i + 1)], n])) for (i, n) in enumerate(neighbor)] x = torch....
class LifelongSAGE(SAGE): def __init__(self, args, feat_len, num_class, k=1): super().__init__(feat_len, num_class) self.args = args self.register_buffer('adj', torch.zeros(1, feat_len, feat_len)) self.register_buffer('inputs', torch.Tensor(0, 1, feat_len)) self.register_b...
class EarlyStopScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau): def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08): super().__init__(optimizer, mode=mode, factor=factor, patience=patience, thresh...
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
class Timer(): def __init__(self): torch.cuda.synchronize() self.start_time = time.time() def tic(self): self.start() def show(self, prefix='', output=True): torch.cuda.synchronize() duration = (time.time() - self.start_time) if output: print(...
def performance(loader, net, device, k): net.eval() (correct, total) = (0, 0) with torch.no_grad(): for (batch_idx, (inputs, targets, neighbor)) in enumerate(tqdm.tqdm(loader)): if torch.cuda.is_available(): (inputs, targets) = (inputs.to(device), targets.to(device)) ...
def accuracy(net, loader, device, num_class): net.eval() (correct, total) = (0, 0) classes = torch.arange(num_class).view((- 1), 1).to(device) with torch.no_grad(): for (idx, (inputs, targets, neighbor)) in enumerate(loader): if torch.cuda.is_available(): (inputs, t...
def train(loader, net, criterion, optimizer, device): net.train() (train_loss, correct, total) = (0, 0, 0) for (batch_idx, (inputs, targets, neighbor)) in enumerate(tqdm.tqdm(loader)): (inputs, targets) = (inputs.to(device), targets.to(device)) if (not args.k): neighbor = [elem...
def load_one_file(x, y):
    """Read CSV `x` and return a copy of the rows whose `session_id` is in `y`."""
    frame = pd.read_csv(x)
    keep = frame.session_id.isin(y)
    return frame.loc[keep].copy()
def evaluate(submission, groundtruth): ap_sum = 0.0 first_pred_acc_sum = 0.0 counter = 0 for (sub, tru) in zip(submission, groundtruth): if (len(sub) != len(tru)): raise Exception('Line {} should contain {} predictions, but instead contains {}'.format((counter + 1), len(tru), len(s...
def ave_pre(submission, groundtruth, counter): s = 0.0 t = 0.0 c = 1.0 for (x, y) in zip(submission, groundtruth): if ((x != 0) and (x != 1)): raise Exception('Invalid prediction in line {}, should be 0 or 1'.format(counter)) if (x == y): s += 1.0 t ...
def read_file_list(filename): '\n Reads a trajectory from a text file. \n \n File format:\n The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)\n and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp. \n \n...
def associate(first_list, second_list, offset, max_difference): '\n Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim \n to find the closest match for every input tuple.\n \n Input:\n first_list -- first dictionary of (stamp,data) tuples\n second_list -- ...
def imucallback(msg):
    """Cache the latest IMU orientation quaternion and its timestamp in globals.

    Stores [x, y, z, w] in module-global `q` and the header stamp in `t`.
    """
    global q, t
    o = msg.orientation
    q = array([o.x, o.y, o.z, o.w])
    t = msg.header.stamp
def toNED(msg): global vx_p, vy_p vx = ((1 * msg.twist.linear.x) + (0 * vx_p)) vy = ((1 * msg.twist.linear.y) + (0 * vy_p)) v_body = array([vx, (- vy), 0]) global q [qx, qy, qz, qw] = [q[0], q[1], q[2], q[3]] Tenu = array([[((1 - ((2 * qy) * qy)) - ((2 * qz) * qz)), (((2 * qx) * qy) - ((2 ...
def velocity_estimation(twist, vicon): global time, data, n, window_width, plt, t t = twist.header.stamp.to_sec() dvx = ((twist.twist.linear.x * vicon.pose.position.z) - vicon.vel.x) dvy = ((twist.twist.linear.y * vicon.pose.position.z) - vicon.vel.y) dvz = ((twist.twist.linear.z * vicon.pose.posi...
def callback(twist, pose): h = pose.pose.pose.position.z vx = (twist.twist.linear.x * h) vy = (twist.twist.linear.y * h) vz = (twist.twist.linear.z * h) yaw_rate = twist.twist.angular.z outtxt.write(str.format('{0:.9f} ', twist.header.stamp.to_sec())) outtxt.write(str.format('{0:.9f} ', vx...
def encoder(model):
    """Instantiate the backbone named by `model` ('vgg', 'resnet', or 'mobilenet').

    Raises KeyError for an unknown model name.
    """
    return {'vgg': VGG, 'resnet': ResNet, 'mobilenet': MobileNetV2}[model]()
class AutoEncoder(nn.Module):
    """Encoder-decoder network: a backbone chosen by name feeding a Decoder."""

    def __init__(self, model='vgg'):
        super().__init__()
        # backbone is selected by name via the encoder() factory
        self.encoder = encoder(model)
        self.decoder = Decoder()

    def forward(self, x):
        # encode to a latent representation, then reconstruct
        return self.decoder(self.encoder(x))
class VGG(models.VGG): def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False): super().__init__(models.vgg16().features) if pretrained: self.load_state_dict(models.vgg16(pretrained=True).state_dict()) if (not requires_grad): for ...
class ResNet(models.ResNet): def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False): super().__init__(block=models.resnet.BasicBlock, layers=[2, 2, 2, 2]) if pretrained: self.load_state_dict(models.resnet18(pretrained=True).state_dict()) if ...
class MobileNetV2(models.MobileNetV2): def __init__(self, pretrained=True, requires_grad=True, remove_fc=True, show_params=False): super().__init__() if pretrained: self.load_state_dict(models.mobilenet_v2(pretrained=True).state_dict()) if (not requires_grad): for ...
class Decoder(nn.Module): def __init__(self, in_channels=512): super().__init__() self.relu = nn.ReLU(inplace=True) self.deconv1 = nn.ConvTranspose2d(in_channels, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1) self.bn1 = nn.BatchNorm2d(512) self.dec...
class VideoData(Dataset): def __init__(self, root, file, transform=None): super().__init__() self.transform = transform self.cap = cv2.VideoCapture(os.path.join(root, file)) self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.height = int(self.cap.get(cv2.CAP_PRO...
class ImageData(Dataset): def __init__(self, root, train=True, ratio=0.8, transform=None): super().__init__() self.transform = transform self.filename = [] types = ('*.jpg', '*.jpeg', '*.png', '*.ppm', '*.bmp', '*.pgm', '*.tif', '*.tiff', '*.webp') for files in types: ...
class Dronefilm(Dataset): def __init__(self, root, data='car', test_id=0, train=True, transform=None): super().__init__() (self.transform, self.train) = (transform, train) if (train is True): self.filenames = sorted(glob.glob(os.path.join(root, 'dronefilm', data, 'train/*.png'...
class DroneFilming(Dataset): '\n The Drone Filming data recorded by The Air Lab, CMU\n args:\n root: dataset location (without DroneFilming)\n train: bool value\n test_data: test_data id [0-5], ignored if train=True\n ' data = ['test0', 'test1', 'test2', 'test3', 'test4', 'test5'] def _...
class SubT(Dataset): '\n The DARPA Subterranean (SubT) Challenge data recorded by Team Exploer\n ' def __init__(self, root, data='tunnel-0', test='2019-08-17/ugv_1/front.mkv', train=True, transform=None): super().__init__() (self.transform, self.train) = (transform, train) if (t...
class SubTF(Dataset): '\n The DARPA Subterranean (SubT) Challenge Front camera data recorded by Team Exploer\n args:\n root: dataset location (without subt-front)\n train: bool value\n test_data: test_data id [0-6], ignored if train=True\n ' data = ['0817-ugv0-tunnel0', '0817-ugv1-tunnel0', ...
class PersonalVideo(Dataset): '\n The Personal Video Dataset\n ' data = ['00006_divx', '00007_divx', '00016_sea_divx', '00018_sea_divx', '00018_sea_divx24000', '00019_divx', '00043_t_divx', 'selfwalk_divx', 'snowresort_divx'] def __init__(self, root, train=True, test_data=0, transform=None): ...
def save_batch(batch, folder, batch_idx):
    """Save an image batch to '<folder><idx>.png' with a zero-padded 6-digit index."""
    filename = '%s%06d.png' % (folder, batch_idx)
    torchvision.utils.save_image(batch, filename)
class Interestingness(nn.Module): def __init__(self, autoencoder, N, C, H, W, h, w): super().__init__() self.ae = autoencoder self.memory = Memory(N, C, h, w) self.split2d = Split2d(kernel_size=(h, w)) self.merge2d = Merge2d(output_size=(H, W), kernel_size=(h, w)) ...
def train(loader, net, creterion): (train_loss, batches) = (0, len(loader)) enumerater = tqdm.tqdm(enumerate(loader)) for (batch_idx, (inputs, _)) in enumerater: if torch.cuda.is_available(): inputs = inputs.cuda() optimizer.zero_grad() outputs = net(inputs) los...
def performance(loader, net, creterion): test_loss = 0 with torch.no_grad(): for (batch_idx, (inputs, _)) in enumerate(loader): if torch.cuda.is_available(): inputs = inputs.cuda() outputs = net(inputs) loss = creterion(inputs, outputs) t...
class Memory(nn.Module): pi_2 = (3.14159 / 2) def __init__(self, N=2000, C=512, H=7, W=7, rr=1, wr=1): 'Initialize the Memory.\n N: Number of cubes in the memory.\n C: Channel of each cube in the memory\n H: Height of each cube in the memory\n W: Width of each cube in the ...
class Interest(): '\n Maintain top K interests\n ' def __init__(self, K): self.K = K self.interests = [] def add_interest(self, tensor, loss, batch_idx, visualize_window=None): self.interests.append((loss, tensor, batch_idx)) self.interests.sort(key=self._sort_loss,...
def performance(loader, net, args): (time_use, timer) = (0, Timer()) movavg = MovAvg(args.window_size) test_name = ('%s-%d-%s-%s' % (args.dataset, args.test_data, time.strftime('%Y-%m-%d-%H:%M:%S'), args.save_flag)) file_name = ('results/%s.txt' % test_name) interest = Interest(args.num_interest) ...
def level_height(bar, ranges=[0.02, 0.08]):
    """Map `bar` into [0, 1]: clamp linearly over `ranges`, then squash with tanh(tan(.)).

    The tan/tanh composition sharpens the transition around the middle of the range.
    """
    lo, hi = ranges
    frac = min(max(0, (bar - lo) / (hi - lo)), 1)
    squashed = np.tanh(np.tan((math.pi / 2) * (2 * frac - 1)) - 0.8)
    return (squashed + 1) / 2
def boxbar(height, bar, ranges=[0.02, 0.08], threshold=[0.05, 0.06]): width = 15 box = np.zeros((height, width, 3), np.uint8) h = level_height(bar, ranges) (x1, y1) = (0, int(((1 - h) * height))) (x2, y2) = (int(width), int(height)) cv2.rectangle(box, (x1, y1), (x2, y2), (0, 1, 0), (- 1)) ...
def show_batch_box(batch, batch_idx, loss, box_id=None, show_now=True): min_v = torch.min(batch) range_v = (torch.max(batch) - min_v) if (range_v > 0): batch = ((batch - min_v) / range_v) else: batch = torch.zeros(batch.size()) grid = torchvision.utils.make_grid(batch).cpu() im...
def performance(loader, net, criterion, device='cuda', window='test'): test_loss = 0 with torch.no_grad(): for (batch_idx, inputs) in enumerate(loader): inputs = inputs.to(device) outputs = net(inputs) loss = criterion(outputs, inputs) test_loss += loss....
class Kerv2d(nn.Conv2d): "\n kervolution with following options:\n kernel_type: [linear, polynomial, gaussian, etc.]\n default is convolution:\n kernel_type --> linear,\n balance, power, gamma is valid only when the kernel_type is specified\n if learnable_kernel = True, they just be th...
class Kerv1d(nn.Conv1d): "Applies a 1D kervolution over an input signal composed of several input\n planes.\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or...
class MultiKerv2d(nn.Module): '\n multiple kervolution on multiple output channels:\n ' def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, mapping=['translation'], kernel_type=['linear'], learnable_kernel=[False], kernel_regularizer=[False],...
class Timer(): def __init__(self): self.start_time = time.time() def tic(self): self.start() def show(self, prefix='', output=True): duration = (time.time() - self.start_time) if output: print((prefix + ('%fs' % duration))) return duration def to...
def read_file_list(filename): '\n Reads a trajectory from a text file. \n \n File format:\n The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)\n and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp. \n \n...
def associate(first_list, second_list, offset, max_difference): '\n Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim \n to find the closest match for every input tuple.\n \n Input:\n first_list -- first dictionary of (stamp,data) tuples\n second_list -- ...
def callback1(msg): global i uwb_msg = UwbRange() uwb_msg.header.stamp = rospy.Time.now() uwb_msg.header.frame_id = 'range' uwb_msg.requester_id = 8 uwb_msg.responder_id = (i % 6) uwb_msg.distance = msg.ranges[(i % 6)] uwb_msg.distance_err = 0.1273 uwb_msg.antenna = 0 uwb_msg.r...
def callback2(msg): ps.pose.position.x = msg.pose.position.x ps.pose.position.y = msg.pose.position.y ps.pose.position.z = msg.pose.position.z ps.header.frame_id = '/world' ps.header.stamp = rospy.Time.now() pose_pub.publish(ps) br = tf.TransformBroadcaster() br.sendTransform((ps.pose....
def callback1(msg):
    """Append a timestamped pose line (position + quaternion) to filename1.

    Line format: 'stamp x y z qx qy qz qw' with the stamp in seconds (9 decimals).
    """
    t = msg.header.stamp
    pos = msg.pose.position
    ori = msg.pose.orientation
    # 'with' guarantees the handle is closed even if write() raises,
    # unlike the original open()/write()/close() sequence.
    with open(filename1, 'a') as f:
        f.write('%.9f %f %f %f %f %f %f %f\n' % (
            t.secs + 1e-09 * t.nsecs,
            pos.x, pos.y, pos.z,
            ori.x, ori.y, ori.z, ori.w))
def callback2(msg):
    """Append a timestamped pose line (position + quaternion) to filename2.

    Line format: 'stamp x y z qx qy qz qw' with the stamp in seconds (9 decimals).
    """
    t = msg.header.stamp
    pos = msg.pose.position
    ori = msg.pose.orientation
    # 'with' guarantees the handle is closed even if write() raises,
    # unlike the original open()/write()/close() sequence.
    with open(filename2, 'a') as f:
        f.write('%.9f %f %f %f %f %f %f %f\n' % (
            t.secs + 1e-09 * t.nsecs,
            pos.x, pos.y, pos.z,
            ori.x, ori.y, ori.z, ori.w))
def callback3(msg):
    """Append a timestamped UWB range measurement to filename3.

    Line format: 'stamp responder_id distance ' with the stamp in seconds (9 decimals).
    """
    t = msg.header.stamp
    # 'with' guarantees the handle is closed even if write() raises,
    # unlike the original open()/write()/close() sequence.
    with open(filename3, 'a') as f:
        f.write('%.9f %d %f \n' % (
            t.secs + 1e-09 * t.nsecs,
            msg.responder_id,
            msg.distance))
def parse_arguments(): parser = argparse.ArgumentParser(description='Parameters to train your model.') parser.add_argument('--imgs_folder', default='./data/DUTS/DUTS-TE/DUTS-TE-Image', help='Path to folder containing images', type=str) parser.add_argument('--model_path', default='/home/tarasha/Projects/sa...
def run_inference(args): if (args.use_gpu and torch.cuda.is_available()): device = torch.device(device='cuda') else: device = torch.device(device='cpu') model = SODModel() chkpt = torch.load(args.model_path, map_location=device) model.load_state_dict(chkpt['model']) model.to(de...
def calculate_mae(args): if (args.use_gpu and torch.cuda.is_available()): device = torch.device(device='cuda') else: device = torch.device(device='cpu') model = SODModel() chkpt = torch.load(args.model_path, map_location=device) model.load_state_dict(chkpt['model']) model.to(de...
class SpatialAttention(nn.Module): def __init__(self, in_channels, kernel_size=9): super(SpatialAttention, self).__init__() self.kernel_size = kernel_size self.in_channels = in_channels pad = ((self.kernel_size - 1) // 2) self.grp1_conv1k = nn.Conv2d(self.in_channels, (sel...
class ChannelwiseAttention(nn.Module): def __init__(self, in_channels): super(ChannelwiseAttention, self).__init__() self.in_channels = in_channels self.linear_1 = nn.Linear(self.in_channels, (self.in_channels // 4)) self.linear_2 = nn.Linear((self.in_channels // 4), self.in_chann...
class EdgeSaliencyLoss(nn.Module): def __init__(self, device, alpha_sal=0.7): super(EdgeSaliencyLoss, self).__init__() self.alpha_sal = alpha_sal self.laplacian_kernel = torch.tensor([[(- 1.0), (- 1.0), (- 1.0)], [(- 1.0), 8.0, (- 1.0)], [(- 1.0), (- 1.0), (- 1.0)]], dtype=torch.float, re...
def conv_1_2_hook(module, input, output):
    """Forward hook: stash the conv1_2 activation in the module-level global."""
    global vgg_conv1_2
    vgg_conv1_2 = output
def conv_2_2_hook(module, input, output):
    """Forward hook: stash the conv2_2 activation in the module-level global."""
    global vgg_conv2_2
    vgg_conv2_2 = output
def conv_3_3_hook(module, input, output):
    """Forward hook: stash the conv3_3 activation in the module-level global."""
    global vgg_conv3_3
    vgg_conv3_3 = output
def conv_4_3_hook(module, input, output):
    """Forward hook: stash the conv4_3 activation in the module-level global."""
    global vgg_conv4_3
    vgg_conv4_3 = output
def conv_5_3_hook(module, input, output):
    """Forward hook: stash the conv5_3 activation in the module-level global."""
    global vgg_conv5_3
    vgg_conv5_3 = output
class CPFE(nn.Module): def __init__(self, feature_layer=None, out_channels=32): super(CPFE, self).__init__() self.dil_rates = [3, 5, 7] if (feature_layer == 'conv5_3'): self.in_channels = 512 elif (feature_layer == 'conv4_3'): self.in_channels = 512 ...
class SODModel(nn.Module): def __init__(self): super(SODModel, self).__init__() self.vgg16 = models.vgg16(pretrained=True).features self.vgg16[3].register_forward_hook(conv_1_2_hook) self.vgg16[8].register_forward_hook(conv_2_2_hook) self.vgg16[15].register_forward_hook(co...
def test():
    """Smoke-test SODModel on a random batch and print the model and output shapes."""
    sample = torch.randn(2, 3, 256, 512)
    model = SODModel()
    out, ca_act_reg = model(sample)
    print(model)
    print('\nModel input shape :', sample.size())
    print('Model output shape :', out.size())
    print('ca_act_reg :', ca_act_reg)
def accuracy(y_pred, y_true):
    """Fraction of predictions matching the labels after rounding to {0, 1}."""
    hits = y_pred.round() == y_true
    return hits.float().mean()
def precision(y_pred, y_true):
    """Binary precision: true positives / predicted positives.

    Predictions are binarized by rounding. Returns NaN when nothing is
    predicted positive (0/0), matching the original behavior.
    """
    pred = y_pred.round()  # round once instead of twice (original rounded per use)
    return torch.mul(pred, y_true).sum() / pred.sum()
def recall(y_pred, y_true):
    """Binary recall: true positives / actual positives (predictions rounded)."""
    true_pos = (y_pred.round() * y_true).sum()
    return true_pos / y_true.sum()
def parse_arguments(): parser = argparse.ArgumentParser(description='Parameters to train your model.') parser.add_argument('--epochs', default=391, help='Number of epochs to train the model for', type=int) parser.add_argument('--bs', default=6, help='Batch size', type=int) parser.add_argument('--lr', ...
class Engine(): def __init__(self, args): self.epochs = args.epochs self.bs = args.bs self.lr = args.lr self.wd = args.wd self.img_size = args.img_size self.aug = args.aug self.n_worker = args.n_worker self.test_interval = args.test_interval ...
class Vgg19(): '\n A trainable version VGG19.\n ' def __init__(self, vgg19_npy_path=None, trainable=True): if (vgg19_npy_path is not None): self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item() else: self.data_dict = None self.var_dict = {} ...
class batch_norm(object): def __init__(self, epsilon=1e-05, momentum=0.9, name='batch_norm'): with tf.variable_scope(name): self.epsilon = epsilon self.momentum = momentum self.name = name def __call__(self, x, train=True): return tf.contrib.layers.batch_n...
def binary_cross_entropy(preds, targets, name=None): 'Computes binary cross entropy given `preds`.\n\n For brevity, let `x = `, `z = targets`. The logistic loss is\n\n loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))\n\n Args:\n preds: A `Tensor` of type `float32` or `floa...
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d'): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(input_, w, strides=[1, d...
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv2d', with_w=False): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]], initializer=tf.random_normal_initializer(stddev=stddev)) try: d...
def lrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU activation: elementwise max(x, leak * x).

    `name` is accepted for TF op-naming convention compatibility but unused here.
    """
    leaked = leak * x
    return tf.maximum(x, leaked)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): shape = input_.get_shape().as_list() with tf.variable_scope((scope or 'Linear')): matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev)) bias = ...
class Cifar100(): def __init__(self): with open('cifar100/train', 'rb') as f: self.train = pickle.load(f, encoding='latin1') with open('cifar100/test', 'rb') as f: self.test = pickle.load(f, encoding='latin1') self.train_data = self.train['data'] self.train...
class BatchData(Dataset): def __init__(self, images, labels, input_transform=None): self.images = images self.labels = labels self.input_transform = input_transform def __getitem__(self, index): image = self.images[index] image = Image.fromarray(np.uint8(image)) ...
class Exemplar(): def __init__(self, max_size, total_cls): self.val = {} self.train = {} self.cur_cls = 0 self.max_size = max_size self.total_classes = total_cls def update(self, cls_num, train, val): (train_x, train_y) = train (val_x, val_y) = val ...
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 Conv2d with padding 1 (preserves spatial size at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.bn1 = nn.BatchNorm2d(inplanes) self.relu = nn.ReLU(inplace=True) self.conv1 = conv3x3(inplanes, planes, stride) self.bn2 ...
class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.bn1 = nn.BatchNorm2d(inplanes) self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn2 = nn.BatchNorm2d(plan...
class PreResNet(nn.Module): def __init__(self, depth, num_classes=1000, block_name='BasicBlock'): super(PreResNet, self).__init__() if (block_name.lower() == 'basicblock'): assert (((depth - 2) % 6) == 0), 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202' ...
def preresnet(**kwargs):
    """Construct a pre-activation ResNet, forwarding all keyword args to PreResNet."""
    model = PreResNet(**kwargs)
    return model
class BiasLayer(nn.Module): def __init__(self): super(BiasLayer, self).__init__() self.alpha = nn.Parameter(torch.ones(1, requires_grad=True, device='cuda')) self.beta = nn.Parameter(torch.zeros(1, requires_grad=True, device='cuda')) def forward(self, x): return ((self.alpha ...