code stringlengths 17 6.64M |
|---|
class ML_ISTA(nn.Module):
    """Multi-Layer ISTA network: three strided convolutional sparse-coding
    layers unrolled for ``T`` ISTA iterations, a three-conv head with max
    pooling, and a linear log-softmax classifier.

    The classifier expects 512 flattened features, which holds for
    3-channel 32x32 inputs (spatial size collapses to 1x1 after the head).
    """

    def __init__(self, T):
        super(ML_ISTA, self).__init__()
        self.T = T  # number of unrolled ISTA iterations in forward()

        # Sparse-coding dictionaries (kernel 4, stride 2), scaled at
        # creation to small initial norms (0.1/sqrt(in_ch * k*k)).
        self.W1 = nn.Parameter(0.1 / np.sqrt(3 * 16) * torch.randn(32, 3, 4, 4))
        self.strd1 = 2
        self.W2 = nn.Parameter(0.1 / np.sqrt(32 * 16) * torch.randn(64, 32, 4, 4))
        self.strd2 = 2
        self.W3 = nn.Parameter(0.1 / np.sqrt(64 * 16) * torch.randn(128, 64, 4, 4))
        self.strd3 = 2
        # Plain convolutional head (kernel 3, stride 1).
        self.W4 = nn.Parameter(1 / np.sqrt(128 * 9) * torch.randn(256, 128, 3, 3))
        self.strd4 = 1
        self.W5 = nn.Parameter(1 / np.sqrt(256 * 9) * torch.randn(512, 256, 3, 3))
        self.strd5 = 1
        self.W6 = nn.Parameter(1 / np.sqrt(512 * 9) * torch.randn(512, 512, 3, 3))
        self.strd6 = 1
        # Learnable per-layer step sizes for the gradient steps.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1))
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1))
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1))
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1))
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1))
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.Wclass = nn.Linear(512, 10)

    def forward(self, x):
        # Feed-forward encoding sweep (initial layered thresholding).
        g1 = F.relu(self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1) + self.b1)
        g2 = F.relu(self.c2 * F.conv2d(g1, self.W2, stride=self.strd2, padding=1) + self.b2)
        g3 = F.relu(self.c3 * F.conv2d(g2, self.W3, stride=self.strd3, padding=1) + self.b3)

        for _ in range(self.T):
            # Refresh the shallower codes by back-projecting the deepest one.
            g2 = F.conv_transpose2d(g3, self.W3, stride=self.strd3, padding=1)
            g1 = F.conv_transpose2d(g2, self.W2, stride=self.strd2, padding=1)
            # Per-layer ISTA update: gradient step on the reconstruction
            # residual of the layer below, then a biased ReLU.
            r1 = F.conv_transpose2d(g1, self.W1, stride=self.strd1, padding=1) - x
            g1 = F.relu(g1 - self.c1 * F.conv2d(r1, self.W1, stride=self.strd1, padding=1) + self.b1)
            r2 = F.conv_transpose2d(g2, self.W2, stride=self.strd2, padding=1) - g1
            g2 = F.relu(g2 - self.c2 * F.conv2d(r2, self.W2, stride=self.strd2, padding=1) + self.b2)
            r3 = F.conv_transpose2d(g3, self.W3, stride=self.strd3, padding=1) - g2
            g3 = F.relu(g3 - self.c3 * F.conv2d(r3, self.W3, stride=self.strd3, padding=1) + self.b3)

        # Convolutional head; 2x2 max pooling after the last two convs.
        g4 = F.relu(F.conv2d(g3, self.W4, stride=self.strd4, padding=1) + self.b4)
        g5 = F.max_pool2d(F.relu(F.conv2d(g4, self.W5, stride=self.strd5, padding=1) + self.b5), kernel_size=2, stride=2)
        g6 = F.max_pool2d(F.relu(F.conv2d(g5, self.W6, stride=self.strd6, padding=1) + self.b6), kernel_size=2, stride=2)

        # Flatten and classify; returns log-probabilities (NLL-loss ready).
        flat = g6.view(g6.shape[0], g6.shape[1] * g6.shape[2] * g6.shape[3])
        return F.log_softmax(self.Wclass(flat), dim=1)
|
class ML_FISTA(nn.Module):
    """Multi-Layer FISTA network.

    Same architecture as ML_ISTA (three strided convolutional sparse-coding
    layers unrolled for T iterations, a three-conv head with max pooling,
    and a linear log-softmax classifier), except that the unrolled loop
    applies FISTA's momentum extrapolation to the deepest code gamma3.
    The classifier expects 512 flattened features, which holds for
    3-channel 32x32 inputs.
    """

    def __init__(self, T):
        super(ML_FISTA, self).__init__()
        # Number of unrolled FISTA iterations performed in forward().
        self.T = T
        # Sparse-coding dictionaries (kernel 4, stride 2).
        self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True)
        self.strd3 = 2
        # Plain convolutional head (kernel 3, stride 1).
        self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True)
        self.strd4 = 1
        self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True)
        self.strd5 = 1
        self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True)
        self.strd6 = 1
        # Learnable per-layer step sizes for the gradient steps.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True)
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True)
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(512, 10)
        # Rescale the random dictionaries to small initial norms
        # (0.1/sqrt(in_ch * k*k) for the coding layers, 1/sqrt for the head).
        self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data)
        self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data)
        self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data)
        self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data)

    def forward(self, x):
        # FISTA momentum scalars: current t and previous t.
        t = 1
        t_prv = t
        # Feed-forward encoding sweep.
        gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1))
        gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2))
        gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3))
        gamma3_prv = gamma3
        for _ in range(self.T):
            # Standard FISTA step-size recursion and extrapolated point Z.
            t_prv = t
            t = float(((1 + np.sqrt((1 + (4 * (t_prv ** 2))))) / 2))
            Z = (gamma3 + (((t_prv - 1) / t) * (gamma3 - gamma3_prv)))
            gamma3_prv = gamma3
            # Back-project the extrapolated deep code to the shallower layers.
            gamma2 = F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1)
            # Per-layer gradient step on the reconstruction residual of the
            # layer below, followed by a biased ReLU; the deepest layer steps
            # from the extrapolated point Z rather than gamma3.
            gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1))
            gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2))
            gamma3 = F.relu(((Z - (self.c3 * F.conv2d((F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3))
        # Convolutional head; 2x2 max pooling after the last two convs.
        gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4))
        gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2)
        gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2)
        gammaGoal = gamma6
        # Flatten and classify; returns log-probabilities.
        gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        return out
|
class ML_LISTA_NET(nn.Module):
    """Multi-Layer LISTA network with untied encoder/decoder dictionaries.

    Three unrolled sparse-coding layers use separate encoder filters
    (B1-B3) and decoder filters (W1-W3), followed by a three-conv head and
    a linear log-softmax classifier.  The classifier expects 512 flattened
    features, which holds for 3-channel 32x32 inputs.

    NOTE(review): a later class in this file is also named ML_LISTA_NET
    (with a different constructor signature); if both definitions end up in
    the same module, the later one shadows this one — confirm intent.
    """

    def __init__(self, T):
        super(ML_LISTA_NET, self).__init__()
        # Number of unrolled LISTA iterations in forward().
        self.T = T
        # Decoder dictionaries (kernel 4, stride 2).
        self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True)
        self.strd3 = 2
        # Plain convolutional head (kernel 3, stride 1).
        self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True)
        self.strd4 = 1
        self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True)
        self.strd5 = 1
        self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True)
        self.strd6 = 1
        # Encoder dictionaries (untied from W1-W3, same shapes).
        self.B1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True)
        self.B2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True)
        self.B3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True)
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True)
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True)
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(512, 10)
        # Rescale all random filter banks to small initial norms.
        self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data)
        self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data)
        self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data)
        self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data)
        self.B1.data = ((0.1 / np.sqrt((3 * 16))) * self.B1.data)
        self.B2.data = ((0.1 / np.sqrt((32 * 16))) * self.B2.data)
        self.B3.data = ((0.1 / np.sqrt((64 * 16))) * self.B3.data)

    def forward(self, x):
        # Feed-forward encoding sweep with the B (encoder) filters.
        gamma1 = F.relu((F.conv2d(x, self.B1, stride=self.strd1, padding=1) + self.b1))
        gamma2 = F.relu((F.conv2d(gamma1, self.B2, stride=self.strd2, padding=1) + self.b2))
        gamma3 = F.relu((F.conv2d(gamma2, self.B3, stride=self.strd3, padding=1) + self.b3))
        for _ in range(self.T):
            # Refresh the shallower codes by back-projecting through B.
            gamma2 = F.conv_transpose2d(gamma3, self.B3, stride=self.strd3, padding=1)
            gamma1 = F.conv_transpose2d(gamma2, self.B2, stride=self.strd2, padding=1)
            # LISTA update per layer: subtract the W-analysis of the
            # W-synthesis of the code, add the B-encoded signal from the
            # layer below, then a biased ReLU.
            gamma1 = F.relu((((gamma1 - F.conv2d(F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1), self.W1, stride=self.strd1, padding=1)) + F.conv2d(x, self.B1, stride=self.strd1, padding=1)) + self.b1))
            gamma2 = F.relu((((gamma2 - F.conv2d(F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1), self.W2, stride=self.strd2, padding=1)) + F.conv2d(gamma1, self.B2, stride=self.strd2, padding=1)) + self.b2))
            gamma3 = F.relu((((gamma3 - F.conv2d(F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1), self.W3, stride=self.strd3, padding=1)) + F.conv2d(gamma2, self.B3, stride=self.strd3, padding=1)) + self.b3))
        # Convolutional head; 2x2 max pooling after the last two convs.
        gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4))
        gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2)
        gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2)
        gammaGoal = gamma6
        # Flatten and classify; returns log-probabilities.
        gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        return out
|
class LBP_NET(nn.Module):
    """Layered Basis Pursuit network.

    Unlike ML_ISTA, each sparse-coding layer is refined in turn: layer k
    runs ``T`` ISTA iterations against the settled code of the layer below
    before the next layer is encoded.  A three-conv head with max pooling
    and a linear log-softmax classifier follow; the classifier expects 512
    flattened features, which holds for 3-channel 32x32 inputs.

    NOTE(review): a later class in this file reuses the name LBP_NET (with
    a different constructor signature); if both definitions end up in the
    same module, the later one shadows this one — confirm intent.
    """

    def __init__(self, T):
        super(LBP_NET, self).__init__()
        self.T = T  # ISTA iterations per layer

        # Sparse-coding dictionaries (kernel 4, stride 2), scaled at
        # creation to small initial norms.
        self.W1 = nn.Parameter(0.1 / np.sqrt(3 * 16) * torch.randn(32, 3, 4, 4))
        self.strd1 = 2
        self.W2 = nn.Parameter(0.1 / np.sqrt(32 * 16) * torch.randn(64, 32, 4, 4))
        self.strd2 = 2
        self.W3 = nn.Parameter(0.1 / np.sqrt(64 * 16) * torch.randn(128, 64, 4, 4))
        self.strd3 = 2
        # Plain convolutional head (kernel 3, stride 1).
        self.W4 = nn.Parameter(1 / np.sqrt(128 * 9) * torch.randn(256, 128, 3, 3))
        self.strd4 = 1
        self.W5 = nn.Parameter(1 / np.sqrt(256 * 9) * torch.randn(512, 256, 3, 3))
        self.strd5 = 1
        self.W6 = nn.Parameter(1 / np.sqrt(512 * 9) * torch.randn(512, 512, 3, 3))
        self.strd6 = 1
        # Learnable per-layer step sizes and per-channel biases.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1))
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1))
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1))
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1))
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.Wclass = nn.Linear(512, 10)

    def forward(self, x):
        if self.T == 0:
            # Plain layered thresholding — no pursuit iterations.
            g1 = F.relu(self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1) + self.b1)
            g2 = F.relu(self.c2 * F.conv2d(g1, self.W2, stride=self.strd2, padding=1) + self.b2)
            g3 = F.relu(self.c3 * F.conv2d(g2, self.W3, stride=self.strd3, padding=1) + self.b3)
        else:
            # Layer 1: encode, then T ISTA refinements against x.
            g1 = F.relu(self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1) + self.b1)
            for _ in range(self.T):
                r1 = F.conv_transpose2d(g1, self.W1, stride=self.strd1, padding=1) - x
                g1 = F.relu(g1 - self.c1 * F.conv2d(r1, self.W1, stride=self.strd1, padding=1) + self.b1)
            # Layer 2: same scheme against the settled g1.
            g2 = F.relu(self.c2 * F.conv2d(g1, self.W2, stride=self.strd2, padding=1) + self.b2)
            for _ in range(self.T):
                r2 = F.conv_transpose2d(g2, self.W2, stride=self.strd2, padding=1) - g1
                g2 = F.relu(g2 - self.c2 * F.conv2d(r2, self.W2, stride=self.strd2, padding=1) + self.b2)
            # Layer 3: same scheme against the settled g2.
            g3 = F.relu(self.c3 * F.conv2d(g2, self.W3, stride=self.strd3, padding=1) + self.b3)
            for _ in range(self.T):
                r3 = F.conv_transpose2d(g3, self.W3, stride=self.strd3, padding=1) - g2
                g3 = F.relu(g3 - self.c3 * F.conv2d(r3, self.W3, stride=self.strd3, padding=1) + self.b3)
        # Convolutional head; 2x2 max pooling after the last two convs.
        g4 = F.relu(F.conv2d(g3, self.W4, stride=self.strd4, padding=1) + self.b4)
        g5 = F.max_pool2d(F.relu(F.conv2d(g4, self.W5, stride=self.strd5, padding=1) + self.b5), kernel_size=2, stride=2)
        g6 = F.max_pool2d(F.relu(F.conv2d(g5, self.W6, stride=self.strd6, padding=1) + self.b6), kernel_size=2, stride=2)
        # Flatten and classify; returns log-probabilities.
        flat = g6.view(g6.shape[0], g6.shape[1] * g6.shape[2] * g6.shape[3])
        return F.log_softmax(self.Wclass(flat), dim=1)
|
class All_Free(nn.Module):
    """Untied ("all free") unrolled network: seven independent copies of
    each sparse-coding dictionary and bias (W{layer}_{1..7}, b{layer}_{1..7}).

    Step 1 is a feed-forward encoding with the first copies; steps 2..7 are
    ISTA-style updates where step k back-projects with copy k and
    re-encodes with copy k+1.  A three-conv head with max pooling and a
    linear log-softmax classifier follow; the classifier expects 512
    flattened features, which holds for 3-channel 32x32 inputs.

    The original implementation wrote all 42 parameters and 18 update
    statements out by hand; this version generates them in loops.  The
    attribute names (hence state_dict keys), parameter shapes, RNG draw
    order and the arithmetic performed in forward() are unchanged.

    NOTE(review): a later class in this file reuses the name All_Free with
    a different constructor signature — confirm intent.
    """

    # One feed-forward encoding copy plus six update copies per layer.
    STEPS = 7

    def __init__(self):
        super(All_Free, self).__init__()
        m1 = 32
        m2 = 64
        m3 = 128
        # Per-step sparse-coding dictionaries (kernel 4, stride 2), scaled
        # at creation to small initial norms.  Registered as W{layer}_{i}
        # via setattr so state_dict keys match the written-out version.
        for i in range(1, self.STEPS + 1):
            setattr(self, 'W1_%d' % i,
                    nn.Parameter(0.1 / np.sqrt(3 * 16) * torch.randn(32, 3, 4, 4)))
        self.strd1 = 2
        for i in range(1, self.STEPS + 1):
            setattr(self, 'W2_%d' % i,
                    nn.Parameter(0.1 / np.sqrt(m1 * 16) * torch.randn(64, 32, 4, 4)))
        self.strd2 = 2
        for i in range(1, self.STEPS + 1):
            setattr(self, 'W3_%d' % i,
                    nn.Parameter(0.1 / np.sqrt(m2 * 16) * torch.randn(128, 64, 4, 4)))
        self.strd3 = 2
        # Per-step per-channel biases, b{layer}_{i}.
        for i in range(1, self.STEPS + 1):
            setattr(self, 'b1_%d' % i, nn.Parameter(torch.zeros(1, m1, 1, 1)))
        for i in range(1, self.STEPS + 1):
            setattr(self, 'b2_%d' % i, nn.Parameter(torch.zeros(1, m2, 1, 1)))
        for i in range(1, self.STEPS + 1):
            setattr(self, 'b3_%d' % i, nn.Parameter(torch.zeros(1, m3, 1, 1)))
        # Plain convolutional head (kernel 3, stride 1), scaled at creation.
        self.W4 = nn.Parameter(1 / np.sqrt(128 * 9) * torch.randn(256, 128, 3, 3))
        self.strd4 = 1
        self.W5 = nn.Parameter(1 / np.sqrt(256 * 9) * torch.randn(512, 256, 3, 3))
        self.strd5 = 1
        self.W6 = nn.Parameter(1 / np.sqrt(512 * 9) * torch.randn(512, 512, 3, 3))
        self.strd6 = 1
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1))
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1))
        self.Wclass = nn.Linear(512, 10)

    def forward(self, x):
        # Step 1: feed-forward encoding with the first dictionary copies.
        g1 = F.relu(F.conv2d(x, self.W1_1, stride=self.strd1, padding=1) + self.b1_1)
        g2 = F.relu(F.conv2d(g1, self.W2_1, stride=self.strd2, padding=1) + self.b2_1)
        g3 = F.relu(F.conv2d(g2, self.W3_1, stride=self.strd3, padding=1) + self.b3_1)
        # Steps 2..7: untied ISTA-style updates.  Step k back-projects with
        # copy k, convolves the residual with copy k+1 and adds bias k+1.
        # g1 is updated before g2, and g2 before g3, matching the original
        # statement order (each residual uses the freshly updated code).
        for k in range(1, self.STEPS):
            W1a = getattr(self, 'W1_%d' % k)
            W1b = getattr(self, 'W1_%d' % (k + 1))
            g1 = F.relu(g1 - F.conv2d(F.conv_transpose2d(g1, W1a, stride=self.strd1, padding=1) - x,
                                      W1b, stride=self.strd1, padding=1) + getattr(self, 'b1_%d' % (k + 1)))
            W2a = getattr(self, 'W2_%d' % k)
            W2b = getattr(self, 'W2_%d' % (k + 1))
            g2 = F.relu(g2 - F.conv2d(F.conv_transpose2d(g2, W2a, stride=self.strd2, padding=1) - g1,
                                      W2b, stride=self.strd2, padding=1) + getattr(self, 'b2_%d' % (k + 1)))
            W3a = getattr(self, 'W3_%d' % k)
            W3b = getattr(self, 'W3_%d' % (k + 1))
            g3 = F.relu(g3 - F.conv2d(F.conv_transpose2d(g3, W3a, stride=self.strd3, padding=1) - g2,
                                      W3b, stride=self.strd3, padding=1) + getattr(self, 'b3_%d' % (k + 1)))
        # Convolutional head; 2x2 max pooling after the last two convs.
        g4 = F.relu(F.conv2d(g3, self.W4, stride=self.strd4, padding=1) + self.b4)
        g5 = F.max_pool2d(F.relu(F.conv2d(g4, self.W5, stride=self.strd5, padding=1) + self.b5), kernel_size=2, stride=2)
        g6 = F.max_pool2d(F.relu(F.conv2d(g5, self.W6, stride=self.strd6, padding=1) + self.b6), kernel_size=2, stride=2)
        # Flatten and classify; returns log-probabilities.
        flat = g6.view(g6.shape[0], g6.shape[1] * g6.shape[2] * g6.shape[3])
        return F.log_softmax(self.Wclass(flat), dim=1)
|
class ML_ISTA_NET(nn.Module):
    """Unrolled multi-layer ISTA network for single-channel inputs.

    Three convolutional dictionary layers (kernels 6/6/4, strides 2/2/1)
    are unrolled for ``T`` ISTA iterations; the flattened deepest code
    feeds a linear log-softmax classifier.  The classifier input size is
    ``m3``, which matches 1x28x28 inputs (spatial size collapses to 1x1).
    """

    def __init__(self, m1, m2, m3, T):
        super(ML_ISTA_NET, self).__init__()
        self.T = T  # number of unrolled ISTA iterations
        # Dictionaries, scaled at creation to small initial norms.
        self.W1 = nn.Parameter(0.1 / np.sqrt(36) * torch.randn(m1, 1, 6, 6))
        self.strd1 = 2
        self.W2 = nn.Parameter(0.1 / np.sqrt(m1 * 36) * torch.randn(m2, m1, 6, 6))
        self.strd2 = 2
        self.W3 = nn.Parameter(0.1 / np.sqrt(m2 * 16) * torch.randn(m3, m2, 4, 4))
        self.strd3 = 1
        # Learnable per-layer step sizes and per-channel biases.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1))
        self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1))
        self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1))
        self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1))
        self.Wclass = nn.Linear(m3, 10)

    def forward(self, x, all_out=False):
        # Feed-forward encoding sweep.
        g1 = F.relu(self.c1 * F.conv2d(x, self.W1, stride=self.strd1) + self.b1)
        g2 = F.relu(self.c2 * F.conv2d(g1, self.W2, stride=self.strd2) + self.b2)
        g3 = F.relu(self.c3 * F.conv2d(g2, self.W3, stride=self.strd3) + self.b3)
        for _ in range(self.T):
            # Refresh the shallower codes from the deepest one.
            g2 = F.conv_transpose2d(g3, self.W3, stride=self.strd3)
            g1 = F.conv_transpose2d(g2, self.W2, stride=self.strd2)
            # Per-layer ISTA update: gradient step on the reconstruction
            # residual of the layer below, then a biased ReLU.
            r1 = F.conv_transpose2d(g1, self.W1, stride=self.strd1) - x
            g1 = F.relu(g1 - self.c1 * F.conv2d(r1, self.W1, stride=self.strd1) + self.b1)
            r2 = F.conv_transpose2d(g2, self.W2, stride=self.strd2) - g1
            g2 = F.relu(g2 - self.c2 * F.conv2d(r2, self.W2, stride=self.strd2) + self.b2)
            r3 = F.conv_transpose2d(g3, self.W3, stride=self.strd3) - g2
            g3 = F.relu(g3 - self.c3 * F.conv2d(r3, self.W3, stride=self.strd3) + self.b3)
        flat = g3.view(g3.shape[0], g3.shape[1] * g3.shape[2] * g3.shape[3])
        out = F.log_softmax(self.Wclass(flat), dim=1)
        if not all_out:
            return out
        # Additionally return the flat code and the input reconstruction
        # synthesized from the final deep code (detached from the graph).
        g2 = F.conv_transpose2d(g3, self.W3, stride=self.strd3)
        g1 = F.conv_transpose2d(g2, self.W2, stride=self.strd2)
        x_rec = F.conv_transpose2d(g1, self.W1, stride=self.strd1)
        return (out, flat, x_rec.detach())
|
class ML_FISTA_NET(nn.Module):
    """Unrolled multi-layer FISTA network for single-channel inputs.

    Same architecture as ML_ISTA_NET (kernels 6/6/4, strides 2/2/1,
    linear classifier over the flattened deepest code of size m3 — matches
    1x28x28 inputs), but the unrolled loop applies FISTA's momentum
    extrapolation to the deepest code gamma3.
    """

    def __init__(self, m1, m2, m3, T):
        super(ML_FISTA_NET, self).__init__()
        # Number of unrolled FISTA iterations performed in forward().
        self.T = T
        # Convolutional dictionaries for the three sparse-coding layers.
        self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True)
        self.strd3 = 1
        # Learnable per-layer step sizes for the gradient steps.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(m3, 10)
        # Rescale the random dictionaries to small initial norms.
        self.W1.data = ((0.1 / np.sqrt(36)) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((m1 * 36))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((m2 * 16))) * self.W3.data)

    def forward(self, x, all_out=False):
        # FISTA momentum scalars: current t and previous t.
        t = 1
        t_prv = t
        # Feed-forward encoding sweep.
        gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1))
        gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2))
        gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3))
        gamma3_prv = gamma3
        for _ in range(self.T):
            # Standard FISTA step-size recursion and extrapolated point Z.
            t_prv = t
            t = float(((1 + np.sqrt((1 + (4 * (t_prv ** 2))))) / 2))
            Z = (gamma3 + (((t_prv - 1) / t) * (gamma3 - gamma3_prv)))
            gamma3_prv = gamma3
            # Back-project the extrapolated deep code to the shallower layers.
            gamma2 = F.conv_transpose2d(Z, self.W3, stride=self.strd3)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2)
            # Per-layer gradient step on the reconstruction residual of the
            # layer below, then a biased ReLU; the deepest layer steps from
            # the extrapolated point Z.
            gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) - x), self.W1, stride=self.strd1))) + self.b1))
            gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) - gamma1), self.W2, stride=self.strd2))) + self.b2))
            gamma3 = F.relu(((Z - (self.c3 * F.conv2d((F.conv_transpose2d(Z, self.W3, stride=self.strd3) - gamma2), self.W3, stride=self.strd3))) + self.b3))
        # Flatten the deepest code and classify; returns log-probabilities.
        gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        if all_out:
            # Also return the flat code and the input reconstruction
            # synthesized from the final deep code (detached).
            gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2)
            x_Rec = F.conv_transpose2d(gamma1, self.W1, stride=self.strd1)
            return (out, gamma, x_Rec.detach())
        else:
            return out
|
class ML_LISTA_NET(nn.Module):
    """Unrolled multi-layer LISTA network for single-channel inputs.

    Uses untied encoder dictionaries B1-B3 alongside decoder dictionaries
    W1-W3 (kernels 6/6/4, strides 2/2/1); the flattened deepest code of
    size ``m3`` (matches 1x28x28 inputs) feeds a linear log-softmax
    classifier.

    NOTE(review): an earlier class in this file carries the same name; if
    both definitions live in one module, this later one shadows it.
    """

    def __init__(self, m1, m2, m3, T):
        super(ML_LISTA_NET, self).__init__()
        self.T = T  # number of unrolled LISTA iterations
        # Encoder (B*) and decoder (W*) filter banks, same shapes.
        self.B1 = nn.Parameter(torch.randn(m1, 1, 6, 6))
        self.B2 = nn.Parameter(torch.randn(m2, m1, 6, 6))
        self.B3 = nn.Parameter(torch.randn(m3, m2, 4, 4))
        self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6))
        self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6))
        self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4))
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1))
        self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1))
        self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1))
        self.strd1 = 2
        self.strd2 = 2
        self.strd3 = 1
        self.Wclass = nn.Linear(m3, 10)
        # Shrink both filter banks to small initial norms.
        scale1 = 0.1 / np.sqrt(36)
        scale2 = 0.1 / np.sqrt(m1 * 36)
        scale3 = 0.1 / np.sqrt(m2 * 16)
        self.W1.data = scale1 * self.W1.data
        self.W2.data = scale2 * self.W2.data
        self.W3.data = scale3 * self.W3.data
        self.B1.data = scale1 * self.B1.data
        self.B2.data = scale2 * self.B2.data
        self.B3.data = scale3 * self.B3.data

    def forward(self, x, all_out=False):
        # Feed-forward encoding sweep with the B (encoder) filters.
        g1 = F.relu(F.conv2d(x, self.B1, stride=self.strd1) + self.b1)
        g2 = F.relu(F.conv2d(g1, self.B2, stride=self.strd2) + self.b2)
        g3 = F.relu(F.conv2d(g2, self.B3, stride=self.strd3) + self.b3)
        for _ in range(self.T):
            # Refresh the shallower codes by back-projecting through B.
            g2 = F.conv_transpose2d(g3, self.B3, stride=self.strd3)
            g1 = F.conv_transpose2d(g2, self.B2, stride=self.strd2)
            # LISTA update per layer: subtract the W-analysis of the
            # W-synthesis of the code, add the B-encoded signal from the
            # layer below, then apply a biased ReLU.
            g1 = F.relu(g1 - F.conv2d(F.conv_transpose2d(g1, self.W1, stride=self.strd1), self.W1, stride=self.strd1) + F.conv2d(x, self.B1, stride=self.strd1) + self.b1)
            g2 = F.relu(g2 - F.conv2d(F.conv_transpose2d(g2, self.W2, stride=self.strd2), self.W2, stride=self.strd2) + F.conv2d(g1, self.B2, stride=self.strd2) + self.b2)
            g3 = F.relu(g3 - F.conv2d(F.conv_transpose2d(g3, self.W3, stride=self.strd3), self.W3, stride=self.strd3) + F.conv2d(g2, self.B3, stride=self.strd3) + self.b3)
        # Flatten the deepest code and classify; returns log-probabilities.
        flat = g3.view(g3.shape[0], g3.shape[1] * g3.shape[2] * g3.shape[3])
        out = F.log_softmax(self.Wclass(flat), dim=1)
        if all_out:
            return (out, flat)
        return out
|
class LBP_NET(nn.Module):
    """Layered Basis Pursuit network for single-channel inputs.

    Each sparse-coding layer (kernels 6/6/4, strides 2/2/1) is refined in
    turn: layer k runs T ISTA iterations against the settled code of the
    layer below before the next layer is encoded.  The flattened deepest
    code of size m3 (matches 1x28x28 inputs) feeds a linear log-softmax
    classifier.

    NOTE(review): an earlier class in this file carries the same name
    (different constructor signature); if both definitions live in one
    module, this later one shadows it — confirm intent.
    """

    def __init__(self, m1, m2, m3, T):
        super(LBP_NET, self).__init__()
        # Number of ISTA iterations per layer.
        self.T = T
        # Convolutional dictionaries for the three sparse-coding layers.
        self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True)
        self.strd3 = 1
        # Learnable per-layer step sizes for the gradient steps.
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        # Per-channel additive biases applied before each ReLU.
        self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(m3, 10)
        # Rescale the random dictionaries to small initial norms.
        self.W1.data = ((0.1 / np.sqrt(36)) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((m1 * 36))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((m2 * 16))) * self.W3.data)

    def forward(self, x, all_out=False):
        if (self.T == 0):
            # Plain layered thresholding — no pursuit iterations.
            gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1))
            gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2))
            gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3))
        else:
            # Layer 1: encode, then T ISTA refinements against x.
            gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1))
            for _ in range(self.T):
                gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) - x), self.W1, stride=self.strd1))) + self.b1))
            # Layer 2: same scheme against the settled gamma1.
            gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2))
            for _ in range(self.T):
                gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) - gamma1), self.W2, stride=self.strd2))) + self.b2))
            # Layer 3: same scheme against the settled gamma2.
            gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3))
            for _ in range(self.T):
                gamma3 = F.relu(((gamma3 - (self.c3 * F.conv2d((F.conv_transpose2d(gamma3, self.W3, stride=self.strd3) - gamma2), self.W3, stride=self.strd3))) + self.b3))
        # Flatten the deepest code and classify; returns log-probabilities.
        gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        if all_out:
            # Also return the flat code and the input reconstruction
            # synthesized from the final deep code (detached).
            gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2)
            x_Rec = F.conv_transpose2d(gamma1, self.W1, stride=self.strd1)
            return (out, gamma, x_Rec.detach())
        else:
            return out
|
class All_Free(nn.Module):
    """Three-layer unrolled ML-ISTA classifier where every unrolling step has
    its own, untied ("all free") dictionary and bias.

    Layers 1 and 2 use 6x6 filters with stride 2, layer 3 uses 4x4 filters
    with stride 1; a linear head maps the flattened top-level code to 10
    logits (log-softmax).
    """

    def __init__(self, m1, m2, m3):
        super(All_Free, self).__init__()
        # Seven untied copies of each layer's dictionary / bias, registered
        # under the same attribute names (W1_1..W1_7, b1_1..b1_7, ...) as the
        # original hand-written version so checkpoints remain compatible.
        # Creation order (W1 block, W2 block, W3 block) also preserves the
        # torch RNG consumption order.
        for step in range(1, 8):
            setattr(self, 'W1_%d' % step, nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True))
        self.strd1 = 2
        for step in range(1, 8):
            setattr(self, 'W2_%d' % step, nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True))
        self.strd2 = 2
        for step in range(1, 8):
            setattr(self, 'W3_%d' % step, nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True))
        self.strd3 = 1
        for step in range(1, 8):
            setattr(self, 'b1_%d' % step, nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True))
        for step in range(1, 8):
            setattr(self, 'b2_%d' % step, nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True))
        for step in range(1, 8):
            setattr(self, 'b3_%d' % step, nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True))
        self.Wclass = nn.Linear(m3, 10)

    def forward(self, x, all_out=False):
        """Encode `x`, classify the top code; optionally return the code too.

        NOTE(review): the head expects the layer-3 feature map to be 1x1
        spatially (flattened size m3) — confirm the input resolution.
        """
        def _W(layer, step):
            # Dictionary of `layer` used at unrolling step `step`.
            return getattr(self, 'W%d_%d' % (layer, step))

        def _b(layer, step):
            # Bias of `layer` used at unrolling step `step`.
            return getattr(self, 'b%d_%d' % (layer, step))

        # Initial feed-forward codes from the step-1 parameters.
        gamma1 = F.relu((F.conv2d(x, _W(1, 1), stride=self.strd1) + _b(1, 1)))
        gamma2 = F.relu((F.conv2d(gamma1, _W(2, 1), stride=self.strd2) + _b(2, 1)))
        gamma3 = F.relu((F.conv2d(gamma2, _W(3, 1), stride=self.strd3) + _b(3, 1)))
        # Six ISTA refinement sweeps; step k decodes with the step-k
        # dictionary and re-encodes the residual with the step-(k+1) one.
        # Within one sweep, each layer's update sees the layer below as
        # already updated (same order as the original unrolled code).
        for k in range(1, 7):
            gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, _W(1, k), stride=self.strd1) - x), _W(1, k + 1), stride=self.strd1)) + _b(1, k + 1)))
            gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, _W(2, k), stride=self.strd2) - gamma1), _W(2, k + 1), stride=self.strd2)) + _b(2, k + 1)))
            gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, _W(3, k), stride=self.strd3) - gamma2), _W(3, k + 1), stride=self.strd3)) + _b(3, k + 1)))
        # Flatten the top code and classify.
        gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3]))
        out = F.log_softmax(self.Wclass(gamma), dim=1)
        if all_out:
            return (out, gamma)
        return out
|
def data_loader(data_name, miss_rate):
    """Load a dataset and introduce missingness.

    Args:
        data_name: 'letter', 'spam', or 'mnist'.
        miss_rate: the probability of missing components.

    Returns:
        data_x: original data
        miss_data_x: data with missing values (np.nan where missing)
        data_m: indicator matrix for missing components (0 = missing)

    Raises:
        ValueError: if `data_name` is not one of the supported datasets.
    """
    if (data_name in ['letter', 'spam']):
        file_name = (('data/' + data_name) + '.csv')
        data_x = np.loadtxt(file_name, delimiter=',', skiprows=1)
    elif (data_name == 'mnist'):
        ((data_x, _), _) = mnist.load_data()
        data_x = np.reshape(np.asarray(data_x), [60000, (28 * 28)]).astype(float)
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on data_x; fail fast with a clear message instead.
        raise ValueError("Unknown data_name: {!r} (expected 'letter', 'spam', or 'mnist')".format(data_name))
    (no, dim) = data_x.shape
    # Keep each entry with probability (1 - miss_rate).
    data_m = binary_sampler((1 - miss_rate), no, dim)
    miss_data_x = data_x.copy()
    miss_data_x[(data_m == 0)] = np.nan
    return (data_x, miss_data_x, data_m)
|
def main(args):
    """Run GAIN imputation on a UCI letter/spam (or mnist) dataset.

    Args:
        args: namespace with data_name, miss_rate, batch_size, hint_rate,
            alpha, and iterations.

    Returns:
        (imputed_data_x, rmse): the imputed matrix and its RMSE against the
        original data on the missing entries.
    """
    gain_parameters = {
        'batch_size': args.batch_size,
        'hint_rate': args.hint_rate,
        'alpha': args.alpha,
        'iterations': args.iterations,
    }
    # Load the data and knock out entries with probability miss_rate.
    (ori_data_x, miss_data_x, data_m) = data_loader(args.data_name, args.miss_rate)
    # Impute, then score only the artificially-missing entries.
    imputed_data_x = gain(miss_data_x, gain_parameters)
    rmse = rmse_loss(ori_data_x, imputed_data_x, data_m)
    print()
    print(('RMSE Performance: ' + str(np.round(rmse, 4))))
    return (imputed_data_x, rmse)
|
class RawVideoExtractorCV2():
    """Extract frames from a video file with OpenCV and preprocess them into
    normalized RGB tensors (CLIP-style mean/std normalization)."""

    def __init__(self, centercrop=False, size=224, framerate=(- 1)):
        # Stored for callers; note the transform below always center-crops
        # regardless of this flag.
        self.centercrop = centercrop
        # Target square resolution of the preprocessing pipeline.
        self.size = size
        # Frames sampled per second; forwarded to video_to_tensor as
        # sample_fp. NOTE(review): the default -1 would trip the
        # `sample_fp > -1` assert there — confirm callers always override it.
        self.framerate = framerate
        self.transform = self._transform(self.size)

    def _transform(self, n_px):
        # Resize -> center-crop -> force RGB -> tensor -> normalize with
        # CLIP's published channel means/stds.
        return Compose([Resize(n_px, interpolation=Image.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None):
        """Decode `video_file` and return {'video': stacked frame tensor}.

        Args:
            video_file: path readable by cv2.VideoCapture.
            preprocess: callable applied to each PIL frame.
            sample_fp: frames to keep per second; 0 keeps one frame per
                frame position (effectively all frames of each second).
            start_time, end_time: optional integer window in seconds.
        """
        # Validate the optional time window: both ends must be given,
        # non-negative integers, with end strictly after start.
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        # Duration in whole seconds, rounded up.
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            # Clamp the end to the clip length and seek to the start frame.
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        # Pick every `interval`-th frame position within each second so that
        # `sample_fp` frames per second survive.
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)
        else:
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # Frame offsets (within one second) to keep, truncated to sample_fp.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        # NOTE(review): `included` is collected but never used.
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Random-access seek to the absolute frame index, then read.
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; convert before handing to PIL.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                images.append(preprocess(Image.fromarray(frame_rgb).convert('RGB')))
        cap.release()
        if (len(images) > 0):
            video_data = th.tensor(np.stack(images))
        else:
            # No decodable frames: return a 1-element zero placeholder.
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None):
        # Convenience wrapper using this extractor's transform and framerate.
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time)
        return image_input

    def process_raw_data(self, raw_video_data):
        # Flatten any leading dims to (num_frames, 1, C, H, W).
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        # frame_order — 0: keep order; 1: reverse; 2: random shuffle
        # (consumes numpy's global RNG).
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
|
def get_args(description='VQA Task'):
    """Build, parse, and validate command-line arguments for the VQA task.

    Returns:
        argparse.Namespace; note `batch_size` is divided by
        `gradient_accumulation_steps` before returning.
    """
    parser = argparse.ArgumentParser(description=description)
    # --- run-mode switches -------------------------------------------------
    parser.add_argument('--do_pretrain', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    # --- data locations ----------------------------------------------------
    parser.add_argument('--train_csv', type=str, default='data/.train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/.val.csv', help='')
    parser.add_argument('--data_path', type=str, default='train_ans2label.json', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='MSRVTT_Videos', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    # --- optimization hyper-parameters -------------------------------------
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    # --- text/video input limits -------------------------------------------
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    # --- loss shaping / negative sampling ----------------------------------
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    # --- model / checkpoint handling ---------------------------------------
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cross_model', default='cross-base', type=str, required=False, help='Cross module')
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--n_gpu', type=int, default=1, help='Changed in the execute process.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    # --- mixed precision ----------------------------------------------------
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    # --- task and distributed settings -------------------------------------
    parser.add_argument('--task_type', default='retrieval', type=str, help='Point the task `retrieval` to finetune.')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    parser.add_argument('--world_size', default=0, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--rank', default=0, type=int, help='distribted training')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    # --- MIL / architecture options ----------------------------------------
    parser.add_argument('--use_mil', action='store_true', help='Whether use MIL as Miech et. al. (2020).')
    parser.add_argument('--sampled_use_mil', action='store_true', help='Whether MIL, has a high priority than use_mil.')
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help='Layer NO. of text.')
    parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help='Layer NO. of visual.')
    parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help='Layer NO. of cross.')
    parser.add_argument('--loose_type', action='store_true', help='Default using tight type for retrieval.')
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help='')
    # --- frame sampling / CLIP freezing ------------------------------------
    parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--freeze_layer_num', type=int, default=0, help='Layer NO. of CLIP need to freeze.')
    parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2], help='0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.')
    parser.add_argument('--linear_patch', type=str, default='2d', choices=['2d', '3d'], help='linear projection of flattened patches.')
    parser.add_argument('--sim_header', type=str, default='meanP', choices=['meanP', 'seqLSTM', 'seqTransf', 'tightTransf', 'BTransf', 'denseTransf'], help='choice a similarity header.')
    parser.add_argument('--loss', type=str, default='CrossEn', choices=['CrossEn'])
    # --- EMCL-specific hyper-parameters ------------------------------------
    parser.add_argument('--K', type=int, default=16)
    parser.add_argument('--stage_num', type=int, default=5)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--lamd', type=float, default=1)
    parser.add_argument('--beta', type=float, default=1)
    parser.add_argument('--num_labels', type=int, default=1000)
    args = parser.parse_args()
    # tightTransf requires the tight (cross-attention) pipeline.
    if (args.sim_header == 'tightTransf'):
        args.loose_type = False
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # NOTE(review): this check ignores --do_pretrain — confirm intended.
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    # Convert the requested effective batch size into a per-step size.
    args.batch_size = int((args.batch_size / args.gradient_accumulation_steps))
    return args
|
def set_seed_logger(args):
    """Seed every RNG, record the distributed rank/world size on `args`,
    and initialize the module-level logger under args.output_dir.

    Returns:
        The same `args` namespace, with world_size and rank filled in.
    """
    global logger
    seed = args.seed
    # Seed Python, hashing, numpy, and torch (CPU + all CUDA devices) so
    # runs are reproducible; force deterministic cudnn kernels.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Record the process-group topology and bind this process to its GPU.
    args.world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.rank = torch.distributed.get_rank()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    if args.local_rank == 0:
        # Dump the full configuration once, from the main process only.
        logger.info('Effective parameters:')
        for key in sorted(args.__dict__):
            logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
|
def init_device(args, local_rank):
    """Select the compute device for this rank and record the GPU count.

    Raises:
        ValueError: if either batch size is not divisible by the GPU count.
    """
    global logger
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu', local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    # Both train and eval batches are split evenly across GPUs, so they
    # must divide cleanly.
    if (args.batch_size % args.n_gpu != 0) or (args.batch_size_val % args.n_gpu != 0):
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
|
def init_model(args, device, n_gpu, local_rank):
    """Instantiate EMCL4QA, optionally warm-starting from --init_model,
    and move it onto `device`."""
    # Load an initial state dict on CPU when a checkpoint path was given.
    model_state_dict = torch.load(args.init_model, map_location='cpu') if args.init_model else None
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = EMCL4QA.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
|
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.0):
    """Build the BertAdam optimizer with four parameter groups and wrap the
    model in DistributedDataParallel.

    Groups are split on two axes: weight-decayed vs not (biases and
    LayerNorm parameters are exempt), and CLIP-branch vs not (the CLIP
    branch gets lr scaled by `coef_lr`).

    Returns:
        (optimizer, scheduler, ddp_model); scheduler is always None here.
    """
    if hasattr(model, 'module'):
        model = model.module
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    named_params = list(model.named_parameters())

    def _decayed(name):
        # Weight decay applies to everything except biases / LayerNorm.
        return not any(nd in name for nd in no_decay)

    def _in_clip(name):
        return 'clip.' in name

    weight_decay = 0.2
    clip_lr = args.lr * coef_lr
    optimizer_grouped_parameters = [
        {'params': [p for n, p in named_params if _decayed(n) and _in_clip(n)], 'weight_decay': weight_decay, 'lr': clip_lr},
        {'params': [p for n, p in named_params if _decayed(n) and not _in_clip(n)], 'weight_decay': weight_decay},
        {'params': [p for n, p in named_params if not _decayed(n) and _in_clip(n)], 'weight_decay': 0.0, 'lr': clip_lr},
        {'params': [p for n, p in named_params if not _decayed(n) and not _in_clip(n)], 'weight_decay': 0.0},
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion, schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06, t_total=num_train_optimization_steps, weight_decay=weight_decay, max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    return (optimizer, scheduler, model)
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSRVTT training dataset, its distributed sampler, and a
    DataLoader whose per-process batch is batch_size / n_gpu.

    Returns:
        (dataloader, dataset_length, train_sampler)
    """
    dataset_kwargs = dict(
        jsonl_path=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    msrvtt_dataset = MSRVTT_TrainDataLoader(**dataset_kwargs)
    train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    dataloader = DataLoader(
        msrvtt_dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=True,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        drop_last=True,
    )
    return (dataloader, len(msrvtt_dataset), train_sampler)
|
def dataloader_msrvtt_test(args, tokenizer):
    """Build the MSRVTT test dataset and a plain (unshuffled) DataLoader.

    Returns:
        (dataloader, dataset_length)
    """
    # NOTE(review): frame_order is taken from train_frame_order here even
    # though this is the test split — confirm eval_frame_order was not
    # intended.
    dataset_kwargs = dict(
        jsonl_path=args.val_csv,
        train_jsonl=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    msrvtt_testset = MSRVTT_DataLoader(**dataset_kwargs)
    dataloader_msrvtt = DataLoader(
        msrvtt_testset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return (dataloader_msrvtt, len(msrvtt_testset))
|
def save_model(epoch, args, model, type_name=''):
    """Serialize the (DDP-unwrapped) model's state dict into output_dir.

    The filename is ``pytorch_model.bin.[<type_name>.]<epoch>``.

    Returns:
        The path of the written checkpoint file.
    """
    # Unwrap DataParallel / DistributedDataParallel before saving.
    model_to_save = model.module if hasattr(model, 'module') else model
    suffix = '' if type_name == '' else type_name + '.'
    output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}{}'.format(suffix, epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
|
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Restore an EMCL4QA checkpoint saved by save_model.

    Falls back to ``output_dir/pytorch_model.bin.<epoch>`` when no explicit
    path is given.

    Returns:
        The loaded model on `device`, or None when the file does not exist.
    """
    if not model_file:
        model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}'.format(epoch))
    if not os.path.exists(model_file):
        return None
    model_state_dict = torch.load(model_file, map_location='cpu')
    if args.local_rank == 0:
        logger.info('Model loaded from %s', model_file)
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = EMCL4QA.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
|
# NOTE(review): `tokenizer=ClipTokenizer()` is evaluated once at import time
# (a single shared default instance); the argument is also unused in the body.
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, tokenizer=ClipTokenizer()):
    """Train `model` for one epoch with gradient accumulation.

    Returns:
        (mean loss over the dataloader, updated global_step)
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for (step, batch) in enumerate(train_dataloader):
        # Move tensors manually only in the single-GPU case — presumably the
        # multi-GPU path relies on the wrapper to scatter; confirm.
        if (n_gpu == 1):
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
        ce_loss = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
        if (n_gpu > 1):
            # Average the per-replica losses.
            ce_loss = ce_loss.mean()
        if (args.gradient_accumulation_steps > 1):
            # Scale so accumulated gradients match the effective batch.
            ce_loss = (ce_loss / args.gradient_accumulation_steps)
        loss = ce_loss
        loss.backward()
        total_loss += float(loss)
        # Take an optimizer step only every gradient_accumulation_steps
        # mini-batches.
        if (((step + 1) % args.gradient_accumulation_steps) == 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if (scheduler is not None):
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            # Cap the learned logit_scale (temperature) at ln(100) in place.
            if hasattr(model, 'module'):
                torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100))
            else:
                torch.clamp_(model.clip.logit_scale.data, max=np.log(100))
            global_step += 1
            if (((global_step % log_step) == 0) and (local_rank == 0)):
                # Log the distinct learning rates across parameter groups.
                logger.info('Epoch: %d/%s, Step: %d/%d, Lr: %s, CeLoss: %f, Time/step: %f', (epoch + 1), args.epochs, (step + 1), len(train_dataloader), '-'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), float(ce_loss), ((time.time() - start_time) / (log_step * args.gradient_accumulation_steps)))
                start_time = time.time()
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
|
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate QA accuracy on `test_dataloader`.

    Returns:
        Average Prec@1 over the whole loader (Prec@5 is logged only).
    """
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Unwrap a DataParallel/DDP container and make sure we are on `device`.
    model = (model.module if hasattr(model, 'module') else model).to(device)
    model.eval()
    with torch.no_grad():
        for bid, batch in enumerate(test_dataloader):
            input_ids, input_mask, segment_ids, video, video_mask, labels = (t.to(device) for t in batch)
            output = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
            prec1, prec5 = accuracy(output, labels, topk=(1, 5))
            n_samples = input_ids.size(0)
            top1.update(prec1[0], n_samples)
            top5.update(prec5[0], n_samples)
            # Lightweight progress indicator on one line.
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
    logger.info('Video QA:')
    logger.info('\t>>> Prec@1: {top1.avg:.3f} - Prec@5: {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg
|
def main():
    """Entry point: configure distributed training, build the EMCL4QA model
    and MSRVTT dataloaders, then train and/or evaluate."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    # This script only supports the retrieval-style fine-tuning task.
    assert (args.task_type == 'retrieval')
    # The answer-label count is hard-coded here, overriding --num_labels.
    args.num_labels = 1500
    model = init_model(args, device, n_gpu, args.local_rank)
    assert ((args.freeze_layer_num <= 12) and (args.freeze_layer_num >= (- 1)))
    # Freeze CLIP transformer blocks below freeze_layer_num; the final-norm,
    # projection, and logit_scale parameters always stay trainable.
    if (hasattr(model, 'clip') and (args.freeze_layer_num > (- 1))):
        for (name, param) in model.clip.named_parameters():
            if ((name.find('ln_final.') == 0) or (name.find('text_projection') == 0) or (name.find('logit_scale') == 0) or (name.find('visual.ln_post.') == 0) or (name.find('visual.proj') == 0)):
                continue
            elif ((name.find('visual.transformer.resblocks.') == 0) or (name.find('transformer.resblocks.') == 0)):
                layer_num = int(name.split('.resblocks.')[1].split('.')[0])
                if (layer_num >= args.freeze_layer_num):
                    continue
            # NOTE(review): `name.find('conv2.')` returns -1 (truthy) when the
            # substring is absent, so for linear_patch == '3d' this skips
            # freezing for nearly every parameter; `>= 0` was probably
            # intended — confirm before changing.
            if ((args.linear_patch == '3d') and name.find('conv2.')):
                continue
            else:
                param.requires_grad = False
    assert (args.datatype in DATALOADER_DICT)
    (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    # Fall back to the test split when the dataset defines no val split.
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %d', val_length)
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        # NOTE(review): int() wraps only the numerator, so this is float
        # division — it looks like ceil(len/accum)*epochs was intended; the
        # result is a float that downstream code tolerates. Confirm.
        num_train_optimization_steps = ((int(((len(train_dataloader) + args.gradient_accumulation_steps) - 1)) / args.gradient_accumulation_steps) * args.epochs)
        coef_lr = args.coef_lr
        (optimizer, scheduler, model) = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if (args.local_rank == 0):
            logger.info('***** Running training *****')
            logger.info(' Num examples = %d', train_length)
            logger.info(' Batch size = %d', args.batch_size)
            logger.info(' Num steps = %d', (num_train_optimization_steps * args.gradient_accumulation_steps))
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            # NOTE(review): the trailing `and 0` makes this branch dead code —
            # it looks like a deliberately disabled "evaluate before training"
            # hook; remove it or re-enable it explicitly.
            if ((epoch == 0) and (args.local_rank == 0) and 0):
                logger.info('Eval first')
                output_model_file = None
                R1 = eval_epoch(args, model, test_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the R1 is: {:.4f}'.format(best_output_model_file, best_score))
            # Reshuffle the distributed partition each epoch.
            train_sampler.set_epoch(epoch)
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=args.local_rank, tokenizer=tokenizer)
            if (args.local_rank == 0):
                logger.info('Epoch %d/%s Finished, Train Loss: %f', (epoch + 1), args.epochs, tr_loss)
                # Checkpoint saving is currently disabled, so the "best model"
                # path stays None; only the best Prec@1 is tracked.
                output_model_file = None
                logger.info('Eval on val dataset')
                R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the R1 is: {:.4f}'.format(best_output_model_file, best_score))
    elif args.do_eval:
        if (args.local_rank == 0):
            eval_epoch(args, model, test_dataloader, device, n_gpu)
|
class AverageMeter(object):
    'Computes and stores the average and current value'

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value seen
        self.avg = 0    # running weighted mean
        self.sum = 0    # weighted running total
        self.count = 0  # total weight

    def update(self, val, n=1):
        """Record `val` with weight `n` and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
'Computes the precision@k for the specified values of k'
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred)).contiguous()
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res
|
def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    # PEP 484: a default of None requires an explicit Optional annotation
    # (the previous implicit-Optional `etag: str = None` is deprecated).
    # sha256 hex digests give stable, filesystem-safe names.
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += ('.' + sha256(etag.encode('utf-8')).hexdigest())
    return filename
|
def filename_to_url(filename: str, cache_dir: Union[(str, Path)]=None) -> Tuple[(str, str)]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if cache_dir is None else cache_dir
    cache_dir = str(cache_dir) if isinstance(cache_dir, Path) else cache_dir
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its sidecar metadata must be present.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise FileNotFoundError('file {} not found'.format(required))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
|
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
    """Resolve `url_or_filename` to a local path, downloading URLs to cache.

    http(s)/s3 URLs are fetched via get_from_cache; an existing local path
    is returned unchanged; a missing local path raises FileNotFoundError;
    anything unrecognizable raises ValueError.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: download (or reuse) a cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Already a local file or directory.
        return url_or_filename
    if scheme == '':
        # Looks like a plain path, but nothing exists there.
        raise FileNotFoundError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
|
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into (bucket_name, key); raise on malformed input."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError('bad s3 path {}'.format(url))
    key = parsed.path
    # Drop only the single leading '/' that urlparse keeps on the key.
    if key.startswith('/'):
        key = key[1:]
    return (parsed.netloc, key)
|
def s3_request(func: Callable):
    """Decorator translating boto3 404 ClientErrors into FileNotFoundError.

    Any other ClientError is re-raised untouched so callers still see the
    original failure.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response['Error']['Code']) == 404:
                raise FileNotFoundError('file {} not found'.format(url))
            raise
    return wrapper
|
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Return the ETag of the S3 object at `url` (None if S3 reports none)."""
    bucket_name, s3_path = split_s3_path(url)
    obj = boto3.resource('s3').Object(bucket_name, s3_path)
    return obj.e_tag
|
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Stream the S3 object at `url` into the open file handle `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource('s3').Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
def http_get(url: str, temp_file: IO) -> None:
    """Stream an HTTP(S) resource into `temp_file` with a byte progress bar."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    # tqdm accepts total=None and then shows a rate-only bar.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit='B', total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url: str, cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Raises IOError when the HEAD request for a non-S3 URL does not return 200.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # The ETag is folded into the cache filename (see url_to_filename) so
    # different versions of the same URL occupy different cache slots.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if (response.status_code != 200):
            raise IOError('HEAD request failed for url {} with status code {}'.format(url, response.status_code))
        etag = response.headers.get('ETag')
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        # Download into a NamedTemporaryFile first so a partial download can
        # never land at cache_path; the temp file is removed on context exit.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # flush + rewind before copying: the writer may still hold
            # buffered bytes and the file position is at EOF after writing.
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            # Sidecar JSON lets filename_to_url() recover the origin later.
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
|
def read_set_from_file(filename: str) -> Set[str]:
    """Read a text file (one item per line) into a de-duplicated set.

    Trailing whitespace/newlines are stripped from every line.
    """
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
|
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return `path`'s extension, optionally without the dot and lowercased."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
|
class CrossEn(nn.Module):
    """Symmetric-retrieval cross-entropy over a similarity matrix.

    Treats the diagonal of `sim_matrix` as the matched (positive) pairs
    and returns the mean negative log-likelihood over them.

    NOTE(review): another class named CrossEn appears later in this file
    and shadows this definition at import time.
    """

    def __init__(self, config=None):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix):
        # Row-wise log-softmax, then keep only the diagonal (positive) entries.
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        diag_logpt = th.diag(log_probs)
        return (-diag_logpt).mean()
|
class InfoNceLoss(nn.Module):
    """Noise-contrastive estimation (InfoNCE) loss over a square similarity
    matrix whose diagonal holds the positive pairs."""

    def __init__(self):
        super().__init__()
        self.loss = th.nn.CrossEntropyLoss(reduction='mean')

    def forward(self, x):
        # Row/column i form a positive pair, so row i's target class is i.
        batch = x.size()[0]
        target = th.arange(batch)
        if x.is_cuda:
            target = target.cuda()
        # Sum both retrieval directions (rows-as-queries and columns-as-queries).
        forward_term = self.loss(x, target)
        backward_term = self.loss(th.transpose(x, 0, 1), target)
        return forward_term + backward_term
|
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss over a similarity matrix.

    Positives are the diagonal entries; every matrix entry serves as a
    negative in both directions. With `fix_norm`, the degenerate
    positive-vs-itself pairs are dropped before averaging.

    NOTE(review): a later class of the same name in this file shadows
    this definition at import time.
    """

    def __init__(self, margin=1, fix_norm=True):
        super().__init__()
        self.fix_norm = fix_norm
        self.loss = th.nn.MarginRankingLoss(margin)
        self.margin = margin

    def forward(self, x):
        n = x.size()[0]
        # Positive scores: the diagonal, broadcast against every negative
        # twice (once per direction), as a (2*n*n, 1) column.
        pos = th.diag(x).unsqueeze(1).expand(n, n).contiguous().view(-1, 1)
        pos = th.cat((pos, pos), 0)
        # Negative scores: all entries row-wise, then column-wise.
        neg = th.cat((x.view(-1, 1), x.transpose(0, 1).contiguous().view(-1, 1)), 0)
        max_margin = F.relu(self.margin - (pos - neg))
        if self.fix_norm:
            # Mask out the diagonal self-pairs in both directions.
            off_diag = th.ones(x.shape) - th.eye(x.shape[0])
            mask = th.cat((off_diag.view(-1, 1),
                           off_diag.transpose(0, 1).contiguous().view(-1, 1)), 0)
            keep_idx = th.nonzero(mask.flatten()).flatten()
            if pos.is_cuda:
                keep_idx = keep_idx.cuda()
            pos = th.index_select(pos, dim=0, index=keep_idx)
            neg = th.index_select(neg, dim=0, index=keep_idx)
            max_margin = F.relu(self.margin - (pos - neg))
        return max_margin.mean()
|
def warmup_cosine(x, warmup=0.002):
    """LR multiplier: linear ramp 0->1 over the first `warmup` fraction of
    training, then a half-cosine decay reaching 0 at x == 1."""
    if x < warmup:
        return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
|
def warmup_constant(x, warmup=0.002):
    """LR multiplier: linear ramp over the first `warmup`*`t_total` steps
    (as provided to BertAdam); constant 1.0 afterwards."""
    return x / warmup if x < warmup else 1.0
|
def warmup_linear(x, warmup=0.002):
    """Triangular LR multiplier: peaks at 1.0 when x == `warmup`, then
    decays linearly back to 0 at x == 1 (clamped at 0 beyond that)."""
    if x < warmup:
        return x / warmup
    return max((x - 1.0) / (warmup - 1.0), 0)
|
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Unlike vanilla Adam, no bias correction is applied to the moment
    estimates, and weight decay is decoupled from the gradient (added
    directly to the update), matching the original BERT implementation.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the currently scheduled learning rate for each parameter
        that has a gradient; returns [0] before the first step."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # No optimization step has been taken yet.
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: first/second moment buffers per parameter.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # Per-parameter clipping (not a global norm across params).
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Exponential moving averages of the gradient and its square.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                update = (next_m / (next_v.sqrt() + group['e']))
                # Decoupled weight decay (the "weight decay fix"): added to the
                # update directly instead of being folded into the gradient.
                if (group['weight_decay'] > 0.0):
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                # NOTE: incremented after use, so the schedule is evaluated at
                # the previous step index (first step uses progress 0).
                state['step'] += 1
        return loss
|
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE vocab archive, next to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
    """Bijective map from every byte value (0-255) to a printable unicode char.

    Printable latin-1 bytes map to themselves; the remaining control and
    whitespace bytes are shifted to codepoints above 0xFF so the BPE codes
    never operate on raw whitespace/control characters (which would cause
    UNKs or break the merge logic).
    """
    keep = (list(range(ord('!'), ord('~') + 1))
            + list(range(ord('¡'), ord('¬') + 1))
            + list(range(ord('®'), ord('ÿ') + 1)))
    codepoints = keep[:]
    shift = 0
    for byte in range(2 ** 8):
        if byte not in keep:
            keep.append(byte)
            # Park the unprintable byte at 256 + shift.
            codepoints.append(2 ** 8 + shift)
            shift += 1
    return dict(zip(keep, (chr(cp) for cp in codepoints)))
|
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    Word is a tuple of variable-length string symbols, e.g.
    ('l', 'o', 'w') -> {('l', 'o'), ('o', 'w')}.
    """
    previous = word[0]
    pairs = set()
    for symbol in word[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
|
def basic_clean(text):
    """Fix mojibake via ftfy and unescape HTML entities, then trim ends.

    Unescaping twice handles text that was entity-encoded two times
    (e.g. '&amp;amp;' -> '&').
    """
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()
|
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub('\\s+', ' ', text).strip()
|
class SimpleTokenizer(object):
    """CLIP-style byte-level BPE tokenizer.

    Vocabulary layout: 256 byte symbols, their '</w>' (end-of-word)
    variants, one merged symbol per BPE merge rule, then the two special
    tokens — 49152 entries in total.

    NOTE(review): the pattern below uses \\p{L}/\\p{N} character classes,
    so `re` here is presumably the third-party `regex` module rather than
    the stdlib `re` — confirm against this file's imports.
    """
    def __init__(self, bpe_path: str=default_bpe()):
        # Byte <-> printable-unicode maps so BPE never sees raw bytes.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        # Skip the header line and keep exactly 49152-256-2 merge rules.
        merges = merges[1:(((49152 - 256) - 2) + 1)]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = (vocab + [(v + '</w>') for v in vocab])
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Lower rank = earlier, higher-priority merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoization cache for bpe(); special tokens map to themselves.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
        self.vocab = self.encoder
    def bpe(self, token):
        """Apply BPE merges to one pre-tokenized `token`; returns the
        space-joined subword string (memoized in self.cache)."""
        if (token in self.cache):
            return self.cache[token]
        # Mark the final character as word-ending before merging.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Always merge the highest-priority (lowest-rank) pair first.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    # Jump to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    # Merge the matched bigram into a single symbol.
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Clean + lowercase `text`, then return its list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def decode(self, tokens):
        """Token ids -> text (lossy: '</w>' markers become spaces)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
    def tokenize(self, text):
        """Like encode(), but returns BPE token strings instead of ids."""
        tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return tokens
    def convert_tokens_to_ids(self, tokens):
        """Map BPE token strings to their vocabulary ids."""
        return [self.encoder[bpe_token] for bpe_token in tokens]
|
class PretrainedConfig(object):
    """Base class handling resolution and loading of a pretrained model's
    config and weights.

    Subclasses populate `pretrained_model_archive_map`, `config_name`
    and `weights_name`.
    """
    pretrained_model_archive_map = {}
    config_name = ''
    weights_name = ''
    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        """Resolve `pretrained_model_name` (local path, shortcut name, or
        URL) to a (config, state_dict) pair; returns None on lookup failure."""
        # Prefer a file shipped next to this module; otherwise fall back to
        # the shortcut map or treat the name as a path/URL.
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if (os.path.exists(archive_file) is False):
            if (pretrained_model_name in cls.pretrained_model_archive_map):
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            # Log only on rank 0 to avoid duplicated messages under DDP.
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.error("Model name '{}' was not found in model name list. We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, archive_file))
            return None
        if (resolved_archive_file == archive_file):
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('loading archive file {}'.format(archive_file))
        elif ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # tar.gz archive: extract into a temp dir for this call only.
            tempdir = tempfile.mkdtemp()
            if ((task_config is None) or (task_config.local_rank == 0)):
                logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                # NOTE(review): extractall on an untrusted archive is subject
                # to path traversal (CVE-2007-4559); consider filter='data'
                # on Python >= 3.12.
                archive.extractall(tempdir)
            serialization_dir = tempdir
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if ((task_config is None) or (task_config.local_rank == 0)):
            logger.info('Model config {}'.format(config))
        if (state_dict is None):
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            elif ((task_config is None) or (task_config.local_rank == 0)):
                logger.info("Weight doesn't exsits. {}".format(weights_path))
        if tempdir:
            # Clean up the extracted archive.
            shutil.rmtree(tempdir)
        return (config, state_dict)
    @classmethod
    def from_dict(cls, json_object):
        'Constructs a `BertConfig` from a Python dictionary of parameters.'
        # Sentinel -1 skips normal vocab-size handling in the subclass ctor.
        config = cls(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        'Constructs a `BertConfig` from a json file of parameters.'
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        'Serializes this instance to a Python dictionary.'
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        'Serializes this instance to a JSON string.'
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
def gelu(x):
    """Exact (erf-based) GELU activation.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
|
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x)
|
class LayerNorm(nn.Module):
    """Layer normalization in the TF style: epsilon is added inside the
    square root, and the affine parameters default to identity."""

    def __init__(self, hidden_size, eps=1e-12):
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last dimension only.
        mean = x.mean(-1, keepdim=True)
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normed + self.bias
|
class PreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for dowloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if (not isinstance(config, PretrainedConfig)):
            raise ValueError('Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. To create a model from a Google pretrained model use `model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`'.format(self.__class__.__name__, self.__class__.__name__))
        self.config = config
    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # BERT-style normal init with the configured std.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # TF checkpoints name LayerNorm's affine params beta/gamma.
            if (('beta' in dir(module)) and ('gamma' in dir(module))):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if (isinstance(module, nn.Linear) and (module.bias is not None)):
            module.bias.data.zero_()
    def resize_token_embeddings(self, new_num_tokens=None):
        raise NotImplementedError
    @classmethod
    def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
        """Non-strict load of `state_dict` into `model`: renames TF-style
        gamma/beta keys, optionally prepends `prefix` to every key, and
        logs missing/unexpected keys and errors (rank 0 only)."""
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        if (prefix is not None):
            # Re-key the whole dict under the given module prefix.
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                old_keys.append(key)
                new_keys.append((prefix + key))
            for (old_key, new_key) in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # Preserve the versioning metadata torch attaches to state dicts.
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if (metadata is not None):
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursive per-module load, mirroring nn.Module.load_state_dict.
            local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
            module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if (child is not None):
                    load(child, ((prefix + name) + '.'))
        load(model, prefix='')
        if ((prefix is None) and ((task_config is None) or (task_config.local_rank == 0))):
            logger.info(('-' * 20))
            if (len(missing_keys) > 0):
                logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ('\n ' + '\n '.join(missing_keys))))
            if (len(unexpected_keys) > 0):
                logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, ('\n ' + '\n '.join(unexpected_keys))))
            if (len(error_msgs) > 0):
                logger.error('Weights from pretrained model cause errors in {}: {}'.format(model.__class__.__name__, ('\n ' + '\n '.join(error_msgs))))
        return model
    @property
    def dtype(self):
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # Parameter-less module: fall back to any tensor attribute.
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype
    @classmethod
    def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        """
        model = cls(config, *inputs, **kwargs)
        if (state_dict is None):
            return model
        model = cls.init_preweight(model, state_dict)
        return model
|
class CrossEn(nn.Module):
    """Cross-entropy over a similarity matrix with explicit target columns.

    NOTE(review): this re-definition shadows the earlier CrossEn class in
    this file and, unlike it, takes an explicit `target` index tensor.
    """

    def __init__(self):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix, target):
        # Row-wise log-softmax, then select the `target` columns (for every
        # row) and average their negative log-probabilities.
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        selected = torch.index_select(log_probs, -1, target)
        return (-selected).mean()
|
class MILNCELoss(nn.Module):
    """MIL-NCE loss over a (batch_size*n_pair) square similarity matrix.

    Each batch element contributes n_pair rows/columns; all same-element
    entries count as (multiple-instance) positives, aggregated with a
    log-sum-exp before the NCE normalization.
    """

    def __init__(self, batch_size=1, n_pair=1):
        super(MILNCELoss, self).__init__()
        self.batch_size = batch_size
        self.n_pair = n_pair
        # BUG FIX: the old float-based parse ('.'.join(parts[:2]) -> float)
        # turned '1.10' into 1.1 < 1.3, misclassifying torch >= 1.10 as old
        # and selecting uint8 masks. Compare (major, minor) ints instead.
        version = []
        for part in torch.__version__.split('.')[:2]:
            digits = ''.join(ch for ch in part if ch.isdigit())
            version.append(int(digits) if digits else 0)
        self.bool_dtype = torch.bool if tuple(version) >= (1, 3) else torch.uint8

    def forward(self, sim_matrix):
        # Block-diagonal mask marking same-batch-element (positive) pairs.
        mm_mask = np.eye(self.batch_size)
        mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
        mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
        # Mask positives out of the text->video direction, then concatenate
        # with the (unmasked, transposed) video->text direction.
        from_text_matrix = sim_matrix + mm_mask * (-1000000000000.0)
        from_video_matrix = sim_matrix.transpose(1, 0)
        new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
        logpt = F.log_softmax(new_sim_matrix, dim=-1)
        # MIL aggregation: log-sum-exp restricted to the positive set.
        mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
        masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * (-1000000000000.0)
        new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
        # Score each batch element exactly once, at its middle pair row.
        logpt_choice = torch.zeros_like(new_logpt)
        mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair // 2)
        logpt_choice[mark_ind] = 1
        sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
        return sim_loss
|
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss with optional hard-negative weighting.

    NOTE(review): this re-definition shadows the earlier class of the
    same name in this file.
    """

    def __init__(self, margin=1.0, negative_weighting=False, batch_size=1, n_pair=1, hard_negative_rate=0.5):
        super(MaxMarginRankingLoss, self).__init__()
        self.margin = margin
        self.n_pair = n_pair
        self.batch_size = batch_size
        easy_negative_rate = 1 - hard_negative_rate
        self.easy_negative_rate = easy_negative_rate
        self.negative_weighting = negative_weighting
        if n_pair > 1 and batch_size > 1:
            # Precompute a weighting mask that up-weights same-element
            # (hard) negatives relative to cross-element (easy) ones.
            alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
            weights = (1 - alpha) * np.eye(self.batch_size) + alpha
            weights = np.kron(weights, np.ones((n_pair, n_pair)))
            weights = torch.tensor(weights) * (batch_size * (1 - easy_negative_rate))
            self.mm_mask = weights.float()

    def forward(self, x):
        diag = torch.diag(x)
        # Hinge in both directions: each entry against its row's and its
        # column's diagonal (positive) score.
        max_margin = (F.relu(self.margin + x - diag.view(-1, 1))
                      + F.relu(self.margin + x - diag.view(1, -1)))
        if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
            max_margin = max_margin * self.mm_mask.to(max_margin.device)
        return max_margin.mean()
|
class Emcl(object):
    """EM-style codebook refinement of embeddings (expectation-maximization
    contrastive learning helper).

    Keeps a persistent (1, k) codebook `mu`, momentum-updated across calls;
    each call runs `stage_num` EM iterations and returns
    beta * reconstructed + original embeddings.
    """

    def __init__(self, k=32, stage_num=9, momentum=0.9, lamd=1, beta=3):
        self.k = k
        self.lamd = lamd          # softmax temperature divisor for the E-step
        self.stage_num = stage_num
        self.beta = beta          # weight of the reconstructed component
        self.momentum = momentum
        mu = torch.Tensor(1, self.k)
        mu.normal_(0, math.sqrt(2.0 / self.k))
        # L2-normalize the codebook along dim 0.
        self.mu = mu / (1e-06 + mu.norm(dim=0, keepdim=True))

    def __call__(self, embds, if_train=True):
        b, n = embds.size()
        # BUG FIX: `.cuda(embds.device)` crashed on CPU tensors (and on
        # CUDA-less builds); `.to()` follows whatever device the
        # embeddings actually live on.
        mu = self.mu.repeat(b, 1).to(embds.device)
        _embds = embds
        with torch.no_grad():
            for _ in range(self.stage_num):
                # E-step: soft-assign embedding dimensions to codebook atoms.
                z = torch.mm(_embds.permute(1, 0), mu) / self.lamd
                z = F.softmax(z, dim=1)
                z = z / (1e-06 + z.sum(dim=0, keepdim=True))
                # M-step: re-estimate atoms, renormalize, and reconstruct.
                mu = torch.mm(_embds, z)
                mu = mu / (1e-06 + mu.norm(dim=0, keepdim=True))
                _embds = torch.mm(mu, z.permute(1, 0))
        if if_train:
            # Momentum-update the persistent codebook on CPU.
            mu = mu.cpu()
            self.mu = self.momentum * self.mu + (1 - self.momentum) * mu.mean(dim=0, keepdim=True)
        return self.beta * _embds + embds
|
class AllGather(torch.autograd.Function):
    """Differentiable all_gather: forward concatenates the tensor from
    every rank; backward routes each rank only its own gradient slice."""

    @staticmethod
    def forward(ctx, tensor, args):
        gathered = [torch.empty_like(tensor) for _ in range(args.world_size)]
        torch.distributed.all_gather(gathered, tensor)
        # Remember where this rank's slice lives for the backward pass.
        ctx.rank = args.rank
        ctx.batch_size = tensor.shape[0]
        return torch.cat(gathered, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        start = ctx.batch_size * ctx.rank
        # None for `args`, which receives no gradient.
        return (grad_output[start:start + ctx.batch_size], None)
|
def get_a_var(obj):
    """Depth-first search `obj` (tensor, list/tuple, or dict) for the first
    torch.Tensor it contains; return None when there is none."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        candidates = obj
    elif isinstance(obj, dict):
        # Recurse over (key, value) tuples, matching list/tuple handling.
        candidates = obj.items()
    else:
        return None
    for item in candidates:
        found = get_a_var(item)
        if isinstance(found, torch.Tensor):
            return found
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` onto `device_ids` and run `fct(replica, *input)`
    for each (replica, input) pair, one thread per replica.

    Returns the outputs in input order; an exception raised inside a
    worker is captured and re-raised in the caller with its context.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Propagate the caller's grad mode into the worker threads (grad mode
    # is thread-local in PyTorch).
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Wrap the exception so it can be re-raised after join().
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no threading overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger' logger at DEBUG level, configuring the
    root logger once and optionally mirroring records into `filename`."""
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is not None:
        # Attach a DEBUG-level file handler to the root logger.
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return log
|
def compress(paras):
    """Re-encode one video to 3 fps with its short side capped at 224 px.

    `paras` is an (input_video_path, output_video_path) tuple, shaped for
    use with multiprocessing.Pool.map.

    NOTE(review): ffmpeg's exit status is intentionally not checked, so a
    failed conversion passes silently (best-effort behavior preserved).
    """
    input_video_path, output_video_path = paras
    command = ['ffmpeg', '-y', '-i', input_video_path, '-filter:v',
               "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'",
               '-map', '0:v', '-r', '3', output_video_path]
    # BUG FIX: the former `except Exception as e: raise e` wrapper was a
    # no-op that only cluttered the traceback; exceptions now propagate
    # directly. communicate() both consumes the pipes (avoiding deadlock
    # on full buffers) and waits for ffmpeg to finish.
    ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    ffmpeg.communicate()
|
def prepare_input_output_pairs(input_root, output_root):
    """Walk `input_root` and pair every file with its target path directly
    under `output_root`, skipping outputs that already exist non-empty.

    Returns (input_paths, output_paths), index-aligned.
    """
    input_video_path_list = []
    output_video_path_list = []
    for root, _dirs, files in os.walk(input_root):
        for file_name in files:
            src = os.path.join(root, file_name)
            dst = os.path.join(output_root, file_name)
            # Skip work that is already done (non-empty output present).
            if os.path.exists(dst) and os.path.getsize(dst) > 0:
                continue
            input_video_path_list.append(src)
            output_video_path_list.append(dst)
    return (input_video_path_list, output_video_path_list)
|
def get_a_var(obj):
    """Recursively locate the first torch.Tensor inside `obj`.

    `obj` may be a tensor, a list/tuple, or a dict (searched via its
    (key, value) pairs); returns None when no tensor is found.

    NOTE(review): duplicate of an identical helper earlier in this file.
    """
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for element in obj:
            hit = get_a_var(element)
            if isinstance(hit, torch.Tensor):
                return hit
    if isinstance(obj, dict):
        for pair in obj.items():
            hit = get_a_var(pair)
            if isinstance(hit, torch.Tensor):
                return hit
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` onto `device_ids` and run `fct(replica, *input)`
    per replica in its own thread, returning outputs in input order.

    Worker exceptions are captured and re-raised in the caller.
    NOTE(review): duplicate of an identical function earlier in this file.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Grad mode is thread-local; forward the caller's setting to workers.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Wrap so the exception can be re-raised after join().
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline without spawning a thread.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Fetch the shared 'logger' logger (DEBUG level); optionally add a
    file handler on the root logger for `filename`.

    NOTE(review): duplicate of an identical function earlier in this file.
    """
    result = logging.getLogger('logger')
    result.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    if filename is not None:
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(fh)
    return result
|
class BaseDataLoader(DataLoader):
    """Base class for all data loaders.

    Optionally carves a validation split out of `dataset` using
    SubsetRandomSampler; when a split is active, `shuffle` is forced off
    because a sampler already randomizes iteration order.
    """
    def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
        # `validation_split` is either a fraction (float) of the dataset or
        # an absolute sample count (int); 0.0 disables the split.
        self.validation_split = validation_split
        self.shuffle = shuffle
        self.batch_idx = 0
        self.n_samples = len(dataset)
        # Must run before super().__init__: it may clear self.shuffle and
        # shrink self.n_samples to the training subset.
        (self.sampler, self.valid_sampler) = self._split_sampler(self.validation_split)
        self.init_kwargs = {'dataset': dataset, 'batch_size': batch_size, 'shuffle': self.shuffle, 'collate_fn': collate_fn, 'num_workers': num_workers}
        super().__init__(sampler=self.sampler, **self.init_kwargs)
    def _split_sampler(self, split):
        """Return (train_sampler, valid_sampler), or (None, None) when
        split == 0.0."""
        if (split == 0.0):
            return (None, None)
        idx_full = np.arange(self.n_samples)
        # Fixed seed: the train/valid split is reproducible across runs.
        np.random.seed(0)
        np.random.shuffle(idx_full)
        if isinstance(split, int):
            assert (split > 0)
            assert (split < self.n_samples), 'validation set size is configured to be larger than entire dataset.'
            len_valid = split
        else:
            len_valid = int((self.n_samples * split))
        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # DataLoader forbids shuffle=True together with a sampler.
        self.shuffle = False
        self.n_samples = len(train_idx)
        return (train_sampler, valid_sampler)
    def split_validation(self):
        """DataLoader over the validation subset, or None if no split."""
        if (self.valid_sampler is None):
            return None
        else:
            return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
|
class BaseModel(nn.Module):
    """Base class for all models.

    Subclasses must implement :meth:`forward`. ``__str__`` augments the
    standard module printout with the trainable-parameter count.
    """

    @abc.abstractmethod
    def forward(self, *inputs):
        """Forward pass logic."""
        raise NotImplementedError

    def __str__(self):
        """Model prints with number of trainable parameters."""
        n_params = sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)
        return super().__str__() + f'\nTrainable parameters: {n_params}'
|
class BaseTrainer():
    """Base class for all trainers.

    Handles device placement, the train/validate loop, metric monitoring
    with optional early stopping, atomic checkpoint saving/resuming, and
    final evaluation. Subclasses implement ``_train_epoch`` and
    ``_valid_epoch``.
    """

    def __init__(self, model, loss, metrics, optimizer, lr_scheduler, config):
        self.config = config
        self.hparams = get_hparams_from_config(self.config)
        # Move the model to the selected device; wrap in DataParallel when
        # several GPU ids are available.
        (self.device, device_ids) = self._prepare_device(config['n_gpu'])
        self.model = model.to(self.device)
        if (len(device_ids) > 1):
            self.model = torch.nn.DataParallel(model, device_ids=device_ids)
        self.loss = loss
        self.metrics = metrics
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # All experiment artefacts (checkpoints, perf log, monitoring files)
        # are written under config.save_dir.
        self.exp_dir = config.save_dir
        self.checkpoint_dir = config.save_dir
        self.perf_log_path = os.path.join(config.save_dir, 'perf_log.txt')
        self.info_checkpoint_path = os.path.join(config.save_dir, 'info_checkpoint.txt')
        self.monitoring_path = os.path.join(config.save_dir, 'monitoring.json')
        cfg_trainer = config['trainer']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        # 'monitor' is one of: 'off', 'given_epoch <n>', or '<min|max> <metric>'.
        self.monitor = cfg_trainer.get('monitor', 'off')
        self.timer = AverageMeter()
        if (self.monitor == 'off'):
            self.mnt_mode = 'off'
            self.mnt_best = 0
        elif self.monitor.startswith('given_epoch'):
            (self.mnt_mode, self.given_epoch) = self.monitor.split()
            assert (self.mnt_mode in ['given_epoch'])
            self.mnt_best = 0
            self.given_epoch = int(self.given_epoch)
        else:
            (self.mnt_mode, self.mnt_metric) = self.monitor.split()
            assert (self.mnt_mode in ['min', 'max'])
            # Start from the worst possible value for the monitored metric.
            self.mnt_best = (inf if (self.mnt_mode == 'min') else (- inf))
            # NOTE(review): early_stop is only defined for 'min'/'max'
            # monitoring modes -- confirm nothing reads it in other modes.
            self.early_stop = cfg_trainer.get('early_stop', inf)
        self.start_epoch = 0
        self.epoch = 0
        self.n_samples = 0
        self.n_steps = 0
        self.writer = SummaryWriter(config.log_dir)
        self.include_optim_in_ckpts = config['trainer'].get('include_optim_in_ckpts', False)
        # NOTE(review): train() reads self.skip_first_n_saves and
        # self.num_keep_ckpts, which are not assigned here -- presumably set
        # by a subclass or later configuration before train() runs; verify.
        if (config.resume is not None):
            self._resume_checkpoint(config.resume)

    @abc.abstractmethod
    def _train_epoch(self, epoch):
        """Training logic for an epoch.

        Must return a dict of results; a 'metrics' key, when present, holds
        nested per-dataset metric dicts (see train()).
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _valid_epoch(self, epoch, sets):
        """Validation logic for an epoch."""
        raise NotImplementedError

    def train(self):
        """Full training logic."""
        not_improved_count = 0
        for epoch in range(self.start_epoch, (self.epochs + 1)):
            self.epoch = epoch
            epoch_start = time.time()
            logger.debug('Starting training epoch %s ...', str(epoch))
            train_start = time.time()
            result = self._train_epoch(epoch)
            for (key, val) in result.items():
                self.writer.add_scalar(f'{key}', val, epoch)
            self.timer.update('epoch.train', (time.time() - train_start))
            logger.debug('Starting evaluating epoch %s ...', str(epoch))
            valid_start = time.time()
            val_log = self._valid_epoch(epoch, sets='continuous_eval')
            logger.debug('Updating val log with results ...')
            result.update(val_log)
            self.timer.update('epoch.valid', (time.time() - valid_start))
            checkpoint_start = time.time()
            # Flatten nested metrics into '<dataset>/<type>/<name>' keys.
            log = {'epoch': epoch}
            for (key, value) in result.items():
                if (key == 'metrics'):
                    for (dataset_name, dataset_metrics) in value.items():
                        for (metric_type, metric_dict) in dataset_metrics.items():
                            for (metric_name, metric_value) in metric_dict.items():
                                log[f'{dataset_name}/{metric_type}/{metric_name}'] = metric_value
                else:
                    log[key] = value
            best = False
            if (self.mnt_mode in ['min', 'max']):
                try:
                    # '<='/'>=' mean a tie with the current best still counts
                    # as an improvement.
                    lower = (log[self.mnt_metric] <= self.mnt_best)
                    higher = (log[self.mnt_metric] >= self.mnt_best)
                    improved = (((self.mnt_mode == 'min') and lower) or ((self.mnt_mode == 'max') and higher))
                except KeyError:
                    # Monitored metric missing from the log: disable monitoring
                    # for the rest of training rather than crash.
                    logger.warning('Warning: Metric %s not found, perf monitoring is disabled.', self.mnt_metric)
                    self.mnt_mode = 'off'
                    improved = False
                    not_improved_count = 0
                if improved:
                    self.mnt_best = log[self.mnt_metric]
                    not_improved_count = 0
                    best = True
                else:
                    not_improved_count += 1
                if (not_improved_count > self.early_stop):
                    logger.info("Val performance didn't improve for %s epochs. Training stops.", self.early_stop)
                    break
            save_best = (best and (self.mnt_metric != 'epoch'))
            if ((self.mnt_mode in ['given_epoch']) and (epoch == self.given_epoch)):
                save_best = True
            # NOTE(review): skip_first_n_saves / num_keep_ckpts are not set in
            # __init__ -- presumably provided by a subclass; verify.
            if (epoch < self.skip_first_n_saves):
                msg = f'Skipping ckpt save at epoch {epoch} < {self.skip_first_n_saves}'
                logger.info(msg)
            elif (((epoch % self.save_period) == 0) or save_best):
                self._save_checkpoint(epoch, save_best=best)
                if (epoch > self.num_keep_ckpts):
                    self.purge_stale_checkpoints()
            self.timer.update('epoch.checkpoint', (time.time() - checkpoint_start))
            self.timer.update('epoch.total', (time.time() - epoch_start))
            # Export per-phase timings to the log and TensorBoard.
            for (key, val) in self.timer.dic.items():
                for metric in ['avg', 'sum']:
                    log[f'timer.{key}.{metric}'] = self.timer.dic[key][metric]
                self.writer.add_scalar(f'timer_epoch/{key}', self.timer.dic[key]['sum'], epoch)
            self.writer.add_text('exp_dir', str(self.exp_dir), epoch)
            self.timer.reset()
            log['best'] = self.mnt_best
            log['not_improved_count'] = not_improved_count
            self.writer.add_scalar('best', self.mnt_best, epoch)
            for (metric_name, metric_value) in log.items():
                if ('/cols' in metric_name):
                    continue
                if ('timer.' in metric_name):
                    logger.debug(' {:15s}: {}'.format(str(metric_name), metric_value))
                else:
                    logger.info(' {:15s}: {}'.format(str(metric_name), metric_value))
            # Keep bulky '*cols' entries out of the on-disk perf log.
            log_light = {}
            for (key, value) in log.items():
                if (not key.endswith('cols')):
                    log_light[key] = value
            update_perf_log(log_light, self.perf_log_path)
            # NOTE(review): log[self.mnt_metric] assumes the monitored metric
            # exists in the log (and that mnt_metric is defined) -- confirm
            # the 'off'/'given_epoch' modes never reach this line unprepared.
            self.writer.add_hparams(self.hparams, {'hparam/accuracy': log[self.mnt_metric], 'hparam/mnt_best': self.mnt_best, 'hparam/epoch': epoch}, name='hparams')

    def evaluate(self):
        """Final evaluation."""
        sets = 'final_eval'
        # Prefer the 'best' checkpoint saved during training, if present.
        ckpt_path = (self.config.save_dir / 'trained_model.pth')
        if os.path.exists(ckpt_path):
            self._resume_checkpoint(ckpt_path)
        else:
            msg = f'The checkpoint {ckpt_path} does not exist and cannot be loaded. The model will not be resumed to that checkpoint.'
            logger.info(msg)
        final_result = self._valid_epoch(epoch=self.epoch, sets=sets)
        nested_metrics = final_result['metrics']
        log = {}
        for (dataset_name, dataset_metrics) in nested_metrics.items():
            log[dataset_name] = {}
            for (metric_type, metric_dict) in dataset_metrics.items():
                for (metric_name, metric_value) in metric_dict.items():
                    log[dataset_name][f'{metric_type}/{metric_name}/{sets}'] = metric_value
        for (dataset_name, metric_dict) in log.items():
            logger.info('%s:', dataset_name)
            for (metric_name, metric_value) in metric_dict.items():
                if ('/cols' in metric_name):
                    continue
                if ('timer.' in metric_name):
                    logger.debug(' {:15s}: {}'.format(str(metric_name), metric_value))
                else:
                    logger.info(' {:15s}: {}'.format(str(metric_name), metric_value))
        save_dir = self.config.save_dir
        results_on_datasets_log_path = os.path.join(save_dir, 'exp_results.json')
        # Merge into any existing results file rather than clobbering it.
        if os.path.exists(results_on_datasets_log_path):
            with open(results_on_datasets_log_path) as json_file:
                res = json.load(json_file)
        else:
            res = collections.OrderedDict({})
        if ('perfs' not in res.keys()):
            res['perfs'] = {}
        res['perfs'] = log
        res['checkpoint_epoch'] = self.loaded_epoch
        logger.info('Best epoch for the monitored metric: %s', self.loaded_epoch)
        with open(results_on_datasets_log_path, 'w') as fp:
            json.dump(res, fp, indent=4)
        # Touch a flag file marking the experiment as complete.
        exp_completed_flag_path = os.path.join(save_dir, 'exp_completed_flag.txt')
        with open(exp_completed_flag_path, 'a'):
            os.utime(exp_completed_flag_path, None)

    def purge_stale_checkpoints(self):
        """Remove checkpoints that are no longer needed.

        NOTE: This function assumes that the `best` checkpoint has already
        been renamed to have a format that differs from
        `checkpoint-epoch<num>.pth`
        """
        found_epoch_ckpts = list(self.checkpoint_dir.glob('checkpoint-epoch*.pth'))
        if (len(found_epoch_ckpts) <= self.num_keep_ckpts):
            return
        # Sort newest-epoch first; everything beyond num_keep_ckpts is stale.
        regex = '.*checkpoint-epoch(\\d+)[.]pth$'
        epochs = [int(re.search(regex, str(x)).groups()[0]) for x in found_epoch_ckpts]
        sorted_ckpts = sorted(list(zip(epochs, found_epoch_ckpts)), key=(lambda x: (- x[0])))
        for (epoch, stale_ckpt) in sorted_ckpts[self.num_keep_ckpts:]:
            tic = time.time()
            stale_ckpt.unlink()
            msg = f'removing stale ckpt [epoch {epoch}] [took {(time.time() - tic):.2f}s]'
            logger.info(msg)

    def _prepare_device(self, n_gpu_use):
        """Setup GPU device if available, move model into configured device.

        Returns (device, list_of_gpu_ids); clamps the request to what the
        machine actually has.
        """
        n_gpu = torch.cuda.device_count()
        msg = f'n_gpu = torch.cuda.device_count(): {n_gpu} (nb of gpus available)'
        logger.debug(msg)
        if ((n_gpu_use > 0) and (n_gpu == 0)):
            logger.warning("Warning: There's no GPU available on this machine,training will be performed on CPU.")
            n_gpu_use = 0
        if (n_gpu_use > n_gpu):
            msg = "Warning: The number of GPU's configured to use is {}, but only {} are available on this machine.".format(n_gpu_use, n_gpu)
            logger.warning(msg)
            n_gpu_use = n_gpu
        device = torch.device(('cuda:0' if (n_gpu_use > 0) else 'cpu'))
        logger.debug('device: %s', device)
        list_ids = list(range(n_gpu_use))
        logger.debug('list_ids: %s', list_ids)
        return (device, list_ids)

    def _save_checkpoint(self, epoch, save_best=False):
        """Saving checkpoints."""
        arch = type(self.model).__name__
        # DataParallel wraps the model in `.module`; unwrap when present so
        # the saved state_dict has clean key names.
        try:
            state_dict = self.model.module.state_dict()
        except AttributeError:
            state_dict = self.model.state_dict()
        state = {'arch': arch, 'epoch': epoch, 'state_dict': state_dict, 'monitor_best': self.mnt_best, 'config': self.config, 'n_samples': self.n_samples, 'n_steps': self.n_steps}
        if self.include_optim_in_ckpts:
            state['optimizer'] = self.optimizer.state_dict()
            state['lr_scheduler'] = self.lr_scheduler.state_dict()
        # Write to a temp name then rename, so an interrupted save never
        # leaves a truncated file under the final checkpoint name.
        filename = str((self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch)))
        filename_tmp = (filename + '_')
        tic = time.time()
        logger.info('Saving checkpoint: %s ...', filename)
        torch.save(state, filename_tmp)
        os.rename(filename_tmp, filename)
        msg = f'Done in {(time.time() - tic):.3f}s'
        logger.info(msg)
        if save_best:
            logger.info("Updating 'best' checkpoint: %s ...", filename)
            best_path = str((self.checkpoint_dir / 'trained_model.pth'))
            best_path_tmp = (best_path + '_')
            torch.save(state, best_path_tmp)
            os.rename(best_path_tmp, best_path)
            msg = f'Done in {(time.time() - tic):.3f}s'
            logger.info(msg)

    def _resume_last_checkpoint(self):
        # Resume from the most recent checkpoint found in the experiment dir.
        checkpoint_path = get_last_checkpoint_path(self.exp_dir)
        self._resume_checkpoint(checkpoint_path)

    def match_checkpoint_to_model(self, checkpoint, model):
        """Adapt the loaded checkpoint so that is fits the current architecture."""
        # Zero-pad position embeddings when the current model accepts longer
        # sequences than the checkpointed one.
        modules = ['vid_bert.embeddings.position_embeddings.weight']
        for module in modules:
            if ((module in model) and (checkpoint[module].shape != model[module].shape)):
                padding = (model[module].shape[0] - checkpoint[module].shape[0])
                padding_shape = list(model[module].shape)
                padding_shape[0] = padding
                device = checkpoint[module].device
                checkpoint[module] = torch.cat([checkpoint[module], torch.zeros(padding_shape, device=device)], 0)
                logger.warning('Size mismatch for module %s fixed by zero padding', module)
        # NOTE(review): the list below is empty, so this second pass is
        # currently a no-op -- presumably a hook for future mismatched
        # modules; verify before removing.
        modules = []
        for module in modules:
            if ((module in model) and (module not in checkpoint)):
                padding_shape = model[module].shape
                checkpoint[module] = torch.Tensor(padding_shape).cuda()
                logger.warning('Size mismatch for module %s', module)
            elif ((module in model) and (checkpoint[module].shape != model[module].shape)):
                padding_shape = model[module].shape
                checkpoint[module] = torch.Tensor(padding_shape).cuda()
                logger.warning('Size mismatch for module %s', module)

    def _resume_checkpoint(self, resume_path):
        """Resume from saved checkpoints."""
        resume_path = str(resume_path)
        logger.info('Loading checkpoint from: %s ...', resume_path)
        checkpoint = torch.load(resume_path, map_location=self.device)
        self.loaded_epoch = checkpoint['epoch']
        self.epoch = checkpoint['epoch']
        self.start_epoch = (checkpoint['epoch'] + 1)
        self.n_samples = checkpoint['n_samples']
        self.n_steps = checkpoint['n_steps']
        # 'restart' means the checkpoint comes from this very experiment dir
        # (true resume); otherwise we are warm-starting from another run.
        exp_dir_src = os.path.dirname(resume_path)
        restart = (exp_dir_src == str(self.exp_dir))
        if (checkpoint['config']['arch'] != self.config['arch']):
            msg = 'Warning: Architecture configuration given in config file isdifferent from that of checkpoint. This may yield an exception while state_dict is being loaded.'
            logger.warning(msg)
            logger.warning('Created model conf: %s', self.config['arch'])
            logger.warning('Loaded model conf: %s', checkpoint['config']['arch'])
        self.match_checkpoint_to_model(checkpoint['state_dict'], self.model.state_dict())
        # Strict loading only for a true resume; warm starts may differ.
        self.model.load_state_dict(checkpoint['state_dict'], strict=restart)
        if restart:
            optim_args = checkpoint['config']['optimizer']
            if (optim_args['type'] != self.config['optimizer']['type']):
                msg = 'Warning: Optimizer type given in config file differs from that of checkpoint. Optimizer parameters not being resumed.'
                logger.warning(msg)
            else:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler_args = checkpoint['config']['lr_scheduler']
            if (lr_scheduler_args['type'] != self.config['lr_scheduler']['type']):
                msg = 'Warning: Lr_scheduler type given in config file differs from that of checkpoint. Lr_scheduler parameters not being resumed.'
                logger.warning(msg)
            else:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            self.mnt_best = checkpoint['monitor_best']
        else:
            # Warm start: begin a fresh training run from epoch 0.
            self.loaded_epoch = 0
            self.epoch = 0
            self.start_epoch = 0
            self.n_samples = 0
            self.n_steps = 0
            with open(self.info_checkpoint_path, 'a') as f:
                f.write(f"This experiment is based on the checkpoint {resume_path}loaded at epoch {checkpoint['epoch']}")
        logger.info('Ckpt loaded at epoch %s.', str(checkpoint['epoch']))
|
class ActivityNet(BaseDataset):
    'ActivityNet captions dataset.'

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list with the video ids of the requested split.

        cut_name selects the partitioning scheme ('val1' or 'c');
        split_name selects the subset within that scheme. Also records
        self.split_name and self.dataset_name.
        """
        if (cut_name in ['val1']):
            train_list_path = 'train_list.txt'
            test_list_path = 'val_1_list.txt'
            test_list_path = os.path.join(self.data_dir, test_list_path)
            with open(test_list_path) as f:
                test_vid_list = f.readlines()
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle keyed on the cross-validation seed so
                # the train/val partition is reproducible.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # The first nb_test_samples ids are held out for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            # 'trn': small training subset for quick evaluation.
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val, optionally reshuffled by cross_seed (seed 0
                # keeps the original file order).
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out server evaluation splits.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                # NOTE(review): any other split_name leaves list_path unbound
                # and raises here -- confirm callers only pass test1/test2.
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'ActivityNet_{cut_name}_{split_name}'
|
class ExpertDataLoader():
    """Data loading of a dataset.

    Bundles a MixDataset with the torch DataLoader that serves it; both are
    accessible by subscription under the keys 'dataset' and 'loader'.
    """

    def __init__(self, mix, num_workers, batch_size, raw_input_dims, until_epoch=float('inf'), pin_memory=False, n_pairs=1, training=False, tokenizer=None, loaded_data=None, cross_seed=0):
        self.batch_size = batch_size
        self.until_epoch = until_epoch
        self.n_pairs = n_pairs
        # Shuffling and drop_last are only enabled while training.
        mixed = MixDataset(mix=mix, raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, n_pairs=n_pairs, loaded_data=loaded_data, cross_seed=cross_seed)
        wrapped = DataLoader(dataset=mixed, batch_size=batch_size, num_workers=num_workers, collate_fn=mixed.collate_data, drop_last=training, shuffle=training, pin_memory=pin_memory)
        self.dataloaders = {'loader': wrapped, 'dataset': mixed}
        logger.debug('Loading data with %d workers', num_workers)

    def __getitem__(self, key):
        # Expose 'loader' / 'dataset' via subscription.
        return self.dataloaders[key]
|
class DiDeMo(BaseDataset):
    'DiDeMo dataset.'

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list for the requested cut ('full' or 'c') and split."""
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised DiDeMo split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            if (split_name in ['trn']):
                # 'trn': deterministic random subset for quick evaluation.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                # NOTE(review): this branch only runs when cut_name is 'full',
                # so the 'c' check below looks unreachable (always 1065) --
                # verify before relying on the 840 path.
                if (cut_name in ['c']):
                    self.vid_list = self.vid_list[:840]
                else:
                    self.vid_list = self.vid_list[:1065]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val, optionally reshuffled by cross_seed.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out server evaluation splits.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                # NOTE(review): other split names leave list_path unbound here.
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'DiDeMo_{cut_name}_{split_name}'
        # Per-expert timing metadata (filled elsewhere).
        self.expert_timings = {}
|
class HowTo100M(BaseDataset):
    """HowTo100M dataset."""

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list for the requested cut/split.

        Only the 'full' cut carries split files; any other cut just records
        split_name/dataset_name without loading a video list.
        """
        self.restrict_test_captions = None
        list_path = None
        if cut_name in ['full']:
            file_by_split = {
                'train': 'train_list_full.txt',
                'trn': 'trn_list_full.txt',
                'val': 'val_list_full.txt',
                'valong': 'val_list_full.txt',
                'val3-30': 'val_list_full.txt',
                'test': 'test_list_full.txt',
                'testlong': 'test_list_full.txt',
                'test3-30': 'test_list_full.txt',
            }
            if split_name not in file_by_split:
                msg = 'unrecognised HowTo100M cut: {}'
                raise ValueError(msg.format(cut_name))
            list_path = os.path.join(self.root_feat, file_by_split[split_name])
            print('loading training/val splits....')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format(time.time() - tic))
        self.split_name = split_name
        self.dataset_name = f'HowTo100M_{cut_name}_{split_name}'
|
class LSMDC(BaseDataset):
    'LSMDC dataset.'

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list for the requested cut/split.

        NOTE(review): only the 'full' cut is handled; other cut names fall
        through silently with no vid_list -- confirm that is intended.
        """
        if (cut_name in ['full']):
            train_list_path = 'LSMDC16_annos_training.csv'
            test_list_path = 'LSMDC16_challenge_1000_publictect.csv'
            test_list_path = os.path.join(self.data_dir, test_list_path)
            # Annotation csvs are tab-separated; column 0 holds the clip id.
            df = pd.read_csv(test_list_path, delimiter='\t', header=None)
            test_vid_list = list(df[0])
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                df = pd.read_csv(train_list_path, delimiter='\t', header=None)
                train_vid_list = list(df[0])
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle keyed on the cross-validation seed.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # First nb_test_samples ids are held out for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
            # Drop a handful of clips with known issues.
            movies = ['0024_THE_LORD_OF_THE_RINGS_THE_FELLOWSHIP_OF_THE_RING_00.31.10.217-00.31.10.706', '1014_2012_00.01.21.399-00.01.23.997', '1014_2012_00.27.58.174-00.27.59.021', '1018_Body_Of_Lies_00.42.15.677-00.42.18.534', '1037_The_Curious_Case_Of_Benjamin_Button_02.25.14.743-02.25.17.312']
            for movie in movies:
                if (movie in self.vid_list):
                    self.vid_list.remove(movie)
        self.split_name = split_name
        self.dataset_name = f'LSMDC_{cut_name}_{split_name}'
|
class MixDataset(Dataset):
    """Dataset composed of a mix of different datasets.

    Each item is drawn from one of the member datasets, chosen with
    probability proportional to its (normalized) mix weight.
    """

    @abc.abstractmethod
    def configure_train_test_splits(self, split_name):
        """Partition the dataset into train/val/test splits."""
        raise NotImplementedError

    @abc.abstractmethod
    def sanity_checks(self):
        """Run sanity checks on loaded data."""
        raise NotImplementedError

    @abc.abstractmethod
    def load_features(self):
        """Load features from disk."""
        raise NotImplementedError

    def __init__(self, mix, raw_input_dims, training=False, tokenizer=None, n_pairs=1, loaded_data=None, cross_seed=0):
        self.sanity_checks = False
        self.mix = mix
        self.experts = set(raw_input_dims.keys())
        self.train = training
        self.tokenizer = tokenizer
        self.n_pairs = n_pairs
        # A single-member mix keeps that member's identity; otherwise the
        # composite is just called 'Mix'.
        if len(mix) == 1:
            only = mix[0]
            self.dataset_name = '_'.join([only['dataset_name'], only['cut_name'], only['split_name']])
            self.split_name = only['split_name']
        else:
            self.dataset_name = 'Mix'
            self.split_name = 'mic'
        dataset_classes = {'MSVD': MSVD, 'LSMDC': LSMDC, 'MSRVTT': MSRVTT, 'DiDeMo': DiDeMo, 'ActivityNet': ActivityNet, 'YouCook2': YouCook2, 'HowTo100M': HowTo100M}
        self.datasets = []
        self.mix_weights = []
        self.dataset_names = []
        for member_cfg in mix:
            member_cfg = member_cfg.copy()
            # Missing mix_weight defaults to 1 (uniform before normalization).
            self.mix_weights.append(member_cfg.pop('mix_weight', 1))
            member_name = member_cfg.pop('dataset_name')
            self.dataset_names.append(member_name)
            member = dataset_classes[member_name](**member_cfg, raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, n_pairs=n_pairs, loaded_data=loaded_data, cross_seed=cross_seed)
            self.datasets.append(member)
        # Normalize the weights into sampling probabilities.
        total_weight = sum(self.mix_weights)
        self.mix_weights = [float(w) / total_weight for w in self.mix_weights]
        logger.debug('Datasets: %s', self.dataset_names)
        logger.debug('mix_weights: %s', self.mix_weights)

    def collate_data(self, data):
        """Merge per-video sample dicts into one flat minibatch dict."""
        first = data[0]
        text_out = {key: [] for key in first['text_tensors'].keys()}
        vid_out = {key: {expert: [] for expert in self.experts} for key in first['vid_tensors'].keys()}
        list_out = {key: [] for key in first['lists'].keys()}
        for sample in data:
            for key, acc in text_out.items():
                acc.append(sample['text_tensors'][key])
            for key in vid_out:
                for expert in self.experts:
                    vid_out[key][expert].append(sample['vid_tensors'][key][expert])
            for key, acc in list_out.items():
                acc.extend(sample['lists'][key])
        # Stack along the batch axis with the dtypes the model expects.
        for key in text_out:
            text_out[key] = np.concatenate(text_out[key], axis=0).astype(np.int32)
        for key in vid_out:
            for expert in self.experts:
                vid_out[key][expert] = np.concatenate(vid_out[key][expert], axis=0).astype(np.float32)
        return {**text_out, **vid_out, **list_out}

    def __len__(self):
        # Training length is effectively unbounded (sampling is random);
        # evaluation uses the real length for a single member, 1000 otherwise.
        if self.train:
            return int(10000000.0)
        if len(self.mix) == 1:
            return len(self.datasets[0])
        return 1000

    def __getitem__(self, idx):
        # Random member choice while training; deterministic (seeded by idx)
        # at evaluation time so runs are reproducible.
        rng = np.random if self.train else np.random.RandomState(idx)
        chosen = rng.choice(len(self.mix), p=self.mix_weights)
        return self.datasets[chosen][idx]
|
class MSRVTT(BaseDataset):
    'MSR-VTT dataset.'

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list for the requested cut and split.

        Cuts: 'miech'/'jsfusion' (paper-specific protocols), 'full' (official
        split files) and 'c' (cross-validation pool of train+val).
        """
        self.restrict_test_captions = None
        if (cut_name in ['miech', 'jsfusion']):
            if (cut_name in ['miech']):
                train_list_path = 'train_list_miech.txt'
                test_list_path = 'test_list_miech.txt'
            elif (cut_name in ['jsfusion']):
                train_list_path = 'train_list_jsfusion.txt'
                test_list_path = 'val_list_jsfusion.txt'
                # The jsfusion protocol evaluates one specific caption per
                # video; the mapping is loaded from a cached pickle.
                test_cap_idx_path = os.path.join(self.data_dir, 'jsfusion_val_caption_idx.pkl')
                self.restrict_test_captions = memcache(test_cap_idx_path)
            test_list_path = os.path.join(self.data_dir, test_list_path)
            with open(test_list_path) as f:
                test_vid_list = f.readlines()
            nb_test_samples = len(test_vid_list)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                cross_vid_list = train_vid_list
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # Deterministic shuffle keyed on the cross-validation seed.
                rng = np.random.RandomState(self.cross_seed)
                rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        # First nb_test_samples ids are held out for 'val'.
                        self.vid_list = cross_vid_list[nb_test_samples:]
                        if (split_name in ['trn']):
                            self.vid_list = self.vid_list[:nb_test_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[:nb_test_samples]
            elif (split_name == 'test'):
                self.vid_list = test_vid_list
            self.vid_list = [x.strip() for x in self.vid_list]
        elif (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised split: {split_name}')
            list_path = os.path.join(self.data_dir, list_path)
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            if (split_name in ['trn']):
                # 'trn': deterministic random 497-video subset for quick eval.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                self.vid_list = self.vid_list[:497]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val, optionally reshuffled by cross_seed.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out server evaluation splits.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                # NOTE(review): other split names leave list_path unbound here.
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'MSRVTT_{cut_name}_{split_name}'
|
class MSVD(BaseDataset):
    'MSVD dataset.'

    def configure_train_test_splits(self, cut_name, split_name):
        """Populate self.vid_list for the requested cut ('full' or 'c') and split."""
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised MSVD split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            print('loading split ...')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format((time.time() - tic)))
            if (split_name in ['trn']):
                # 'trn': deterministic random subset for quick evaluation.
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                # NOTE(review): this branch only runs when cut_name is 'full',
                # so the 'c' check below looks unreachable (always 670) --
                # verify before relying on the 120 path.
                if (cut_name in ['c']):
                    self.vid_list = self.vid_list[:120]
                else:
                    self.vid_list = self.vid_list[:670]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                # Pool train+val, optionally reshuffled by cross_seed.
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                        if (split_name in ['trn']):
                            rng = np.random.RandomState(0)
                            rng.shuffle(self.vid_list)
                            self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out server evaluation splits.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                # NOTE(review): other split names leave list_path unbound here.
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'MSVD_{cut_name}_{split_name}'
|
class YouCook2(BaseDataset):
    """YouCook2 dataset.

    `configure_train_test_splits` populates `self.vid_list` (video ids for the
    requested split) from id-list files on disk, for one of two dataset cuts:
    the full release ('full') or the challenge cut ('c').
    """
    def configure_train_test_splits(self, cut_name, split_name):
        # 'full' cut: one id-list file per split, stored under root_feat.
        if (cut_name in ['full']):
            if (split_name in ['train', 'trn']):
                list_path = 'train_list.txt'
            elif (split_name in ['val']):
                list_path = 'val_list.txt'
            elif (split_name in ['test']):
                list_path = 'test_list.txt'
            else:
                raise ValueError(f'unrecognised split: {split_name}')
            list_path = os.path.join(self.root_feat, list_path)
            print('loading split ...')
            tic = time.time()
            with open(list_path) as f:
                self.vid_list = f.readlines()
            self.vid_list = [x.strip() for x in self.vid_list]
            print('done in {:.3f}s'.format((time.time() - tic)))
            # 'trn' is a deterministically shuffled, truncated variant of the
            # training split (a fixed-size training subset).
            if (split_name in ['trn']):
                rng = np.random.RandomState(0)
                rng.shuffle(self.vid_list)
                self.vid_list = self.vid_list[:3310]
        elif (cut_name in ['c']):
            self.expert_paths = get_expert_paths(self.data_dir)
            if (split_name in ['train', 'trn', 'val', 'trainval']):
                # Pool train+val ids so the train/val boundary can be re-drawn
                # under a different cross-validation seed below.
                train_list_path = 'train_list.txt'
                train_list_path = os.path.join(self.data_dir, train_list_path)
                with open(train_list_path) as f:
                    train_vid_list = f.readlines()
                nb_train_samples = len(train_vid_list)
                val_list_path = 'val_list.txt'
                val_list_path = os.path.join(self.data_dir, val_list_path)
                with open(val_list_path) as f:
                    val_vid_list = f.readlines()
                nb_val_samples = len(val_vid_list)
                cross_vid_list = (train_vid_list + val_vid_list)
                cross_vid_list = [x.strip() for x in cross_vid_list]
                # cross_seed != 0 re-shuffles the pooled ids, producing a new
                # partition; seed 0 keeps the official file order.
                if (self.cross_seed != 0):
                    rng = np.random.RandomState(self.cross_seed)
                    rng.shuffle(cross_vid_list)
                if (split_name in ['train', 'trn', 'trainval']):
                    if (split_name in ['trainval']):
                        self.vid_list = cross_vid_list
                    elif (split_name in ['train', 'trn']):
                        self.vid_list = cross_vid_list[:nb_train_samples]
                    # 'trn': deterministic shuffle, then truncate to the size
                    # of the validation set (small fixed training subset).
                    if (split_name in ['trn']):
                        rng = np.random.RandomState(0)
                        rng.shuffle(self.vid_list)
                        self.vid_list = self.vid_list[:nb_val_samples]
                elif (split_name in ['val']):
                    self.vid_list = cross_vid_list[nb_train_samples:]
            else:
                # Held-out challenge-server splits.
                # NOTE(review): if split_name is neither 'test1' nor 'test2',
                # `list_path` is undefined and the join below raises NameError
                # rather than a clear ValueError — confirm intended.
                if (split_name == 'test1'):
                    list_path = 'public_server_val.txt'
                elif (split_name == 'test2'):
                    list_path = 'public_server_test.txt'
                list_path = os.path.join(self.data_dir, list_path)
                with open(list_path) as f:
                    self.vid_list = f.readlines()
                self.vid_list = [x.strip() for x in self.vid_list]
        else:
            msg = 'unrecognised cut: {}'
            raise ValueError(msg.format(cut_name))
        self.split_name = split_name
        self.dataset_name = f'YouCook2_{cut_name}_{split_name}'
|
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss over a square similarity matrix.

    The diagonal of `x` holds matched (positive) pair scores; every
    off-diagonal entry is a mismatched (negative) pair. The loss penalises
    negatives that come within `margin` of their row/column positive.
    When `fix_norm` is set, the trivial positive-vs-itself pairs are dropped
    before averaging.
    """

    def __init__(self, margin=1, fix_norm=True):
        super().__init__()
        self.fix_norm = fix_norm
        self.loss = th.nn.MarginRankingLoss(margin)
        self.margin = margin

    def forward(self, x):
        n = x.size()[0]
        # Broadcast each diagonal (positive) score across its row, then stack
        # the row view on top of the column view.
        pos = th.diag(x).unsqueeze(1).expand(n, n).contiguous().view((- 1), 1)
        pos = th.cat((pos, pos), 0)
        neg = th.cat((x.view((- 1), 1),
                      x.transpose(0, 1).contiguous().view((- 1), 1)), 0)
        max_margin = F.relu((self.margin - (pos - neg)))
        if self.fix_norm:
            # Mask out the diagonal comparisons (positive against itself).
            off_diag = (th.ones(x.shape) - th.eye(x.shape[0]))
            mask = th.cat((off_diag.view((- 1), 1),
                           off_diag.transpose(0, 1).contiguous().view((- 1), 1)), 0)
            keep_idx = th.nonzero(mask.flatten()).flatten()
            if pos.is_cuda:
                keep_idx = keep_idx.cuda()
            pos_kept = th.index_select(pos, dim=0, index=keep_idx)
            neg_kept = th.index_select(neg, dim=0, index=keep_idx)
            max_margin = F.relu((self.margin - (pos_kept - neg_kept)))
        return max_margin.mean()
|
class TripletLoss(object):
    """Triplet ranking loss with selectable positive/negative mining.

    Args:
        margin: if not None and > 0, a hard `MarginRankingLoss` is used;
            otherwise `SoftMarginLoss`.
        mining_type: one of 'hard', 'topk', 'weighted', 'topk2', 'topk3'.
        topk: number of negatives kept by the top-k mining variants.
    """

    def __init__(self, margin=None, mining_type='hard', topk=1):
        self.margin = margin
        if ((self.margin is not None) and (self.margin > 0)):
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            self.ranking_loss = nn.SoftMarginLoss()
        self.mining_type = mining_type
        # NOTE(review): this stores the *builtin* `type` object; kept for
        # backward compatibility but it looks like dead/accidental code.
        self.type = type
        self.topk = topk

    def __call__(self, mat_dist):
        """Mine (positive, negative) score pairs from `mat_dist` and return the loss."""
        if (self.mining_type == 'hard'):
            (dist_ap, dist_an) = hard_example_mining(mat_dist)
        elif (self.mining_type == 'topk'):
            (dist_ap, dist_an) = topk_example_mining(mat_dist, self.topk)
        elif (self.mining_type == 'weighted'):
            (dist_ap, dist_an) = batch_weight(mat_dist)
        elif (self.mining_type == 'topk2'):
            (dist_ap, dist_an) = topk_example_mining2(mat_dist, self.topk)
        elif (self.mining_type == 'topk3'):
            (_dist_ap, _dist_an) = topk_example_mining(mat_dist, self.topk)
            dist_ap = F.softmax(_dist_ap, dim=1)
            dist_an = F.softmax(_dist_an, dim=1)
        else:
            # Previously an unknown mining type fell through and crashed with
            # a NameError on `dist_ap`; fail fast with a clear error instead.
            raise ValueError(f'unrecognised mining_type: {self.mining_type}')
        # Target of 1: positives should rank above negatives.
        y = dist_ap.new().resize_as_(dist_an).fill_(1)
        if ((self.margin is not None) and (self.margin > 0)):
            loss = self.ranking_loss(dist_ap, dist_an, y)
        else:
            loss = self.ranking_loss((dist_ap - dist_an), y)
        return loss
|
def hard_example_mining(dist_mat):
    """Select, per anchor, the hardest positive and hardest negative score.

    Args:
        dist_mat: square (N, N) score matrix; the diagonal holds matched
            (positive) pairs, off-diagonal entries are negatives.

    Returns:
        Tuple (dist_ap, dist_an) of 1-D tensors of length 2N: the hardest
        (minimum) positive and hardest (maximum) negative per row, followed
        by the same per column (the transposed view).
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    # Build the masks on the same device as the input; the previous
    # unconditional .cuda() calls broke CPU-only execution while behaving
    # identically for CUDA inputs.
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    # Masked-out entries are pushed to +/- 1e8 so they never win the min/max.
    # NOTE: a genuine 0.0 score is also clobbered by this masking scheme
    # (pre-existing behaviour, preserved here).
    dist_ap = th.mul(dist_mat, is_pos)
    dist_ap[(dist_ap == 0.0)] = 100000000.0
    (dist_ap, _) = th.min(dist_ap, dim=1, keepdim=True)
    dist_ap2 = th.mul(dist_mat.t(), is_pos)
    dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
    (dist_ap2, _) = th.min(dist_ap2, dim=1, keepdim=True)
    dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
    dist_an = th.mul(dist_mat, is_neg)
    dist_an[(dist_an == 0.0)] = (- 100000000.0)
    (dist_an, _) = th.max(dist_an, dim=1, keepdim=True)
    dist_an2 = th.mul(dist_mat.t(), is_neg)
    dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
    (dist_an2, _) = th.max(dist_an2, dim=1, keepdim=True)
    dist_an = th.cat((dist_an, dist_an2), dim=0)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    return (dist_ap, dist_an)
|
def topk_example_mining(dist_mat, topk):
    """Per anchor: the single hardest positive (tiled `topk` times) and the
    `topk` hardest negatives.

    Args:
        dist_mat: square (N, N) score matrix (diagonal = positives).
        topk: number of negatives to keep per anchor.

    Returns:
        Tuple (dist_ap, dist_an); for topk == 1 both are 1-D of length 2N,
        otherwise both are (2N, topk).
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    # Masks on the input's device (previous hard-coded .cuda() broke CPU runs).
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    dist_ap = th.mul(dist_mat, is_pos)
    dist_ap[(dist_ap == 0.0)] = 100000000.0
    (dist_ap, _) = th.topk(dist_ap, k=1, dim=1, largest=False)
    dist_ap2 = th.mul(dist_mat.t(), is_pos)
    dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
    (dist_ap2, _) = th.topk(dist_ap2, k=1, dim=1, largest=False)
    dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
    # Tile the single positive column so its shape matches the topk negatives.
    temp = dist_ap
    for _ in range((topk - 1)):
        dist_ap = th.cat((dist_ap, temp), dim=1)
    dist_an = th.mul(dist_mat, is_neg)
    dist_an[(dist_an == 0.0)] = (- 100000000.0)
    (dist_an, _) = th.topk(dist_an, k=topk, dim=1, largest=True)
    dist_an2 = th.mul(dist_mat.t(), is_neg)
    dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
    (dist_an2, _) = th.topk(dist_an2, k=topk, dim=1, largest=True)
    dist_an = th.cat((dist_an, dist_an2), dim=0)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    return (dist_ap, dist_an)
|
def topk_example_mining2(dist_mat, topk):
    """Top-k mining over softmax-weighted scores.

    Like `topk_example_mining`, but each score is first multiplied by its
    row-wise (respectively column-wise, for the transposed pass) softmax
    weight before positives/negatives are mined.
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    # Masks on the input's device (previous hard-coded .cuda() broke CPU runs).
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    _dist_mat = (F.softmax(dist_mat, dim=1) * dist_mat)
    _dist_mat_t = (F.softmax(dist_mat, dim=0) * dist_mat)
    dist_ap = th.mul(_dist_mat, is_pos)
    dist_ap[(dist_ap == 0.0)] = 100000000.0
    (dist_ap, _) = th.topk(dist_ap, k=1, dim=1, largest=False)
    dist_ap2 = th.mul(_dist_mat_t.t(), is_pos)
    dist_ap2[(dist_ap2 == 0.0)] = 100000000.0
    (dist_ap2, _) = th.topk(dist_ap2, k=1, dim=1, largest=False)
    dist_ap = th.cat((dist_ap, dist_ap2), dim=0)
    # Tile the single positive column to match the topk negatives.
    temp = dist_ap
    for _ in range((topk - 1)):
        dist_ap = th.cat((dist_ap, temp), dim=1)
    dist_an = th.mul(_dist_mat, is_neg)
    dist_an[(dist_an == 0.0)] = (- 100000000.0)
    (dist_an, _) = th.topk(dist_an, k=topk, dim=1, largest=True)
    dist_an2 = th.mul(_dist_mat_t.t(), is_neg)
    dist_an2[(dist_an2 == 0.0)] = (- 100000000.0)
    (dist_an2, _) = th.topk(dist_an2, k=topk, dim=1, largest=True)
    dist_an = th.cat((dist_an, dist_an2), dim=0)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    return (dist_ap, dist_an)
|
def batch_all(dist_mat):
    """Expand every positive/negative score combination per anchor.

    Returns two aligned 1-D tensors of length N * ap_num * an_num so that
    element i of each forms one (positive, negative) training pair.
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    # Masks on the input's device (previous hard-coded .cuda() broke CPU runs).
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    dist_ap = th.mul(dist_mat, is_pos)
    dist_an = th.mul(dist_mat, is_neg)
    # Keep strictly-positive entries; assumes real scores are > 0 so only the
    # masked zeros are removed (pre-existing behaviour).
    dist_ap_pos = dist_ap[(dist_ap > 0)]
    dist_an_pos = dist_an[(dist_an > 0)]
    ap_num = int((dist_ap_pos.size()[0] / N))
    an_num = int((dist_an_pos.size()[0] / N))
    all_num = ((N * ap_num) * an_num)
    dist_ap_re = dist_ap_pos.reshape(N, ap_num, 1)
    dist_ap_re = dist_ap_re.expand(N, ap_num, an_num)
    dist_ap_re = dist_ap_re.reshape(all_num)
    dist_an_re = dist_an_pos.reshape(N, an_num, 1)
    dist_an_re = dist_an_re.expand(N, an_num, ap_num)
    dist_an_re = th.transpose(dist_an_re, 1, 2)
    dist_an_re = dist_an_re.reshape(all_num)
    return (dist_ap_re, dist_an_re)
|
def batch_weight(dist_mat):
    """Like `batch_all`, but scores are softmax-weighted first.

    Positives are weighted by a row-wise softmax of the positive scores,
    negatives by a row-wise softmax of the negated negative scores, before
    all positive/negative combinations are expanded.
    """
    assert (len(dist_mat.size()) == 2)
    assert (dist_mat.size(0) == dist_mat.size(1))
    N = dist_mat.size(0)
    # Masks on the input's device (previous hard-coded .cuda() broke CPU runs).
    device = dist_mat.device
    is_pos = th.eye(N, device=device)
    is_neg = (th.ones(dist_mat.shape, device=device) - th.eye(N, device=device))
    dist_ap = th.mul(dist_mat, is_pos)
    dist_an = th.mul(dist_mat, is_neg)
    # NOTE(review): the softmax runs over the full row, including the masked
    # zeros, so the zeros still absorb probability mass — confirm intended.
    dist_ap_weighted = F.softmax(dist_ap, dim=1)
    dist_an_weighted = F.softmax((- dist_an), dim=1)
    dist_ap_w = (dist_ap * dist_ap_weighted)
    dist_an_w = (dist_an * dist_an_weighted)
    dist_ap_pos = dist_ap_w[(dist_ap_w > 0)]
    dist_an_pos = dist_an_w[(dist_an_w > 0)]
    ap_num = int((dist_ap_pos.size()[0] / N))
    an_num = int((dist_an_pos.size()[0] / N))
    all_num = ((N * ap_num) * an_num)
    dist_ap_re = dist_ap_pos.reshape(N, ap_num, 1)
    dist_ap_re = dist_ap_re.expand(N, ap_num, an_num)
    dist_ap_re = dist_ap_re.reshape(all_num)
    dist_an_re = dist_an_pos.reshape(N, an_num, 1)
    dist_an_re = dist_an_re.expand(N, an_num, ap_num)
    dist_an_re = th.transpose(dist_an_re, 1, 2)
    dist_an_re = dist_an_re.reshape(all_num)
    return (dist_ap_re, dist_an_re)
|
class LSTMModel(nn.Module):
    """Long Short-Term Memory network for variable-length sequence inputs.

    Runs a batch-first LSTM over packed sequences and maps the last layer's
    final hidden state through a linear head.
    """

    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(LSTMModel, self).__init__()
        self.hidden_dim = hidden_dim
        self.layer_dim = layer_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x, x_lengths):
        """Return (batch, output_dim) logits for padded input `x` with true lengths `x_lengths`."""
        state_shape = (self.layer_dim, x.size(0), self.hidden_dim)
        # Fresh zero states on the input's device; detached below so no
        # gradient flows into them.
        h0 = torch.zeros(state_shape, requires_grad=True).to(x.device)
        c0 = torch.zeros(state_shape, requires_grad=True).to(x.device)
        packed = torch.nn.utils.rnn.pack_padded_sequence(
            x, x_lengths, enforce_sorted=False, batch_first=True)
        packed_out, (hn, _) = self.lstm(packed, (h0.detach(), c0.detach()))
        out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
        # Classify from the final hidden state of the top LSTM layer.
        return self.fc(hn[(- 1)])
|
class NetVLAD(nn.Module):
    """NetVLAD aggregation module.

    Aggregates a (batch, max_sample, feature_size) bag of descriptors into a
    (batch, cluster_size * feature_size) VLAD descriptor via soft cluster
    assignment, residual aggregation and two-stage L2 normalisation.
    """

    def __init__(self, cluster_size, feature_size, add_batch_norm=True):
        super().__init__()
        self.feature_size = feature_size
        self.cluster_size = cluster_size
        init_sc = (1 / math.sqrt(feature_size))
        # Soft-assignment projection and per-cluster centre parameters.
        self.clusters = nn.Parameter((init_sc * th.randn(feature_size, cluster_size)))
        self.clusters2 = nn.Parameter((init_sc * th.randn(1, feature_size, cluster_size)))
        self.add_batch_norm = add_batch_norm
        self.batch_norm = nn.BatchNorm1d(cluster_size)
        self.out_dim = (cluster_size * feature_size)

    def forward(self, x):
        """Return (batch, cluster_size * feature_size) L2-normalised VLAD vectors."""
        self.sanity_checks(x)
        max_sample = x.size()[1]
        x = x.view((- 1), self.feature_size)
        if (x.device != self.clusters.device):
            # Previously this dropped into an ipdb debugger session (a debug
            # leftover); fail fast with an actionable error instead.
            raise RuntimeError(
                f'NetVLAD device mismatch: input on {x.device}, '
                f'clusters on {self.clusters.device}')
        assignment = th.matmul(x, self.clusters)
        if self.add_batch_norm:
            assignment = self.batch_norm(assignment)
        assignment = F.softmax(assignment, dim=1)
        assignment = assignment.view((- 1), max_sample, self.cluster_size)
        a_sum = th.sum(assignment, dim=1, keepdim=True)
        a = (a_sum * self.clusters2)
        assignment = assignment.transpose(1, 2)
        x = x.view((- 1), max_sample, self.feature_size)
        vlad = th.matmul(assignment, x)
        vlad = vlad.transpose(1, 2)
        vlad = (vlad - a)
        # Intra-normalise per cluster, flatten, then L2-normalise the whole vector.
        vlad = F.normalize(vlad)
        vlad = vlad.reshape((- 1), (self.cluster_size * self.feature_size))
        vlad = F.normalize(vlad)
        return vlad

    def sanity_checks(self, x):
        'Raise if the inputs or cluster parameters contain NaNs.'
        # Previously printed and dropped into ipdb; raise instead.
        if th.isnan(th.sum(x)):
            raise ValueError('nan inputs passed to NetVLAD')
        if th.isnan(self.clusters[0][0]):
            raise ValueError('nan NetVLAD cluster parameters')
|
class TxtEmbeddings(nn.Module):
    """Word-embedding lookup table, optionally initialised from a checkpoint.

    Either builds a fresh `nn.Embedding` from (vocab_size, emb_dim), or loads
    pretrained weights from `ckpt` (a BERT checkpoint path or a raw
    FloatTensor). Index 0 is the padding token in both cases.
    """

    def __init__(self, vocab_size=None, emb_dim=None, ckpt=None, freeze=False):
        super(TxtEmbeddings, self).__init__()
        if (ckpt is None):
            # Randomly initialised table.
            self.word_embeddings = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
            self.text_dim = emb_dim
            if freeze:
                for param in self.word_embeddings.parameters():
                    param.requires_grad = False
        else:
            if isinstance(ckpt, str):
                logger.debug('Loading the pretrained word embeddings from %s ...', ckpt)
                weight = torch.load(ckpt)['bert.embeddings.word_embeddings.weight']
            elif isinstance(ckpt, torch.FloatTensor):
                weight = ckpt
            self.nb_words = weight.size()[0]
            self.text_dim = weight.size()[1]
            logger.debug('Nb of words in the embedding table: %d', self.nb_words)
            self.word_embeddings = nn.Embedding.from_pretrained(
                weight, freeze=freeze, padding_idx=0)

    def forward(self, input_ids=None):
        """Return embeddings of shape input_ids.shape + (text_dim,)."""
        return self.word_embeddings(input_ids)
|
class WeTokenizer():
    """Word-embeddings tokenizer backed by a word2vec/GloVe vector file.

    Loads the vectors, prepends zero rows for '[PAD]' (id 0) and '[UNK]'
    (id 1), and exposes simple tokenize / id-conversion helpers over the
    resulting vocabulary.
    """

    def __init__(self, we_filepath, freeze=False):
        if we_filepath.endswith('.bin'):
            self.we = KeyedVectors.load_word2vec_format(we_filepath, binary=True)
        elif we_filepath.endswith('.txt'):
            # GloVe text files need a one-off conversion to word2vec format.
            w2v_format_path = we_filepath.replace('.txt', '.w2v')
            if (not os.path.exists(w2v_format_path)):
                glove2word2vec(we_filepath, w2v_format_path)
            self.we = KeyedVectors.load_word2vec_format(w2v_format_path, binary=False)
        self.text_dim = self.we.vectors.shape[1]
        # Zero vectors for the two special tokens, stacked above the table.
        pad_vec = torch.zeros((2, self.text_dim))
        raw_table = torch.FloatTensor(self.we.vectors)
        self.weights = torch.cat((pad_vec, raw_table))
        self.words = (['[PAD]', '[UNK]'] + list(self.we.vocab.keys()))
        self.we_model = TxtEmbeddings(ckpt=self.weights, freeze=freeze)

    def tokenize(self, text):
        'Convert a text into a list of known word tokens (or ["[UNK]"]).'
        kept = []
        for raw in text.lower().split(' '):
            # Strip any non-alphanumeric characters before vocabulary lookup.
            word = ''.join((ch for ch in raw if ch.isalnum()))
            if (word in self.words):
                kept.append(word)
        return (kept or ['[UNK]'])

    def convert_tokens_to_ids(self, tokens):
        'Map tokens to their vocabulary indices.'
        return [self.words.index(token) for token in tokens]

    def convert_ids_to_tokens(self, ids):
        'Map vocabulary indices back to tokens.'
        return [self.words[idx] for idx in ids]
|
class ConfigParser():
    """Config parser.

    Loads the experiment configuration (a fresh config file, or the stored
    config of an experiment being resumed), applies CLI overrides, prepares
    the save/log/visualisation directories, configures logging and
    CUDA_VISIBLE_DEVICES, and persists the effective config.
    """

    def __init__(self, args, options=''):
        # Resuming reuses the experiment's stored config and last checkpoint;
        # a fresh run requires an explicit config file.
        if args.resume:
            msg_cfg = 'If resuming experiment then no config should be provided'
            assert (args.config is None), msg_cfg
            msg_cfg = 'If resuming experiment then no checkpoint should be provided'
            assert (args.load_checkpoint is None), msg_cfg
            exp_dir = pathlib.Path(args.resume)
            checkpoint_path = get_last_checkpoint_path(exp_dir)
            self.resume = checkpoint_path
            self.cfg_fname = (exp_dir / 'config.json')
        else:
            msg_no_cfg = 'Config file must be specified'
            assert (args.config is not None), msg_no_cfg
            self.resume = None
            self.cfg_fname = pathlib.Path(args.config)
            if args.load_checkpoint:
                checkpoint_path = args.load_checkpoint
                self.resume = checkpoint_path
        if args.only_eval:
            self.only_eval = True
        else:
            self.only_eval = False
        # Load the config and apply any CLI option overrides.
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)
        # Experiment name: explicit in the config, else the config file stem.
        if ('exp_name' in self.config.keys()):
            exper_name = self.config['exp_name']
        else:
            exper_name = pathlib.Path(args.config).stem
        self._config['exp_name'] = exper_name
        if ('save_dir' in self.config['trainer'].keys()):
            save_dir = pathlib.Path(self.config['trainer']['save_dir'])
        else:
            save_dir = ((pathlib.Path.cwd() / 'exps') / exper_name)
        self._config['trainer']['save_dir'] = str(save_dir)
        self._save_dir = save_dir
        self._log_dir = save_dir
        self._web_dirs = [(save_dir / 'visualisations')]
        self._exper_name = exper_name
        self._args = args
        # Optional mirror of the visualisations under an external directory.
        if ('external_save_dir' in self.config['trainer'].keys()):
            external_save_dir = pathlib.Path(self.config['trainer']['external_save_dir'])
            self._web_dirs.append((external_save_dir / 'visualisations'))
        else:
            external_save_root = (pathlib.Path.cwd() / 'external_save_dir')
            if external_save_root.exists():
                external_save_dir = ((external_save_root / 'exps') / exper_name)
                # NOTE(review): this stores str(save_dir), not
                # str(external_save_dir) — looks suspicious; confirm intended.
                self._config['trainer']['external_save_dir'] = str(save_dir)
                self._web_dirs.append((external_save_dir / 'visualisations'))
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        logpath = (save_dir / 'log.txt')
        # Verbose mode logs to the console only; otherwise mirror to a file.
        if args.verbose:
            logging.basicConfig(level=os.environ.get('LOGLEVEL', 'DEBUG'), format='%(message)s')
        else:
            logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'), handlers=[logging.FileHandler(logpath), logging.StreamHandler()], format='%(message)s')
        logger.info('Experiment directory: %s', save_dir)
        # Constrain visible GPUs before torch queries the device count.
        if (args.device == 'cpu'):
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        elif args.device:
            os.environ['CUDA_VISIBLE_DEVICES'] = args.device
        logger.debug('CUDA_VISIBLE_DEVICES: %s', os.environ['CUDA_VISIBLE_DEVICES'])
        n_gpu = torch.cuda.device_count()
        logger.debug('n_gpu = torch.cuda.device_count(): %d (nb of gpus available)', n_gpu)
        # Persist the effective config next to the experiment outputs.
        write_json(self.config, (self.save_dir / 'config.json'))
        logging.debug(pprint.pformat(self.config))

    def init(self, name, module, *args, **kwargs):
        "Finds a function handle with the name given as 'type' in config."
        module_name = self[name]['type']
        module_args = dict(self[name]['args'])
        msg = 'Overwriting kwargs given in config file is not allowed'
        assert all([(k not in module_args) for k in kwargs]), msg
        module_args.update(kwargs)
        return getattr(module, module_name)(*args, **module_args)

    def __getitem__(self, name):
        'Dict-style access to top-level config entries.'
        return self.config[name]

    def get(self, name, default):
        'Dict-style .get with a default for top-level config entries.'
        return self.config.get(name, default)

    @property
    def config(self):
        return self._config

    @property
    def save_dir(self):
        return self._save_dir

    @property
    def log_dir(self):
        return self._log_dir

    @property
    def exper_name(self):
        return self._exper_name

    @property
    def web_dirs(self):
        return self._web_dirs

    def __repr__(self):
        # Fixed: the previous expression accessed `.pformat` on the bound
        # `pprint` *method* of a PrettyPrinter instance
        # (`PrettyPrinter().pprint.pformat`), which raised AttributeError
        # whenever repr() was taken. Use the module-level pformat directly.
        return pprint.pformat(self.__dict__)
|
def _update_config(config, options, args):
    'Apply any CLI option overrides onto `config`, in place, and return it.'
    for opt in options:
        override = getattr(args, _get_opt_name(opt.flags))
        # Only options the user actually supplied override the config.
        if (override is not None):
            _set_by_path(config, opt.target, override)
    return config
|
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
|
def _set_by_path(tree, keys, value):
'Set a value in a nested object in tree by sequence of keys.'
_get_by_path(tree, keys[:(- 1)])[keys[(- 1)]] = value
|
def _get_by_path(tree, keys):
'Access a nested object in tree by sequence of keys.'
return functools.reduce(operator.getitem, keys, tree)
|
def train(config):
    """Run a full training + evaluation experiment described by `config`.

    Builds the dataloaders, model, loss, metrics, optimizer, scheduler and
    visualizer from the config, trains (unless `config.only_eval`), then runs
    the final evaluation and logs the total runtime.
    """
    expert_dims = compute_dims(config)
    raw_input_dims = {}
    for (expert, expert_dic) in expert_dims.items():
        raw_input_dims[expert] = expert_dic['dim']
    tic = time.time()
    # Seed all RNGs before any dataloader/model construction so the
    # experiment is reproducible.
    seed = config['seed']
    cross_seed = config.get('cross_seed', seed)
    logger.debug('Setting experiment random seed to %d', seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    tokenizer = create_tokenizer(config['arch']['args']['txt_inp'])
    logger.info('Preparing the dataloaders ...')
    dataset_types = ['train_sets', 'continuous_eval_sets', 'final_eval_sets']
    data_loaders = {}
    # `loaded_data` is shared across loaders so features are read only once.
    loaded_data = {}
    for dataset_type in dataset_types:
        training = (dataset_type == 'train_sets')
        if (not config.get(dataset_type, False)):
            continue
        data_loaders[dataset_type] = []
        for (_, data_loader) in enumerate(config[dataset_type]):
            data_loaders[dataset_type].append(getattr(module_data, data_loader['type'])(**data_loader['args'], raw_input_dims=raw_input_dims, training=training, tokenizer=tokenizer, loaded_data=loaded_data, cross_seed=cross_seed))
    model = config.init(name='arch', module=module_arch, expert_dims=expert_dims, tokenizer=tokenizer)
    loss = config.init(name='loss', module=module_loss)
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    # NOTE: `filter` is a one-shot generator; it is consumed by whichever
    # optimizer branch uses it below.
    trainable_params = filter((lambda p: p.requires_grad), model.parameters())
    # Split parameters so the text BERT can get its own learning rate.
    txt_bert_params = []
    params = []
    for (name, param) in model.named_parameters():
        if param.requires_grad:
            if ('txt_bert' in name):
                txt_bert_params.append(param)
            else:
                params.append(param)
    if (config['optimizer']['type'] == 'Ranger'):
        optimizer = config.init('optimizer', ranger, trainable_params)
    elif (config['optimizer']['type'] == 'Adam_'):
        # 'Adam_' uses separate lr for the video branch and the text BERT.
        optimizer = torch.optim.Adam([{'params': params, 'lr': config['optimizer']['vid']['lr']}, {'params': txt_bert_params, 'lr': config['optimizer']['txt']['lr']}], lr=config['optimizer']['args']['lr'], weight_decay=config['optimizer']['args']['weight_decay'])
    else:
        optimizer = config.init('optimizer', torch.optim, trainable_params)
    lr_scheduler = config.init('lr_scheduler', torch.optim.lr_scheduler, optimizer)
    # -1 disables learning-rate warmup.
    if ('warmup_iterations' in config['optimizer']):
        warmup_iterations = config['optimizer']['warmup_iterations']
    else:
        warmup_iterations = (- 1)
    visualizer = config.init(name='visualizer', module=module_vis, exp_name=config.exper_name, web_dirs=config.web_dirs)
    trainer = Trainer(model, loss, metrics, optimizer, config=config, data_loaders=data_loaders, lr_scheduler=lr_scheduler, visualizer=visualizer, skip_first_n_saves=config['trainer'].get('skip_first_n_saves', 0), include_optim_in_ckpts=config['trainer'].get('include_optim_in_ckpts', False), expert_dims=expert_dims, tokenizer=tokenizer, warmup_iterations=warmup_iterations)
    if (not config.only_eval):
        logger.info('Training ...')
        trainer.train()
    # Final evaluation runs regardless of whether training happened.
    logger.info('Final evaluation ...')
    trainer.evaluate()
    duration = time.strftime('%Hh%Mm%Ss', time.gmtime((time.time() - tic)))
    logger.info('Script took %s', duration)
    best_ckpt_path = (config.save_dir / 'trained_model.pth')
    if os.path.exists(best_ckpt_path):
        logger.info('The best performing ckpt can be found at %s', str(best_ckpt_path))
|
def main_train(raw_args=None):
    """CLI entry point: parse arguments, build the ConfigParser and launch training.

    Args:
        raw_args: optional list of CLI tokens (defaults to sys.argv when None),
            which makes the entry point callable programmatically in tests.
    """
    parser = argparse.ArgumentParser(description='PyTorch Template')
    parser.add_argument('--config', default=None, type=str, help='config file path (default: None)')
    parser.add_argument('--resume', default=None, type=str, help='path to the experiment dir to resume (default: None)')
    parser.add_argument('--load_checkpoint', default=None, type=str, help='path to the checkpoint to load (default: None)')
    parser.add_argument('--device', type=str, help='indices of GPUs to enable')
    parser.add_argument('--only_eval', action='store_true')
    parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
    args = parser.parse_args(raw_args)
    args = ConfigParser(args)
    # Fixed the assertion message: it previously read "...)to exceed ..."
    # with a missing space after the interpolated epoch count.
    msg = (f"Expected the number of training epochs ({args['trainer']['epochs']}) "
           f"to exceed the save period ({args['trainer']['save_period']}), "
           "otherwise no checkpoints will be saved.")
    assert (args['trainer']['epochs'] >= args['trainer']['save_period']), msg
    train(config=args)
|
class HTML():
    """Helper for composing an HTML report of images/videos via dominate.

    Creates `web_dir` (and an `images/` subdirectory) on construction and
    accumulates content in `self.doc` until `save()` is called.
    """

    def __init__(self, web_dir, title, refresh=0):
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if (not os.path.exists(self.web_dir)):
            os.makedirs(self.web_dir)
        if (not os.path.exists(self.img_dir)):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        # Optional auto-refresh (seconds) for pages updated during training.
        if (refresh > 0):
            with self.doc.head:
                meta(http_equiv='refresh', content=str(refresh))

    def get_image_dir(self):
        'Return the directory that stores images.'
        return self.img_dir

    def add_header(self, text):
        'Append an <h3> header to the document.'
        with self.doc:
            h3(text)

    def add_videos(self, vids, txts, links, width=400, hidden_tag='hidden'):
        """Add a table row of videos with caption text.

        Captions are split on '<br>'; lines beginning with '<b>' render bold
        black, the rest cycle through the colour palette. A video path equal
        to `hidden_tag` renders a bold placeholder instead of a player.
        """
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        # Fixed typo: 'salman' is not a valid CSS colour name ('salmon' is).
        colors = ['red', 'blue', 'gold', 'salmon']
        with self.t:
            with tr():
                for (vid, txt, link) in zip(vids, txts, links):
                    td_style = 'word-wrap: break-word; width:{}px'.format(width)
                    with td(style=td_style, halign='center', valign='top'):
                        with p():
                            vid_path = str(vid)
                            if (vid_path == hidden_tag):
                                p_style = 'font-weight: bold; width:{}px;'
                                p_style = p_style.format((width * 3))
                                p('hidden video', style=p_style)
                            else:
                                with a(href=str(link)):
                                    with video():
                                        attr(controls='controls', width=width)
                                        source(src=vid_path, type='video/mp4')
                            br()
                            rows = txt.split('<br>')
                            for (idx, row) in enumerate(rows):
                                color = colors[(idx % len(colors))]
                                bold_tag = '<b>'
                                if (not row.startswith(bold_tag)):
                                    s_style = 'color:{};'.format(color)
                                else:
                                    s_style = 'color:black; font-weight: bold;'
                                    row = row[len(bold_tag):]
                                span(row, style=s_style)
                                br()

    def add_images(self, ims, txts, links, width=400):
        'Add a table row of linked images with captions.'
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    td_style = 'word-wrap: break-word;'
                    with td(style=td_style, halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        'Save the current content to the HTML file.'
        html_file = ('%s/index.html' % self.web_dir)
        # Context manager guarantees the handle is closed even if the render
        # or write fails (the previous open/write/close leaked on error).
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
|
# NOTE(review): the following dataset-viewer boilerplate was accidentally
# appended to this source file; kept as comments so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.