|
|
import torch
|
|
|
import torch.nn as nn
|
|
|
|
|
|
class ViolenceGRU(nn.Module):
    """CNN + GRU classifier for violence detection in video clips.

    A four-stage convolutional backbone extracts a feature vector from each
    frame independently; the per-frame vectors are then fed as a sequence
    into a 2-layer GRU, and the last time step's hidden output is projected
    to 2 logits (binary classification).

    NOTE(review): the flattened feature size (256 * 7 * 7) implies input
    frames of 112x112 (four 2x2 max-pools: 112 / 2**4 = 7) -- confirm this
    against the data pipeline.
    """

    def __init__(self):
        super(ViolenceGRU, self).__init__()

        # Backbone: four conv -> BN -> ReLU -> 2x2 max-pool stages,
        # doubling channels (3 -> 32 -> 64 -> 128 -> 256) and halving
        # spatial resolution at each stage.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)

        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(2, 2)

        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.pool3 = nn.MaxPool2d(2, 2)

        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.pool4 = nn.MaxPool2d(2, 2)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)

        # Per-frame feature vector length after the backbone:
        # 256 channels x 7 x 7 spatial (assumes 112x112 input frames).
        self.feature_dim = 256 * 7 * 7

        # Temporal model over the per-frame features. batch_first=True so
        # the sequence tensor is (batch, seq, feature); dropout=0.5 applies
        # between the two GRU layers.
        self.gru = nn.GRU(
            input_size=self.feature_dim,
            hidden_size=256,
            num_layers=2,
            batch_first=True,
            dropout=0.5,
        )

        # Classification head on the final GRU output.
        self.fc = nn.Linear(256, 2)

    def forward(self, x):
        """Classify a batch of video clips.

        Args:
            x: tensor of shape (batch, channels, seq, height, width).
               channels must be 3, and height/width must reduce to the 7x7
               map that ``feature_dim`` was computed for (i.e. 112x112).

        Returns:
            Logits tensor of shape (batch, 2).
        """
        b, c, s, h, w = x.size()

        # Fold the temporal axis into the batch so every frame passes
        # through the 2D CNN independently:
        # (b, c, s, h, w) -> (b, s, c, h, w) -> (b*s, c, h, w).
        x = x.permute(0, 2, 1, 3, 4).contiguous()
        x = x.view(b * s, c, h, w)

        x = self.relu(self.bn1(self.conv1(x)))
        x = self.pool1(x)

        x = self.relu(self.bn2(self.conv2(x)))
        x = self.pool2(x)

        x = self.relu(self.bn3(self.conv3(x)))
        x = self.pool3(x)

        x = self.relu(self.bn4(self.conv4(x)))
        x = self.pool4(x)

        # Flatten each frame's feature map and restore the sequence axis in
        # a single view (the original did this with two consecutive views;
        # the first, (b*s, -1), was immediately overwritten and is dropped).
        x = x.view(b, s, -1)

        # GRU over the frame sequence; discard the final hidden state tuple.
        out, _ = self.gru(x)

        # Use only the last time step's output as the clip representation.
        out = out[:, -1, :]

        out = self.dropout(out)
        out = self.fc(out)
        return out
|
|
|
|