import torch.nn as nn


class LSTM(nn.Module):
    """Three stacked LSTM layers followed by a fully connected head."""

    def __init__(self, input_size, lstm_layer_sizes, linear_layer_size, output_size):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.linear_layer_size = linear_layer_size
        # Stacked LSTM layers: each layer consumes the output sequence of the previous one.
        self.lstm_layer_1 = nn.LSTM(input_size, lstm_layer_sizes[0], batch_first=True)
        self.lstm_layer_2 = nn.LSTM(lstm_layer_sizes[0], lstm_layer_sizes[1], batch_first=True)
        self.lstm_layer_3 = nn.LSTM(lstm_layer_sizes[1], lstm_layer_sizes[2], batch_first=True)
        # Fully connected head (the Linear module defined below).
        self.fc = Linear(lstm_layer_sizes[2], self.linear_layer_size, output_size)
        self.apply(self.initialize_weights)
    def forward(self, x):
        # Each nn.LSTM returns (output sequence, (hidden state, cell state)).
        out, (hn_1, cn_1) = self.lstm_layer_1(x)
        out, (hn_2, cn_2) = self.lstm_layer_2(out)
        out, (hn_3, cn_3) = self.lstm_layer_3(out)
        # Use the last layer's final hidden state as the sequence summary.
        out = hn_3[-1]
        out = self.fc(out)
        return out
    def initialize_weights(self, layer):
        if isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)
        elif isinstance(layer, nn.LSTM):
            for name, param in layer.named_parameters():
                if 'weight' in name:
                    nn.init.xavier_uniform_(param.data)
                elif 'bias' in name:
                    nn.init.zeros_(param.data)

class Linear(nn.Module):
    """Fully connected head with two hidden layers and ReLU activations."""

    def __init__(self, input_size, hidden_sizes, output_size):
        super(Linear, self).__init__()
        self.relu = nn.ReLU()
        # Sigmoid and tanh are defined but unused in forward().
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        self.input = nn.Linear(input_size, hidden_sizes[0])
        self.fc = nn.Linear(hidden_sizes[0], hidden_sizes[1])
        self.output = nn.Linear(hidden_sizes[1], output_size)
        self.apply(self.initialize_weights)
    def forward(self, x):
        out = self.relu(self.input(x))
        out = self.relu(self.fc(out))
        out = self.output(out)
        return out

    def initialize_weights(self, layer):
        if isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)
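
# Minimal usage sketch (not part of the original file). The sizes below are
# hypothetical, chosen only to illustrate the expected shapes: lstm_layer_sizes
# holds the hidden sizes of the three stacked LSTMs, and linear_layer_size
# holds the two hidden sizes of the fully connected head.
if __name__ == "__main__":
    import torch

    model = LSTM(input_size=8,
                 lstm_layer_sizes=[64, 32, 16],
                 linear_layer_size=[16, 8],
                 output_size=1)
    x = torch.randn(4, 10, 8)   # (batch, sequence length, features)
    y = model(x)
    print(y.shape)              # torch.Size([4, 1])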