from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn as nn
import torch


class Vector2MIDI(nn.Module):
    """Map a fixed-size feature vector to a per-timestep MIDI-token sequence.

    The input vector is repeated along the time axis, projected into the
    LSTM hidden space, run through a 2-layer LSTM over the packed (unpadded)
    positions, and decoded to vocabulary logits at every time step.
    """

    def __init__(self, input_dim, hidden_dim, n_vocab, dropout=0.2):
        super().__init__()  # initialize nn.Module machinery
        # Project the raw feature vector into the LSTM hidden dimension.
        self.input_fc = nn.Linear(input_dim, hidden_dim)
        # 2-layer LSTM; inter-layer dropout guards against overfitting.
        self.lstm = nn.LSTM(hidden_dim, hidden_dim, num_layers=2,
                            batch_first=True, dropout=dropout)
        self.fc_mid = nn.Linear(hidden_dim, 256)
        self.fc_out = nn.Linear(256, n_vocab)

    def forward(self, x, lengths, total_length=None):
        """Return per-timestep vocabulary logits.

        Args:
            x: [B, input_dim] feature vectors, one per sequence.
            lengths: [B] valid sequence lengths (moved to CPU for packing,
                as required by pack_padded_sequence).
            total_length: optional padded length of the output; defaults to
                max(lengths).

        Returns:
            [B, T, n_vocab] logits, where T = total_length or max(lengths).
        """
        B, feat_dim = x.size()
        # Plain int (not a 0-dim tensor) keeps expand() happy everywhere.
        T = int(lengths.max())
        # Broadcast the single vector across every time step:
        # [B, feat_dim] -> [B, 1, feat_dim] -> [B, T, feat_dim]
        x = x.unsqueeze(1).expand(B, T, feat_dim)
        x = self.input_fc(x)
        # Pack so the LSTM skips padded positions; enforce_sorted=False
        # avoids requiring length-sorted batches.
        packed_x = pack_padded_sequence(
            x, lengths.cpu(), batch_first=True, enforce_sorted=False
        )
        packed_out, _ = self.lstm(packed_x)
        out, _ = pad_packed_sequence(
            packed_out, batch_first=True, total_length=total_length
        )
        out = self.fc_mid(out)
        out = self.fc_out(out)  # -> [B, T, n_vocab]
        return out

    def generate(self, x, lengths, total_length=None):
        """Greedy-decode token ids and map them to the external representation.

        Internal ids appear offset by +2 from the external MIDI values;
        internal id 0 is treated as PAD and maps back to external 0.
        """
        # Pure inference: argmax has no gradient, so skip the autograd graph.
        with torch.no_grad():
            out = self.forward(x, lengths, total_length)
        preds = torch.argmax(out, dim=-1)  # [B, T], pick highest-scoring class
        external = preds - 2  # internal id -> external representation
        external[external == -2] = 0  # PAD (internal id 0) -> external 0
        return external