# Diva/Models/Vector2MIDI.py
# Commit 5054620 — "모델 구조 구상" (model architecture draft)
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn as nn
import numpy as np
import torch
class Vector2MIDI(nn.Module):
    """LSTM decoder mapping per-frame feature vectors to MIDI-token logits.

    Each frame is projected into the hidden space, a 2-layer LSTM runs over
    the (variable-length) sequence, and a two-stage linear head produces
    per-timestep vocabulary logits.
    """

    def __init__(self, input_dim, hidden_dim, n_vocab, dropout=0.2):
        super().__init__()
        # Frame-wise projection from input features into the LSTM hidden size.
        self.input_fc = nn.Linear(input_dim, hidden_dim)
        # Stacked LSTM; inter-layer dropout guards against overfitting.
        self.lstm = nn.LSTM(
            hidden_dim,
            hidden_dim,
            num_layers=2,
            batch_first=True,
            dropout=dropout,
        )
        # Two-stage output head: hidden -> 256 -> vocabulary logits.
        self.fc_mid = nn.Linear(hidden_dim, 256)
        self.fc_out = nn.Linear(256, n_vocab)

    def forward(self, x, lengths):
        """Return logits of shape [batch, max(lengths), n_vocab].

        x: padded batch, [batch, seq_len, input_dim].
        lengths: true per-sequence lengths (moved to CPU for packing).
        """
        projected = self.input_fc(x)
        # Token counts differ per MIDI file, so pack the padded batch before
        # the LSTM to skip computation on padding positions.
        packed_in = pack_padded_sequence(
            projected, lengths.cpu(), batch_first=True, enforce_sorted=False
        )
        packed_out, _ = self.lstm(packed_in)
        # Restore a padded [batch, T, hidden] tensor for the output head.
        unpacked, _ = pad_packed_sequence(packed_out, batch_first=True)
        return self.fc_out(self.fc_mid(unpacked))  # [B, T, vocab_size]