File size: 1,818 Bytes
b34a74f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# lab5_functions.py
"""
Минимальный LSTM example для ЛР №5 (PyTorch).
Если у вас нет torch — функции выбросят понятную ошибку.
"""
import numpy as np
import pandas as pd

try:
    # Optional dependency: the LSTM helpers below are only defined when
    # PyTorch imports successfully.
    import torch
    import torch.nn as nn
    TORCH_AVAILABLE = True
except Exception:
    TORCH_AVAILABLE = False

if TORCH_AVAILABLE:
    class SimpleLSTM(nn.Module):
        """Minimal LSTM regressor: LSTM encoder + linear read-out head.

        Input of shape (batch, seq_len, input_size); output of shape
        (batch, out_size), predicted from the last time step only.
        """
        def __init__(self, input_size, hidden_size=64, num_layers=1, out_size=1):
            super().__init__()
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
            self.fc = nn.Linear(hidden_size, out_size)

        def forward(self, x):
            out, _ = self.lstm(x)
            # Keep only the hidden state of the final time step.
            return self.fc(out[:, -1, :])

    def create_sequences(series, lookback=30, horizon=1):
        """Build sliding-window (input, target) pairs from a 1-D series.

        Parameters
        ----------
        series : array-like
            One-dimensional sequence of values.
        lookback : int
            Number of past steps in each input window.
        horizon : int
            Number of future steps in each target.

        Returns
        -------
        (X, y) : np.ndarray
            Shapes (N, lookback) and (N, horizon), where
            N = len(series) - lookback - horizon + 1 (0 if the series
            is too short).
        """
        data = np.asarray(series)
        n = len(data) - lookback - horizon + 1
        if n <= 0:
            # Too-short series: return well-shaped empty arrays; a bare
            # np.array([]) of shape (0,) would break downstream reshaping.
            return (np.empty((0, lookback), dtype=data.dtype),
                    np.empty((0, horizon), dtype=data.dtype))
        X = np.stack([data[i:i + lookback] for i in range(n)])
        y = np.stack([data[i + lookback:i + lookback + horizon] for i in range(n)])
        return X, y

    def train_lstm(series, lookback=30, epochs=20, lr=1e-3):
        """Train a SimpleLSTM on a 1-D series with full-batch Adam/MSE.

        Parameters
        ----------
        series : array-like
            One-dimensional series; must have more than `lookback`
            elements to produce at least one training window.
        lookback : int
            Window length fed to the LSTM.
        epochs : int
            Number of full-batch gradient steps.
        lr : float
            Adam learning rate.

        Returns
        -------
        SimpleLSTM
            The trained model, switched to eval mode.
        """
        X, y = create_sequences(series, lookback=lookback, horizon=1)
        X = torch.from_numpy(X).float().unsqueeze(-1)  # (N, lookback, 1)
        y = torch.from_numpy(y).float().squeeze(-1)    # (N,)
        model = SimpleLSTM(input_size=1)
        opt = torch.optim.Adam(model.parameters(), lr=lr)
        loss_fn = nn.MSELoss()
        model.train()
        for _ in range(epochs):
            opt.zero_grad()
            pred = model(X)
            # squeeze(-1), not squeeze(): a full squeeze() collapses a
            # single-sample batch to a 0-d tensor and mismatches y's shape.
            loss = loss_fn(pred.squeeze(-1), y)
            loss.backward()
            opt.step()
        model.eval()
        return model
else:
    def train_lstm(*args, **kwargs):
        """Stub that fails loudly when PyTorch is not installed."""
        raise ImportError("PyTorch не установлен. Установите torch, чтобы использовать LSTM.")