"""Train a tiny LSTM on the sequence 1..50 and serve next-number predictions via Gradio."""
import torch
import torch.nn as nn
import numpy as np
import gradio as gr
from sklearn.preprocessing import MinMaxScaler

# Sequence 1..50 and sliding-window size (3 inputs -> 1 target)
seq = np.arange(1, 51)
window_size = 3

# Build (window, next-value) training pairs from the sequence
x, y = [], []
for i in range(len(seq) - window_size):
    x.append(seq[i:i + window_size])
    y.append(seq[i + window_size])
x = np.array(x)
y = np.array(y)

# Normalize the target only.
# NOTE(review): the input windows are left unscaled (raw values 1..50), so the
# model extrapolates poorly outside that range — consider scaling x too; confirm intent.
scaler = MinMaxScaler()
y = scaler.fit_transform(y.reshape(-1, 1))

# Convert to torch tensors; LSTM expects (batch, seq_len, features)
x_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(-1)  # (N, window_size, 1)
y_tensor = torch.tensor(y, dtype=torch.float32)                # (N, 1)


class LSTMModel(nn.Module):
    """Single-layer LSTM (hidden size 50) with a linear head predicting one scalar."""

    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(input_size=1, hidden_size=50, batch_first=True)
        self.fc = nn.Linear(50, 1)

    def forward(self, x):
        out, _ = self.lstm(x)   # out: (batch, seq_len, hidden)
        out = out[:, -1, :]     # keep only the last time step
        return self.fc(out)


model = LSTMModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Full-batch training for 100 epochs
for epoch in range(100):
    model.train()
    optimizer.zero_grad()
    outputs = model(x_tensor)
    loss = criterion(outputs, y_tensor)
    loss.backward()
    optimizer.step()


def predict_next(n1, n2, n3):
    """Predict the number that follows the three given inputs.

    Returns a formatted result string, or an error message string, for the
    Gradio Text output.
    """
    try:
        # Shape the three numbers as a (1, 3, 1) batch for the LSTM
        inp = torch.tensor([[n1, n2, n3]], dtype=torch.float32).unsqueeze(-1)
        model.eval()
        with torch.no_grad():
            pred = model(inp)
        # Undo the target scaling to recover a value in the original range
        pred_val = scaler.inverse_transform(pred.numpy())[0][0]
        return f"🔮 Predicted next number: {pred_val:.2f}"
    except Exception as e:  # UI boundary: surface any failure as a message, don't crash
        return f"❌ Error: {str(e)}"


# Gradio UI: three number inputs -> one text output
gr.Interface(
    fn=predict_next,
    inputs=[
        gr.Number(label="Number 1"),
        gr.Number(label="Number 2"),
        gr.Number(label="Number 3"),
    ],
    outputs=gr.Text(label="Prediction"),
    title="LSTM Number Prediction",  # fixed typo: "Predicton"
    description="Predict the next number in a sequence using an LSTM built with PyTorch",
).launch()