# Evaluation script: compute test-split RMSE/MAE for a trained per-symbol StockLSTM.
import json
import math
import os
import pickle
from datetime import datetime, timedelta, timezone

import numpy as np
import torch
import yfinance as yf
from sklearn.metrics import mean_absolute_error, mean_squared_error

from models import StockLSTM
# Hide every CUDA device so torch runs strictly on CPU in this process.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

# Root directory holding per-symbol artifacts (model.pt, scaler.pkl, meta.json).
ARTIFACTS_DIR = "artifacts"
def evaluate(symbol: str) -> dict:
    """Evaluate a trained per-symbol LSTM on the held-out 20% test split.

    Loads the model weights, fitted scaler, and metadata from
    ``ARTIFACTS_DIR/<SYMBOL>/``, downloads ~5 years of daily closes via
    yfinance, rebuilds the test-split sequences, and reports RMSE/MAE in
    original price units.

    Args:
        symbol: Ticker symbol (case-insensitive).

    Returns:
        dict with keys "symbol", "rmse", "mae", "n" (number of test targets).

    Raises:
        ValueError: if the download yields too few rows to build one sequence.
    """
    base = os.path.join(ARTIFACTS_DIR, symbol.upper())

    # Rebuild the architecture exactly as trained; map weights to CPU since
    # CUDA is disabled for this process.
    model = StockLSTM(input_dim=1, hidden_dim=64, num_layers=2, dropout=0.2)
    model.load_state_dict(
        torch.load(os.path.join(base, "model.pt"), map_location="cpu")
    )
    model.eval()

    # NOTE(review): pickle.load is only acceptable because these artifacts are
    # produced by our own training job — never unpickle untrusted data.
    with open(os.path.join(base, "scaler.pkl"), "rb") as f:
        scaler = pickle.load(f)
    with open(os.path.join(base, "meta.json"), "r") as f:
        meta = json.load(f)
    seq_len = meta["seq_len"]

    # Timezone-aware "today": datetime.utcnow() is deprecated since 3.12.
    end = datetime.now(timezone.utc).date()
    start = end - timedelta(days=5 * 365)
    df = yf.download(
        symbol,
        start=start.isoformat(),
        end=end.isoformat(),
        progress=False,
        auto_adjust=True,
    )
    data = df[["Close"]].dropna().values
    # Guard: an unknown/delisted ticker can return an empty frame; without
    # this check np.array([]) below crashes with an opaque shape error.
    if len(data) <= seq_len:
        raise ValueError(
            f"Not enough data for {symbol!r}: got {len(data)} rows, "
            f"need more than seq_len={seq_len}"
        )
    scaled = scaler.transform(data)

    # Same 80/20 split as training; prepend seq_len rows of train tail so the
    # first test target still has a full look-back window.
    split_idx = int(len(scaled) * 0.8)
    test_scaled = scaled[split_idx - seq_len:]

    X, y = [], []
    for i in range(seq_len, len(test_scaled)):
        X.append(test_scaled[i - seq_len:i])
        y.append(test_scaled[i])
    X = np.array(X, dtype=np.float32)
    y = np.array(y, dtype=np.float32)

    X_t = torch.from_numpy(X)  # [N, T, 1]
    # Bug fix: run inference under no_grad — otherwise the output tensor
    # requires grad and .numpy() raises RuntimeError (and autograd state is
    # tracked for nothing).
    with torch.no_grad():
        pred_scaled = model(X_t).numpy()

    # Undo the feature scaling so the metrics are in price units.
    pred = scaler.inverse_transform(pred_scaled)
    y_true = scaler.inverse_transform(y)
    rmse = math.sqrt(mean_squared_error(y_true, pred))
    mae = mean_absolute_error(y_true, pred)
    return {"symbol": symbol.upper(), "rmse": rmse, "mae": mae, "n": len(y_true)}