# NOTE: extraction artifact — the hosting page header ("Spaces: Sleeping")
# leaked into this capture; it is not part of the module.
# core/model_runner.py
import logging

import torch

from core.train_eval import train_and_evaluate
from core.models import (
    BiLSTMModel,
    CNNModel,
    GRUModel,
    HybridCNNGRUModel,
    LSTMModel,
    MLPModel,
    TransformerModel,
)

# Configure root logging once at import time: append INFO-and-above records
# to a shared log file with a timestamped format.
logging.basicConfig(
    filename="/tmp/app_log.txt",
    filemode="a",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
def get_model(
    df,
    features,
    target,
    model_name="LSTM",
    horizon=1,
    # Hidden size aliases
    hidden=None,
    hidden_units=None,
    # Layers aliases
    layers=None,
    n_layers=None,
    # Learning rate aliases
    lr=None,
    learning_rate=None,
    # Betas for optimizer
    beta1=0.9,
    beta2=0.999,
    # Other hyperparams
    epochs=50,
    weight_decay=0.01,
    dropout=0.2,
    # Window aliases
    window=None,
    window_size=None,
    test_split=0.2,
    selector_method="RandomForest",
    importance_threshold=0.0,
    scheduler_type="None",
    device=None,
    verbose=True,
):
    """Normalize UI-facing argument aliases and run training.

    Different callers use different names for the same hyperparameter
    (``hidden``/``hidden_units``, ``layers``/``n_layers``,
    ``lr``/``learning_rate``, ``window``/``window_size``). This wrapper
    resolves each pair to one canonical value, picks the requested model
    class, and delegates to ``train_and_evaluate``.

    Args:
        df: Input data frame handed straight to ``train_and_evaluate``.
        features: Feature column names.
        target: Target column name.
        model_name: Key into the model-class map; unknown names fall back
            to ``LSTMModel`` (historical behavior, preserved).
        device: Torch device string; auto-detected ("cuda" if available,
            else "cpu") when ``None``.

    Returns:
        The result of ``train_and_evaluate`` on success, or a
        ``{"error": <message>}`` dict on any failure. This function never
        raises.
    """
    try:
        def _pick(preferred, fallback, default):
            # First non-None of (preferred, fallback); otherwise default.
            if preferred is not None:
                return preferred
            if fallback is not None:
                return fallback
            return default

        # --- Resolve alias pairs: the explicit long-form name wins, then
        # the short name, then a sensible default. ---
        hidden = _pick(hidden_units, hidden, 64)
        layers = _pick(n_layers, layers, 1)
        lr = _pick(learning_rate, lr, 0.001)
        window = _pick(window_size, window, 30)

        # Auto-detect the device only when the caller did not choose one.
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        # Lazy %-style args: the message is only built if INFO is enabled.
        logging.info(
            "get_model called: model=%s, device=%s, hidden=%s, layers=%s, "
            "lr=%s, window=%s, epochs=%s",
            model_name, device, hidden, layers, lr, window, epochs,
        )

        # --- Select model class mapping (keys as used in UI) ---
        model_classes = {
            "LSTM": LSTMModel,
            "GRU": GRUModel,
            "CNN": CNNModel,
            "MLP": MLPModel,
            "Hybrid": HybridCNNGRUModel,
            "HybridCNNGRU": HybridCNNGRUModel,
            "Transformer": TransformerModel,
            "BiLSTM": BiLSTMModel,
        }
        model_cls = model_classes.get(model_name, LSTMModel)

        # --- Call the core training function with canonical param names ---
        result = train_and_evaluate(
            df=df,
            features=features,
            target=target,
            model_cls=model_cls,
            horizon=horizon,
            hidden=hidden,
            layers=layers,
            epochs=epochs,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            dropout=dropout,
            window=window,
            test_split=test_split,
            selector_method=selector_method,
            importance_threshold=importance_threshold,
            scheduler_type=scheduler_type,
            device=device,
            verbose=verbose,
        )

        # --- Normalize the return so callers always see either a result
        # or an {"error": ...} dict. ---
        if not result:
            logging.error("%s returned empty result.", model_name)
            return {"error": "Empty result from training"}
        if isinstance(result, dict) and result.get("error"):
            logging.error("%s training error: %s", model_name, result["error"])
            return {"error": result["error"]}
        logging.info("%s training completed successfully", model_name)
        return result
    except Exception as e:
        logging.error("Model runner error for %s: %s", model_name, str(e), exc_info=True)
        return {"error": str(e)}