import os

# Select GPU 6 before any CUDA initialization.
os.environ["CUDA_VISIBLE_DEVICES"] = "6"

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

import pandas as pd
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from transformers import AutoConfig, AutoModelForCausalLM

from configuration_LightGTS import LightGTSConfig
from modeling_LightGTS import LightGTSForFinetune
class TimeSeriesDataset(Dataset):
    """Sliding-window dataset over a single standardized target column."""

    def __init__(self, df, target_col="HUFL", lookback=528, pred_len=192):
        self.lookback = lookback
        self.pred_len = pred_len

        # Univariate series: keep only the target column, shape (T, 1).
        data = df[[target_col]].values

        # Standardize the series. Note that fitting the scaler on the full
        # series leaks future statistics into earlier windows; fit on a
        # train split only if that matters for your evaluation.
        self.scaler = StandardScaler()
        self.data = self.scaler.fit_transform(data)
| |
    def __len__(self):
        # Number of complete (lookback, horizon) windows in the series.
        return len(self.data) - self.lookback - self.pred_len + 1
| |
    def __getitem__(self, idx):
        # Past `lookback` steps as input, the next `pred_len` steps as labels.
        x = self.data[idx : idx + self.lookback]
        y = self.data[idx + self.lookback : idx + self.lookback + self.pred_len]

        return {
            "input": torch.tensor(x, dtype=torch.float32),
            "labels": torch.tensor(y, dtype=torch.float32),
        }
|
|
if __name__ == "__main__":
| print("Loading data...") |
| df1 = pd.read_csv("/home/wlf/LightGTS/LightGTS/data/predict_datasets/ETTh1.csv") |
|
|
    # 528-step lookback window, 240-step forecast horizon.
    lookback_length = 528
    target_length = 240

    train_dataset = TimeSeriesDataset(df1, target_col="HUFL", lookback=lookback_length, pred_len=target_length)
    train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
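
    # Hedged sanity check (an illustrative addition): one batch should carry
    # [batch, lookback, 1] inputs and [batch, horizon, 1] labels.
    sample_batch = next(iter(train_dataloader))
    assert sample_batch["input"].shape == (32, lookback_length, 1)
    assert sample_batch["labels"].shape == (32, target_length, 1)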
|
|
|
|
| print("Initializing model...") |
| device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
    # Keep the config consistent with the data pipeline: 528-step context,
    # univariate input, 240-step horizon (target_dim/patch_len/stride can
    # still be overridden per forward call, as in the training loop below).
    LightGTS_config = LightGTSConfig(context_points=lookback_length, c_in=1, target_dim=target_length, patch_len=48, stride=48)
    LightGTS_config.save_pretrained("LightGTS-huggingface")

    # Register the custom architecture with the transformers Auto* factories.
    AutoConfig.register("LightGTS", LightGTSConfig)
    AutoModelForCausalLM.register(LightGTSConfig, LightGTSForFinetune)
|
|
    # trust_remote_code lets transformers load the custom LightGTS modeling
    # code alongside the config saved above.
    model = AutoModelForCausalLM.from_pretrained(
        "./LightGTS-huggingface",
        trust_remote_code=True
    )

    # Attach the regression criterion used by the finetuning head.
    model.loss_fn = nn.MSELoss()
    model.to(device)
    optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-2)
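
    # Optional, hedged add-on (not part of the original recipe): forecasting
    # finetunes often pair AdamW with LR decay and gradient clipping, e.g.
    #   scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    #   torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # before optimizer.step()
    # with scheduler.step() called once per epoch.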
|
|
| num_epochs = 1 |
| print(f"Starting training on {device} for {num_epochs} epochs...") |
|
|
| for epoch in range(num_epochs): |
| model.train() |
| total_loss = 0.0 |
| |
| progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}/{num_epochs}") |
| |
| for batch in progress_bar: |
| |
| inputs = batch["input"].to(device) |
| labels = batch["labels"].to(device) |
| |
| optimizer.zero_grad() |
| |
            # Per-call overrides: forecast target_length steps using 24-step patches.
            outputs = model(input=inputs, labels=labels, target_dim=target_length, patch_len=24, stride=24)
            loss = outputs["loss"]
| |
| loss.backward() |
| optimizer.step() |
| |
| total_loss += loss.item() |
| |
| progress_bar.set_postfix({"loss": f"{loss.item():.4f}"}) |
| |
| avg_loss = total_loss / len(train_dataloader) |
| print(f"Epoch [{epoch+1}/{num_epochs}] completed. Average Loss: {avg_loss:.4f}\n") |
|
|
| model.save_pretrained("./LightGTS-finetuned") |
| print("Training complete!") |