# AdriBat1
# Implement and verify remote persistence workflow
# 979e977
#!/usr/bin/env python3
"""
Remote Neural Network Training (Persistent)
===========================================
Trains the network and saves the model to a persistent folder on the server,
instead of downloading it locally.
"""
from antigravity_sdk import RemoteGPU
# Definition of the code to run REMOTELY.
# NOTE(review): this raw string is shipped verbatim to the server and executed
# there by RemoteGPU.run() — every byte of it (including its internal comments)
# is program data, so it is intentionally left untouched and documented here.
# The embedded script: uninstalls `transformers` (presumably to resolve a
# dependency clash — TODO confirm), trains a small MLP classifier on synthetic
# 2-D data, saves the weights to a persistent server-side directory (NOT
# downloaded), and writes a loss plot to the temp dir (which IS downloaded).
TRAINING_CODE: str = r'''
import os
import sys
print("🔧 Fixing dependencies...")
os.system(f"{sys.executable} -m pip uninstall -y transformers")
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
print("🚀 Starting Remote Training (Persistence Mode)...")
# 0. Setup Storage Directory
# Questa directory è fuori dalla temp dir di esecuzione, quindi non viene cancellata
# e NON viene scaricata automaticamente dal server.
STORAGE_DIR = "/home/user/app/storage"
os.makedirs(STORAGE_DIR, exist_ok=True)
print(f" 📂 Storage Directory: {STORAGE_DIR}")
# 1. Generate Synthetic Data
print(" Generating data...")
np.random.seed(42)
X = np.random.randn(1000, 2)
y = (X[:,0]**2 + X[:,1]**2 < 0.8).astype(int)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f" Using device: {device}")
X_tensor = torch.tensor(X, dtype=torch.float32).to(device)
y_tensor = torch.tensor(y, dtype=torch.float32).unsqueeze(1).to(device)
# 2. Define Model
class SimpleNN(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Linear(2, 16),
nn.ReLU(),
nn.Linear(16, 16),
nn.ReLU(),
nn.Linear(16, 1),
nn.Sigmoid()
)
def forward(self, x):
return self.net(x)
model = SimpleNN().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
criterion = nn.BCELoss()
# 3. Training Loop
print(" Training model...")
losses = []
epochs = 500
for epoch in range(epochs):
optimizer.zero_grad()
outputs = model(X_tensor)
loss = criterion(outputs, y_tensor)
loss.backward()
optimizer.step()
losses.append(loss.item())
if epoch % 100 == 0:
print(f" Epoch {epoch}: Loss {loss.item():.4f}")
# 4. Save Model Persistently
# Salviamo .pth classico perché rimaniamo in ambiente PyTorch sul server
model_filename = os.path.join(STORAGE_DIR, "persistent_model.pth")
torch.save(model.state_dict(), model_filename)
print(f" 💾 Model saved to SERVER: {model_filename}")
print(" (Il file NON verrà scaricato, rimarrà sul server per inferenza futura)")
# 5. Plot (Questo lo salviamo in locale per vedere come è andata)
print(" Generating training plots...")
plt.figure(figsize=(12, 5))
plt.plot(losses)
plt.title("Training Loss")
plt.savefig("training_report.png") # Salva nella temp dir -> viene scaricato
print(" 📊 Report saved locally as training_report.png")
print("✅ Training Complete.")
'''
def main() -> None:
    """Submit the persistent-training job to the remote GPU and report the outcome.

    The job's success is detected by scanning the remote stdout for the
    final marker printed by TRAINING_CODE ("Training Complete").
    """
    print("📡 Connecting to Remote GPU for Persistent Training...")
    session = RemoteGPU()
    outcome = session.run(TRAINING_CODE)
    # Guard clause: bail out early when the success marker is absent.
    if "Training Complete" not in outcome.output:
        print("\n⚠️ Qualcosa è andato storto.")
        return
    print("\n🏆 Training terminato!")
    print(" Il modello è sicuro sul server in '/home/user/app/storage/persistent_model.pth'.")
    print(" Il report grafico è stato scaricato.")
# Run the workflow only when executed as a script, not on import.
if __name__ == "__main__":
main()