AdriBat1
Implement and verify remote persistence workflow
979e977
#!/usr/bin/env python3
"""
Remote Persistent Inference
===========================
This script asks the server to load the previously saved model
(from the persistent storage folder) and run an inference.
No files are downloaded: only textual results come back.
"""
from antigravity_sdk import RemoteGPU
# Python payload executed verbatim on the remote GPU server.
# It reloads the model persisted by the training script from server-side
# storage and prints inference results; only this textual output travels
# back to the client — no files are transferred.
INFERENCE_CODE = r'''
import torch
import torch.nn as nn
import numpy as np
import os
import sys

# 1. Re-define the Model (same architecture used at training time,
#    required so the saved state_dict keys match).
class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(2, 16),
            nn.ReLU(),
            nn.Linear(16, 16),
            nn.ReLU(),
            nn.Linear(16, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.net(x)

# 2. Load Persisted Model from the server's persistent storage directory.
STORAGE_DIR = "/home/user/app/storage"
model_path = os.path.join(STORAGE_DIR, "persistent_model.pth")
print(f"📂 Loading model from SERVER storage: {model_path}...")

if not os.path.exists(model_path):
    print("❌ Model not found! Have you run 'train_nn.py' in persistent mode?")
    sys.exit(1)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SimpleNN().to(device)
try:
    # map_location keeps the load working even when the checkpoint was
    # saved on a different device (e.g. GPU checkpoint, CPU runtime).
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()
    print("✅ Model loaded successfully on GPU!")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    sys.exit(1)

# 3. Remote Inference on a few fixed 2-D test points.
print("\n🔮 Running Inference remotely...")
test_points = np.array([
    [0.0, 0.0],
    [0.8, 0.8],
    [-0.5, 0.5],
    [2.0, 2.0]
], dtype=np.float32)

input_tensor = torch.tensor(test_points).to(device)
with torch.no_grad():
    predictions = model(input_tensor).cpu().numpy()

print("-" * 40)
print(f"{'X':<10} {'Y':<10} | {'Prob':<10} {'Class':<10}")
print("-" * 40)
for i, point in enumerate(test_points):
    prob = predictions[i][0]
    # Sigmoid output: threshold at 0.5 for the binary class label.
    cls = 1 if prob > 0.5 else 0
    print(f"{point[0]:<10.2f} {point[1]:<10.2f} | {prob:<10.4f} {cls:<10}")
print("-" * 40)
print("✅ Inference executed on SERVER.")
'''
def main():
    """Submit the inference payload to the remote GPU and run it server-side."""
    print("📡 Connecting to Remote GPU for Inference...")
    gpu = RemoteGPU()
    # Just execute: no result files are expected back, only text output.
    gpu.run(INFERENCE_CODE)


if __name__ == "__main__":
    main()