|
|
""" |
|
|
Run convert_to_ud.py on RunPod GPU instance. |
|
|
""" |
|
|
|
|
|
import os |
|
|
import time |
|
|
import runpod |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load environment variables (e.g. RUNPOD_API_KEY) from a local .env file.
load_dotenv()


# Authenticate the RunPod SDK. Will be None if RUNPOD_API_KEY is unset,
# in which case subsequent API calls will fail.
runpod.api_key = os.getenv("RUNPOD_API_KEY")
|
|
|
|
|
|
|
|
def list_gpus():
    """Fetch, print, and return the GPU types offered by RunPod."""
    available = runpod.get_gpus()
    print("Available GPUs:")
    for entry in available:
        price = entry.get('securePrice', 'N/A')
        print(f" - {entry['id']}: {entry['displayName']} (${price}/hr)")
    return available
|
|
|
|
|
|
|
|
def get_ssh_public_key(ssh_dir="~/.ssh"):
    """Return the first SSH public key found in *ssh_dir*.

    Checks for an Ed25519 key first, then falls back to RSA.

    Args:
        ssh_dir: Directory to search; supports ``~`` expansion.
            Defaults to the current user's ``~/.ssh``.

    Returns:
        The key file contents with surrounding whitespace stripped,
        or None if no public key file exists.
    """
    # NOTE: the redundant function-local `import os` was removed;
    # `os` is already imported at module level.
    ssh_dir = os.path.expanduser(ssh_dir)
    # Prefer the modern Ed25519 key over RSA.
    for key_file in ("id_ed25519.pub", "id_rsa.pub"):
        key_path = os.path.join(ssh_dir, key_file)
        if os.path.exists(key_path):
            with open(key_path, "r") as f:
                return f.read().strip()
    return None
|
|
|
|
|
|
|
|
def create_pod(gpu_type="NVIDIA RTX A4000", name="udd-converter"):
    """Provision a new RunPod secure-cloud pod with the requested GPU.

    Args:
        gpu_type: RunPod GPU type identifier.
        name: Display name for the new pod.

    Returns:
        The pod dict returned by the RunPod API (includes 'id').
    """
    print(f"Creating pod with {gpu_type}...")

    # Inject the local public key so SSH access works out of the box.
    ssh_key = get_ssh_public_key()
    if ssh_key:
        print(f"Using SSH key: {ssh_key[:50]}...")

    environment = {
        "JUPYTER_PASSWORD": "underthesea",
        "PUBLIC_KEY": ssh_key or "",
    }
    pod = runpod.create_pod(
        name=name,
        image_name="runpod/pytorch:2.1.0-py3.10-cuda11.8.0-devel-ubuntu22.04",
        gpu_type_id=gpu_type,
        cloud_type="SECURE",
        support_public_ip=True,
        ports="22/tcp",
        volume_in_gb=20,
        container_disk_in_gb=20,
        env=environment,
    )

    print(f"Pod created: {pod['id']}")
    return pod
|
|
|
|
|
|
|
|
def wait_for_pod_ready(pod_id, timeout=300):
    """Poll RunPod until the pod is RUNNING and exposes a public SSH port.

    Args:
        pod_id: ID of the pod to wait for.
        timeout: Maximum number of seconds to wait.

    Returns:
        Tuple of (pod dict, public IP, public SSH port).

    Raises:
        TimeoutError: If the pod is not ready within *timeout* seconds.
    """
    print(f"Waiting for pod {pod_id} to be ready...")
    start_time = time.time()

    while time.time() - start_time < timeout:
        pod = runpod.get_pod(pod_id)
        status = pod.get('desiredStatus')
        runtime = pod.get('runtime')

        if status == 'RUNNING' and runtime:
            # Find the public port mapped onto the container's SSH port (22).
            ports = runtime.get('ports') or []
            ssh_port = None
            for port in ports:
                if port.get('privatePort') == 22:
                    ssh_port = port.get('publicPort')
                    break

            if ssh_port:
                # Prefer the GPU's public IP, falling back to the machine's.
                # BUGFIX: `runtime.get('gpus', [{}])[0]` raised IndexError
                # when the 'gpus' key existed but held an empty list; the
                # `or [{}]` form covers both missing and empty.
                gpus = runtime.get('gpus') or [{}]
                ip = gpus[0].get('publicIp') or pod.get('machine', {}).get('publicIp')
                print("Pod is ready!")
                print(f" SSH: ssh root@{ip} -p {ssh_port}")
                return pod, ip, ssh_port

        # Not ready yet: report and back off before the next poll.
        print(f" Status: {status}, waiting...")
        time.sleep(10)

    raise TimeoutError(f"Pod {pod_id} did not become ready within {timeout} seconds")
|
|
|
|
|
|
|
|
def list_pods():
    """Fetch, print, and return all pods on the account."""
    all_pods = runpod.get_pods()
    print("Current pods:")
    for entry in all_pods:
        status = entry.get('desiredStatus', 'unknown')
        print(f" - {entry['id']}: {entry['name']} ({status})")
    return all_pods
|
|
|
|
|
|
|
|
def terminate_pod(pod_id):
    """Terminate the pod identified by *pod_id* via the RunPod API."""
    print(f"Terminating pod {pod_id}...")
    runpod.terminate_pod(pod_id)  # irreversible: pod and its disks are destroyed
    print("Pod terminated.")
|
|
|
|
|
|
|
|
def get_ssh_command(pod_id):
    """Build, print, and return the SSH command for a running pod.

    Returns None (after printing a diagnostic) when the pod is not
    running or the public IP / SSH port cannot be determined.
    """
    pod = runpod.get_pod(pod_id)
    runtime = pod.get('runtime')

    if not runtime:
        print("Pod is not running")
        return None

    # Locate the public port mapped onto the container's SSH port (22).
    ssh_port = next(
        (p.get('publicPort') for p in runtime.get('ports', [])
         if p.get('privatePort') == 22),
        None,
    )

    # Prefer the GPU's public IP; fall back to the host machine's.
    gpu_list = runtime.get('gpus', [])
    ip = gpu_list[0].get('publicIp') if gpu_list else None
    if not ip:
        ip = pod.get('machine', {}).get('publicIp')

    if ip and ssh_port:
        cmd = f"ssh root@{ip} -p {ssh_port} -i ~/.ssh/id_ed25519"
        print(f"SSH command: {cmd}")
        return cmd

    print("Could not determine SSH connection details")
    return None
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: dispatch on the requested pod-management action."""
    import argparse

    parser = argparse.ArgumentParser(description="Manage RunPod for UD conversion")
    parser.add_argument("action", choices=["list-gpus", "list-pods", "create", "ssh", "terminate"],
                        help="Action to perform")
    parser.add_argument("--gpu", type=str, default="NVIDIA RTX A4000", help="GPU type")
    parser.add_argument("--pod-id", type=str, help="Pod ID for ssh/terminate")
    parser.add_argument("--name", type=str, default="udd-converter", help="Pod name")
    args = parser.parse_args()

    action = args.action
    if action == "list-gpus":
        list_gpus()
        return
    if action == "list-pods":
        list_pods()
        return
    if action == "create":
        new_pod = create_pod(gpu_type=args.gpu, name=args.name)
        wait_for_pod_ready(new_pod['id'])
        return
    if action == "ssh":
        pod_id = args.pod_id
        if not pod_id:
            # No pod given: default to the first pod on the account, if any.
            existing = list_pods()
            if existing:
                pod_id = existing[0]['id']
        if pod_id:
            get_ssh_command(pod_id)
        return
    if action == "terminate":
        if args.pod_id:
            terminate_pod(args.pod_id)
        else:
            print("Please provide --pod-id")
|
|
|
|
|
|
|
|
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":


    main()
|
|
|