"""Google Cloud Storage utilities for Cloud Run jobs."""
from __future__ import annotations
import logging
import shutil
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from datasets import Dataset
LOGGER = logging.getLogger(__name__)
def get_gcs_client():
    """Construct and return a ``google.cloud.storage.Client``.

    The import is deferred to call time so importing this module does not
    require the google-cloud-storage dependency.
    """
    from google.cloud import storage

    client = storage.Client()
    return client
def parse_gcs_uri(uri: str) -> tuple[str, str]:
    """Split a ``gs://bucket/key`` URI into its ``(bucket, key)`` parts.

    The key may be empty when the URI names only a bucket.

    Raises:
        ValueError: If *uri* does not use the ``gs://`` scheme.
    """
    scheme = "gs://"
    if not uri.startswith(scheme):
        raise ValueError(f"Invalid GCS URI: {uri}")
    # partition("/") yields ("bucket", "", "") when there is no key,
    # matching the original split-with-fallback behavior.
    bucket, _, key = uri[len(scheme):].partition("/")
    return bucket, key
def upload_files_to_gcs(
    *,
    output_dir: Path,
    gcs_uri: str,
    path_prefix: str = "",
) -> None:
    """Upload every file under *output_dir* to GCS.

    Keys are formed from the URI's key, the optional *path_prefix*, and each
    file's path relative to *output_dir*. An empty *gcs_uri* is a no-op;
    the first upload failure is logged and re-raised.
    """
    if not gcs_uri:
        LOGGER.info("No GCS URI provided; skipping upload.")
        return

    bucket_name, base_prefix = parse_gcs_uri(gcs_uri)

    # Join the URI key and the extra prefix, avoiding duplicate slashes.
    full_prefix = base_prefix.rstrip("/")
    if path_prefix:
        extra = path_prefix.strip("/")
        full_prefix = f"{full_prefix}/{extra}" if full_prefix else extra

    bucket = get_gcs_client().bucket(bucket_name)

    root = output_dir.resolve()
    to_upload = sorted(entry for entry in root.rglob("*") if entry.is_file())
    if not to_upload:
        LOGGER.info("Nothing to upload from %s", output_dir)
        return

    LOGGER.info(
        "Uploading %d files to gs://%s/%s", len(to_upload), bucket_name, full_prefix
    )
    for path in to_upload:
        relative = path.relative_to(root).as_posix()
        key = f"{full_prefix}/{relative}" if full_prefix else relative
        try:
            bucket.blob(key).upload_from_filename(str(path))
        except Exception as exc:
            LOGGER.error(
                "Failed to upload %s to gs://%s/%s: %s",
                path,
                bucket_name,
                key,
                exc,
            )
            raise
def save_dataset_to_gcs(
    dataset,
    gcs_uri: str,
    name: str = "dataset",
) -> str:
    """Save an HF dataset to GCS in Arrow format.

    Args:
        dataset: A ``datasets.Dataset`` or ``DatasetDict``. For a
            ``DatasetDict``, the ``train`` split is used when present,
            otherwise the first split.
        gcs_uri: Destination ``gs://bucket/prefix`` URI.
        name: Subdirectory name appended under the URI's prefix.

    Returns:
        The full ``gs://`` URI the dataset was written under.
    """
    import tempfile

    from datasets import DatasetDict

    # Handle DatasetDict by extracting a single split.
    if isinstance(dataset, DatasetDict):
        if "train" in dataset:
            dataset = dataset["train"]
        else:
            split_name = next(iter(dataset))
            dataset = dataset[split_name]
            LOGGER.info("Using split '%s' from DatasetDict", split_name)

    bucket_name, prefix = parse_gcs_uri(gcs_uri)
    full_prefix = prefix.rstrip("/")
    gcs_prefix = f"{full_prefix}/{name}" if full_prefix else name

    # Stage locally in a unique temp dir: the previous hard-coded
    # /tmp/{name}_arrow_temp path could collide across concurrent jobs, and
    # cleanup was skipped if the upload raised. TemporaryDirectory fixes both.
    with tempfile.TemporaryDirectory(prefix=f"{name}_arrow_") as tmp:
        local_dir = Path(tmp)
        LOGGER.info("Saving dataset to Arrow format...")
        dataset.save_to_disk(str(local_dir))
        # Upload entire directory to GCS.
        upload_files_to_gcs(
            output_dir=local_dir, gcs_uri=f"gs://{bucket_name}/{gcs_prefix}"
        )

    result_uri = f"gs://{bucket_name}/{gcs_prefix}"
    LOGGER.info("Saved dataset to %s", result_uri)
    return result_uri
def load_dataset_from_gcs(gcs_uri: str, split: str = "train") -> "Dataset":
    """Load an HF dataset from GCS.

    Blobs are downloaded to a fresh local temp directory first (bypasses
    gcsfs caching issues) and then read with ``datasets.load_from_disk``.

    Args:
        gcs_uri: ``gs://bucket/prefix`` URI of a dataset previously written
            with ``Dataset.save_to_disk`` (e.g. via ``save_dataset_to_gcs``).
        split: Unused; retained for backward compatibility with callers.

    Returns:
        The loaded ``datasets.Dataset``.
    """
    import tempfile

    from datasets import load_from_disk

    LOGGER.info("Loading dataset from %s", gcs_uri)
    bucket_name, prefix = parse_gcs_uri(gcs_uri)

    # Download to a local temp directory (bypasses the gcsfs cache).
    client = get_gcs_client()
    bucket = client.bucket(bucket_name)
    local_dir = Path(tempfile.mkdtemp(prefix="gcs_dataset_"))

    downloaded = 0
    for blob in bucket.list_blobs(prefix=f"{prefix}/"):
        # BUG FIX: the local path previously used a corrupted literal instead
        # of the blob's filename, so every blob overwrote the same bogus file.
        # Derive it from the blob name, preserving any sub-structure.
        rel = blob.name[len(prefix):].lstrip("/")
        if not rel:  # Skip directory marker objects.
            continue
        local_path = local_dir / rel
        local_path.parent.mkdir(parents=True, exist_ok=True)
        blob.download_to_filename(str(local_path))
        downloaded += 1
    LOGGER.info("Downloaded %d files to %s", downloaded, local_dir)

    # Load from the local copy.
    return load_from_disk(str(local_dir))