| """Convert .npy telemetry files to per-channel Parquet files for Hugging Face.""" |
|
|
| import csv |
| from pathlib import Path |
|
|
| import numpy as np |
| import pandas as pd |
|
|
|
|
| def load_channel_mapping(csv_path: Path) -> dict[str, str]: |
| """Load channel_id -> spacecraft mapping from labeled_anomalies.csv.""" |
| mapping = {} |
| with open(csv_path) as f: |
| for row in csv.DictReader(f): |
| mapping[row['chan_id']] = row['spacecraft'] |
| return mapping |
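
# Illustrative usage sketch (assumption: labeled_anomalies.csv carries at least
# the 'chan_id' and 'spacecraft' columns read above; the channel id below is
# only an example):
#
#   mapping = load_channel_mapping(Path('labeled_anomalies.csv'))
#   mapping['P-1']  # -> 'SMAP' or 'MSL', depending on the channel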
|
|
|
|
def convert_npy_to_parquet(npy_path: Path) -> pd.DataFrame:
    """Convert a single .npy file to a DataFrame."""
    arr = np.load(npy_path)
    n_timesteps, n_cols = arr.shape

    col_names = ['value'] + [f'cmd_{i}' for i in range(n_cols - 1)]
    df = pd.DataFrame(arr, columns=col_names)
    df.insert(0, 'timestep', range(n_timesteps))
    return df
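
# Resulting layout (derived from the column naming above): an input array of
# shape (n_timesteps, n_cols) becomes a frame with columns
#   timestep, value, cmd_0, ..., cmd_{n_cols - 2}
# where 'value' is the first raw column and the cmd_* columns hold the rest.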
|
|
|
|
def generate_configs_yaml(channels: list[str]) -> str:
    """Generate the configs section for README YAML frontmatter."""
    lines = ['configs:']
    for chan in sorted(channels):
        lines.append(f'  - config_name: "{chan}"')
        lines.append('    data_files:')
        lines.append('      - split: train')
        lines.append(f'        path: "data/train/{chan}.parquet"')
        lines.append('      - split: test')
        lines.append(f'        path: "data/test/{chan}.parquet"')
    return '\n'.join(lines)
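
# For a single hypothetical channel "A-1", the emitted block looks like:
#
#   configs:
#     - config_name: "A-1"
#       data_files:
#         - split: train
#           path: "data/train/A-1.parquet"
#         - split: test
#           path: "data/test/A-1.parquet"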
|
|
|
|
def main() -> None:
    """Convert all .npy files to per-channel Parquet and print YAML configs."""
    npy_dir = Path('data/data')
    out_dir = Path('data')

    channels = []

    for split in ['train', 'test']:
        split_in = npy_dir / split
        split_out = out_dir / split
        split_out.mkdir(parents=True, exist_ok=True)

        for npy_path in sorted(split_in.glob('*.npy')):
            channel_id = npy_path.stem
            if split == 'train':
                channels.append(channel_id)

            df = convert_npy_to_parquet(npy_path)
            parquet_path = split_out / f'{channel_id}.parquet'
            df.to_parquet(parquet_path, index=False, engine='pyarrow')

            print(f' {split}/{channel_id}: {df.shape} -> {parquet_path}')

    labels_csv = Path('labeled_anomalies.csv')
    if labels_csv.exists():
        labels = pd.read_csv(labels_csv)
        labels_out = out_dir / 'labeled_anomalies.parquet'
        labels.to_parquet(labels_out, index=False, engine='pyarrow')
        print(f' labels: {len(labels)} rows -> {labels_out}')

    print('\n--- Copy below into README.md YAML frontmatter ---\n')
    print(generate_configs_yaml(channels))
|
|
|
|
if __name__ == '__main__':
    main()
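
# Loading sketch: once the Parquet files and the printed configs are in a
# Hugging Face dataset repo, each channel becomes its own config. Assuming a
# hypothetical repo id 'your-org/nasa-telemetry' and channel 'A-1':
#
#   from datasets import load_dataset
#   ds = load_dataset('your-org/nasa-telemetry', 'A-1', split='train')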
|
|