Full Model Emulation
annakwa committed on
Commit
7798813
·
1 Parent(s): 34a9f3f

update config and script

Browse files
hiro_downscaling_ace2s_global_output.yaml CHANGED
@@ -12,7 +12,7 @@ data:
12
  coarse:
13
  - data_path: /output_directory
14
  engine: zarr
15
- file_pattern: output_6hourly_predictions_ic0000.zarr
16
  batch_size: 4
17
  num_data_workers: 2
18
  strict_ensemble: False
@@ -21,9 +21,10 @@ outputs:
21
  save_vars: ["PRATEsfc"]
22
  n_ens: 1
23
  max_samples_per_gpu: 32
 
24
  time_range:
25
  start_time: "2023-01-01T00:00:00"
26
- stop_time: "2023-12-31T18:00:00"
27
  lat_extent:
28
  start: -66.0
29
  stop: 70.0
 
12
  coarse:
13
  - data_path: /output_directory
14
  engine: zarr
15
+ file_pattern: output_6hourly_ace2s_predictions_ic0000.zarr
16
  batch_size: 4
17
  num_data_workers: 2
18
  strict_ensemble: False
 
21
  save_vars: ["PRATEsfc"]
22
  n_ens: 1
23
  max_samples_per_gpu: 32
24
+ # Note: for reference, 1 year of global 3km outputs is ~300 Gb per ensemble member
25
  time_range:
26
  start_time: "2023-01-01T00:00:00"
27
+ stop_time: "2023-01-08T00:00:00"
28
  lat_extent:
29
  start: -66.0
30
  stop: 70.0
run-hiro-ace.sh CHANGED
@@ -19,4 +19,5 @@ python -m fme.ace.inference ace2s_inference_config_global.yaml
19
  # for faster through put more GPUs may be required
20
  NGPU=1
21
 
22
- torchrun --nproc_per_node $NGPU -m fme.downscaling.inference hiro_downscaling_ace2s_global_output.yaml
 
 
19
  # for faster through put more GPUs may be required
20
  NGPU=1
21
 
22
+ # This will downscale a subselected region around the Pacific Northwest.
23
+ torchrun --nproc_per_node $NGPU -m fme.downscaling.inference hiro_downscaling_ace2s_pnw_output.yaml