Commit
0cccafd
·
verified ·
0 Parent(s):

Please upload your results to results/

Browse files

Co-authored-by: Jasdeep50singh <Jasdeep50singh@users.noreply.huggingface.co>
Co-authored-by: ropoir <ropoir@users.noreply.huggingface.co>
Co-authored-by: viserjor <viserjor@users.noreply.huggingface.co>
Co-authored-by: geogpt69 <geogpt69@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
Jasdeep_Emulator.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Jasdeep_Emulator",
3
+ "authors": ["Jasdeep Singh", "hasan al marzouqi", "Panos Liatsis"],
4
+ "affiliations": ["Khalifa University"],
5
+ "description": "Deep learning network block",
6
+ "url": "",
7
+ "doi": "",
8
+ "email": "jasdeep.singh@ku.ac.ae"
9
+ }
Jasdeep_Emulator_A1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:915ce318f6177fa2bb01e77837a00f48da6ec5a12e91ccac2d5be531643d36be
3
+ size 878780127
Jasdeep_Emulator_A2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0e89307b634fae9c0448b606d8f288bc0e84a9a62f584290534d28cc445e2b0
3
+ size 347503201
Jasdeep_Emulator_B1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f33f19467d4a8109d1d05e37a6487ea6168011be6d38c50fe0bc651016a1688
3
+ size 522119636
Jasdeep_Emulator_B2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c5a5d4fa51e55932ca2a9d39910c772290a4a203f35c3050cd8bfa05b7faec9
3
+ size 207858960
README.md ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ language:
4
+ - en
5
+ pipeline_tag: emulation
6
+ tags:
7
+ - emulation
8
+ - atmosphere radiative transfer models
9
+ - hyperspectral
10
+ pretty_name: Atmospheric Radiative Transfer Emulation Challenge
11
+ title: rtm_emulation
12
+ emoji: 🤖
13
+ colorFrom: gray
14
+ colorTo: green
15
+ sdk: static
16
+ sdk_version: "latest"
17
+ pinned: false
18
+ ---
19
+ Last update: 17-06-2025
20
+
21
+ <img src="https://elias-ai.eu/wp-content/uploads/2023/09/elias_logo_big-1.png" alt="elias_logo" style="width:15%; display: inline-block; margin-right: 150px;">
22
+ <img src="https://elias-ai.eu/wp-content/uploads/2024/01/EN_FundedbytheEU_RGB_WHITE-Outline-1.png" alt="eu_logo" style="width:20%; display: inline-block;">
23
+
24
+ # **Atmospheric Radiative Transfer Emulation Challenge**
25
+
26
+
27
+ 1. [**Introduction**](#introduction)
28
+ 2. [**Challenge Tasks and Data**](#challenge-tasks-and-data):
29
+
30
+ 2.1. [**Proposed Experiments**](#proposed-experiments)
31
+
32
+ 2.2. [**Data Availability and Format**](#data-availability-and-format)
33
+ 3. [**Evaluation methodology**](#evaluation-methodology)
34
+
35
+ 3.1. [**Prediction Accuracy**](#prediction-accuracy)
36
+
37
+ 3.2. [**Computational efficiency**](#computational-efficiency)
38
+
39
+ 3.3. [**Proposed Protocol**](#proposed-protocol)
40
+
41
+ 4. [**Expected Outcomes**](#expected-outcomes)
42
+
43
+
44
+
45
+ ## **Benchmark Results**
46
+
47
+ | **Model** | **MRE A1 (%)** | **MRE A2 (%)** | **MRE B1 (%)** | **MRE B2 (%)** | **Score** | **Runtime** | **Rank** |
48
+ |-----------|---------------|---------------|---------------|---------------|----------|----------|--------|
49
+ | Jasdeep_Emulator_2 | 0.110 | 3.116 | 0.647 | 12.926 | 1.700 | 16.873 | 1° |
50
+ | Hugo | 0.203 | 4.579 | 0.666 | 3.965 | 3.125 | 1.532 | 2° |
51
+ | rpgpr | 0.121 | 6.040 | 0.661 | 16.669 | 3.375 | 39.325 | 3° |
52
+ | Jasdeep_Emulator | 0.301 | 11.158 | 0.655 | 6.108 | 3.675 | 41.307 | 4° |
53
+ | Jasdeep_Emulator_4 | 0.202 | 7.044 | 0.683 | 13.823 | 4.350 | 28.544 | 5° |
54
+ | Krtek | 0.545 | 7.693 | 0.823 | 7.877 | 5.475 | 0.764 | 6° |
55
+ | baseline | 0.998 | 12.604 | 1.084 | 7.072 | 6.300 | 0.241 | 7° |
56
+ | Jasdeep_Emulator_3 | 1.953 | 17.078 | 1.293 | 19.109 | 8.000 | 1.767 | 8° |
57
+
58
+ ## **Introduction**
59
+
60
+ Atmospheric Radiative Transfer Models (RTM) are crucial in Earth and climate sciences with applications such as synthetic scene generation, satellite data processing, or
61
+ numerical weather forecasting. However, their increasing complexity results in a computational burden that limits direct use in operational settings. A practical solution
62
+ is to interpolate look-up-tables (LUTs) of pre-computed RTM simulations generated from long and costly model runs. However, large LUTs are still needed to achieve accurate
63
+ results, requiring significant time to generate and demanding high memory capacity. Alternative, ad hoc solutions make data processing algorithms mission-specific and
64
+ lack generalization. These problems are exacerbated for hyperspectral satellite missions, where the data volume of LUTs can increase by one or two orders of magnitude,
65
+ limiting the applicability of advanced data processing algorithms. In this context, emulation offers an alternative, allowing for real-time satellite data processing
66
+ algorithms while providing high prediction accuracy and adaptability across atmospheric conditions. Emulation replicates the behavior of a deterministic and computationally
67
+ demanding model using statistical regression algorithms. This approach facilitates the implementation of physics-based inversion algorithms, yielding accurate and
68
+ computationally efficient model predictions compared to traditional look-up table interpolation methods.
69
+
70
+ RTM emulation is challenging due to the high-dimensional nature of both input (~10 dimensions) and output (several thousand) spaces, and the complex interactions of
71
+ electromagnetic radiation with the atmosphere. The research implications are vast, with potential breakthroughs in surrogate modeling, uncertainty quantification,
72
+ and physics-aware AI systems that can significantly contribute to climate and Earth observation sciences.
73
+
74
+ This challenge will contribute to reducing computational burdens in climate and atmospheric research, enabling (1) Faster satellite data processing for applications in
75
+ remote sensing and weather prediction, (2) improved accuracy in atmospheric correction of hyperspectral imaging data, and (3) more efficient climate simulations, allowing
76
+ broader exploration of emission pathways aligned with sustainability goals.
77
+
78
+ ## **Challenge Tasks and Data**
79
+
80
+ Participants in this challenge will develop emulators trained on provided datasets to predict spectral magnitudes (atmospheric transmittances and reflectances)
81
+ based on input atmospheric and geometric conditions. The challenge is structured around three main tasks: (1) training ML models
82
+ using predefined datasets, (2) predicting outputs for given test conditions, and (3) evaluating emulator performance based on accuracy.
83
+
84
+ ### **Proposed Experiments**
85
+
86
+ The challenge includes two primary application test scenarios:
87
+ 1. **Atmospheric Correction** (`A`): This scenario focuses on the atmospheric correction of hyperspectral satellite imaging data. Emulators will be tested on
88
+ their ability to reproduce key atmospheric transfer functions that influence radiance measurements. This includes path radiance, direct/diffuse solar irradiance, and
89
+ transmittance properties. Full spectral range simulations (400-2500 nm) will be provided at a resolution of 5cm<sup>-1</sup>.
90
+ 2. **CO<sub>2</sub> Column Retrieval** (`B`): This scenario is in the context of atmospheric CO<sub>2</sub> retrieval by modeling how radiation interacts with various gas
91
+ layers. The emulators will be evaluated on their accuracy in predicting top-of-atmosphere radiance, particularly within the spectral range sensitive to CO<sub>2</sub>
92
+ absorption (2000-2100 nm) at high spectral resolution (0.1cm<sup>-1</sup>).
93
+
94
+ For both scenarios, two test datasets (tracks) will be provided to evaluate 1) interpolation, and 2) extrapolation.
95
+
96
+ Each scenario-track combination will be identified using alphanumeric ID `Sn`, where `S`={`A`,`B`} denotes the scenario, and `n`={1,2}
97
+ represents test dataset type (i.e., track). For example, `A2` refers to prediction for the atmospheric correction scenario using the extrapolation dataset.
98
+
99
+ Participants may choose their preferred scenario(s) and tracks; however, we encourage submitting predictions for all test conditions.
100
+
101
+ ### **Data Availability and Format**
102
+
103
+ Participants will have access to multiple training datasets of atmospheric RTM simulations varying in sample sizes, input parameters, and spectral range/resolution.
104
+ These datasets will be generated using Latin Hypercube Sampling to ensure a comprehensive input space coverage and minimize issues related to ill-posedness and
105
+ unrealistic results.
106
+
107
+ The training data (i.e., inputs and outputs of RTM simulations) will be stored in [HDF5](https://docs.h5py.org/en/stable/) format with the following structure:
108
+
109
+ | **Dimensions** | |
110
+ |:---:|:---:|
111
+ | **Name** | **Description** |
112
+ | `n_wl` | Number of wavelengths for which spectral data is provided |
113
+ | `n_funcs` | Number of atmospheric transfer functions |
114
+ | `n_comb` | Number of data points at which spectral data is provided |
115
+ | `n_param` | Dimensionality of the input variable space |
116
+
117
+ | **Data Components** | | | |
118
+ |:---:|:---:|:---:|:---:|
119
+ | **Name** | **Description** | **Dimensions** | **Datatype** |
120
+ | **`LUTdata`** | Atmospheric transfer functions (i.e. outputs) | `n_funcs*n_wvl x n_comb` | single |
121
+ | **`LUTHeader`** | Matrix of input variable values for each combination (i.e., inputs) | `n_param x n_comb` | double |
122
+ | **`wvl`** | Wavelength values associated with the atmospheric transfer functions (i.e., spectral grid) | `n_wvl` | double |
123
+
124
+ **Note:** Participants may choose to predict the spectral data either as a single vector of length `n_funcs*n_wvl` or as `n_funcs` separate vectors of length `n_wvl`.
125
+
126
+ Testing input datasets (i.e., input for predictions) will be stored in a tabulated `.csv` format with dimensions `n_param x n_comb`.
127
+
128
+ The training and testing datasets will be organized into scenario-specific folders (see
129
+ [**Proposed experiments**](/datasets/isp-uv-es/rtm_emulation#proposed-experiments)): `scenarioA` (Atmospheric Correction), and `scenarioB` (CO<sub>2</sub> Column Retrieval).
130
+ Each folder will contain:
131
+ - A `train` subfolder with multiple `.h5` files corresponding to different training sample sizes (e.g. `train2000.h5` contains 2000 samples).
132
+ - A `reference` subfolder containing two test files (`refInterp` and `refExtrap`) referring to the two aforementioned tracks (i.e., interpolation and extrapolation).
133
+
134
+ Here is an example of how to load each dataset in python:
135
+ ```{python}
136
+ import h5py
137
+ import pandas as pd
138
+ import numpy as np
139
+
140
+ # Replace with the actual path to your training and testing data
141
+ trainFile = 'train2000.h5'
142
+ testFile = 'refInterp.csv'
143
+
144
+ # Open the H5 file
145
+ with h5py.File(trainFile, 'r') as h5_file:
146
+ Ytrain = h5_file['LUTdata'][:]
147
+ Xtrain = h5_file['LUTHeader'][:]
148
+ wvl = h5_file['wvl'][:]
149
+
150
+ # Read testing data
151
+ df = pd.read_csv(testFile)
152
+ Xtest = df.to_numpy()
153
+ ```
154
+
155
+ in Matlab:
156
+ ```{matlab}
157
+ % Replace with the actual path to your training and testing data
158
+ trainFile = 'train2000.h5';
159
+ testFile = 'refInterp.csv';
160
+
161
+ % Open the H5 file
162
+ Ytrain = h5read(trainFile,'/LUTdata');
163
+ Xtrain = h5read(trainFile,'/LUTHeader');
164
+ wvl = h5read(trainFile,'/wvl');
165
+
166
+ % Read testing data
167
+ Xtest = importdata(testFile);
168
+ ```
169
+
170
+ and in R language:
171
+ ```{r}
172
+ library(rhdf5)
173
+
174
+ # Replace with the actual path to your training and testing data
175
+ trainFile <- "train2000.h5"
176
+ testFile <- "refInterp.csv"
177
+
178
+ # Open the H5 file
179
+ lut_data <- h5read(trainFile, "LUTdata")
180
+ lut_header <- h5read(trainFile, "LUTHeader")
181
+ wavelengths <- h5read(trainFile, "wvl")
182
+
183
+ # Read testing data
184
+ Xtest <- as.matrix(read.table(testFile, sep = ",", header = TRUE))
185
+ ```
186
+
187
+ All data will be shared through this [repository](https://huggingface.co/datasets/isp-uv-es/rtm_emulation/tree/main). After the challenge finishes, participants
188
+ will also have access to the evaluation scripts on [this GitLab](http://to_be_prepared) to ensure transparency and reproducibility.
189
+
190
+
191
+ ## **Evaluation methodology**
192
+
193
+ The evaluation will focus on three key aspects: prediction accuracy, computational efficiency, and extrapolation performance.
194
+
195
+ ### **Prediction Accuracy**
196
+
197
+ For the **atmospheric correction** scenario (`A`), the predicted atmospheric transfer functions will be used to retrieve surface reflectance from the top-of-atmosphere
198
+ (TOA) radiance simulations in the testing dataset. The evaluation will proceed as follows:
199
+ 1. The relative difference between retrieved and reference reflectance will be computed for each spectral channel and sample from the testing dataset.
200
+ 2. The mean relative error (MRE) will be calculated over the entire reference dataset to assess overall emulator bias.
201
+ 3. The spectrally-averaged MRE (MRE<sub>λ</sub>) will be computed, excluding wavelengths in the deep H<sub>2</sub>O absorption regions, to ensure direct comparability between participants.
202
+
203
+ For the **CO<sub>2</sub> retrieval** scenario (`B`), evaluation will follow the same steps, comparing predicted TOA radiance spectral data against the reference values
204
+ in the testing dataset.
205
+
206
+ Since each participant/model can contribute to up to four scenario-track combinations, we will consolidate results into a single final ranking using the following process:
207
+ 1. **Individual ranking**: For each of the four combinations, submissions will be ranked based on their MRE<sub>λ</sub> values. Lower MRE<sub>λ</sub> values correspond to
208
+ better performance. In the unlikely case of ties, these will be handled by averaging the tied ranks.
209
+ 2. **Final ranking**: Rankings will be aggregated into a single final score using a weighted average. The following weights will be applied: 0.325 for interpolation and
210
+ 0.175 for extrapolation tracks. That is:
211
+ **Final score = (0.325 × AC-Interp Rank) + (0.175 × AC-Extrap Rank) + (0.325 × CO2-Interp Rank) + (0.175 × CO2-Extrap Rank)**
212
+ 3. **Missing Submissions**: If a participant does not submit results for a particular scenario-track combination, they will be placed in the last position for that track.
213
+
214
+ To ensure fairness in the final ranking, we will use the **standard competition ranking** method in the case of ties. If two or more participants achieve the same
215
+ weighted average rank, they will be assigned the same final position, and the subsequent rank(s) will be skipped accordingly. For example, if two participants are tied
216
+ for 1st place, they will both receive rank 1, and the next participant will be ranked 3rd (not 2nd).
217
+
218
+ **Note:** while the challenge is open, the daily evaluation of error metrics will be done on a subset of the test data. This will prevent participants from having detailed
219
+ information that would allow them to fine-tune their models. The final results and ranking evaluated with all the validation data will be provided at the end-date of the challenge.
220
+
221
+ ### **Computational efficiency**
222
+ Participants must report the runtime required to generate predictions across different emulator configurations. The average runtime of all scenario-track combinations
223
+ will be calculated and reported in the table. **Runtime won't be taken into account for the final ranking**. After the competition ends, and to facilitate fair
224
+ comparisons, participants will be requested to provide a report with hardware specifications, including: CPU, Parallelization settings (e.g., multi-threading, GPU
225
+ acceleration), RAM availability. Additionally, participants should report key model characteristics, such as the number of operations required for a single prediction and the number of trainable
226
+ parameters in their ML models.
227
+
228
+ All evaluation scripts will be publicly available on GitLab and Huggingface to ensure fairness, trustworthiness, and transparency.
229
+
230
+ ### **Proposed Protocol**
231
+
232
+ - Participants must generate emulator predictions on the provided testing datasets before the submission deadline. Multiple emulator models can be submitted.
233
+
234
+ - The submission will be made via a [pull request](https://huggingface.co/docs/hub/en/repositories-pull-requests-discussions) to this repository.
235
+
236
+ - Each submission **MUST** include the prediction results in hdf5 format and a `metadata.json`.
237
+
238
+ - The predictions should be stored in a `.h5` file with the same format as the [training data](/datasets/isp-uv-es/rtm_emulation#data-availability-and-format).
239
+ Note that only the **`LUTdata`** matrix (i.e., the predictions) is needed. A baseline example of this file is available for participants (`baseline_Sn.h5`).
240
+ We encourage participants to compress their hdf5 files using the deflate option.
241
+
242
+ - Each prediction file must be stored in the `results` folder in this repository. The prediction files should be named using the emulator/model name followed by
243
+ the scenario-track ID (e.g. `/results/mymodel_A1.h5`). A global attribute named `runtime` must be included to report the
244
+ computational efficiency of your model (value expressed in seconds).
245
+ Note that all predictions for different scenario-tracks should be stored in separate files.
246
+
247
+ - The metadata file (`metadata.json`) shall contain the following information:
248
+
249
+ ```{json}
250
+ {
251
+ "name": "model_name",
252
+ "authors": ["author1", "author2"],
253
+ "affiliations": ["affiliation1", "affiliation2"],
254
+ "description": "A brief description of the emulator",
255
+ "url": "[OPTIONAL] URL to the model repository if it is open-source",
256
+ "doi": "DOI to the model publication (if available)",
257
+ "email": <main_contact_email>
258
+ }
259
+ ```
260
+
261
+ - Emulator predictions will be evaluated once per day at 12:00 CET based on the defined metrics.
262
+
263
+ - After the deadline, teams will be contacted with their evaluation results. If any issues are identified, teams will have up to two
264
+ weeks to provide the necessary corrections.
265
+
266
+ - In case of **problems with the pull request** or incorrect validity of the submitted files, all discussions will be held in the [discussion board](https://huggingface.co/isp-uv-es/rtm_emulation/discussions).
267
+
268
+ - After all the participants have provided the necessary corrections, the results will be published in the discussion section of this repository.
269
+
270
+
271
+ ## **Expected Outcomes**
272
+
273
+ - No clear superiority of any methodology in all metrics is expected.
274
+ - Participants will benefit from the analysis on scenarios/tracks, which will serve them to improve their models.
275
+ - A research publication will be submitted to a remote sensing journal with the top three winners.
276
+ - An overview paper of the challenge will be published at the [ECML-PKDD 2025](https://ecmlpkdd.org/2025/) workshop proceedings.
277
+ - The winner will get the registration cost covered for the [ECML-PKDD 2025](https://ecmlpkdd.org/2025/).
278
+ - We are exploring the possibility of providing economic prizes for the top three winners. Stay tuned!
results/baseline.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "baseline",
3
+ "authors": ["Jorge Vicent Servera"],
4
+ "affiliations": ["Image & Signal Processing (ISP)"],
5
+ "description": "2nd order hypersurface polynomial fitting",
6
+ "url": "",
7
+ "doi": "",
8
+ "email": "jorge.vicent@uv.es"
9
+ }
results/baseline_A1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:564673d17c17cd3f4eefd92690bcfba799f54009e24d5791408ebdccd2dd2062
3
+ size 1009201264
results/baseline_A2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03c2a40f9ee5f302db0cd438011350f4c92080187762244d8fb2087d14ebce31
3
+ size 403681264
results/baseline_B1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e00251de383a612b2b772e55c36687b4170060d6ee0a58eb6573d5d07efd6e2
3
+ size 588001264
results/baseline_B2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da768ed81c1d95397cd548e0a5c494032fa05cce99b2330249823c1c17ef3d55
3
+ size 235201264
scenarioA/reference/refExtrap.csv ADDED
The diff for this file is too large to render. See raw diff
 
scenarioA/reference/refInterp.csv ADDED
The diff for this file is too large to render. See raw diff
 
scenarioA/train/train2000.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45c49623ecfbecb8ef242c59914dba6fe1577fdc046fefcff6e4bedc44cb86fd
3
+ size 202108012
scenarioA/train/train500.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:042d6153eb75fb8d488e3aa78be092ef2920339b3b52b44bc90747682992e1d3
3
+ size 50620012
scenarioB/reference/refExtrap.csv ADDED
The diff for this file is too large to render. See raw diff
 
scenarioB/reference/refInterp.csv ADDED
The diff for this file is too large to render. See raw diff
 
scenarioB/train/train2000.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6c919fedffd47ddcd02fb6dca079e22eaedba4ef81ea53efad5531e603ee4f9
3
+ size 117805394
scenarioB/train/train500.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28e1a14a33a7c8cfed622f00b15801c5d58c7f2d31e6459a4b3abf48adbe27f9
3
+ size 29521394