File size: 9,161 Bytes
8d9d827
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
from tqdm import tqdm
import requests
import os
import xarray as xr
import pandas as pd
from glob import glob
import cfgrib

# ### GLOSEA5 ###
# path = "raw/glosea.grib"
# print("Processing Glosea5...")
# # Open the GRIB file as an xarray dataset
# ds_1 = cfgrib.open_datasets("raw/glosea_1.grib")
# ds_2 = cfgrib.open_datasets("raw/glosea_2.grib")
# ds_3 = cfgrib.open_datasets("raw/glosea_3.grib")
# glosea_xarray_1, glosea_xarray_2, glosea_xarray_3 = ds_1[0], ds_2[0], ds_3[0]
# # Convert to pandas DataFrame
# df_1 = glosea_xarray_1.to_dataframe().reset_index()
# df_2 = glosea_xarray_2.to_dataframe().reset_index()
# df_3 = glosea_xarray_3.to_dataframe().reset_index()
# # Concatenate the two DataFrames
# glosea_df = pd.concat([df_1, df_2, df_3], ignore_index=True)
# # Convert tprate NaN to 0.0
# glosea_df['tprate'] = glosea_df['tprate'].fillna(0.0)
# # Aggregate across steps
# glosea_df = glosea_df.groupby(['valid_time', 'latitude', 'longitude'])['tprate'].mean().reset_index()
# # Keep between 1981 and 2019
# glosea_df = glosea_df[glosea_df['valid_time'].between('1981-01-01', '2018-12-31')]
# # Rename columns
# glosea_df.rename(columns={'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon', 'valid_time': 'time'}, inplace=True)
# glosea_df.reset_index(inplace=True, drop=True)
# print(f'Saving Glosea5 data to parquet (length of dataframe = {len(glosea_df)})...')
# glosea_df.to_parquet('processed/glosea5.parquet')
# # Delete any files ending in .idx from the raw folder
# for file in os.listdir("raw"):
#     if file.endswith(".idx"):
#         os.remove(os.path.join("raw", file))

# ### ECMWF ###
# path = "raw/ecmwf.grib"
# print("Processing ECMWF...")
# # Open the GRIB file as an xarray dataset
# ds_1 = cfgrib.open_datasets("raw/ecmwf_1.grib")
# ds_2 = cfgrib.open_datasets("raw/ecmwf_2.grib")
# ds_3 = cfgrib.open_datasets("raw/ecmwf_3.grib")
# ecmwf_xarray_1, ecmwf_xarray_2, ecmwf_xarray_3 = ds_1[0], ds_2[0], ds_3[0]
# # Convert to pandas DataFrame
# df_1 = ecmwf_xarray_1.to_dataframe().reset_index()
# df_2 = ecmwf_xarray_2.to_dataframe().reset_index()
# df_3 = ecmwf_xarray_3.to_dataframe().reset_index()
# # Concatenate the two DataFrames
# ecmwf_df = pd.concat([df_1, df_2, df_3], ignore_index=True)
# # Convert tprate NaN to 0.0
# ecmwf_df['tprate'] = ecmwf_df['tprate'].fillna(0.0)
# # Aggregate across steps
# ecmwf_df = ecmwf_df.groupby(['valid_time', 'latitude', 'longitude'])['tprate'].mean().reset_index()
# # Keep between 1981 and 2019
# ecmwf_df = ecmwf_df[ecmwf_df['valid_time'].between('1981-01-01', '2018-12-31')]
# # Rename columns
# ecmwf_df.rename(columns={'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon', 'valid_time': 'time'}, inplace=True)
# ecmwf_df.reset_index(inplace=True, drop=True)
# print(f'Saving ECMWF data to parquet (length of dataframe = {len(ecmwf_df)})...')
# ecmwf_df.to_parquet('processed/ecmwf.parquet')
# # Delete any files ending in .idx from the raw folder
# for file in os.listdir("raw"):
#     if file.endswith(".idx"):
#         os.remove(os.path.join("raw", file))


# ### ACCESS-S2 ###

# # Define the output file path
# output_file = 'processed/access.parquet'

# # Define the path and file pattern
# path = "/g/data/ux62/access-s2/hindcast/calibrated/atmos/pr/daily/e09/"

# # Check if the output file already exists and read it if it does
# if os.path.exists(output_file):
#     master_df = pd.read_parquet(output_file)
#     # Extract already processed years
#     processed_years = master_df['time'].dt.year.unique()
# else:
#     # Initialise an empty DataFrame if the file does not exist
#     master_df = pd.DataFrame()
#     processed_years = []

# # Generate file patterns for each year from 1983 to 2018 and get matching files
# files = []
# for year in range(1983, 2018):
#     if year not in processed_years:
#         pattern = f"*pr_{year}*.nc"
#         files.extend(glob(os.path.join(path, pattern)))

# print(f"Processing data for years: {set(range(1983, 2018)) - set(processed_years)}")

# # Loop through the list of files and load each one
# for file in tqdm(files):
#     # Load the xarray dataset
#     ds = xr.open_dataset(file)

#     # Slice the dataset for three specific lat/lon grids
#     ds_sliced1 = ds.sel(lon=slice(142, 145), lat=slice(-25, -22))
#     ds_sliced2 = ds.sel(lon=slice(150, 153), lat=slice(-29, -26))
#     ds_sliced3 = ds.sel(lon=slice(143, 146), lat=slice(-20, -17))

    
#     # Flatten the sliced data for 'pr' variable for both slices
#     df1 = ds_sliced1['pr'].to_dataframe().reset_index()
#     df2 = ds_sliced2['pr'].to_dataframe().reset_index()
#     df3 = ds_sliced3['pr'].to_dataframe().reset_index()

#     # Concatenate the two DataFrames
#     combined_df = pd.concat([df1, df2, df3], ignore_index=True)
    
#     # Filter rows where latitude and longitude are integers
#     combined_df = combined_df[combined_df['lat'].apply(lambda x: x.is_integer())]
#     combined_df = combined_df[combined_df['lon'].apply(lambda x: x.is_integer())]


#     # Append the DataFrame to the master DataFrame
#     master_df = pd.concat([master_df, combined_df], ignore_index=True)
    
#     # Close the xarray dataset
#     ds.close()

#     # Save the updated master_df to the Parquet file
#     os.makedirs(os.path.dirname(output_file), exist_ok=True)
#     master_df.to_parquet(output_file)

#     # Print the processed year for tracking
#     processed_year = pd.to_datetime(master_df['time'].max()).year
#     print(f"Year {processed_year} processed and saved.")

# # After the loop, perform any final processing needed on master_df
# if os.path.exists(output_file):
#     master_df = pd.read_parquet(output_file)
#     master_df['time'] = pd.to_datetime(master_df['time'])

#     # Group by time, lat, and lon, then sum the pr values
#     deduped_df = master_df.groupby(['time', 'lat', 'lon']).agg({'pr': 'sum'}).reset_index()

#     # Save the final processed DataFrame to a Parquet file
#     deduped_df.to_parquet(output_file)
#     print(f"Final file saved to {output_file}")
# else:
#     print(f"No data processed. {output_file} does not exist.")


# def create_master_parquet():
#     files = ['access', 'ecmwf', 'glosea5', 'silo']
#     frames = []
#     for file in files:
#         df = pd.read_parquet(f'processed/{file}.parquet')
#         df['model'] = file
#         frames.append(df)

#     access = frames[0]
#     access.reset_index(inplace=True, drop=True)
#     columns = access.columns
#     # Convert time to string
#     access['time'] = access['time'].astype(str)

#     ecmwf = frames[1]
#     ecmwf.rename(columns={'date': 'time', 'precip': 'pr', 'latitude': 'lat', 'longitude': 'lon'}, inplace=True)
#     ecmwf = ecmwf[columns]
#     ecmwf.reset_index(inplace=True, drop=True)
#     # Convert time to string
#     ecmwf['time'] = ecmwf['time'].astype(str)

#     glosea = frames[2]
#     glosea.rename(columns={'date': 'time', 'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon'}, inplace=True)
#     glosea = glosea[columns]
#     glosea.reset_index(inplace=True, drop=True)
#     # Convert time to string
#     glosea['time'] = glosea['time'].astype(str)

#     silo = frames[3]
#     silo.rename(columns={'daily_rain': 'pr'}, inplace=True)
#     silo = silo[columns]
#     # Convert lat and lon to float32
#     silo['lat'] = silo['lat'].astype('float32')
#     silo['lon'] = silo['lon'].astype('float32')
#     silo.reset_index(inplace=True, drop=True)
#     # Convert time to string
#     silo['time'] = silo['time'].astype(str)

#     dfs = [access, ecmwf, glosea, silo]
#     master_df = pd.concat(dfs)
#     master_df.reset_index(inplace=True, drop=True)

#     master_df.to_parquet('processed/master.parquet')
#     print(f"Final file saved to processed/master.parquet")

import pandas as pd

def standardize_df(df, rename_dict, default_columns):
    """Return a copy of *df* normalized to the shared column schema.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw per-model frame; must contain (possibly under the names given
        in *rename_dict*) every column listed in *default_columns*.
    rename_dict : dict
        Mapping of model-specific column names to the canonical ones,
        e.g. ``{'date': 'time', 'precip': 'pr'}``.
    default_columns : sequence of str
        The columns (and their order) to keep in the output frame.

    Returns
    -------
    pandas.DataFrame
        New frame with renamed columns restricted to *default_columns*,
        a fresh RangeIndex, and the 'time' column cast to ``str``.
    """
    # rename() already returns a new frame; the explicit .copy() after the
    # column selection avoids chained-assignment (SettingWithCopy) warnings
    # when 'time' is overwritten below. The caller's frame is never mutated.
    out = df.rename(columns=rename_dict)[default_columns].copy()
    out = out.reset_index(drop=True)
    out['time'] = out['time'].astype(str)
    return out

def create_master_parquet():
    """Merge the per-model parquet files into ``processed/master.parquet``.

    Reads ``processed/{access,ecmwf,glosea5,silo}.parquet``, tags each row
    with its source model, renames model-specific columns to the shared
    schema ``(time, lat, lon, pr, model)``, concatenates everything, and
    writes the combined frame to ``processed/master.parquet``.
    """
    files = ['access', 'ecmwf', 'glosea5', 'silo']
    # Per-model renames to reach the canonical schema; 'access' already
    # uses canonical names and 'silo' only differs in 'daily_rain'.
    rename_dicts = [
        {},
        {'date': 'time', 'precip': 'pr', 'latitude': 'lat', 'longitude': 'lon'},
        {'date': 'time', 'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon'},
        {'daily_rain': 'pr'},
    ]
    default_columns = ['time', 'lat', 'lon', 'pr', 'model']

    frames = []
    for file, rename_dict in zip(files, rename_dicts):
        df = pd.read_parquet(f'processed/{file}.parquet')
        df['model'] = file  # tag provenance before standardizing
        frames.append(standardize_df(df, rename_dict, default_columns))

    # NOTE(review): the previous version ran standardize_df over frames[1:]
    # a second time here with the same rename dicts — a redundant no-op
    # pass (columns were already renamed/ordered and 'time' already str).
    # It has been removed; each frame is standardized exactly once above.

    master_df = pd.concat(frames, ignore_index=True)
    master_df.to_parquet('processed/master.parquet')
    print("Final file saved to processed/master.parquet")

if __name__ == "__main__":
    # Guarded entry point: build the master parquet only when this file is
    # executed as a script, not when it is imported as a module.
    create_master_parquet()