| from tqdm import tqdm | |
| import requests | |
| import os | |
| import xarray as xr | |
| import pandas as pd | |
| from glob import glob | |
| import cfgrib | |
| # ### GLOSEA5 ### | |
| # path = "raw/glosea.grib" | |
| # print("Processing Glosea5...") | |
| # # Open the GRIB file as an xarray dataset | |
| # ds_1 = cfgrib.open_datasets("raw/glosea_1.grib") | |
| # ds_2 = cfgrib.open_datasets("raw/glosea_2.grib") | |
| # ds_3 = cfgrib.open_datasets("raw/glosea_3.grib") | |
| # glosea_xarray_1, glosea_xarray_2, glosea_xarray_3 = ds_1[0], ds_2[0], ds_3[0] | |
| # # Convert to pandas DataFrame | |
| # df_1 = glosea_xarray_1.to_dataframe().reset_index() | |
| # df_2 = glosea_xarray_2.to_dataframe().reset_index() | |
| # df_3 = glosea_xarray_3.to_dataframe().reset_index() | |
# # Concatenate the three DataFrames
# glosea_df = pd.concat([df_1, df_2, df_3], ignore_index=True)
| # # Convert tprate NaN to 0.0 | |
| # glosea_df['tprate'] = glosea_df['tprate'].fillna(0.0) | |
| # # Aggregate across steps | |
| # glosea_df = glosea_df.groupby(['valid_time', 'latitude', 'longitude'])['tprate'].mean().reset_index() | |
| # # Keep between 1981 and 2019 | |
| # glosea_df = glosea_df[glosea_df['valid_time'].between('1981-01-01', '2018-12-31')] | |
| # # Rename columns | |
| # glosea_df.rename(columns={'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon', 'valid_time': 'time'}, inplace=True) | |
| # glosea_df.reset_index(inplace=True, drop=True) | |
| # print(f'Saving Glosea5 data to parquet (length of dataframe = {len(glosea_df)})...') | |
| # glosea_df.to_parquet('processed/glosea5.parquet') | |
| # # Delete any files ending in .idx from the raw folder | |
| # for file in os.listdir("raw"): | |
| # if file.endswith(".idx"): | |
| # os.remove(os.path.join("raw", file)) | |
| # ### ECMWF ### | |
| # path = "raw/ecmwf.grib" | |
| # print("Processing ECMWF...") | |
| # # Open the GRIB file as an xarray dataset | |
| # ds_1 = cfgrib.open_datasets("raw/ecmwf_1.grib") | |
| # ds_2 = cfgrib.open_datasets("raw/ecmwf_2.grib") | |
| # ds_3 = cfgrib.open_datasets("raw/ecmwf_3.grib") | |
| # ecmwf_xarray_1, ecmwf_xarray_2, ecmwf_xarray_3 = ds_1[0], ds_2[0], ds_3[0] | |
| # # Convert to pandas DataFrame | |
| # df_1 = ecmwf_xarray_1.to_dataframe().reset_index() | |
| # df_2 = ecmwf_xarray_2.to_dataframe().reset_index() | |
| # df_3 = ecmwf_xarray_3.to_dataframe().reset_index() | |
# # Concatenate the three DataFrames
# ecmwf_df = pd.concat([df_1, df_2, df_3], ignore_index=True)
| # # Convert tprate NaN to 0.0 | |
| # ecmwf_df['tprate'] = ecmwf_df['tprate'].fillna(0.0) | |
| # # Aggregate across steps | |
| # ecmwf_df = ecmwf_df.groupby(['valid_time', 'latitude', 'longitude'])['tprate'].mean().reset_index() | |
| # # Keep between 1981 and 2019 | |
| # ecmwf_df = ecmwf_df[ecmwf_df['valid_time'].between('1981-01-01', '2018-12-31')] | |
| # # Rename columns | |
| # ecmwf_df.rename(columns={'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon', 'valid_time': 'time'}, inplace=True) | |
| # ecmwf_df.reset_index(inplace=True, drop=True) | |
| # print(f'Saving ECMWF data to parquet (length of dataframe = {len(ecmwf_df)})...') | |
| # ecmwf_df.to_parquet('processed/ecmwf.parquet') | |
| # # Delete any files ending in .idx from the raw folder | |
| # for file in os.listdir("raw"): | |
| # if file.endswith(".idx"): | |
| # os.remove(os.path.join("raw", file)) | |
| # ### ACCESS-S2 ### | |
| # # Define the output file path | |
| # output_file = 'processed/access.parquet' | |
| # # Define the path and file pattern | |
| # path = "/g/data/ux62/access-s2/hindcast/calibrated/atmos/pr/daily/e09/" | |
| # # Check if the output file already exists and read it if it does | |
| # if os.path.exists(output_file): | |
| # master_df = pd.read_parquet(output_file) | |
| # # Extract already processed years | |
| # processed_years = master_df['time'].dt.year.unique() | |
| # else: | |
| # # Initialise an empty DataFrame if the file does not exist | |
| # master_df = pd.DataFrame() | |
| # processed_years = [] | |
| # # Generate file patterns for each year from 1983 to 2018 and get matching files | |
| # files = [] | |
| # for year in range(1983, 2018): | |
| # if year not in processed_years: | |
| # pattern = f"*pr_{year}*.nc" | |
| # files.extend(glob(os.path.join(path, pattern))) | |
| # print(f"Processing data for years: {set(range(1983, 2018)) - set(processed_years)}") | |
| # # Loop through the list of files and load each one | |
| # for file in tqdm(files): | |
| # # Load the xarray dataset | |
| # ds = xr.open_dataset(file) | |
| # # Slice the dataset for three specific lat/lon grids | |
| # ds_sliced1 = ds.sel(lon=slice(142, 145), lat=slice(-25, -22)) | |
| # ds_sliced2 = ds.sel(lon=slice(150, 153), lat=slice(-29, -26)) | |
| # ds_sliced3 = ds.sel(lon=slice(143, 146), lat=slice(-20, -17)) | |
| # # Flatten the sliced data for 'pr' variable for both slices | |
| # df1 = ds_sliced1['pr'].to_dataframe().reset_index() | |
| # df2 = ds_sliced2['pr'].to_dataframe().reset_index() | |
| # df3 = ds_sliced3['pr'].to_dataframe().reset_index() | |
# # Concatenate the three DataFrames
# combined_df = pd.concat([df1, df2, df3], ignore_index=True)
| # # Filter rows where latitude and longitude are integers | |
| # combined_df = combined_df[combined_df['lat'].apply(lambda x: x.is_integer())] | |
| # combined_df = combined_df[combined_df['lon'].apply(lambda x: x.is_integer())] | |
| # # Append the DataFrame to the master DataFrame | |
| # master_df = pd.concat([master_df, combined_df], ignore_index=True) | |
| # # Close the xarray dataset | |
| # ds.close() | |
| # # Save the updated master_df to the Parquet file | |
| # os.makedirs(os.path.dirname(output_file), exist_ok=True) | |
| # master_df.to_parquet(output_file) | |
| # # Print the processed year for tracking | |
| # processed_year = pd.to_datetime(master_df['time'].max()).year | |
| # print(f"Year {processed_year} processed and saved.") | |
| # # After the loop, perform any final processing needed on master_df | |
| # if os.path.exists(output_file): | |
| # master_df = pd.read_parquet(output_file) | |
| # master_df['time'] = pd.to_datetime(master_df['time']) | |
| # # Group by time, lat, and lon, then sum the pr values | |
| # deduped_df = master_df.groupby(['time', 'lat', 'lon']).agg({'pr': 'sum'}).reset_index() | |
| # # Save the final processed DataFrame to a Parquet file | |
| # deduped_df.to_parquet(output_file) | |
| # print(f"Final file saved to {output_file}") | |
| # else: | |
| # print(f"No data processed. {output_file} does not exist.") | |
| # def create_master_parquet(): | |
| # files = ['access', 'ecmwf', 'glosea5', 'silo'] | |
| # frames = [] | |
| # for file in files: | |
| # df = pd.read_parquet(f'processed/{file}.parquet') | |
| # df['model'] = file | |
| # frames.append(df) | |
| # access = frames[0] | |
| # access.reset_index(inplace=True, drop=True) | |
| # columns = access.columns | |
| # # Convert time to string | |
| # access['time'] = access['time'].astype(str) | |
| # ecmwf = frames[1] | |
| # ecmwf.rename(columns={'date': 'time', 'precip': 'pr', 'latitude': 'lat', 'longitude': 'lon'}, inplace=True) | |
| # ecmwf = ecmwf[columns] | |
| # ecmwf.reset_index(inplace=True, drop=True) | |
| # # Convert time to string | |
| # ecmwf['time'] = ecmwf['time'].astype(str) | |
| # glosea = frames[2] | |
| # glosea.rename(columns={'date': 'time', 'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon'}, inplace=True) | |
| # glosea = glosea[columns] | |
| # glosea.reset_index(inplace=True, drop=True) | |
| # # Convert time to string | |
| # glosea['time'] = glosea['time'].astype(str) | |
| # silo = frames[3] | |
| # silo.rename(columns={'daily_rain': 'pr'}, inplace=True) | |
| # silo = silo[columns] | |
| # # Convert lat and lon to float32 | |
| # silo['lat'] = silo['lat'].astype('float32') | |
| # silo['lon'] = silo['lon'].astype('float32') | |
| # silo.reset_index(inplace=True, drop=True) | |
| # # Convert time to string | |
| # silo['time'] = silo['time'].astype(str) | |
| # dfs = [access, ecmwf, glosea, silo] | |
| # master_df = pd.concat(dfs) | |
| # master_df.reset_index(inplace=True, drop=True) | |
| # master_df.to_parquet('processed/master.parquet') | |
| # print(f"Final file saved to processed/master.parquet") | |
| import pandas as pd | |
| def standardize_df(df, rename_dict, default_columns): | |
| """Standardize the DataFrame structure.""" | |
| df = df.rename(columns=rename_dict) | |
| df = df[default_columns] | |
| df.reset_index(inplace=True, drop=True) | |
| df['time'] = df['time'].astype(str) | |
| return df | |
def create_master_parquet():
    """Merge the per-model parquet files into ``processed/master.parquet``.

    Reads ``processed/<model>.parquet`` for each of access, ecmwf, glosea5
    and silo, tags each row with its source model, standardizes column
    names/order via :func:`standardize_df`, concatenates everything and
    writes the combined frame to ``processed/master.parquet``.
    """
    files = ['access', 'ecmwf', 'glosea5', 'silo']
    # Per-model renames onto the canonical schema; 'access' already uses
    # the canonical names, hence the empty dict.
    rename_dicts = [
        {},
        {'date': 'time', 'precip': 'pr', 'latitude': 'lat', 'longitude': 'lon'},
        {'date': 'time', 'tprate': 'pr', 'latitude': 'lat', 'longitude': 'lon'},
        {'daily_rain': 'pr'},
    ]
    canonical_columns = ['time', 'lat', 'lon', 'pr', 'model']
    frames = []
    for file, rename_dict in zip(files, rename_dicts):
        df = pd.read_parquet(f'processed/{file}.parquet')
        df['model'] = file  # tag rows with their source model
        # Standardize exactly once here (the previous version redundantly
        # re-standardized frames[1:] a second time after this loop).
        frames.append(standardize_df(df, rename_dict, default_columns=canonical_columns))
    master_df = pd.concat(frames, ignore_index=True)
    master_df.to_parquet('processed/master.parquet')
    print("Final file saved to processed/master.parquet")
if __name__ == "__main__":
    # Run the merge only when executed as a script, not when imported.
    create_master_parquet()