# olas-predict-dataset / scripts / cloud_storage.py
# (Removed GitHub page-header artifact that was pasted into the file:
#  author "Skanislav", commit 4409d1e, "chore: fixes for cloud storage".)
from minio import Minio
from minio.error import S3Error
import os
import argparse
import pandas as pd
from dotenv import load_dotenv
from datetime import datetime
from utils import HIST_DIR, ROOT_DIR, TMP_DIR
load_dotenv()
# MinIO endpoint hosting the weekly-stats bucket.
MINIO_ENDPOINT = "minio.autonolas.tech"
# Credentials are read from the environment (.env loaded above); None if unset.
ACCESS_KEY = os.environ.get("CLOUD_ACCESS_KEY", None)
SECRET_KEY = os.environ.get("CLOUD_SECRET_KEY", None)
BUCKET_NAME = "weekly-stats"
# Top-level folder inside the bucket where all historical objects live.
FOLDER_NAME = "historical_data"
# Approximate file counts per look-back window, based on upload cadence.
FILES_IN_TWO_MONTHS = 16 # 2 files per week
FILES_IN_FOUR_MONTHS = 30 # four months ago we did not have two files per week but one
FILES_IN_SIX_MONTHS = 40 # 1 file per week
FILES_IN_EIGHT_MONTHS = 48
FILES_IN_TEN_MONTHS = 56
def initialize_client():
    """Build and return a MinIO client for the configured endpoint.

    Credentials come from the CLOUD_ACCESS_KEY / CLOUD_SECRET_KEY
    environment variables (read at module import time).
    """
    # secure=True means HTTPS; switch to False for plain-HTTP endpoints.
    return Minio(
        MINIO_ENDPOINT,
        access_key=ACCESS_KEY,
        secret_key=SECRET_KEY,
        secure=True,
    )
def upload_file(
    client, filename: str, file_path: str, extra_folder: str = None
) -> bool:
    """Upload a file to the bucket.

    Args:
        client: initialized MinIO client.
        filename: object name (without the folder prefix).
        file_path: local path of the file to upload.
        extra_folder: optional sub-folder under FOLDER_NAME.

    Returns:
        True on success, False when the upload raised an S3Error.
    """
    try:
        # Build the object key: historical_data[/extra_folder]/filename
        if extra_folder is not None:
            object_name = FOLDER_NAME + "/" + extra_folder + "/" + filename
        else:
            object_name = FOLDER_NAME + "/" + filename
        # BUG FIX: this log line previously printed the literal text
        # "(unknown)" instead of the actual filename.
        print(
            f"filename={filename}, object_name={object_name} and file_path={file_path}"
        )
        client.fput_object(
            BUCKET_NAME, object_name, file_path, part_size=10 * 1024 * 1024
        )  # 10MB parts
        print(f"File '{file_path}' uploaded as '{object_name}'.")
        return True
    except S3Error as err:
        print(f"Error uploading file: {err}")
        return False
def download_file(client, filename: str):
    """Fetch *filename* from the bucket into a local 'downloaded_' copy."""
    try:
        object_name = FOLDER_NAME + "/" + filename
        local_name = "downloaded_" + filename
        client.fget_object(BUCKET_NAME, object_name, local_name)
        print(f"File '{object_name}' downloaded as 'downloaded_{filename}'.")
    except S3Error as err:
        print(f"Error downloading file: {err}")
def load_historical_file(client, filename: str, extra_folder: str = None) -> bool:
    """Upload one file from the local HIST_DIR into cloud storage.

    Args:
        client: initialized MinIO client.
        filename: name of the file inside HIST_DIR.
        extra_folder: optional sub-folder under FOLDER_NAME.

    Returns:
        True on success, False otherwise (delegates to upload_file).
    """
    # BUG FIX: removed the dead `file_path = filename` assignment that was
    # immediately overwritten on the next line.
    file_path = HIST_DIR / filename
    return upload_file(client, filename, file_path, extra_folder)
def upload_historical_file(filename: str):
    """Convenience wrapper: create a client and upload one historical file.

    FIX: the upload's success/failure status used to be silently discarded;
    it is now returned (True on success, False on failure). Callers that
    ignored the previous None return are unaffected.
    """
    client = initialize_client()
    return load_historical_file(client=client, filename=filename)
def process_historical_files(client):
    """Upload every parquet file found in the HIST_DIR folder.

    Args:
        client: initialized MinIO client.
    """
    for filename in os.listdir(HIST_DIR):
        # Only parquet files are part of the historical dataset.
        if not filename.endswith(".parquet"):
            continue
        try:
            # BUG FIX: both log lines previously printed the literal text
            # "(unknown)" instead of the file being processed.
            if load_historical_file(client, filename):
                print(f"Successfully processed {filename}")
            else:
                print("Error loading the files")
        except Exception as e:
            print(f"Error processing {filename}: {str(e)}")
def download_tools_historical_files(client, skip_files_count: int) -> str:
    """Download recent "tools" parquet files from cloud storage into TMP_DIR.

    Lists all objects under FOLDER_NAME, keeps the parquet files whose name
    contains "tools", sorts them chronologically by their timestamped names
    and downloads the most recent ones, skipping the newest
    `skip_files_count` entries.

    Args:
        client: initialized MinIO client.
        skip_files_count: number of newest files to skip.

    Returns:
        The local filename of the last file downloaded, or None when there
        are not enough files in storage or an S3Error occurred.
        (BUG FIX: the annotation previously claimed pd.DataFrame, but the
        function has always returned a filename string or None.)
    """
    try:
        # Consider two files beyond the skipped ones.
        nr_files = skip_files_count + 2
        print(f"Downloading the last {nr_files} tools files from cloud storage")
        # recursive=True also lists objects inside subfolders.
        objects = client.list_objects(
            BUCKET_NAME, prefix=FOLDER_NAME + "/", recursive=True
        )
        all_objects = list(objects)
        print(f"Total objects found: {len(all_objects)}")
        tool_files = [
            obj.object_name
            for obj in all_objects
            if obj.object_name.endswith(".parquet") and "tools" in obj.object_name
        ]
        print(f"tool files found: {tool_files}")
        if len(tool_files) < nr_files - 1:  # need at least one file to collect
            return None
        # Filenames look like tools_YYYYMMDD_HHMMSS.parquet, so a plain
        # lexicographic sort orders them chronologically.
        tool_files.sort()
        selected_files = tool_files[-nr_files:]  # keep the newest nr_files
        print(f"Selected files: {selected_files}")
        # Newest first, then drop the newest skip_files_count entries.
        selected_files.reverse()
        selected_files = selected_files[skip_files_count:]
        # BUG FIX: initialize to avoid UnboundLocalError when the selection
        # ends up empty after skipping.
        local_filename = None
        for filename in selected_files:
            local_filename = filename.replace("historical_data/", "")
            print(f"Downloading {local_filename}")
            download_path = TMP_DIR / local_filename
            client.fget_object(BUCKET_NAME, filename, str(download_path))
        return local_filename
    except S3Error as err:
        print(f"Error downloading files: {err}")
        return None
if __name__ == "__main__":
    # Ad-hoc manual entry point. FIX: removed the large body of stale
    # commented-out experiments (one of which referenced an undefined
    # APRIL_FOLDER constant); the active behavior is unchanged.
    client = initialize_client()
    download_tools_historical_files(client, skip_files_count=0)