Datasets:
ArXiv:
License:
File size: 1,930 Bytes
import os
import pandas as pd
import warnings
# Globally silence all warnings (e.g. pandas dtype-inference chatter) for clean console output.
# NOTE(review): a blanket 'ignore' also hides real problems — consider narrowing the filter.
warnings.filterwarnings('ignore')
def clean_and_analyze_csv_files(input_path, output_path):
    """Clean every CSV file in *input_path* and save the results to *output_path*.

    For each ``.csv`` file: drop rows and columns that are entirely NaN,
    print a short per-file report (name, shape, remaining missing-value rate),
    and write the cleaned frame (without the index) under the same filename
    in *output_path*.

    Parameters
    ----------
    input_path : str
        Directory containing the source ``.csv`` files (not searched recursively).
    output_path : str
        Destination directory; created if it does not exist.
    """
    # Create the destination up front so to_csv does not fail on first run.
    os.makedirs(output_path, exist_ok=True)
    for filename in os.listdir(input_path):
        if not filename.endswith('.csv'):
            continue
        file_path = os.path.join(input_path, filename)
        df = pd.read_csv(file_path)
        print(f"processing: {filename}")
        # Drop fully-empty rows first, then fully-empty columns.
        df = df.dropna(how='all', axis=0).dropna(how='all', axis=1)
        rows, columns = df.shape
        # Guard against an empty frame: rows * columns == 0 would divide by zero.
        cells = rows * columns
        missing_rate = df.isnull().sum().sum() / cells if cells else 0.0
        print(f"filename: {filename}")
        print(f"rows: {rows}")
        print(f"columns: {columns}")
        print(f"missing_rate: {missing_rate:.2%}")
        print("-" * 30)
        path = os.path.join(output_path, filename)
        df.to_csv(path, index=False)
def analyze_csv_folder(folder_path):
    """Walk *folder_path* recursively and print aggregate CSV statistics.

    Sums deep memory usage, row counts and column counts over every ``.csv``
    file found beneath *folder_path*, then prints the three totals.

    Parameters
    ----------
    folder_path : str
        Root directory to search (recursively) for ``.csv`` files.
    """
    total_memory = 0
    total_rows = 0
    total_columns = 0
    for root, _dirs, files in os.walk(folder_path):
        for file in files:
            if not file.endswith('.csv'):
                continue
            file_path = os.path.join(root, file)
            df = pd.read_csv(file_path)
            # deep=True accounts for the true size of object (string) columns.
            total_memory += df.memory_usage(deep=True).sum()
            rows, columns = df.shape
            total_rows += rows
            total_columns += columns
    print(f"Total memory size occupied by all CSV files: {total_memory}")
    print(f"Total rows in all CSV files: {total_rows}")
    # Fixed message typo: was "Total total_columns in all CSV files".
    print(f"Total columns in all CSV files: {total_columns}")
if __name__ == '__main__':
    # Clean the metadata CSVs into ./output, then summarize what was written.
    source_dir = '../metadata/table_data/'
    destination_dir = './output'
    clean_and_analyze_csv_files(source_dir, destination_dir)
    analyze_csv_folder('output')
|