# Dataset header (HuggingFace page residue, kept as a comment so the file parses):
#   CataTQA processed data — author: cuiqiang — revision: 6f49b2b (verified)
#   Datasets / ArXiv / License: see the dataset card on HuggingFace.
import os
import pandas as pd
import warnings
# NOTE(review): blanket suppression hides ALL warnings (incl. pandas dtype
# warnings from read_csv below) — consider narrowing to specific categories.
warnings.filterwarnings('ignore')
def clean_and_analyze_csv_files(input_path, output_path):
    """Clean every CSV in *input_path* and save the result to *output_path*.

    For each ``.csv`` file directly inside *input_path*: drops rows and
    columns that are entirely NaN, prints basic stats (shape and
    missing-value rate), and writes the cleaned frame (without the pandas
    index) under the same filename in *output_path*.

    Parameters
    ----------
    input_path : str
        Directory containing the raw CSV files (not searched recursively).
    output_path : str
        Directory the cleaned CSVs are written to; created if missing.
    """
    # Bug fix: to_csv raises if the target directory does not exist.
    os.makedirs(output_path, exist_ok=True)
    for filename in os.listdir(input_path):
        if not filename.endswith('.csv'):
            continue
        file_path = os.path.join(input_path, filename)
        df = pd.read_csv(file_path)
        # Bug fix: placeholders were garbled to "(unknown)" — restore {filename}.
        print(f"processing: {filename}")
        # Drop fully-empty rows first, then fully-empty columns.
        df = df.dropna(how='all', axis=0).dropna(how='all', axis=1)
        rows, columns = df.shape
        # Guard against ZeroDivisionError when the cleaned frame is empty.
        cells = rows * columns
        missing_rate = df.isnull().sum().sum() / cells if cells else 0.0
        print(f"filename: {filename}")
        print(f"rows: {rows}")
        print(f"columns: {columns}")
        print(f"missing_rate: {missing_rate:.2%}")
        print("-" * 30)
        df.to_csv(os.path.join(output_path, filename), index=False)
def analyze_csv_folder(folder_path):
    """Recursively summarize all CSV files under *folder_path*.

    Walks the directory tree, accumulating the deep in-memory size,
    total row count, and total column count across every ``.csv`` file,
    then prints the three totals.

    Parameters
    ----------
    folder_path : str
        Root directory to walk.

    Returns
    -------
    tuple
        ``(total_memory_bytes, total_rows, total_columns)`` — returned in
        addition to being printed, so callers can use the totals.
    """
    total_memory = 0
    total_rows = 0
    total_columns = 0
    for root, _dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.csv'):
                df = pd.read_csv(os.path.join(root, file))
                # deep=True counts the actual bytes of object (string) columns.
                total_memory += df.memory_usage(deep=True).sum()
                rows, columns = df.shape
                total_rows += rows
                total_columns += columns
    print(f"Total memory size occupied by all CSV files: {total_memory}")
    print(f"Total rows in all CSV files: {total_rows}")
    # Bug fix: label previously read "Total total_columns".
    print(f"Total columns in all CSV files: {total_columns}")
    return total_memory, total_rows, total_columns
if __name__ == '__main__':
    # Entry point: clean the raw tables, then report totals on the cleaned copies.
    raw_dir = '../metadata/table_data/'
    cleaned_dir = './output'
    clean_and_analyze_csv_files(raw_dir, cleaned_dir)
    analyze_csv_folder('output')