# Taiwan-Netizen/prepare/convert.py
# Author: hibana2077
# Enhance convert.py to save DataFrame as CSV; add load_test.py for testing Parquet file
# Commit: 1f9bc59
import os
import pandas as pd
def read_text_file(file_path):
    """
    Read a UTF-8 text file and pair each non-blank line with its origin label.

    Args:
        file_path (str): Path to the text file.

    Returns:
        list: List of (text_line, file_origin) tuples, where file_origin is
            the file's base name without its final extension. Blank or
            whitespace-only lines are skipped. Returns an empty list if the
            file cannot be read or decoded.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            content = file.read()
        # splitlines() handles \n, \r\n and \r uniformly; split('\n') would
        # leave a trailing \r on every line of a CRLF-encoded file.
        lines = content.splitlines()
        # Strip only the last extension: splitext keeps "a.b" from "a.b.txt",
        # whereas split('.')[0] would truncate it to "a".
        origin = os.path.splitext(os.path.basename(file_path))[0]
        # Keep only non-blank lines, each tagged with its origin.
        return [(line, origin) for line in lines if line.strip()]
    except (OSError, UnicodeDecodeError) as e:
        # Narrowed from Exception: only I/O and decoding failures are
        # expected here; anything else should surface as a real bug.
        print(f"Error processing {file_path}: {e}")
        return []
def main(files=None, csv_path="train.csv", parquet_path="train.parquet"):
    """
    Aggregate text files into a deduplicated DataFrame and export it.

    Reads every existing file in *files*, collects (text, origin) pairs,
    drops duplicate text lines, and writes the result to both CSV and
    Parquet, then prints a per-origin summary.

    Args:
        files (list[str] | None): Paths of the text files to process.
            Defaults to the project's six source files.
        csv_path (str): Output path for the CSV export.
        parquet_path (str): Output path for the Parquet export.
    """
    if files is None:
        # Default corpus sources; missing files are reported and skipped.
        files = ["fb.txt", "threads.txt", "tbrain.txt", "ptt.txt", "dcard.txt", "discord.txt"]
    # Collect all (text, origin) pairs across the input files
    all_data = []
    for file_path in files:
        if os.path.exists(file_path):
            print(f"Processing {file_path}...")
            all_data.extend(read_text_file(file_path))
        else:
            print(f"File not found: {file_path}")
    # Deduplicate on the text column, keeping the first occurrence (so
    # 'origin' reflects the first file in which a line appeared).
    df = pd.DataFrame(all_data, columns=['text', 'origin'])
    df.drop_duplicates(subset=['text'], inplace=True)
    df.reset_index(drop=True, inplace=True)
    print(f"Total unique lines: {len(df)}")
    # Save to CSV
    df.to_csv(csv_path, index=False)
    print(f"CSV file created: {csv_path}")
    # Save to Parquet (requires a parquet engine such as pyarrow/fastparquet)
    df.to_parquet(parquet_path, index=False)
    print(f"Parquet file created: {parquet_path}")
    # Print summary
    print("\nSummary:")
    print(f"Total rows: {len(df)}")
    print("Rows by origin:")
    print(df['origin'].value_counts())
# Run the conversion only when executed as a script (not on import).
if __name__ == "__main__":
    main()