# Build a deduplicated training dataset (train.csv / train.parquet)
# from plain-text dumps of several social-media sources.
import os
import pandas as pd
def read_text_file(file_path):
    """
    Read a text file and return its non-blank lines tagged with their origin.

    Args:
        file_path (str): Path to the text file.

    Returns:
        list: List of (text_line, file_origin) tuples. file_origin is the
        file's base name truncated at the first dot (e.g. "fb" for "fb.txt").
        Blank / whitespace-only lines are skipped. Returns an empty list if
        the file cannot be read.
    """
    # Origin label: base name up to the first dot. split('.')[0] already
    # yields the whole name when there is no dot, so no separate check needed.
    origin = os.path.basename(file_path).split('.')[0]
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            # Iterate lazily instead of reading the whole file into memory;
            # strip only the trailing newline so inner whitespace is kept,
            # matching the previous split('\n') behavior.
            return [(line.rstrip('\n'), origin) for line in file if line.strip()]
    except Exception as e:
        # Best-effort: report and skip unreadable files rather than abort.
        print(f"Error processing {file_path}: {e}")
        return []
def main():
    """Collect lines from the known text dumps, deduplicate them, and write
    train.csv plus train.parquet, printing a per-origin summary."""
    # Source files expected in the current working directory.
    sources = ["fb.txt", "threads.txt", "tbrain.txt", "ptt.txt", "dcard.txt", "discord.txt"]

    rows = []
    for path in sources:
        # Guard clause: skip (with a notice) anything that isn't on disk.
        if not os.path.exists(path):
            print(f"File not found: {path}")
            continue
        print(f"Processing {path}...")
        rows.extend(read_text_file(path))

    # Deduplicate on the text column only (first occurrence wins) and
    # renumber the index so it is contiguous again.
    frame = pd.DataFrame(rows, columns=['text', 'origin'])
    frame.drop_duplicates(subset=['text'], inplace=True)
    frame.reset_index(drop=True, inplace=True)
    print(f"Total unique lines: {len(frame)}")

    # CSV output.
    csv_path = "train.csv"
    frame.to_csv(csv_path, index=False)
    print(f"CSV file created: {csv_path}")

    # Parquet output.
    # NOTE(review): to_parquet needs a parquet engine (pyarrow or
    # fastparquet) installed — confirm the runtime environment has one.
    parquet_path = "train.parquet"
    frame.to_parquet(parquet_path, index=False)
    print(f"Parquet file created: {parquet_path}")

    # Summary report.
    print("\nSummary:")
    print(f"Total rows: {len(frame)}")
    print("Rows by origin:")
    print(frame['origin'].value_counts())
# Script entry point: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()