import os
import datetime


def clean_data():
    """
    Reads the raw scraped text file for today's date, drops empty lines and
    lines shorter than MIN_WORDS_PER_LINE words, and saves the result to the
    processed data folder.
    """
    current_date = datetime.datetime.now().strftime("%Y-%m-%d")
    raw_filename = f"bbc_nepali_articles_{current_date}.txt"
    cleaned_filename = f"bbc_nepali_articles_{current_date}_cleaned.txt"
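
    # Raw input is read from data/raw; cleaned output goes to data/processed.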
    raw_file_path = os.path.join("data", "raw", raw_filename)
    processed_file_path = os.path.join("data", "processed", cleaned_filename)
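
    # Minimum number of words a line must have to be kept; shorter lines are
    # most likely scraping noise (navigation text, captions, stray fragments).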
    MIN_WORDS_PER_LINE = 5

    print("--- Starting data cleaning process ---")
    if not os.path.exists(raw_file_path):
        print(f"Error: Raw data file not found at '{raw_file_path}'")
        print("Please run the scraping script first.")
        return

    print(f"Reading raw data from: {raw_file_path}")
    with open(raw_file_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
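
    # Keep only non-empty lines that meet the minimum word count.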
    cleaned_lines = []
    for line in lines:
        text = line.strip()
        if text and len(text.split()) >= MIN_WORDS_PER_LINE:
            cleaned_lines.append(text)
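
    # Write the cleaned lines to data/processed, creating the folder if needed.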
    print(f"Saving cleaned data to: {processed_file_path}")
    os.makedirs(os.path.dirname(processed_file_path), exist_ok=True)
    with open(processed_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(cleaned_lines))

    print("\n--- Cleaning Summary ---")
    print(f"Total lines read: {len(lines)}")
    print(f"Lines after cleaning: {len(cleaned_lines)}")
    print(f"Lines discarded: {len(lines) - len(cleaned_lines)}")
    print("------------------------")


if __name__ == "__main__":
    clean_data()