# mal_char/extract_bio_old.py
# NOTE: Hugging Face upload-page residue removed from the top of this file
# ("charsiu's picture / Upload 2 files / bcd0bca verified") — it was not
# valid Python and prevented the script from running.
import json
import os
import re
from concurrent.futures import ProcessPoolExecutor, as_completed

from bs4 import BeautifulSoup
def extract_biography_from_html(soup):
    """Extract a character's name and biography text from a parsed page.

    Parameters:
        soup: BeautifulSoup document for one character's HTML page.

    Returns:
        (name, biography) tuple. ``name`` is the text of the first
        ``<h2 class="normal_header">`` (``None`` if absent, with ``""`` as
        the biography). ``biography`` is the text of the sibling nodes
        between that header and the next ``<div>``, with ``<br>`` runs
        collapsed to single newlines; placeholder pages yield ``""``.
    """
    header = soup.find("h2", class_="normal_header")
    if not header:
        return None, ""
    name = header.get_text(strip=True)

    # Walk the header's following siblings until the next <div>, which
    # marks the end of the biography section on these pages.
    bio_parts = []
    current = header.next_sibling
    while current:
        if str(current).strip() == "":
            current = current.next_sibling
            continue
        if getattr(current, 'name', None) == "div":
            break
        if getattr(current, 'name', None) == "br":
            bio_parts.append("\n")
        else:
            bio_parts.append(current.get_text(strip=True) if hasattr(current, "get_text") else str(current).strip())
        current = current.next_sibling

    biography = " ".join(bio_parts)
    # Bug fix: joining with " " turned consecutive <br> markers into
    # "\n \n", so the old replace("\n\n", "\n") could never match (a space
    # always sat between the two newlines). Normalize instead: strip
    # horizontal whitespace around newlines, then collapse newline runs.
    biography = re.sub(r"[ \t]*\n[ \t]*", "\n", biography)
    biography = re.sub(r"\n{2,}", "\n", biography).strip()

    # MAL uses a stock placeholder when no biography exists; treat it as empty.
    if "no biography written" in biography.lower():
        biography = ""
    return name, biography
def extract_animeography_from_html(soup):
    """Return the anime credited to this character.

    Scrapes the table that follows the "Animeography" section header.
    Each entry is a dict with "title", "url", and "role" keys; the list is
    empty when the section or its table is missing.

    NOTE(review): the section header is looked up as a <div> here while the
    biography header is an <h2> with the same class — confirm against the
    saved pages that both selectors are intentional.
    """
    credits = []
    section = soup.find("div", class_="normal_header", string="Animeography")
    if not section:
        return credits
    table = section.find_next("table")
    if not table:
        return credits

    for row in table.find_all("tr"):
        cells = row.find_all("td")
        # Layout rows (spacers, headers) have fewer than two cells.
        if len(cells) < 2:
            continue
        link = cells[1].find("a", href=True)
        if not link:
            continue
        role_node = cells[1].find("small")
        credits.append({
            "title": link.text.strip(),
            "url": link['href'],
            "role": role_node.text.strip() if role_node else "Unknown",
        })
    return credits
def process_single_file(args):
    """Parse one saved character HTML page and write its JSON summary.

    Args:
        args: ``(input_path, output_path)`` tuple — packed so the function
            can be submitted directly to a process pool.

    Returns:
        A human-readable status line (saved / skipped / error). Exceptions
        are caught and reported in the string rather than raised, so one
        bad page cannot kill the pool.
    """
    input_path, output_path = args
    try:
        with open(input_path, "r", encoding="utf-8") as handle:
            soup = BeautifulSoup(handle.read(), "html.parser")

        name, bio = extract_biography_from_html(soup)
        animeography = extract_animeography_from_html(soup)

        # Pages without a recognizable name header are skipped, not saved.
        if not name:
            return f"⚠️ Skipped (no name found): {os.path.basename(input_path)}"

        record = {
            "name": name,
            "biography": bio,
            "animeography": animeography
        }
        with open(output_path, "w", encoding="utf-8") as out:
            json.dump(record, out, indent=2, ensure_ascii=False)
        return f"✅ Saved: {os.path.basename(output_path)}"
    except Exception as e:
        return f"❌ Error processing {os.path.basename(input_path)}: {e}"
def process_all_html_files_parallel(input_dir, output_dir, max_workers=None):
    """Convert every ``.html`` file in *input_dir* to JSON in *output_dir*.

    Work is fanned out across a process pool (HTML parsing is CPU-bound);
    each worker's status line is printed as soon as it completes, not in
    submission order. ``max_workers=None`` lets the executor pick the
    machine's core count.
    """
    os.makedirs(output_dir, exist_ok=True)

    jobs = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".html"):
            continue
        target_name = os.path.splitext(entry)[0] + ".json"
        jobs.append((os.path.join(input_dir, entry),
                     os.path.join(output_dir, target_name)))

    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        pending = [pool.submit(process_single_file, job) for job in jobs]
        for finished in as_completed(pending):
            print(finished.result())
# Update these paths as needed
input_folder = "output/mal_char_ind_pages"
output_folder = "output/char_bio_json"

# Bug fix: the pool launch was unguarded. ProcessPoolExecutor's "spawn"
# start method (default on Windows/macOS) re-imports this module in every
# worker, so an unguarded call re-launches the pool recursively and crashes.
if __name__ == "__main__":
    # Run with default CPU core count (or specify max_workers=4, etc.)
    process_all_html_files_parallel(input_folder, output_folder, max_workers=4)