import xml.etree.ElementTree as ET
from pathlib import Path

import pandas as pd
from PIL import Image  # NOTE(review): appears unused in this file — confirm before removing
from datasets import Dataset, Features, Image as HFImage, Value
from huggingface_hub import HfApi  # NOTE(review): appears unused in this file — confirm before removing


def parse_kanjidic(xml_file_path):
    """
    Parse the kanjidic2.xml file to extract kanji literals and their English meanings.

    Args:
        xml_file_path (str or Path): Path to the kanjidic2.xml file.

    Returns:
        dict: A dictionary mapping each kanji character to a non-empty list of its
        English meanings. Empty if the file cannot be read or parsed.
    """
    print(f"Parsing {xml_file_path}...")
    try:
        tree = ET.parse(xml_file_path)
        root = tree.getroot()
    except (ET.ParseError, FileNotFoundError) as e:
        print(f"Error reading or parsing XML file: {e}")
        return {}

    kanji_meanings = {}
    # kanjidic2.xml contains one <character> element per kanji.
    for char_element in root.findall('character'):
        literal_element = char_element.find('literal')
        # Guard against malformed entries missing the <literal> tag
        # (the original called .text on a possible None).
        if literal_element is None:
            continue
        literal = literal_element.text

        meanings = []
        # Meanings are found within reading_meaning -> rmgroup -> meaning.
        rmgroup = char_element.find('reading_meaning/rmgroup')
        if rmgroup is not None:
            for meaning_element in rmgroup.findall('meaning'):
                # Meanings without an 'm_lang' attribute are English by default.
                # Skip empty <meaning/> tags whose .text is None — they would
                # break the ", ".join(...) downstream.
                if 'm_lang' not in meaning_element.attrib and meaning_element.text:
                    meanings.append(meaning_element.text)

        if literal and meanings:
            kanji_meanings[literal] = meanings

    print(f"Successfully parsed {len(kanji_meanings)} kanji characters.")
    return kanji_meanings


def create_kanji_dataset(image_dir, kanji_data):
    """
    Create a Hugging Face Dataset from a directory of kanji images and parsed meanings.

    Image filenames are expected to look like "U+<hex-codepoint>.png"; the
    codepoint is decoded to the kanji character and matched against kanji_data.

    Args:
        image_dir (str or Path): Directory containing the rendered kanji images.
        kanji_data (dict): Dictionary of kanji meanings from parse_kanjidic.

    Returns:
        datasets.Dataset or None: The assembled dataset, or None if no images
        were found or nothing matched the XML data.
    """
    image_dir = Path(image_dir)
    records = []

    print(f"Processing images from {image_dir.resolve()}...")

    image_paths = list(image_dir.glob('*.png'))
    if not image_paths:
        print("No PNG images found in the specified directory.")
        return None

    for img_path in image_paths:
        # The rendering script saves files as "U+<hex>.png"; take everything
        # after the "U+" marker as the hex codepoint.
        hex_codepoint = img_path.stem.split('U+')[-1]
        try:
            # Convert hex codepoint to the actual kanji character.
            kanji_char = chr(int(hex_codepoint, 16))
        except ValueError:
            print(f"Skipping invalid filename: {img_path.name}")
            continue

        if kanji_char in kanji_data:
            # Concatenate meanings into a single descriptive string.
            text_description = ", ".join(kanji_data[kanji_char])
            records.append({
                "image": str(img_path),
                "text": text_description
            })

    if not records:
        print("No matching data found. Ensure image filenames correspond to kanji in the XML.")
        return None

    print(f"Created {len(records)} records for the dataset.")

    # Create a DataFrame first, then convert to a Dataset.
    df = pd.DataFrame(records)

    # Declare features explicitly so the image column is decoded as an Image
    # type rather than left as a plain path string.
    features = Features({
        'image': HFImage(decode=True),
        'text': Value(dtype='string'),
    })

    dataset = Dataset.from_pandas(df, features=features)
    return dataset


def main():
    """Parse kanji meanings, build the image/text dataset, and push it to the Hub."""
    image_directory = "kanji_images"
    kanjidic_xml_path = "kanjidic2.xml"
    repo_id = "LLParallax/kanji-dataset"

    # --- Step 1: Parse Kanji Meanings ---
    kanji_meanings_data = parse_kanjidic(kanjidic_xml_path)
    if not kanji_meanings_data:
        return

    # --- Step 2: Create the Dataset Object ---
    kanji_dataset = create_kanji_dataset(image_directory, kanji_meanings_data)
    if kanji_dataset is None:
        return

    # BUG FIX: the original string literal was broken across a raw newline,
    # which is a SyntaxError; reconstructed as one literal.
    print("\nDataset created successfully. Sample record:")
    print(kanji_dataset[0])

    # --- Step 3: Push to Hugging Face Hub ---
    # push_to_hub converts the dataset to Parquet on upload.
    print(f"\nPushing dataset to Hugging Face Hub at {repo_id}...")
    try:
        kanji_dataset.push_to_hub(repo_id, private=False)  # Set to True for a private dataset
        print("Dataset pushed successfully!")
        print(f"View your dataset at: https://huggingface.co/datasets/{repo_id}")
    except Exception as e:
        # Boundary handler: report the Hub failure instead of crashing the script.
        print(f"Failed to push dataset to Hub: {e}")


if __name__ == "__main__":
    main()