File size: 4,793 Bytes
52481c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import xml.etree.ElementTree as ET
from pathlib import Path
from PIL import Image
import pandas as pd
from datasets import Dataset, Features, Image as HFImage, Value
from huggingface_hub import HfApi


def parse_kanjidic(xml_file_path):
    """
    Parse a kanjidic2.xml file and extract each kanji literal together
    with its English meanings.

    Args:
        xml_file_path (str or Path): Path to the kanjidic2.xml file.

    Returns:
        dict: Maps each kanji character to a list of its English meanings.
              Returns an empty dict if the file is missing or malformed.
    """
    print(f"Parsing {xml_file_path}...")
    try:
        tree = ET.parse(xml_file_path)
        root = tree.getroot()
    except (ET.ParseError, FileNotFoundError) as e:
        print(f"Error reading or parsing XML file: {e}")
        return {}

    kanji_meanings = {}
    # The kanjidic2.xml file contains one <character> element per kanji.
    for char_element in root.findall('character'):
        # Guard against malformed entries without a <literal> child;
        # calling .text on the result of find() directly would raise
        # AttributeError when the element is absent.
        literal_element = char_element.find('literal')
        literal = literal_element.text if literal_element is not None else None

        meanings = []
        # Meanings are found within reading_meaning -> rmgroup -> meaning
        rmgroup = char_element.find('reading_meaning/rmgroup')
        if rmgroup is not None:
            # Meanings without an 'm_lang' attribute are English by default.
            # Also skip empty <meaning/> elements whose .text is None, which
            # would otherwise poison downstream ", ".join(...) calls.
            for meaning_element in rmgroup.findall('meaning'):
                if 'm_lang' not in meaning_element.attrib and meaning_element.text:
                    meanings.append(meaning_element.text)

        if literal and meanings:
            kanji_meanings[literal] = meanings

    print(f"Successfully parsed {len(kanji_meanings)} kanji characters.")
    return kanji_meanings

def create_kanji_dataset(image_dir, kanji_data):
    """
    Build a Hugging Face Dataset by pairing rendered kanji images with
    their parsed English meanings.

    Args:
        image_dir (str or Path): Directory holding the rendered kanji PNGs.
        kanji_data (dict): Mapping of kanji character -> list of meanings,
            as produced by parse_kanjidic.

    Returns:
        datasets.Dataset or None: The assembled dataset, or None when no
        PNG files or no matching records are found.
    """
    image_dir = Path(image_dir)
    print(f"Processing images from {image_dir.resolve()}...")

    png_files = list(image_dir.glob('*.png'))
    if not png_files:
        print("No PNG images found in the specified directory.")
        return None

    records = []
    for png in png_files:
        # Filenames follow the U+<hex_codepoint>.png convention used by
        # the rendering script; recover the character from the codepoint.
        codepoint_hex = png.stem.split('U+')[-1]
        try:
            character = chr(int(codepoint_hex, 16))
        except ValueError:
            print(f"Skipping invalid filename: {png.name}")
            continue

        meanings = kanji_data.get(character)
        if meanings is not None:
            # One comma-separated caption per image.
            records.append({
                "image": str(png),
                "text": ", ".join(meanings),
            })

    if not records:
        print("No matching data found. Ensure image filenames correspond to kanji in the XML.")
        return None

    print(f"Created {len(records)} records for the dataset.")

    # Declare explicit features so the image column is handled as an
    # actual image type rather than a plain string path.
    features = Features({
        'image': HFImage(decode=True),
        'text': Value(dtype='string'),
    })

    # Route through a pandas DataFrame, then build the typed Dataset.
    frame = pd.DataFrame(records)
    return Dataset.from_pandas(frame, features=features)

def main():
    """Orchestrate parsing, dataset assembly, and upload to the Hub."""
    image_directory = "kanji_images"
    kanjidic_xml_path = "kanjidic2.xml"
    repo_id = "LLParallax/kanji-dataset"

    # Step 1: extract the kanji -> meanings mapping from the XML.
    meanings = parse_kanjidic(kanjidic_xml_path)
    if not meanings:
        return

    # Step 2: pair the rendered images with their meanings.
    dataset = create_kanji_dataset(image_directory, meanings)
    if dataset is None:
        return

    print("\nDataset created successfully. Sample record:")
    print(dataset[0])

    # Step 3: upload — push_to_hub converts to Parquet automatically.
    print(f"\nPushing dataset to Hugging Face Hub at {repo_id}...")
    try:
        # private=True would make the repository private instead.
        dataset.push_to_hub(repo_id, private=False)
        print("Dataset pushed successfully!")
        print(f"View your dataset at: https://huggingface.co/datasets/{repo_id}")
    except Exception as e:
        print(f"Failed to push dataset to Hub: {e}")


if __name__ == "__main__":
    main()