File size: 5,160 Bytes
9966db9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import os
import numpy as np
import pickle
from imutils import paths
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import logging
import sys
from PIL import Image

# Set up logging: INFO level, timestamped lines shared by every logger below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Target size (pixels) every image is resized to before stacking into the dataset.
img_height, img_width = 28, 28

def _load_and_preprocess(image_path):
    """Load one image with PIL and return it as a normalized float32 array.

    The image is converted to grayscale, resized to (img_width, img_height)
    with LANCZOS resampling, and scaled to [0, 1]. Returns None when the file
    cannot be opened or decoded (best-effort loading: caller counts failures).
    """
    try:
        with Image.open(image_path) as img:
            if img.mode != 'L':
                img = img.convert('L')
            img = img.resize((img_width, img_height), Image.Resampling.LANCZOS)
            return np.array(img, dtype=np.float32) / 255.0
    except Exception as e:
        logger.debug(f"PIL failed to read {image_path}: {e}")
        return None


def get_data(imagePaths, csv_path, verbose=245):
    """Build (data, labels) arrays from a folder-per-character image dataset.

    Parameters
    ----------
    imagePaths : iterable of str
        Paths to image files. The immediate parent folder name of each image
        is the character it depicts.
    csv_path : str
        CSV with 'char' and 'class' columns mapping folder names to integer
        class labels.
    verbose : int, default 245
        Log a progress line every `verbose` successfully processed images;
        a value <= 0 disables progress logging.

    Returns
    -------
    (data, labels) : tuple of np.ndarray or (None, None)
        `data` has shape (n, img_height, img_width, 1), float32 in [0, 1];
        `labels` is the matching 1-D class array. (None, None) is returned
        when the CSV cannot be read or no image was processed successfully.
    """
    # Load the folder-name -> class-label mapping.
    try:
        df = pd.read_csv(csv_path, encoding='utf-8')
        char_to_class = dict(zip(df['char'], df['class']))
        logger.info(f"Loaded CSV with {len(char_to_class)} character mappings")
    except Exception as e:
        logger.error(f"Error loading CSV: {e}")
        return None, None

    data = []
    labels = []
    failed_images = 0
    successful_images = 0

    for imagePath in imagePaths:
        try:
            # Skip paths that vanished between listing and processing.
            if not os.path.exists(imagePath):
                failed_images += 1
                continue

            path_parts = imagePath.split(os.path.sep)
            if len(path_parts) < 2:
                failed_images += 1
                continue

            # The immediate parent folder names the character.
            folder_name = path_parts[-2]

            if folder_name not in char_to_class:
                failed_images += 1
                if failed_images <= 5:  # avoid flooding the log
                    logger.warning(f"Unmapped character: '{folder_name}' in path: {imagePath}")
                continue

            image_array = _load_and_preprocess(imagePath)
            if image_array is None:
                failed_images += 1
                continue

            data.append(image_array)
            labels.append(char_to_class[folder_name])
            successful_images += 1

            # Progress update every `verbose` successful images.
            # (Original also checked successful_images > 0, which is always
            # true right after the increment — removed as redundant.)
            if verbose > 0 and successful_images % verbose == 0:
                logger.info(f"Processed {successful_images} images successfully")

        except Exception as e:
            # Best-effort per-image loop: one bad file must not abort the run.
            failed_images += 1
            if failed_images <= 5:  # log only the first few errors
                logger.error(f"Error processing image {imagePath}: {e}")
            continue

    logger.info(f"Successfully processed {successful_images} images")
    logger.info(f"Failed to process {failed_images} images")

    if successful_images == 0:
        logger.error("No images were successfully processed!")
        return None, None

    data = np.array(data)
    labels = np.array(labels)

    # Add a trailing channel axis so the shape suits grayscale conv nets.
    data = data.reshape((data.shape[0], img_height, img_width, 1))

    # BUG FIX: the original divided by 1024 * 1000.0, mixing binary (KiB)
    # and decimal (KB) units; 1024 * 1024 reports true MiB.
    logger.info(f"Features matrix: {data.nbytes / (1024 * 1024):.1f}MB")
    logger.info(f"Number of classes: {len(np.unique(labels))}")
    logger.info(f"Total samples: {len(data)}")

    return data, labels

def main():
    """Index the dataset, build (data, labels) via get_data, and pickle them.

    Exits with status 1 when the dataset folder is missing, contains no
    images, or get_data fails; otherwise writes the (data, labels) tuple to
    dataset_pickles/tigrigna_dataset.pickle.
    """
    dataset_path = './dataset_ka_kha'
    if not os.path.exists(dataset_path):
        logger.error(f"Dataset path {dataset_path} does not exist!")
        sys.exit(1)

    imagePaths = list(paths.list_images(dataset_path))
    logger.info(f"Found {len(imagePaths)} images in dataset")

    if len(imagePaths) == 0:
        logger.error("No images found in the dataset directory!")
        sys.exit(1)

    # verbose=20000 logs progress every 20k successful images — far LESS
    # frequent than the default of 245. (The original comment claimed the
    # opposite; kept the value, corrected the note.)
    data, labels = get_data(imagePaths, 'sample.csv', 20000)

    if data is None or labels is None:
        logger.error("Failed to prepare data!")
        sys.exit(1)

    # Persist both arrays together so training code loads one artifact.
    os.makedirs('dataset_pickles', exist_ok=True)
    with open('dataset_pickles/tigrigna_dataset.pickle', 'wb') as f:
        pickle.dump((data, labels), f)

    logger.info("Data preparation complete. Saved to dataset_pickles/tigrigna_dataset.pickle")


if __name__ == "__main__":
    main()