import os
import pickle
import logging
import sys

import numpy as np
import pandas as pd
from imutils import paths
from PIL import Image
from sklearn.preprocessing import LabelEncoder

# Set up logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Target dimensions every character image is resized to.
img_height, img_width = 28, 28


def _load_char_mapping(csv_path):
    """Load the character -> class-id mapping from *csv_path*.

    The CSV must contain 'char' and 'class' columns. Returns the mapping
    dict, or None if the CSV cannot be read.
    """
    try:
        df = pd.read_csv(csv_path, encoding='utf-8')
        char_to_class = dict(zip(df['char'], df['class']))
        logger.info(f"Loaded CSV with {len(char_to_class)} character mappings")
        return char_to_class
    except Exception as e:
        logger.error(f"Error loading CSV: {e}")
        return None


def _read_grayscale_image(imagePath):
    """Read *imagePath* with PIL as a normalized grayscale array.

    Returns an (img_height, img_width) float32 array in [0, 1], or None
    if PIL fails to open/decode the file.
    """
    try:
        with Image.open(imagePath) as img:
            # Convert to grayscale
            if img.mode != 'L':
                img = img.convert('L')
            # Resize to target dimensions
            img = img.resize((img_width, img_height), Image.Resampling.LANCZOS)
            # Convert to numpy array and normalize
            return np.array(img, dtype=np.float32) / 255.0
    except Exception as e:
        logger.debug(f"PIL failed to read {imagePath}: {e}")
        return None


def get_data(imagePaths, csv_path, verbose=245):
    """Build the (data, labels) arrays for the character image dataset.

    Parameters
    ----------
    imagePaths : iterable of str
        Paths to character images. The image's parent folder name is
        treated as the character and looked up in the CSV mapping.
    csv_path : str
        Path to a CSV with 'char' and 'class' columns.
    verbose : int
        Log a progress message every *verbose* successfully processed
        images; values <= 0 disable progress logging.

    Returns
    -------
    tuple
        (data, labels) where data has shape (N, img_height, img_width, 1)
        with float32 values in [0, 1], or (None, None) if the CSV could
        not be read or no image was processed successfully.
    """
    # Load the CSV mapping
    char_to_class = _load_char_mapping(csv_path)
    if char_to_class is None:
        return None, None

    # initialize the list of features and labels
    data = []
    labels = []
    failed_images = 0
    successful_images = 0

    # loop over the input images
    for imagePath in imagePaths:
        try:
            # Check if file exists and is readable
            if not os.path.exists(imagePath):
                failed_images += 1
                continue

            # Extract the folder name (character) from the path
            path_parts = imagePath.split(os.path.sep)
            if len(path_parts) < 2:
                failed_images += 1
                continue
            folder_name = path_parts[-2]  # Second to last part is folder name

            # Check if this character is in our mapping
            if folder_name not in char_to_class:
                failed_images += 1
                if failed_images <= 5:  # only log the first few failures
                    logger.warning(f"Unmapped character: '{folder_name}' in path: {imagePath}")
                continue

            # Load and preprocess the image using PIL
            image_array = _read_grayscale_image(imagePath)
            if image_array is None:
                failed_images += 1
                continue

            # Add to our dataset with the class label from the mapping
            data.append(image_array)
            labels.append(char_to_class[folder_name])
            successful_images += 1

            # show an update every 'verbose' images
            # (successful_images was just incremented, so it is always > 0)
            if verbose > 0 and successful_images % verbose == 0:
                logger.info(f"Processed {successful_images} images successfully")
        except Exception as e:
            failed_images += 1
            if failed_images <= 5:  # Log first few errors
                logger.error(f"Error processing image {imagePath}: {e}")
            continue

    logger.info(f"Successfully processed {successful_images} images")
    logger.info(f"Failed to process {failed_images} images")

    if successful_images == 0:
        logger.error("No images were successfully processed!")
        return None, None

    # Convert to numpy arrays
    data = np.array(data)
    labels = np.array(labels)

    # Reshape data to add channel dimension (grayscale)
    data = data.reshape((data.shape[0], img_height, img_width, 1))

    # show some information on memory consumption of the images
    # (fixed: previous divisor mixed binary/decimal units: 1024 * 1000.0)
    logger.info(f"Features matrix: {data.nbytes / (1024 * 1024.0):.1f}MB")
    logger.info(f"Number of classes: {len(np.unique(labels))}")
    logger.info(f"Total samples: {len(data)}")

    return data, labels


if __name__ == "__main__":
    # Get image paths
    dataset_path = './dataset_ka_kha'
    if not os.path.exists(dataset_path):
        logger.error(f"Dataset path {dataset_path} does not exist!")
        sys.exit(1)

    imagePaths = list(paths.list_images(dataset_path))
    logger.info(f"Found {len(imagePaths)} images in dataset")
    if len(imagePaths) == 0:
        logger.error("No images found in the dataset directory!")
        sys.exit(1)

    # Process the data using PIL instead of OpenCV; report progress
    # every 20000 successfully processed images.
    data, labels = get_data(imagePaths, 'sample.csv', 20000)
    if data is None or labels is None:
        logger.error("Failed to prepare data!")
        sys.exit(1)

    # Save both data and labels
    os.makedirs('dataset_pickles', exist_ok=True)
    with open('dataset_pickles/tigrigna_dataset.pickle', 'wb') as f:
        pickle.dump((data, labels), f)
    logger.info("Data preparation complete. Saved to dataset_pickles/tigrigna_dataset.pickle")