|
|
import os
|
|
|
import numpy as np
|
|
|
import pickle
|
|
|
from imutils import paths
|
|
|
from sklearn.preprocessing import LabelEncoder
|
|
|
import pandas as pd
|
|
|
import logging
|
|
|
import sys
|
|
|
from PIL import Image
|
|
|
|
|
|
|
|
|
# Configure root logging with timestamps so dataset-preparation progress is traceable.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Module-level logger, per the standard one-logger-per-module convention.
logger = logging.getLogger(__name__)

# Target size (in pixels) that every input image is resized to.
img_height, img_width = 28, 28
|
|
|
|
|
|
def _load_image(image_path):
    """Read one image as a normalized float32 grayscale array, or None on failure.

    Returns an array of shape (img_height, img_width) with values in [0, 1].
    """
    try:
        with Image.open(image_path) as img:
            # Force single-channel grayscale if the file is stored otherwise.
            if img.mode != 'L':
                img = img.convert('L')
            # LANCZOS resampling: best quality when downscaling small glyphs.
            img = img.resize((img_width, img_height), Image.Resampling.LANCZOS)
            # Scale pixel values from [0, 255] into [0, 1].
            return np.array(img, dtype=np.float32) / 255.0
    except Exception as e:
        logger.debug(f"PIL failed to read {image_path}: {e}")
        return None


def get_data(imagePaths, csv_path, verbose=245):
    """Load character images and their numeric class labels.

    Each path is expected to look like .../<char>/<file>; the immediate
    parent folder name is mapped to a class id via the CSV at *csv_path*
    (which must have 'char' and 'class' columns). Unreadable, missing, or
    unmapped images are counted and skipped rather than aborting the run.

    Args:
        imagePaths: iterable of image file paths.
        csv_path: path to the char -> class mapping CSV (UTF-8).
        verbose: log progress every *verbose* successful images; <= 0 disables.

    Returns:
        (data, labels) where data is float32 with shape
        (N, img_height, img_width, 1) scaled to [0, 1] and labels is a
        1-D array of class ids, or (None, None) if the CSV cannot be read
        or no image loads successfully.
    """
    try:
        df = pd.read_csv(csv_path, encoding='utf-8')
        char_to_class = dict(zip(df['char'], df['class']))
        logger.info(f"Loaded CSV with {len(char_to_class)} character mappings")
    except Exception as e:
        logger.error(f"Error loading CSV: {e}")
        return None, None

    data = []
    labels = []
    failed_images = 0
    successful_images = 0

    for imagePath in imagePaths:
        try:
            if not os.path.exists(imagePath):
                failed_images += 1
                continue

            # The class character is the immediate parent directory name.
            path_parts = imagePath.split(os.path.sep)
            if len(path_parts) < 2:
                failed_images += 1
                continue
            folder_name = path_parts[-2]

            if folder_name not in char_to_class:
                failed_images += 1
                # Only log the first few unmapped characters to avoid log spam.
                if failed_images <= 5:
                    logger.warning(f"Unmapped character: '{folder_name}' in path: {imagePath}")
                continue

            image_array = _load_image(imagePath)
            if image_array is None:
                failed_images += 1
                continue

            data.append(image_array)
            labels.append(char_to_class[folder_name])
            successful_images += 1

            # Periodic progress logging (guarded so verbose <= 0 disables it).
            if verbose > 0 and successful_images % verbose == 0:
                logger.info(f"Processed {successful_images} images successfully")

        except Exception as e:
            # Catch-all so one corrupt path cannot abort the whole pass;
            # only the first few errors are logged in detail.
            failed_images += 1
            if failed_images <= 5:
                logger.error(f"Error processing image {imagePath}: {e}")
            continue

    logger.info(f"Successfully processed {successful_images} images")
    logger.info(f"Failed to process {failed_images} images")

    if successful_images == 0:
        logger.error("No images were successfully processed!")
        return None, None

    data = np.array(data)
    labels = np.array(labels)
    # Add an explicit single-channel axis for CNN-style consumers.
    data = data.reshape((data.shape[0], img_height, img_width, 1))

    # Report size in MiB. (Fixed: previously divided by 1024 * 1000.0,
    # which is neither MB nor MiB.)
    logger.info(f"Features matrix: {data.nbytes / (1024 * 1024.0):.1f}MB")
    logger.info(f"Number of classes: {len(np.unique(labels))}")
    logger.info(f"Total samples: {len(data)}")

    return data, labels
|
|
|
|
|
|
if __name__ == "__main__":
    # Root directory containing one sub-folder per character class.
    dataset_path = './dataset_ka_kha'

    # Guard clauses: bail out early on any precondition failure.
    if not os.path.exists(dataset_path):
        logger.error(f"Dataset path {dataset_path} does not exist!")
        sys.exit(1)

    imagePaths = list(paths.list_images(dataset_path))
    logger.info(f"Found {len(imagePaths)} images in dataset")

    if not imagePaths:
        logger.error("No images found in the dataset directory!")
        sys.exit(1)

    # Build the (features, labels) arrays; progress is logged every 20000 images.
    data, labels = get_data(imagePaths, 'sample.csv', 20000)
    if data is None or labels is None:
        logger.error("Failed to prepare data!")
        sys.exit(1)

    # Persist the prepared dataset as a single pickle for later training runs.
    os.makedirs('dataset_pickles', exist_ok=True)
    pickle_file = 'dataset_pickles/tigrigna_dataset.pickle'
    with open(pickle_file, 'wb') as out:
        pickle.dump((data, labels), out)

    logger.info("Data preparation complete. Saved to dataset_pickles/tigrigna_dataset.pickle")