--- license: apache-2.0 task_categories: - text-classification tags: - legal size_categories: - 1M Step 1: Installing libraries.") !pip install datasets -q !pip install huggingface_hub -q import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import numpy as np import os import shutil from datasets import load_dataset from huggingface_hub import hf_hub_download, login from google.colab import drive print(" --> Libraries installed and imported successfully!") # ================================================================================= # Step 2: Connect to Hugging Face and Google Drive # ============================================================================== # We will authenticate to Hugging Face so that we are able to download your dataset. # You will be prompted to insert a token, which you may retrieve from: # https://huggingface.co/settings/tokens print(" --> Step 2: Connecting to services.") login() # We'll also connect to Google Drive to store our model once it's trained. drive.mount('/content/drive') print("--> Services connected!") # ============================================================================== # Step 3: Load Your Dataset from the Hub # ============================================================================== # Here, we load your wonderful dataset directly from the Hugging Face Hub. print(" --> Step 3: Loading the dataset.") DATASET_REPO = "szili2011/captcha-ocr-dataset" # We first load the metadata from your 'dataset.csv' file. # This won't download the images yet, just the text information. print(" - Loading metadata from dataset.csv.") dataset_info = load_dataset(DATASET_REPO, data_files="dataset.csv") # Now we download the huge 'samples.zip' file. This will take a while! # hf_hub_download is smart and caches the file so you don't re-download it every time. 
# ------------------------------------------------------------------------------
# Step 3 (continued): download the image archive and build full file paths
# ------------------------------------------------------------------------------
# Pull the big archive from the Hub; hf_hub_download caches it locally so a
# re-run of this cell skips the download.
print(" - Downloading samples.zip (this may take a long time).")
zip_path = hf_hub_download(repo_id=DATASET_REPO, filename="samples.zip", repo_type="dataset")

# Extract into Colab's fast local disk for quick reads during training.
UNZIPPED_PATH = "/content/images"
print(f" - Unzipping images to {UNZIPPED_PATH}.")
if os.path.exists(UNZIPPED_PATH):
    shutil.rmtree(UNZIPPED_PATH)  # clean slate if a previous run left files behind
shutil.unpack_archive(zip_path, UNZIPPED_PATH, 'zip')

# After extraction the images live under '/content/images/samples/'.
SAMPLES_PATH = os.path.join(UNZIPPED_PATH, "samples")


def create_full_path(example):
    """Rewrite the CSV-relative 'filepath' (e.g. 'samples/xyz/123.png') into
    the absolute path of the freshly unzipped file, stored as 'image_path'."""
    relative_parts = example['filepath'].split('/')[1:]  # drop the leading 'samples'
    example['image_path'] = os.path.join(SAMPLES_PATH, *relative_parts)
    return example


print(" - Creating full image paths.")
dataset = dataset_info.map(create_full_path, num_proc=4)  # num_proc > 1 speeds this up

# Drop metadata columns the model never uses.
dataset = dataset.remove_columns(
    ['filepath', 'width', 'height', 'cycle', 'complexity', 'generation_mode', 'sample_number'])

# 95% train / 5% validation, seeded so the split is reproducible across runs.
dataset = dataset["train"].train_test_split(test_size=0.05, seed=42)
train_ds = dataset["train"]
val_ds = dataset["test"]
print(f" --> Dataset is ready! Training samples: {len(train_ds)}, Validation samples: {len(val_ds)}")

# ==============================================================================
# Step 4: Create the character vocabulary
# ==============================================================================
# A model can't read 'A', 'B', 'C' — it needs numbers. Below we build a mapping
# from each character to an integer id.
print(" --> Step 4: Creating character vocabulary.")

# Every unique character that appears in the training labels, in sorted order.
all_text = "".join(train_ds["text"])
characters = sorted(set(all_text))  # sorted() already returns a list — no extra list() wrappers needed

# StringLookup builds the char -> id mapping; the inverted copy maps ids back
# to characters for decoding predictions later.
char_to_num = layers.StringLookup(vocabulary=characters, mask_token=None)
num_to_char = layers.StringLookup(vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True)

print(f" - Vocabulary size: {char_to_num.vocabulary_size()}")
print(f" - Vocabulary: {''.join(char_to_num.get_vocabulary())}")

# ==============================================================================
# Step 5: The Preprocessing Pipeline (tf.data)
# ==============================================================================
# The data assembly line: converts image paths and labels into ready-to-use
# tensors for the model.
print(" --> Step 5: Creating TensorFlow data pipeline.")

# Fixed image geometry and batch size used by the whole pipeline and the model.
IMG_HEIGHT = 50
IMG_WIDTH = 200
BATCH_SIZE = 128


def encode_for_tf(example):
    """Map one {'image_path', 'text'} example to {'image', 'label'} tensors.

    The image comes out transposed (width, height, 1) so the RNN later reads a
    sequence of vertical slices; the label is a sequence of vocabulary ids.
    """
    # 1. Read the raw file bytes from its path.
    img = tf.io.read_file(example['image_path'])
    # 2. Decode as a grayscale PNG (1 channel).
    img = tf.io.decode_png(img, channels=1)
    # 3. Scale pixel values into the [0, 1] range.
    img = tf.image.convert_image_dtype(img, tf.float32)
    # 4. Force a consistent spatial size.
    img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])
    # 5. Transpose — key step for CRNNs: the time axis becomes the image width.
    img = tf.transpose(img, perm=[1, 0, 2])
    # 6. Convert the text label into vocabulary ids, one per character.
    label_str = tf.cast(example['text'], tf.string)
    label = char_to_num(tf.strings.unicode_split(label_str, input_encoding="UTF-8"))
    return {"image": img, "label": label}

# Next: convert the Hugging Face Datasets to tf.data pipelines that shuffle,
# batch, and prefetch for maximum efficiency.
# Convert the Hugging Face splits into tf.data pipelines: shuffle (train only),
# batch, preprocess, prefetch.
tf_train_dataset = (
    train_ds.to_tf_dataset(columns=['image_path', 'text'], shuffle=True, batch_size=BATCH_SIZE)
    .map(encode_for_tf, num_parallel_calls=tf.data.AUTOTUNE)
    .prefetch(tf.data.AUTOTUNE)
)
tf_val_dataset = (
    val_ds.to_tf_dataset(columns=['image_path', 'text'], shuffle=False, batch_size=BATCH_SIZE)
    .map(encode_for_tf, num_parallel_calls=tf.data.AUTOTUNE)
    .prefetch(tf.data.AUTOTUNE)
)
print("--> Data pipeline is built!")

# ==============================================================================
# Step 6: The Model Architecture
# ==============================================================================
# The CRNN has three sections:
#   1. CNN layers to "see" visual features in the image,
#   2. RNN layers to "read" the resulting feature sequence,
#   3. a final layer computing the CTC loss.
print(" --> Step 6: Building the CRNN model.")


class CTCLayer(layers.Layer):
    """A custom Keras layer to calculate the CTC loss."""

    def __init__(self, name=None):
        super().__init__(name=name)
        self.loss_fn = keras.backend.ctc_batch_cost

    def call(self, y_true, y_pred):
        # CTC needs per-sample sequence lengths; every sample in the batch is
        # given the padded batch-wide prediction/label length.
        batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
        ones_column = tf.ones(shape=(batch_len, 1), dtype="int64")
        input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64") * ones_column
        label_length = tf.cast(tf.shape(y_true)[1], dtype="int64") * ones_column
        # Register the loss on the layer; predictions pass through unchanged.
        self.add_loss(self.loss_fn(y_true, y_pred, input_length, label_length))
        return y_pred


def build_model():
    """Assemble and compile the CRNN: CNN features -> BiLSTM reader -> CTC."""
    # Two named inputs: the (transposed) image and the integer label sequence.
    input_img = layers.Input(shape=(IMG_WIDTH, IMG_HEIGHT, 1), name="image", dtype="float32")
    labels = layers.Input(name="label", shape=(None,), dtype="float32")

    # Part 1: CNN feature extractor — two conv+pool stages, 4x downsampling.
    x = layers.Conv2D(32, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same")(input_img)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same")(x)
    x = layers.MaxPooling2D((2, 2))(x)

    # Collapse the feature maps into a sequence: one timestep per image column.
    new_shape = ((IMG_WIDTH // 4), (IMG_HEIGHT // 4) * 64)
    x = layers.Reshape(target_shape=new_shape)(x)
    x = layers.Dense(64, activation="relu")(x)
    x = layers.Dropout(0.2)(x)

    # Part 2: bidirectional LSTMs read the column sequence.
    x = layers.Bidirectional(layers.LSTM(128, return_sequences=True, dropout=0.25))(x)
    x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, dropout=0.25))(x)

    # Part 3: per-timestep character distribution; vocabulary size + 1 for the
    # special CTC blank token.
    x = layers.Dense(char_to_num.vocabulary_size() + 1, activation="softmax", name="dense_output")(x)

    # Attach the CTC loss computation as a layer so Keras tracks it in fit().
    output = CTCLayer(name="ctc_loss")(labels, x)

    model = keras.models.Model(inputs=[input_img, labels], outputs=output, name="captcha_ocr_model")
    model.compile(optimizer=keras.optimizers.Adam())
    return model


# Build the model and print its structure.
model = build_model()
model.summary()

# ==============================================================================
# Step 7: Train the Model!
# ==============================================================================
print(" --> Step 7: Starting training.")

EPOCHS = 20  # increase for best results

# The checkpoint callback below saves the best version of the model (by
# validation loss) to Google Drive.
checkpoint_path = "/content/drive/My Drive/captcha_model_best.h5" model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_path, save_weights_only=False, monitor='val_loss', mode='min', save_best_only=True, verbose=1 ) # This callback will cause early stopping if the model doesn't improve early_stopping_callback = tf.keras.callbacks.EarlyStopping( monitor="val_loss", patience=5, restore_best_weights=True, verbose=1) # Let's start! history = model.fit( tf_train_dataset, validation_data=tf_val_dataset, epochs=EPOCHS, callbacks=[model_checkpoint_callback, early_stopping_callback],) print(" --> Training complete! Best model saved to your Google Drive.") # =============================================================================# # Step 8: Check the Results (Inference)# # ============================================================================= # Now that we have a trained model, let's attempt to check how well it does. print(" --> Step 8: Checking some predictions.") # First, we construct a prediction-only model from our trained model. # This model takes an image and outputs the raw predictions. prediction_model = keras.models.Model( model.get_layer(name="image").input, model.get_layer(name="dense_output").output ) # A helper function to decode the raw predictions into human-readable text def decode_batch_predictions(pred): input_len = np.ones(pred.shape[0]) * pred.shape[1] # We perform a greedy search for simplicity. Beam search can provide more accurate results. results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0] output_text = [] for res in results: # The [UNK] token is from our StringLookup layer, we can safely delete it. 
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8").replace("[UNK]", "") output_text.append(res) return output_text # Let's take one batch from our validation set and examine the predictions for batch in tf_val_dataset.take(1): batch_images = batch["image"] batch_labels = batch["label"] preds = prediction_model.predict(batch_images) pred_texts = decode_batch_predictions(preds) orig_texts = [] for label in batch_labels: label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8").replace("[UNK]", "") orig_texts.append(label) for i in range(min(10, len(pred_texts))): # Show the first 10 predictions print(f" Original: {orig_texts[i]:<10} | Predicted: {pred_texts[i]}") ``` Good Luck!