{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "f288b0483e30432ca828f6a8fa83c170": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_24d4f3354a1e4cb595178f6c2a3d9184", "IPY_MODEL_12c0bac81f6c433597b9049d65ef3622", "IPY_MODEL_dc4e853706a841e4bb9116367a941777" ], "layout": "IPY_MODEL_3891ce65fe8644ec9613c56d95bace7f" } }, "24d4f3354a1e4cb595178f6c2a3d9184": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b790751bb9284c91920ac87606388ca9", "placeholder": "​", "style": "IPY_MODEL_484383d87bdc4126bf966a94f68aff93", "value": "Map: 100%" } }, "12c0bac81f6c433597b9049d65ef3622": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_99781a5386464804af0c5e1b9d6c57f6", "max": 700, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_f66bac73f7fb487bad105ba35f5d5e2a", "value": 700 } }, "dc4e853706a841e4bb9116367a941777": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1346c30a4e334dd9a153ab10b1fff76a", "placeholder": "​", "style": "IPY_MODEL_8212317092c04d118da6c8f5c623906d", "value": " 700/700 [00:03<00:00, 200.98 examples/s]" } }, "3891ce65fe8644ec9613c56d95bace7f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, 
"grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b790751bb9284c91920ac87606388ca9": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "484383d87bdc4126bf966a94f68aff93": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "99781a5386464804af0c5e1b9d6c57f6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f66bac73f7fb487bad105ba35f5d5e2a": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", 
"_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "1346c30a4e334dd9a153ab10b1fff76a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8212317092c04d118da6c8f5c623906d": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "cells": [ { "cell_type": "code", "execution_count": 3, "metadata": { "id": "AVRjhwEW3M7s" }, "outputs": [], "source": [ "import sys\n", "PROJECT_PATH = '/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun'\n", "sys.path.append(PROJECT_PATH)" ] }, { "cell_type": "code", "source": [ "import os\n", "os.environ[\"WANDB_DISABLED\"] = \"true\"" ], "metadata": { "id": "4KD1SwaF6ur9" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import os\n", "import json\n", "import torch\n", "from datasets import load_dataset\n", "from transformers import AutoTokenizer, Trainer, TrainingArguments, HfArgumentParser\n", "from transformers.data.data_collator import DataCollatorForTokenClassification\n", "import numpy as np\n", "from seqeval.metrics import classification_report\n", "\n", "# Import your custom model and its config from the other file\n", "from layout_roberta import LayoutRobertaForTokenClassification, RobertaConfig\n", "\n", "# ===================================================================\n", "# 1. CONFIGURATION\n", "# ===================================================================\n", "class NERConfig:\n", " # --- Paths ---\n", " ANNOTATION_FILE = \"/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout_ner_annotations.jsonl\"\n", " OUTPUT_DIR = \"/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout-aware-invoice-ner-model-final\"\n", "\n", " # --- Model ---\n", " BASE_MODEL = \"roberta-base\"\n", "\n", " # --- Training Hyperparameters ---\n", " LEARNING_RATE = 3e-5\n", " NUM_EPOCHS = 8 # Layout-aware models can learn quickly\n", " BATCH_SIZE = 4 # Adjust based on your memory (4 is a safe start)\n", " WEIGHT_DECAY = 0.01\n", "\n", "config = NERConfig()\n", "\n", "# ===================================================================\n", "# 2. 
DATA LOADING AND PREPARATION\n", "# ===================================================================\n", "print(\"Loading dataset from:\", config.ANNOTATION_FILE)\n", "full_dataset = load_dataset(\"json\", data_files=config.ANNOTATION_FILE, split=\"train\")\n", "\n", "# Create the mapping from tag names to integers\n", "tags = sorted({tag for example in full_dataset for tag in example[\"ner_tags\"]})\n", "tag2id = {tag: i for i, tag in enumerate(tags)}\n", "id2tag = {i: tag for tag, i in tag2id.items()}\n", "label_names = list(tag2id.keys())\n", "\n", "print(f\"Found {len(tags)} unique NER tags.\")\n", "\n", "# Initialize the tokenizer. `add_prefix_space=True` is required when feeding\n", "# pre-split words to RoBERTa's BPE tokenizer.\n", "tokenizer = AutoTokenizer.from_pretrained(config.BASE_MODEL, add_prefix_space=True)\n", "\n", "def preprocess_data(examples, max_length=512):\n", " \"\"\"\n", " Prepares words, bboxes, and tags for the LayoutRoBERTa model.\n", " \"\"\"\n", " words = examples['words']\n", " boxes = examples['bboxes']\n", " ner_tags = examples['ner_tags']\n", "\n", " # Tokenize the pre-split words; the tokenizer handles subword splitting\n", " tokenized_inputs = tokenizer(\n", " words,\n", " truncation=True,\n", " is_split_into_words=True,\n", " padding=\"max_length\",\n", " max_length=max_length\n", " )\n", "\n", " labels = []\n", " bboxes_aligned = []\n", " for i, label_sequence in enumerate(ner_tags):\n", " word_ids = tokenized_inputs.word_ids(batch_index=i)\n", " previous_word_idx = None\n", " label_ids = []\n", " bbox_sequence = []\n", "\n", " for word_idx in word_ids:\n", " if word_idx is None: # Special token like <s>, </s>, <pad>\n", " label_ids.append(-100)\n", " bbox_sequence.append([0, 0, 0, 0]) # Use a \"null\" bounding box\n", " elif word_idx != previous_word_idx: # First subword of a new word\n", " label_ids.append(tag2id[label_sequence[word_idx]])\n", " bbox_sequence.append(boxes[i][word_idx])\n", " else: # Subsequent subword of the same word\n", " label_ids.append(-100) # -100 is ignored by the loss; only the first subword is labeled\n", " bbox_sequence.append(boxes[i][word_idx]) # Propagate the word's bbox\n", " previous_word_idx = word_idx\n", "\n", " labels.append(label_ids)\n", " bboxes_aligned.append(bbox_sequence)\n", "\n", " tokenized_inputs[\"labels\"] = labels\n", " tokenized_inputs[\"bbox\"] = bboxes_aligned\n", " return tokenized_inputs\n", "\n", "print(\"Preprocessing dataset...\")\n", "processed_dataset = full_dataset.map(preprocess_data, batched=True, remove_columns=full_dataset.column_names)\n", "split_dataset = processed_dataset.train_test_split(test_size=0.15, seed=42)\n", "train_dataset = split_dataset[\"train\"]\n", "eval_dataset = split_dataset[\"test\"]\n",
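"\n", "# Optional sanity check (a sketch, not part of the original pipeline): labels should\n", "# sit only on first subwords, and every token position should carry a bounding box.\n", "sample = train_dataset[0]\n", "num_labeled = sum(1 for l in sample['labels'] if l != -100)\n", "assert len(sample['bbox']) == len(sample['input_ids'])\n", "print(f\"Sample 0: {num_labeled} labeled tokens of {len(sample['input_ids'])} total\")\n",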
"\n", "# ===================================================================\n", "# 3. METRICS COMPUTATION\n", "# ===================================================================\n", "def compute_metrics(p):\n", " \"\"\"Computes entity-level precision, recall, and F1 for token classification.\"\"\"\n", " predictions, labels = p\n", " predictions = np.argmax(predictions, axis=2)\n", "\n", " # Remove ignored index (-100)\n", " true_predictions = [\n", " [label_names[p] for (p, l) in zip(prediction, label) if l != -100]\n", " for prediction, label in zip(predictions, labels)\n", " ]\n", " true_labels = [\n", " [label_names[l] for (p, l) in zip(prediction, label) if l != -100]\n", " for prediction, label in zip(predictions, labels)\n", " ]\n", "\n", " report = classification_report(true_labels, true_predictions, output_dict=True)\n", " return {\n", " \"precision\": report[\"micro avg\"][\"precision\"],\n", " \"recall\": report[\"micro avg\"][\"recall\"],\n", " \"f1\": report[\"micro avg\"][\"f1-score\"],\n", " \"accuracy\": report[\"micro avg\"][\"precision\"], # seqeval reports no token accuracy; micro precision is used as a stand-in\n", " }\n", "\n", "# ===================================================================\n", "# 4. MODEL TRAINING\n", "# ===================================================================\n", "# Data collator batches the features (inputs were already padded to max_length)\n", "data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)\n", "\n", "# Load the custom config and add our layout-specific parameters\n", "model_config = RobertaConfig.from_pretrained(\n", " config.BASE_MODEL,\n", " num_labels=len(tags),\n", " id2label=id2tag,\n", " label2id=tag2id\n", ")\n", "model_config.max_2d_position_embeddings = 1024 # Coordinates are normalized to [0, 1000]; 1024 leaves headroom\n", "\n", "# Instantiate our custom model.\n", "# `ignore_mismatched_sizes=True` is crucial because we are replacing the embedding layer.\n", "model = LayoutRobertaForTokenClassification.from_pretrained(\n", " config.BASE_MODEL,\n", " config=model_config,\n", " ignore_mismatched_sizes=True\n", ")\n", "\n", "training_args = TrainingArguments(\n", " output_dir=config.OUTPUT_DIR,\n", " learning_rate=config.LEARNING_RATE,\n", " per_device_train_batch_size=config.BATCH_SIZE,\n", " per_device_eval_batch_size=config.BATCH_SIZE,\n", " num_train_epochs=config.NUM_EPOCHS,\n", " weight_decay=config.WEIGHT_DECAY,\n", " evaluation_strategy=\"epoch\",\n", " save_strategy=\"epoch\",\n", " load_best_model_at_end=True, # Reload the best checkpoint when training ends\n", " metric_for_best_model=\"f1\",\n", " push_to_hub=False,\n", " logging_steps=20,\n", " save_total_limit=1, # Keep only the best checkpoint on disk\n", ")\n", "\n", "trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=eval_dataset,\n", " tokenizer=tokenizer,\n", " data_collator=data_collator,\n", " compute_metrics=compute_metrics, # Add our metrics function\n", ")\n", "\n", "print(\"Starting Layout-Aware NER model training...\")\n", "trainer.train()\n", "\n", "# Save the final best model to a clean directory\n", "final_model_path = os.path.join(config.OUTPUT_DIR, \"best_model\")\n", "trainer.save_model(final_model_path)\n",
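"\n", "# Optional follow-up (a sketch; uncomment to run): re-score the reloaded best\n", "# checkpoint on the held-out split. trainer.evaluate() runs compute_metrics\n", "# and returns the eval_* metrics as a dict.\n", "# final_metrics = trainer.evaluate()\n", "# print({k: round(v, 4) for k, v in final_metrics.items()})\n", "\n", "print(f\"✓ Training complete! 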
Best model saved to: {final_model_path}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": [ "f288b0483e30432ca828f6a8fa83c170", "24d4f3354a1e4cb595178f6c2a3d9184", "12c0bac81f6c433597b9049d65ef3622", "dc4e853706a841e4bb9116367a941777", "3891ce65fe8644ec9613c56d95bace7f", "b790751bb9284c91920ac87606388ca9", "484383d87bdc4126bf966a94f68aff93", "99781a5386464804af0c5e1b9d6c57f6", "f66bac73f7fb487bad105ba35f5d5e2a", "1346c30a4e334dd9a153ab10b1fff76a", "8212317092c04d118da6c8f5c623906d" ] }, "id": "ygc3ibH03xQA", "outputId": "34d5400f-fd6a-42d8-d364-8983ed6e0963" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.12/dist-packages/transformers/utils/generic.py:441: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.\n", " _torch_pytree._register_pytree_node(\n", "/usr/local/lib/python3.12/dist-packages/transformers/utils/generic.py:309: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.\n", " _torch_pytree._register_pytree_node(\n", "/usr/local/lib/python3.12/dist-packages/transformers/utils/generic.py:309: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.\n", " _torch_pytree._register_pytree_node(\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Loading dataset from: /content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout_ner_annotations.jsonl\n", "Found 26 unique NER tags.\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.12/dist-packages/huggingface_hub/file_download.py:942: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", " warnings.warn(\n", "/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning: \n", "The secret `HF_TOKEN` does not exist in your Colab secrets.\n", "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", "You will be able to reuse this secret in all of your notebooks.\n", "Please note that authentication is recommended but still optional to access public models or datasets.\n", " warnings.warn(\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Preprocessing dataset...\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "Map: 0%| | 0/700 [00:00" ], "text/html": [ "\n", "
\n", " \n", " \n", " [1192/1192 10:53, Epoch 8/8]\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EpochTraining LossValidation LossPrecisionRecallF1Accuracy
10.0646000.0586150.9564790.9682910.9623490.956479
20.0297000.0150700.9933360.9949930.9941640.993336
30.0161000.0108660.9943370.9963280.9953320.994337
40.0093000.0098880.9953360.9973300.9963320.995336
50.0055000.0099360.9956700.9976640.9966660.995670
60.0062000.0086430.9963350.9979970.9971650.996335
70.0042000.0080440.9960030.9979970.9969990.996003
80.0014000.0080210.9963360.9983310.9973320.996336

" ] }, "metadata": {} }, { "output_type": "stream", "name": "stdout", "text": [ "✓ Training complete! Best model saved to: /content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout-aware-invoice-ner-model-final/best_model\n", "\n", "--- RUNNING A QUICK INFERENCE EXAMPLE ---\n", "Running model prediction...\n" ] }, { "output_type": "error", "ename": "RuntimeError", "evalue": "Expected all tensors to be on the same device, but got index is on cpu, different from other tensors on cuda:0 (when checking argument in method wrapper_CUDA__index_select)", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipython-input-4013341825.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Running model prediction...\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlogits\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1771\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1773\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1774\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1782\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m 
\u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1783\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1784\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1785\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout_roberta.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input_ids, bbox, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 171\u001b[0m ):\n\u001b[1;32m 172\u001b[0m \u001b[0;31m# Call the forward pass of our custom LayoutRobertaModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 173\u001b[0;31m outputs = self.roberta(\n\u001b[0m\u001b[1;32m 174\u001b[0m \u001b[0minput_ids\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 175\u001b[0m \u001b[0mbbox\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbbox\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1771\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1773\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1774\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1782\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1783\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1784\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1785\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout_roberta.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input_ids, bbox, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[0;31m# Generate embeddings using our custom embedding layer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minputs_embeds\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 103\u001b[0;31m embedding_output = self.embeddings(\n\u001b[0m\u001b[1;32m 104\u001b[0m \u001b[0minput_ids\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minput_ids\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0mbbox\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbbox\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1771\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1773\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1774\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1782\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1783\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1784\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1785\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m 
\u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout_roberta.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input_ids, bbox, token_type_ids, position_ids)\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;31m# --- Get Embeddings ---\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0;31m# 1. Word embeddings from token IDs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 55\u001b[0;31m \u001b[0mword_embeds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_embeddings\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_ids\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 56\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0;31m# 2. 1D Position embeddings\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1771\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1773\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1774\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1782\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1783\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1784\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1785\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/sparse.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 190\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 191\u001b[0m 
\u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 192\u001b[0;31m return F.embedding(\n\u001b[0m\u001b[1;32m 193\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/functional.py\u001b[0m in \u001b[0;36membedding\u001b[0;34m(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)\u001b[0m\n\u001b[1;32m 2544\u001b[0m \u001b[0;31m# remove once script supports set_grad_enabled\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2545\u001b[0m \u001b[0m_no_grad_embedding_renorm_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_norm\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnorm_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2546\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membedding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpadding_idx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscale_grad_by_freq\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2547\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2548\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mRuntimeError\u001b[0m: Expected all tensors to be on the same device, but got index is on cpu, different from other tensors on cuda:0 (when checking argument in method wrapper_CUDA__index_select)" ] } ] }, { "cell_type": "code", "source": [ "# ===================================================================\n", "# 5. 
INFERENCE EXAMPLE\n", "# ===================================================================\n", "print(\"\\n--- RUNNING A QUICK INFERENCE EXAMPLE ---\")\n", "\n", "# Get the device the model is on; the run recorded above failed precisely because\n", "# the inputs were built on the CPU while the model sat on cuda:0.\n", "device = model.device\n", "\n", "# Get an example from the evaluation dataset\n", "example = eval_dataset[0]\n", "tokens = tokenizer.convert_ids_to_tokens(example['input_ids'])\n", "\n", "# Manually prepare inputs and move them to the same device as the model\n", "inputs = {\n", " \"input_ids\": torch.tensor([example['input_ids']]).to(device),\n", " \"attention_mask\": torch.tensor([example['attention_mask']]).to(device),\n", " \"bbox\": torch.tensor([example['bbox']]).to(device),\n", " \"return_dict\": True # Force return_dict to True\n", "}\n", "\n", "print(\"Running model prediction...\")\n", "model.eval() # Set model to evaluation mode\n", "with torch.no_grad():\n", " outputs = model(**inputs)\n", " # Handle both tuple and dict outputs\n", " if isinstance(outputs, tuple):\n", " logits = outputs[0] # First element is logits\n", " else:\n", " logits = outputs.logits\n", " predictions = torch.argmax(logits, dim=2)\n", "\n", "print(\"\\n--- PARSED ENTITIES (EXAMPLE) ---\")\n", "predicted_labels = predictions[0].cpu().tolist() # Move predictions back to CPU\n", "true_labels = example['labels']\n", "\n", "for token, pred_id, true_label_id in zip(tokens, predicted_labels, true_labels):\n", " if token not in [tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token]:\n", " if true_label_id != -100: # Skip positions the loss ignored (special tokens, subword continuations)\n", " predicted_label = id2tag[pred_id]\n", " true_label = id2tag[true_label_id]\n", " match = \"✓\" if predicted_label == true_label else \"✗\"\n", " if predicted_label != 'O' or true_label != 'O': # Only show non-O tags\n", " print(f\"{match} Token: {token:15s} | Predicted: {predicted_label:20s} | True: {true_label}\")\n", "\n", "print(\"\\n✓ Inference example complete!\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Vtnhqy0n4ZcJ", "outputId": "089d8d3e-f2e1-4881-d668-9672240f2913" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "--- RUNNING A QUICK INFERENCE EXAMPLE ---\n", "Running model prediction...\n", "\n", "--- PARSED ENTITIES (EXAMPLE) ---\n", "✓ Token: Ġ25 | Predicted: B-INV_NO | True: B-INV_NO\n", "✓ Token: ĠS | Predicted: B-SELLER_NAME | True: B-SELLER_NAME\n", "✓ Token: ĠELECT | Predicted: I-SELLER_NAME | True: I-SELLER_NAME\n", "✓ Token: Ġ16 | Predicted: B-DATE | True: B-DATE\n", "✓ Token: Ġ05 | Predicted: B-SELLER_ADDR | True: B-SELLER_ADDR\n", "✓ Token: ĠMoh | Predicted: I-SELLER_ADDR | True: I-SELLER_ADDR\n", "✓ Token: ĠRoad | Predicted: I-SELLER_ADDR | True: I-SELLER_ADDR\n", "✓ Token: ĠHaj | Predicted: I-SELLER_ADDR | True: I-SELLER_ADDR\n", "✓ Token: Ġ06 | Predicted: B-SELLER_TAX_ID | True: B-SELLER_TAX_ID\n", "✓ Token: ĠR | Predicted: B-CLIENT_NAME | True: B-CLIENT_NAME\n", "✓ Token: ĠNAD | Predicted: I-CLIENT_NAME | True: I-CLIENT_NAME\n", "✓ Token: ĠH | Predicted: B-CLIENT_ADDR | True: B-CLIENT_ADDR\n", "✓ Token: Ġ7 | Predicted: I-CLIENT_ADDR | True: I-CLIENT_ADDR\n", "✓ Token: ĠBo | Predicted: I-CLIENT_ADDR | True: I-CLIENT_ADDR\n", "✓ Token: ĠNag | Predicted: I-CLIENT_ADDR | True: I-CLIENT_ADDR\n", "✓ Token: ĠBh | Predicted: I-CLIENT_ADDR | True: I-CLIENT_ADDR\n", "✓ Token: Ġ6 | Predicted: B-CLIENT_PHONE | True: B-CLIENT_PHONE\n", "✓ Token: Ġ31 | Predicted: B-CLIENT_TAX_ID | True: B-CLIENT_TAX_ID\n", "✓ Token: ĠB | Predicted: 
B-ITEM_DESC | True: B-ITEM_DESC\n", "✓ Token: ĠSpeaker | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: ĠModel | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: ĠB | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: Ġ85 | Predicted: B-ITEM_HSN | True: B-ITEM_HSN\n", "✓ Token: Ġ2 | Predicted: B-ITEM_QTY | True: B-ITEM_QTY\n", "✓ Token: Ġ50 | Predicted: B-ITEM_PRICE | True: B-ITEM_PRICE\n", "✓ Token: Ġ100 | Predicted: B-ITEM_AMOUNT | True: B-ITEM_AMOUNT\n", "✓ Token: ĠTV | Predicted: B-ITEM_DESC | True: B-ITEM_DESC\n", "✓ Token: ĠBike | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: Ġ2 | Predicted: B-ITEM_QTY | True: B-ITEM_QTY\n", "✓ Token: ĠModel | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: ĠTV | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: Ġ87 | Predicted: B-ITEM_HSN | True: B-ITEM_HSN\n", "✓ Token: Ġ1 | Predicted: B-ITEM_QTY | True: B-ITEM_QTY\n", "✓ Token: Ġ66 | Predicted: B-ITEM_PRICE | True: B-ITEM_PRICE\n", "✓ Token: Ġ66 | Predicted: B-ITEM_PRICE | True: B-ITEM_PRICE\n", "✓ Token: ĠNikon | Predicted: B-ITEM_DESC | True: B-ITEM_DESC\n", "✓ Token: ĠCamera | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: ĠModel | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: ĠN | Predicted: I-ITEM_DESC | True: I-ITEM_DESC\n", "✓ Token: Ġ85 | Predicted: B-ITEM_HSN | True: B-ITEM_HSN\n", "✓ Token: Ġ1 | Predicted: B-ITEM_QTY | True: B-ITEM_QTY\n", "✓ Token: Ġ60 | Predicted: B-ITEM_PRICE | True: B-ITEM_PRICE\n", "✓ Token: Ġ60 | Predicted: B-ITEM_PRICE | True: B-ITEM_PRICE\n", "✓ Token: Ġ227 | Predicted: B-SUBTOTAL | True: B-SUBTOTAL\n", "✓ Token: Ġ13 | Predicted: B-CGST | True: B-CGST\n", "✓ Token: Ġ13 | Predicted: B-CGST | True: B-CGST\n", "✓ Token: Ġ255 | Predicted: B-TOTAL | True: B-TOTAL\n", "\n", "✓ Inference example complete!\n" ] } ] }, { "cell_type": "code", "source": [ "!pip install paddlepaddle-gpu==2.6.2 paddleocr==2.10.0" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "8neo7QNT-Su1", "outputId": "1c7b0cf5-48fe-4986-b4af-deff9d0b01bf" }, "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting paddlepaddle-gpu==2.6.2\n", " Downloading paddlepaddle_gpu-2.6.2-cp312-cp312-manylinux1_x86_64.whl.metadata (8.6 kB)\n", "Collecting paddleocr==2.10.0\n", " Downloading paddleocr-2.10.0-py3-none-any.whl.metadata (12 kB)\n", "Requirement already satisfied: httpx in /usr/local/lib/python3.12/dist-packages (from paddlepaddle-gpu==2.6.2) (0.28.1)\n", "Requirement already satisfied: numpy>=1.13 in /usr/local/lib/python3.12/dist-packages (from paddlepaddle-gpu==2.6.2) (2.0.2)\n", "Requirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from paddlepaddle-gpu==2.6.2) (11.3.0)\n", "Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from paddlepaddle-gpu==2.6.2) (4.4.2)\n", "Collecting astor (from paddlepaddle-gpu==2.6.2)\n", " Downloading astor-0.8.1-py2.py3-none-any.whl.metadata (4.2 kB)\n", "Collecting opt-einsum==3.3.0 (from paddlepaddle-gpu==2.6.2)\n", " Downloading opt_einsum-3.3.0-py3-none-any.whl.metadata (6.5 kB)\n", "Requirement already satisfied: protobuf>=3.20.2 in /usr/local/lib/python3.12/dist-packages (from paddlepaddle-gpu==2.6.2) (5.29.5)\n", "Requirement already satisfied: shapely in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (2.1.2)\n", "Requirement already satisfied: scikit-image in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (0.25.2)\n", 
"Collecting pyclipper (from paddleocr==2.10.0)\n", " Downloading pyclipper-1.3.0.post6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.0 kB)\n", "Collecting lmdb (from paddleocr==2.10.0)\n", " Downloading lmdb-1.7.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (1.4 kB)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (4.67.1)\n", "Collecting rapidfuzz (from paddleocr==2.10.0)\n", " Downloading rapidfuzz-3.14.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (12 kB)\n", "Requirement already satisfied: opencv-python in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (4.12.0.88)\n", "Requirement already satisfied: opencv-contrib-python in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (4.12.0.88)\n", "Requirement already satisfied: cython in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (3.0.12)\n", "Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (6.0.3)\n", "Collecting python-docx (from paddleocr==2.10.0)\n", " Downloading python_docx-1.2.0-py3-none-any.whl.metadata (2.0 kB)\n", "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (4.13.5)\n", "Requirement already satisfied: fonttools>=4.24.0 in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (4.60.1)\n", "Collecting fire>=0.3.0 (from paddleocr==2.10.0)\n", " Downloading fire-0.7.1-py3-none-any.whl.metadata (5.8 kB)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (2.32.4)\n", "Requirement already satisfied: albumentations in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (2.0.8)\n", "Requirement already satisfied: albucore in /usr/local/lib/python3.12/dist-packages (from paddleocr==2.10.0) (0.0.24)\n", "Requirement already satisfied: termcolor in /usr/local/lib/python3.12/dist-packages (from fire>=0.3.0->paddleocr==2.10.0) (3.2.0)\n", "Requirement already satisfied: stringzilla>=3.10.4 in /usr/local/lib/python3.12/dist-packages (from albucore->paddleocr==2.10.0) (4.2.3)\n", "Requirement already satisfied: simsimd>=5.9.2 in /usr/local/lib/python3.12/dist-packages (from albucore->paddleocr==2.10.0) (6.5.3)\n", "Requirement already satisfied: opencv-python-headless>=4.9.0.80 in /usr/local/lib/python3.12/dist-packages (from albucore->paddleocr==2.10.0) (4.12.0.88)\n", "Requirement already satisfied: scipy>=1.10.0 in /usr/local/lib/python3.12/dist-packages (from albumentations->paddleocr==2.10.0) (1.16.3)\n", "Requirement already satisfied: pydantic>=2.9.2 in /usr/local/lib/python3.12/dist-packages (from albumentations->paddleocr==2.10.0) (2.11.10)\n", "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->paddleocr==2.10.0) (2.8)\n", "Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->paddleocr==2.10.0) (4.15.0)\n", "Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx->paddlepaddle-gpu==2.6.2) (4.11.0)\n", "Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx->paddlepaddle-gpu==2.6.2) (2025.10.5)\n", "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx->paddlepaddle-gpu==2.6.2) 
(1.0.9)\n", "Requirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx->paddlepaddle-gpu==2.6.2) (3.11)\n", "Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx->paddlepaddle-gpu==2.6.2) (0.16.0)\n", "Requirement already satisfied: lxml>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from python-docx->paddleocr==2.10.0) (5.4.0)\n", "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->paddleocr==2.10.0) (3.4.4)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->paddleocr==2.10.0) (2.5.0)\n", "Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from scikit-image->paddleocr==2.10.0) (3.5)\n", "Requirement already satisfied: imageio!=2.35.0,>=2.33 in /usr/local/lib/python3.12/dist-packages (from scikit-image->paddleocr==2.10.0) (2.37.0)\n", "Requirement already satisfied: tifffile>=2022.8.12 in /usr/local/lib/python3.12/dist-packages (from scikit-image->paddleocr==2.10.0) (2025.10.16)\n", "Requirement already satisfied: packaging>=21 in /usr/local/lib/python3.12/dist-packages (from scikit-image->paddleocr==2.10.0) (25.0)\n", "Requirement already satisfied: lazy-loader>=0.4 in /usr/local/lib/python3.12/dist-packages (from scikit-image->paddleocr==2.10.0) (0.4)\n", "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations->paddleocr==2.10.0) (0.7.0)\n", "Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations->paddleocr==2.10.0) (2.33.2)\n", "Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations->paddleocr==2.10.0) (0.4.2)\n", "Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.12/dist-packages (from anyio->httpx->paddlepaddle-gpu==2.6.2) (1.3.1)\n", "Downloading paddlepaddle_gpu-2.6.2-cp312-cp312-manylinux1_x86_64.whl (758.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m758.9/758.9 MB\u001b[0m \u001b[31m603.1 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading paddleocr-2.10.0-py3-none-any.whl (2.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m92.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading opt_einsum-3.3.0-py3-none-any.whl (65 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m65.5/65.5 kB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading fire-0.7.1-py3-none-any.whl (115 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.9/115.9 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading astor-0.8.1-py2.py3-none-any.whl (27 kB)\n", "Downloading lmdb-1.7.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (299 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m299.4/299.4 kB\u001b[0m \u001b[31m28.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading pyclipper-1.3.0.post6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (963 kB)\n", "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m963.8/963.8 kB\u001b[0m \u001b[31m68.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading python_docx-1.2.0-py3-none-any.whl (252 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m253.0/253.0 kB\u001b[0m \u001b[31m28.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading rapidfuzz-3.14.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (3.2 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m101.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: pyclipper, lmdb, rapidfuzz, python-docx, opt-einsum, fire, astor, paddlepaddle-gpu, paddleocr\n", " Attempting uninstall: opt-einsum\n", " Found existing installation: opt_einsum 3.4.0\n", " Uninstalling opt_einsum-3.4.0:\n", " Successfully uninstalled opt_einsum-3.4.0\n", "Successfully installed astor-0.8.1 fire-0.7.1 lmdb-1.7.5 opt-einsum-3.3.0 paddleocr-2.10.0 paddlepaddle-gpu-2.6.2 pyclipper-1.3.0.post6 python-docx-1.2.0 rapidfuzz-3.14.3\n" ] } ] }, { "cell_type": "code", "source": [ "import os\n", "import json\n", "import torch\n", "from transformers import AutoTokenizer\n", "from paddleocr import PaddleOCR\n", "from collections import defaultdict\n", "import cv2\n", "\n", "from layout_roberta import LayoutRobertaForTokenClassification\n", "\n", "\n", "# ===================================================================\n", "# 1. CONFIGURATION\n", "# ===================================================================\n", "class InferenceConfig:\n", " MODEL_PATH = r\"/content/drive/MyDrive/durable_invoices_dataset_v9_fixed_finalrun/layout-aware-invoice-ner-model-final/best_model\"\n", "\n", " # Path to the new invoice image you want to process\n", " IMAGE_PATH = r\"/content/original-scanned.jpg\"\n", "\n", " # Confidence threshold for NER predictions\n", " SCORE_THRESHOLD = 0.85\n",
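"\n", "# Quick guard (a sketch, not in the original run): fail fast with a clear message\n", "# if the fine-tuned model directory is missing before loading anything heavy.\n", "assert os.path.isdir(InferenceConfig.MODEL_PATH), \"MODEL_PATH not found; is Drive mounted?\"\n", "\n", "# ===================================================================\n", "# 2. 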
HELPER FUNCTIONS (OCR & Post-Processing)\n", "# ===================================================================\n", "\n", "def run_ocr_on_image(image_path: str, ocr_engine: PaddleOCR) -> tuple[list, list, tuple]:\n", " \"\"\"\n", " Performs OCR on an image and returns words, normalized bboxes, and image dimensions.\n", " \"\"\"\n", " if not os.path.exists(image_path):\n", " raise FileNotFoundError(f\"Image not found at path: {image_path}\")\n", "\n", " words = []\n", " bboxes = []\n", " try:\n", " img = cv2.imread(image_path)\n", " if img is None:\n", " raise IOError(\"Could not read image with OpenCV.\")\n", " img_height, img_width = img.shape[:2]\n", "\n", " result = ocr_engine.ocr(img, cls=True)\n", " if not result or not result[0]:\n", " print(\"Warning: OCR did not detect any text.\")\n", " return [], [], (img_width, img_height)\n", "\n", " for line in result[0]:\n", " bbox_points, (text, confidence) = line\n", " if confidence < 0.5:\n", " continue\n", "\n", " text_words = text.split()\n", " if not text_words:\n", " continue\n", "\n", " xs = [p[0] for p in bbox_points]\n", " ys = [p[1] for p in bbox_points]\n", " x0, x1 = min(xs), max(xs)\n", " y0, y1 = min(ys), max(ys)\n", "\n", " # PaddleOCR returns line-level boxes; approximate per-word boxes by\n", " # splitting the line box evenly across its words.\n", " line_width = x1 - x0\n", " if len(text_words) > 1 and line_width > 0:\n", " word_width_avg = line_width / len(text_words)\n", " for idx, word in enumerate(text_words):\n", " word_x0 = x0 + (idx * word_width_avg)\n", " word_x1 = word_x0 + word_width_avg\n", " norm_bbox = [\n", " int((word_x0 / img_width) * 1000),\n", " int((y0 / img_height) * 1000),\n", " int((word_x1 / img_width) * 1000),\n", " int((y1 / img_height) * 1000)\n", " ]\n", " words.append(word)\n", " bboxes.append(norm_bbox)\n", " else:\n", " norm_bbox = [\n", " int((x0 / img_width) * 1000),\n", " int((y0 / img_height) * 1000),\n", " int((x1 / img_width) * 1000),\n", " int((y1 / img_height) * 1000)\n", " ]\n", " words.append(text)\n", " bboxes.append(norm_bbox)\n", "\n", " except Exception as e:\n", " print(f\"An error occurred during OCR processing: {e}\")\n", " return [], [], (0, 0)\n", "\n", " return words, bboxes, (img_width, img_height)\n", "\n", "\n", "def group_and_clean_entities(ner_results: list, score_threshold: float = 0.85) -> dict:\n", " \"\"\"\n", " Cleans, groups, and structures raw NER predictions into an invoice dict.\n", " \"\"\"\n", " # Step 1: Group B- and I- tags into entity spans\n", " grouped_by_tag = []\n", " current_entity = None\n", "\n", " for res in ner_results:\n", " if res['score'] < score_threshold:\n", " continue\n", "\n", " tag = res['entity']\n", " word = res['word'].replace('Ġ', ' ').strip()\n", "\n", " if tag.startswith(\"B-\"):\n", " if current_entity:\n", " grouped_by_tag.append(current_entity)\n", " current_entity = {\"entity_group\": tag[2:], \"value\": word}\n", "\n", " elif tag.startswith(\"I-\") and current_entity and tag[2:] == current_entity[\"entity_group\"]:\n", " # A leading 'Ġ' marks a word boundary in RoBERTa's BPE vocabulary\n", " if res['word'].startswith('Ġ'):\n", " current_entity[\"value\"] += \" \" + word\n", " else:\n", " current_entity[\"value\"] += word\n", " else:\n", " if current_entity:\n", " grouped_by_tag.append(current_entity)\n", " current_entity = None\n", "\n", " if current_entity:\n", " grouped_by_tag.append(current_entity)\n", "\n", " # Step 2: Aggregate fragments and build the final JSON\n", " aggregated_entities = defaultdict(list)\n", "\n", " for entity in grouped_by_tag:\n", " key = entity['entity_group'].lower()\n", " value = entity['value'].strip().lstrip(':').strip()\n", "\n", " junk_words = [\n", " \"invoice\", \"no\", \"date\", \"bill\", \"to\", \"ship\", \"ph\", \"gstin\",\n", " \"description\", \"hsn\", \"qty\", \"rate\", \"amount\", \"total\",\n", " \"subtotal\", \"#\", \"@\", \"%\", \":\", \"model\"\n", " ]\n", "\n", " if value.lower().strip(': ') in junk_words:\n", " continue\n", "\n", " aggregated_entities[key].append(value)\n", "\n", " final_json = {}\n", " item_lists = {k: v for k, v in aggregated_entities.items() if k.startswith(\"item_\")}\n", "\n", " # Add non-item fields\n", " for key, values in aggregated_entities.items():\n", " if not key.startswith(\"item_\"):\n", " final_json[key] = \" \".join(values)\n", "\n", " # Reconstruct line items by position: the i-th desc/qty/price are assumed to\n", " # belong together, which breaks when the model misses a field on some row.\n", " num_items = 0\n", " if item_lists:\n", " num_items = max(len(v) for v in item_lists.values())\n", "\n", " items = []\n", " for i in range(num_items):\n", " item = {}\n", " for key, values in item_lists.items():\n", " if i < len(values):\n", " item_key = key.replace(\"item_\", \"\")\n", " item[item_key] = values[i]\n", " if item:\n", " items.append(item)\n", "\n", " if items:\n", " final_json['items'] = items\n", "\n", " return final_json\n", "\n", "\n", "# ===================================================================\n", "# 3. MAIN INFERENCE PIPELINE\n", "# ===================================================================\n", "def main():\n", " config = InferenceConfig()\n", " device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "\n", " # --- Step 1: Initialize OCR and Model ---\n", " print(\"Initializing PaddleOCR engine...\")\n", " ocr_engine = PaddleOCR(use_angle_cls=True, lang='en')\n", "\n", " print(\"Loading fine-tuned Layout-Aware NER model...\")\n", " tokenizer = AutoTokenizer.from_pretrained(config.MODEL_PATH)\n", " model = LayoutRobertaForTokenClassification.from_pretrained(config.MODEL_PATH)\n", " model.to(device)\n", " model.eval()\n", "\n", " # --- Step 2: Run OCR on the new image ---\n", " print(\"\\n--- RUNNING OCR ---\")\n", " words, bboxes, (img_w, img_h) = run_ocr_on_image(config.IMAGE_PATH, ocr_engine)\n", "\n", " if not words:\n", " print(\"Inference aborted as no text was found in the image.\")\n", " return\n", "\n", " print(f\"OCR detected {len(words)} words\")\n", "\n", " # --- Step 3: Preprocess data for the model ---\n", " tokenized_inputs = tokenizer(\n", " words,\n", " is_split_into_words=True,\n", " return_tensors=\"pt\",\n", " padding=\"max_length\",\n", " truncation=True,\n", " max_length=512\n", " )\n", "\n", " # Align one bbox per token: special tokens get a null box, and every\n", " # subword inherits the box of the word it came from.\n", " aligned_bboxes = []\n", " word_ids = tokenized_inputs.word_ids()\n", "\n", " for word_idx in word_ids:\n", " if word_idx is None:\n", " aligned_bboxes.append([0, 0, 0, 0])\n", " else:\n", " aligned_bboxes.append(bboxes[word_idx])\n", "\n", " # --- Step 4: Run model prediction ---\n", " print(\"\\n--- RUNNING MODEL PREDICTION ---\")\n", " inputs = {\n", " \"input_ids\": tokenized_inputs[\"input_ids\"].to(device),\n", " \"attention_mask\": tokenized_inputs[\"attention_mask\"].to(device),\n", " \"bbox\": torch.tensor([aligned_bboxes]).to(device),\n", " \"return_dict\": True # Force return_dict\n", " }\n", "\n", " with torch.no_grad():\n", " outputs = model(**inputs)\n", " # Handle both tuple and dict outputs\n", " if isinstance(outputs, tuple):\n", " logits = outputs[0]\n", " else:\n", " logits = outputs.logits\n", " predictions = torch.argmax(logits, dim=2)\n", "\n",
" # --- Step 5: Decode and Post-process the results ---\n", " print(\"\\n--- PARSING ENTITIES ---\")\n", " preds = predictions[0].cpu().tolist()\n", " tokens = tokenizer.convert_ids_to_tokens(tokenized_inputs[\"input_ids\"][0])\n", "\n", " raw_results = []\n", " for token, pred_id in zip(tokens, preds):\n", " label = model.config.id2label[pred_id]\n", " if label != \"O\":\n", " raw_results.append({\n", " \"entity\": label,\n", " \"word\": token,\n", " \"score\": 1.0 # Confidence score (could extract from logits if needed)\n", " })\n", "\n", " print(f\"Found {len(raw_results)} entity predictions\")\n", "\n", " # Post-process and structure the results\n", " final_json = group_and_clean_entities(raw_results, score_threshold=config.SCORE_THRESHOLD)\n", "\n", " print(\"\\n\" + \"=\"*60)\n", " print(\"--- FINAL EXTRACTED INVOICE DATA ---\")\n", " print(\"=\"*60)\n", " print(json.dumps(final_json, indent=2))\n", " print(\"=\"*60)\n", " print(\"\\n✓ Inference complete!\")\n", "\n", "if __name__ == '__main__':\n", " main()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "VuRchHy9DGcS", "outputId": "8a250e8f-21be-4724-97d9-8fc299b9147b" }, "execution_count": 7, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Initializing PaddleOCR engine...\n", "[2025/11/04 07:02:33] ppocr WARNING: The first GPU is used for inference by default, GPU ID: 0\n", "[2025/11/04 07:02:37] ppocr WARNING: The first GPU is used for inference by default, GPU ID: 0\n", "[2025/11/04 07:02:41] ppocr WARNING: The first GPU is used for inference by default, GPU ID: 0\n", "Loading fine-tuned Layout-Aware NER model...\n", "\n", "--- RUNNING OCR ---\n", "OCR detected 69 words\n", "\n", "--- RUNNING MODEL PREDICTION ---\n", "\n", "--- PARSING ENTITIES ---\n", "Found 108 entity predictions\n", "\n", "============================================================\n", "--- FINAL EXTRACTED INVOICE DATA ---\n", "============================================================\n", "{\n", " \"seller_name\": \"NEW FREEZE LAND Club AHMED \",\n", " \"client_name\": \"MANISHIKA\",\n", " \"inv_no\": \"18 / 0000 15 24 / 76\",\n", " \"client_phone\": \" 76\",\n", " \"date\": \"25 / 8 / 17\",\n", " \"subtotal\": \"257\",\n", " \"total\": \"303\",\n", " \"client_tax_id\": \"GST IN - 24 ABC FM 73 13 D 1 Z Q\",\n", " \"items\": [\n", " {\n", " \"hsn\": \"15\",\n", " \"qty\": \"1\",\n", " \"price\": \"151\",\n", " \"desc\": \"Tortas\"\n", " },\n", " {\n", " \"qty\": \"00\",\n", " \"price\": \"151\",\n", " \"desc\": \"Tortas\"\n", " },\n", " {\n", " \"qty\": \"00\",\n", " \"price\": \"53\"\n", " },\n", " {\n", " \"qty\": \"1\",\n", " \"price\": \"53\"\n", " },\n", " {\n", " \"qty\": \"00\",\n", " \"price\": \"53\"\n", " },\n", " {\n", " \"qty\": \"00\",\n", " \"price\": \"53\"\n", " },\n", " {\n", " \"qty\": \"1\"\n", " },\n", " {\n", " \"qty\": \"00\"\n", " },\n", " {\n", " \"qty\": \"00\"\n", " }\n", " ]\n", "}\n", "============================================================\n", "\n", "✓ Inference complete!\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "o3oX3W08D58s" }, "execution_count": null, "outputs": [] } ] }