{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "markdown", "source": [ "# Batch Processing for Multiple-Choice Questions" ], "metadata": { "id": "bjbqHUTuNFyd" } }, { "cell_type": "markdown", "source": [ "## 1. Install Litelines" ], "metadata": { "id": "B8jIr1KAKQWF" } }, { "cell_type": "code", "execution_count": 1, "metadata": { "id": "cvgtiC0Kcl-z" }, "outputs": [], "source": [ "%pip install --quiet --upgrade litelines" ] }, { "cell_type": "markdown", "source": [ "## 2. Download a model and its tokenizer" ], "metadata": { "id": "-dLJS16ZNy97" } }, { "cell_type": "code", "source": [ "# Use cuda for faster inference\n", "import torch\n", "\n", "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", "assert device == torch.device(\"cuda\"), \"In the Runtime tab, please Change runtime type to GPU\"" ], "metadata": { "id": "68zKdmOQcqLo" }, "execution_count": 2, "outputs": [] }, { "cell_type": "code", "source": [ "from transformers import AutoModelForCausalLM, AutoTokenizer\n", "\n", "MODEL_ID = \"Qwen/Qwen2.5-0.5B-Instruct\"\n", "tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)\n", "model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)" ], "metadata": { "id": "U-a-w5jwc6Xg" }, "execution_count": 3, "outputs": [] }, { "cell_type": "markdown", "source": [ "## 3. 
Prepare the inputs to the LLM" ], "metadata": { "id": "ohUnAx9NJ7AV" } }, { "cell_type": "code", "source": [ "user_inputs = [\"What is the capital of France?\", \"What is the capital of Italy?\", \"What is the capital of Spain?\"]\n", "alternatives = \"\"\"\n", "A) Madrid\n", "B) Paris\n", "C) Rome\n", "\"\"\"\n", "messages = [[{\"role\": \"user\", \"content\": user_input + alternatives}] for user_input in user_inputs]\n", "formatted_messages = [\n", " tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in messages\n", "]\n", "# Decoder-only models must be LEFT-padded for batched generation: with the\n", "# default right padding, shorter prompts would have pad tokens between the\n", "# prompt and the generated text, degrading the continuation.\n", "tokenizer.padding_side = \"left\"\n", "inputs = tokenizer(\n", " formatted_messages,\n", " return_tensors=\"pt\",\n", " padding=True,\n", " truncation=True\n", ").to(device)\n", "# With left padding every row of input_ids has the same (padded) length,\n", "# so this single offset marks where generation starts for every batch item.\n", "prompt_length = inputs['input_ids'].shape[-1]" ], "metadata": { "id": "6YLLQze67KV1" }, "execution_count": 4, "outputs": [] }, { "cell_type": "markdown", "source": [ "## 4. Define a processor through a regular expression" ], "metadata": { "id": "9LaJrh0PKkcn" } }, { "cell_type": "code", "source": [ "from litelines.transformers import SchemaProcessor\n", "\n", "processor = SchemaProcessor(response_format=r\"A\\.|B\\.|C\\.\", tokenizer=tokenizer)\n", "processor.show_graph()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331 }, "id": "sVx6IzInKHoL", "outputId": "9c3bf54f-95a5-4b7b-e870-dd7a3d2e45dd" }, "execution_count": 5, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "" ], "image/svg+xml": "\n\n%3\n\nAllowed Paths\nRegular expression: A\\.|B\\.|C\\.\n\n\n0\n\n0\n\n\n\n1\n\n1\n\n\n\n0->1\n\n\n\n\nid\n\n\ntoken\n\n34\n\nC\n\n\n\n2\n\n2\n\n\n\n0->2\n\n\n\n\nid\n\n\ntoken\n\n33\n\nB\n\n\n\n3\n\n3\n\n\n\n0->3\n\n\n\n\nid\n\n\ntoken\n\n32\n\nA\n\n\n\n4\n\n\n4\n\n\n\n1->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n2->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n3->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n\n\n\n->0\n\n\n\n\n" }, "metadata": {} } ] }, { "cell_type": "markdown", "source": [ "## 5. 
Generate a structured response" ], "metadata": { "id": "UUkc7wXcOQlN" } }, { "cell_type": "code", "source": [ "# Greedy decoding: the logits processor already constrains generation to the\n", "# regular expression, so sampling adds nothing. Note that passing\n", "# `temperature` while `do_sample=False` (the default) has no effect and only\n", "# triggers a transformers warning, so we make greedy decoding explicit.\n", "outputs = model.generate(\n", " inputs.input_ids,\n", " attention_mask=inputs.attention_mask,\n", " pad_token_id=tokenizer.eos_token_id,\n", " logits_processor=[processor],\n", " do_sample=False,\n", " max_new_tokens=8\n", ")\n", "# Drop the prompt tokens so only the newly generated answer is decoded\n", "generated_texts = tokenizer.batch_decode([output[prompt_length:] for output in outputs], skip_special_tokens=True)\n", "formatted_strings = [f\"Question: {user_inputs[i]+alternatives}\\n\\x1b[34mResponse:\\x1b[0m \\x1b[31m{generated_texts[i]}\\x1b[0m\" for i in range(len(user_inputs))]\n", "for string in formatted_strings:\n", " print(string)\n", " print()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "h9dLX3Xl_9dy", "outputId": "567cf76e-179c-463c-abb2-ff8522237f3f" }, "execution_count": 6, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Question: What is the capital of France?\n", "A) Madrid\n", "B) Paris\n", "C) Rome\n", "\n", "\u001b[34mResponse:\u001b[0m \u001b[31mB.\u001b[0m\n", "\n", "Question: What is the capital of Italy?\n", "A) Madrid\n", "B) Paris\n", "C) Rome\n", "\n", "\u001b[34mResponse:\u001b[0m \u001b[31mC.\u001b[0m\n", "\n", "Question: What is the capital of Spain?\n", "A) Madrid\n", "B) Paris\n", "C) Rome\n", "\n", "\u001b[34mResponse:\u001b[0m \u001b[31mA.\u001b[0m\n", "\n" ] } ] }, { "cell_type": "markdown", "source": [ "## 6. 
Visualize the selected paths" ], "metadata": { "id": "7ANqIJQ_M9fH" } }, { "cell_type": "code", "source": [ "processor.show_graph(batch_number=0)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331 }, "id": "Pqo5AVexMnJ6", "outputId": "116ec551-7bf9-4cd8-ec7a-d393fad7392f" }, "execution_count": 7, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "" ], "image/svg+xml": "\n\n%3\n\nAllowed Paths\nRegular expression: A\\.|B\\.|C\\.\n\n\n0\n\n0\n\n\n\n1\n\n1\n\n\n\n0->1\n\n\n\n\nid\n\n\ntoken\n\n34\n\nC\n\n\n\n2\n\n2\n\n\n\n0->2\n\n\n\n\nid\n\n\ntoken\n\n33\n\nB\n\n\n\n3\n\n3\n\n\n\n0->3\n\n\n\n\nid\n\n\ntoken\n\n32\n\nA\n\n\n\n4\n\n\n4\n\n\n\n1->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n2->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n3->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n\n\n\n->0\n\n\n\n\n" }, "metadata": {} } ] }, { "cell_type": "code", "source": [ "processor.show_graph(batch_number=1)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331 }, "id": "sDvGUSXZAaC0", "outputId": "ca2c16e0-7f96-4fbd-ef3f-570775394d93" }, "execution_count": 8, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "" ], "image/svg+xml": "\n\n%3\n\nAllowed Paths\nRegular expression: A\\.|B\\.|C\\.\n\n\n0\n\n0\n\n\n\n1\n\n1\n\n\n\n0->1\n\n\n\n\nid\n\n\ntoken\n\n34\n\nC\n\n\n\n2\n\n2\n\n\n\n0->2\n\n\n\n\nid\n\n\ntoken\n\n33\n\nB\n\n\n\n3\n\n3\n\n\n\n0->3\n\n\n\n\nid\n\n\ntoken\n\n32\n\nA\n\n\n\n4\n\n\n4\n\n\n\n1->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n2->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n3->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n\n\n\n->0\n\n\n\n\n" }, "metadata": {} } ] }, { "cell_type": "code", "source": [ "processor.show_graph(batch_number=2)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331 }, "id": "DDyE3bb2AT6V", "outputId": "5b7243ec-a902-4cc8-c45e-e840a27094f0" }, "execution_count": 9, "outputs": [ { "output_type": "display_data", 
"data": { "text/plain": [ "" ], "image/svg+xml": "\n\n%3\n\nAllowed Paths\nRegular expression: A\\.|B\\.|C\\.\n\n\n0\n\n0\n\n\n\n1\n\n1\n\n\n\n0->1\n\n\n\n\nid\n\n\ntoken\n\n34\n\nC\n\n\n\n2\n\n2\n\n\n\n0->2\n\n\n\n\nid\n\n\ntoken\n\n33\n\nB\n\n\n\n3\n\n3\n\n\n\n0->3\n\n\n\n\nid\n\n\ntoken\n\n32\n\nA\n\n\n\n4\n\n\n4\n\n\n\n1->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n2->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n3->4\n\n\n\n\nid\n\n\ntoken\n\n13\n\n.\n\n\n\n\n\n\n->0\n\n\n\n\n" }, "metadata": {} } ] } ] }