{ "cells": [ { "cell_type": "code", "execution_count": 10, "id": "db9062a2", "metadata": {}, "outputs": [], "source": [ "def training_prompt(medical_text, subclaim, label):\n", " \"\"\"\n", " Constructs an SFT prompt with grounding constraints to minimize hallucinations.\n", " \"\"\"\n", " \n", " # The system prompt sets strict 'Closed-World' assumptions.\n", " system_prompt = (\n", " \"You are a clinical evidence auditor. Your evaluation must be based \"\n", " \"STRICTLY and ONLY on the provided medical text. Do not use outside \"\n", " \"medical knowledge or assume facts not explicitly stated. If the text \"\n", " \"does not provide enough information to confirm the claim, you must \"\n", " \"mark it as 'not_supported'.\"\n", " )\n", " \n", " user_content = f\"\"\"EVALUATION TASK:\n", "1. Read the Medical Text.\n", "2. Verify the Subclaim.\n", "3. If the evidence is missing, ambiguous, or unconfirmed in the text, label it 'not_supported'.\n", "\n", "### Medical Text:\n", "{medical_text}\n", "\n", "### Subclaim:\n", "{subclaim}\n", "\n", "Output exactly one word ('supported' or 'not_supported'):\"\"\"\n", "\n", " conversation = {\n", " \"conversations\": [\n", " {\"from\": \"user\", \"content\": f\"{system_prompt}\\n\\n{user_content}\"},\n", " {\"from\": \"assistant\", \"content\": label}\n", " ]\n", " }\n", " \n", " return conversation" ] }, { "cell_type": "code", "execution_count": 11, "id": "50172521", "metadata": {}, "outputs": [], "source": [ "import os\n", "import json\n", "# /home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2.json\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/train_subclaim_support_v2.json\", \"r\") as f:\n", " data = json.load(f)\n", "\n", "prompts = []\n", "for item in train_data:\n", " prompt = training_prompt(\n", " medical_text=item['medical_text'],\n", " subclaim=item['subclaim'],\n", " label=item['label']\n", " )\n", " prompts.append(prompt)\n", "output_path = \"/home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2_sft_prompt.json\"\n", "with open(output_path, \"w\") as f:\n", " json.dump(prompts, f, indent=2)" ] }, { "cell_type": "code", "execution_count": null, "id": "33ca269d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train data size: 90, Test data size: 10\n", "2160\n" ] } ], "source": [ "import os\n", "import json\n", "# /home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2.json\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2.json\", \"r\") as f:\n", " data = json.load(f)\n", "train_test_split=0.9\n", "split_index=int(len(data)*train_test_split)\n", "train_data=data[:split_index]\n", "test_data=data[split_index:]\n", "print(f\"Train data size: {len(train_data)}, Test data size: {len(test_data)}\")\n", "# assert False, \"Stop here\"\n", "prompts = []\n", "for item in train_data:\n", " # import ipdb; ipdb.set_trace()\n", " sub_item=item['items'][0]\n", " for label in ['easy','intermediate','hard']:\n", " for subclaim in sub_item['subclaims']:\n", " prompt = training_prompt(\n", " medical_text=sub_item[f'{label}_text'],\n", " subclaim=subclaim['subclaim'],\n", " label=subclaim['label']\n", " )\n", " prompts.append(prompt)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "e478818f", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 6, "id": "aaa8f81a", "metadata": {}, "outputs": [], "source": [ "import 
os\n", "import json\n", "# /home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2.json\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2.json\", \"r\") as f:\n", " data = json.load(f)\n", "# train_test_split=0.9\n", "# split_index=int(len(data)*train_test_split)\n", "# train_data=data[:split_index]\n", "# test_data=data[split_index:]\n", "# print(f\"Train data size: {len(train_data)}, Test data size: {len(test_data)}\")\n", "# assert False, \"Stop here\"\n", "direct = []\n", "for item in data:\n", " # import ipdb; ipdb.set_trace()\n", " sub_item=item['items'][0]\n", " for label in ['easy','intermediate','hard']:\n", " for subclaim in sub_item['subclaims']:\n", " direct.append({\n", " \"medical_text\":sub_item[f'{label}_text'],\n", " \"subclaim\":subclaim['subclaim'],\n", " \"label\":subclaim['label']\n", " })\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/processed_finetune_dataset_subclaim_support_v2.json\", \"w\") as f:\n", " json.dump(direct, f, ensure_ascii=False, indent=4)" ] }, { "cell_type": "code", "execution_count": 14, "id": "78edcc06", "metadata": {}, "outputs": [], "source": [ "# save\n", "output_path = \"/home/mshahidul/readctrl/data/finetuning_data/finetune_dataset_subclaim_support_v2_sft_prompt.json\"\n", "with open(output_path, \"w\") as f:\n", " json.dump(prompts, f, indent=2)" ] }, { "cell_type": "code", "execution_count": 9, "id": "36b396bb", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train label distribution:\n", "{'supported': 960, 'not_supported': 960}\n", "Test label distribution:\n", "{'supported': 240, 'not_supported': 240}\n" ] } ], "source": [ "from sklearn.model_selection import train_test_split\n", "\n", "# Assume data is your list of dicts\n", "data = direct\n", "\n", "labels = [d[\"label\"] for d in data]\n", "\n", "train_data, test_data = train_test_split(\n", " data,\n", " test_size=0.2,\n", " random_state=42,\n", " stratify=labels\n", ")\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/train_subclaim_support_v2.json\",\"w\") as f:\n", " json.dump(train_data, f, indent=2)\n", "\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/test_subclaim_support_v2.json\",\"w\") as f:\n", " json.dump(test_data, f, indent=2)\n", "\n", "print(\"Train label distribution:\")\n", "print({l: sum(d[\"label\"] == l for d in train_data) for l in set(labels)})\n", "\n", "print(\"Test label distribution:\")\n", "print({l: sum(d[\"label\"] == l for d in test_data) for l in set(labels)})\n" ] }, { "cell_type": "code", "execution_count": 16, "id": "01033a5f", "metadata": {}, "outputs": [], "source": [ "testing_data_v2 = []\n", "for item in test_data:\n", " sub_item=item['items'][0]\n", " for label in ['easy','intermediate','hard']:\n", " for subclaim in sub_item['subclaims']:\n", " testing_data_v2.append({\n", " \"medical_text\": sub_item[f'{label}_text'],\n", " \"subclaim\": subclaim['subclaim'],\n", " \"label\": subclaim['label']\n", " })\n", "with open(\"/home/mshahidul/readctrl/data/finetuning_data/test_dataset_subclaim_support_v2.json\", \"w\") as f:\n", " json.dump(testing_data_v2, f, indent=2)\n", " " ] }, { "cell_type": "code", "execution_count": 3, "id": "74d71220", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/mshahidul/miniconda3/envs/un/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
 { "cell_type": "code", "execution_count": 3, "id": "74d71220", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/mshahidul/miniconda3/envs/un/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", "  from .autonotebook import tqdm as notebook_tqdm\n" ] } ], "source": [
 "# Inspect a public SFT dataset's conversation format for comparison with ours.\n",
 "from datasets import load_dataset\n",
 "dataset = load_dataset(\"HuggingFaceH4/Multilingual-Thinking\", split=\"train\")"
 ] },
 { "cell_type": "code", "execution_count": 5, "id": "6d6a6b9a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'content': 'reasoning language: French\\n\\nYou are an AI chatbot with a lively and energetic personality.',\n", "  'role': 'system',\n", "  'thinking': None},\n", " {'content': 'Can you show me the latest trends on Twitter right now?',\n", "  'role': 'user',\n", "  'thinking': None},\n", " {'content': 'Hey there! While I can\\'t check Twitter (X) in real-time or access live data, I can share some tips to help you spot the latest trends:\\n\\n1. **Open the \"Trending\" tab** on the Twitter app or website – it updates constantly! \\n2. **Search for hashtags** like #Trending or #Viral to see what’s blowing up. \\n3. **Follow accounts** that curate trends (e.g., @TrendingNow, @ViralThreads). \\n4. **Check regional trends** – they often differ by location! \\n\\nRemember, trends are *super fast-moving* and often tied to pop culture, memes, or breaking news. For example, recent trends have included viral challenges (like the \"Distracted Boyfriend\" meme revival), celebrity drama, or unexpected events (hello, weather disasters!). \\n\\nWant me to brainstorm *what* might trend next? I’ve got ideas!',\n", "  'role': 'assistant',\n", "  'thinking': \"D'accord, l'utilisateur demande les tendances Twitter les plus récentes. Tout d'abord, je dois vérifier si j'ai accès à des données en temps réel. Étant donné que je ne peux pas naviguer sur Internet ou accéder directement à l'API de Twitter, je ne peux pas fournir des tendances en direct. Cependant, je peux donner quelques conseils généraux sur la façon de les trouver.\\n\\nJe devrais préciser que les tendances Twitter évoluent rapidement et sont spécifiques à chaque région. Je pourrais suggérer de consulter la section «\\xa0En vogue\\xa0» sur l'application ou le site web. Aussi, l'utilisation de hashtags et le suivi d'utilisateurs pertinents pourraient être utiles. Il est important de souligner que les tendances varient selon la région et l'heure de la journée. Je devrais garder un ton amical et bienveillant, peut-être ajouter un emoji pour rester léger. Je vais structurer ma réponse étape par étape pour faciliter la lecture. Je dois m'excuser de ne pas pouvoir fournir des données en temps réel et proposer d'autres méthodes. Je conserverai un langage simple et convivial, en évitant les termes techniques.\"}]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]['messages']" ] }
 ], "metadata": { "kernelspec": { "display_name": "un", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.14" } }, "nbformat": 4, "nbformat_minor": 5 }