{ "cells": [ { "cell_type": "code", "execution_count": 1492, "id": "5ff255f7-7ecf-409c-b45e-2b0ee45308ff", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "from tqdm.notebook import tqdm\n", "from statistics import mean\n", "import string" ] }, { "cell_type": "markdown", "id": "b52cc255-409c-4f4b-9d86-e95bdc887da2", "metadata": {}, "source": [ "# Load data" ] }, { "cell_type": "code", "execution_count": null, "id": "c4125362-08c9-454b-b3f0-eb991c94359a", "metadata": {}, "outputs": [], "source": [ "res = pd.read_excel(\"result-here\")\n", "res.head()" ] }, { "cell_type": "code", "execution_count": 1724, "id": "651aec9e-1141-4818-9a6a-60c7b4c17df2", "metadata": {}, "outputs": [], "source": [ "answers = res[\"Answer\"].tolist()\n", "labels = res[\"Label\"].tolist()\n", "n_thought = res[\"n_CoT\"].tolist()" ] }, { "cell_type": "markdown", "id": "c2cc7168-91ab-4942-b178-80c69d9d71f7", "metadata": {}, "source": [ "# Evaluate" ] }, { "cell_type": "markdown", "id": "b439a16d-6401-418e-9f12-63d692c06b31", "metadata": {}, "source": [ "## F1_score" ] }, { "cell_type": "code", "execution_count": 1725, "id": "8a88165d-ae20-47d1-b6ef-f6c3b2013993", "metadata": {}, "outputs": [], "source": [ "def precision(answer, label):\n", " answer_tokens = set(answer.lower().split())\n", " label_tokens = set(label.lower().split())\n", "\n", " intersection = answer_tokens & label_tokens\n", " precision = len(intersection) / len(answer_tokens) if len(answer_tokens) > 0 else 0\n", "\n", " return precision\n", "\n", "def recall(answer, label):\n", " answer_tokens = set(answer.lower().split())\n", " label_tokens = set(label.lower().split())\n", "\n", " intersection = answer_tokens & label_tokens\n", " recall = len(intersection) / len(label_tokens) if len(label_tokens) > 0 else 0\n", "\n", " return recall\n", "\n", "def f1(answer, label):\n", " prec = precision(answer, label)\n", " rec = recall(answer, label)\n", "\n", " if prec == 0 and rec == 0: return 0\n", " return 2*prec*rec/(prec+rec)\n", "\n", "def evaluate_f1(answers, labels):\n", " avg_f1 = []\n", " for i in range(len(answers)):\n", " f1_score = f1(answers[i], labels[i])\n", " avg_f1.append(f1_score)\n", " return mean(avg_f1)" ] }, { "cell_type": "code", "execution_count": null, "id": "a82a603f-480f-419c-bcbd-408578ee4bc5", "metadata": {}, "outputs": [], "source": [ "p, r = [], []\n", "for i in range(len(answers)):\n", " ans, lab = answers[i], labels[i]\n", " p.append(precision(ans, lab))\n", " r.append(recall(ans, lab))\n", "print(f\"Precision: {mean(p)}\")\n", "print(f\"Recall: {mean(r)}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "1a48ff64-2514-45f0-91e2-8698cf12e623", "metadata": {}, "outputs": [], "source": [ "evaluate_f1(answers, labels)" ] }, { "cell_type": "code", "execution_count": null, "id": "b63bfe40-35ac-440d-8a07-8cd11f0e8abf", "metadata": {}, "outputs": [], "source": [ "mean(n_thought)" ] }, { "cell_type": "markdown", "id": "a6f5e95f-2622-4e0f-9088-5ab5b66583ce", "metadata": {}, "source": [ "## GPT Score\n" ] }, { "cell_type": "markdown", "id": "0c76b795-e4ff-4f3f-9707-dc690aafc75f", "metadata": {}, "source": [ "### Initialize LLM" ] }, { "cell_type": "code", "execution_count": 1730, "id": "2a131281-51d9-41b6-ad30-309454d93a8e", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer\n", "from langchain_community.llms import VLLMOpenAI\n", "from langchain_openai import ChatOpenAI" ] }, { "cell_type": "code", "execution_count": 1731, "id": "fd700efa-2e90-49e1-934c-e14e9f7357b4", "metadata": {}, "outputs": [], "source": [ "inference_server_url = \"your_inference_server_url\"\n", "tokenizer = AutoTokenizer.from_pretrained(\"Qwen2.5-7B-Instruct\")\n", "\n", "### For Chat OpenAI template\n", "llm = ChatOpenAI(\n", " model=\"Qwen2.5-7B-Instruct\",\n", " openai_api_key=\"test\",\n", " openai_api_base=inference_server_url,\n", " temperature=0,\n", " max_tokens=100,\n", " streaming= False\n", ")" ] }, { "cell_type": "markdown", "id": "97fba228-9846-486f-8d3f-8648afd42b27", "metadata": {}, "source": [ "### Metrics implementation" ] }, { "cell_type": "code", "execution_count": 1733, "id": "5f6dc8cd-9d93-4a05-a9ed-ec40d74b3097", "metadata": {}, "outputs": [], "source": [ "class Correctness(BaseModel):\n", " \"\"\"Correctness score ranges from 1-5 to evaluate whether the generated answer aligns with the reference answer\"\"\"\n", " correctness_score: int = Field(\n", " description=\"The correctness of generated answer compares to reference, score ranges from 1-5\"\n", " )\n", " \n", "class Faithfulness(BaseModel):\n", " \"\"\"Faithfulness score ranges from 1-5 to check whether the generated answer remains true to the given context\"\"\"\n", " faithfulness_score: int = Field(\n", " description=\"The generated answer remains true to the given context, score ranges from 1-5\"\n", " )\n", "\n", "class Relevancy(BaseModel):\n", " \"\"\"Relevancy score ranges from 1-5 to check whether the retrieved context and the generated answer relevant to the query\"\"\"\n", " relevancy_score: int = Field(\n", " description=\"The retrieved context and the generated answer relevant to the query, score ranges from 1-5\"\n", " )\n" ] }, { "cell_type": "code", "execution_count": 1734, "id": "da8d2984-2d1c-473e-8e8e-13e5dda6c6aa", "metadata": {}, "outputs": [], "source": [ "def correctness_evaluation(query, answer, label):\n", " system_prompt = (\n", " \"You are a judge. Your task is to evaluate whether the provided answer aligns with the label, given the query, \"\n", " \"by assigning a score strictly based on the following rubric (score must be 1, 2, 3, 4, or 5):\\n\\n\"\n", " \"Score Rubric:\\n\"\n", " \"1: If the generated answer is not relevant to the user query and reference label.\\n\"\n", " \"2: If the generated answer aligns with the reference label but is not relevant to the user query.\\n\"\n", " \"3: If the generated answer is relevant to the user query and reference label but contains mistakes.\\n\"\n", " \"4: If the generated answer is relevant to the user query and has the exact same metrics as the reference label, \"\n", " \"but it is not as concise.\\n\"\n", " \"5: If the generated answer is relevant to the user query and fully correct according to the reference label.\\n\\n\"\n", " \"Important Notes:\\n\"\n", " \"- Only evaluate based on commonalities between the answer and the label.\\n\"\n", " \"- Do not penalize for elements present in the label but missing in the answer.\\n\"\n", " \"\\n\"\n", " \"Only return the score (1, 2, 3, 4, or 5). Do not generate any other text, such as explanations or openings/closings.\"\n", " )\n", " chat_template_contextual = tokenizer.apply_chat_template(\n", " [\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": f\"\\nQuery: {query}\\nAnswer: {answer}\\nLabel: {label}\"}\n", " ],\n", " tokenize=False,\n", " add_generation_prompt=True\n", " )\n", " prompt_gen_answer = PromptTemplate(\n", " template=chat_template_contextual, \n", " input_variables=[\"system_prompt\", \"query\", \"answer\", \"label\"]\n", " )\n", " \n", " structured_check_content = llm.with_structured_output(Correctness)\n", " chain_gen_answer = prompt_gen_answer | structured_check_content\n", " final_score = chain_gen_answer.invoke({\n", " \"system_prompt\": system_prompt, \n", " \"query\": query, \n", " \"answer\": answer, \n", " \"label\": label\n", " }).correctness_score\n", " \n", " return final_score\n", "\n", "def faithfulness_evaluation(answer, context):\n", " system_prompt = (\n", " \"You are a judge. Your task is to evaluate whether the provided answer remains true and faithful \"\n", " \"to the given context by assigning a score strictly based on the following rubric:\\n\\n\"\n", " \"Score Rubric:\\n\"\n", " \"- Score 1: The answer is completely unfaithful and contradicts the context.\\n\"\n", " \"- Score 2: The answer contains mostly false information or is unsupported by the context, with only minor overlaps.\\n\"\n", " \"- Score 3: The answer is partially faithful, with some alignment to the context but contains notable inaccuracies.\\n\"\n", " \"- Score 4: The answer is mostly faithful to the context but may have minor inaccuracies or omissions.\\n\"\n", " \"- Score 5: The answer is completely faithful and aligns fully with the context.\\n\\n\"\n", " \"Important Notes:\\n\"\n", " \"- Only evaluate based on common elements between the answer and the context.\\n\"\n", " \"- Do not penalize the answer for missing elements that are present in the context but not in the answer.\\n\\n\"\n", " \"Only return the score (1, 2, 3, 4, or 5). Do not generate any additional text, such as explanations or openings/closings.\"\n", " )\n", " chat_template_contextual = tokenizer.apply_chat_template(\n", " [\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": f\"Answer: {answer}\\nContext: {context}\"}\n", " ],\n", " tokenize=False,\n", " add_generation_prompt=True\n", " )\n", " prompt_gen_answer = PromptTemplate(\n", " template=chat_template_contextual,\n", " input_variables=[\"system_prompt\", \"answer\", \"context\"]\n", " )\n", " structured_check_content = llm.with_structured_output(Faithfulness)\n", " chain_gen_answer = prompt_gen_answer | structured_check_content\n", " evaluation_score = chain_gen_answer.invoke({\n", " \"system_prompt\": system_prompt,\n", " \"answer\": answer,\n", " \"context\": context\n", " }).faithfulness_score\n", " \n", " return evaluation_score\n", "\n", "\n", "\n", "def relevancy_score(query, context, answer):\n", " system_prompt = (\n", " \"You are a judge. Your task is to evaluate the relevance of the retrieved context and the generated answer \"\n", " \"to the given query. Your evaluation must strictly follow the score rubric below:\\n\\n\"\n", " \"Score Rubric:\\n\"\n", " \"- Score 1: Both the retrieved context and generated answer are completely irrelevant to the query.\\n\"\n", " \"- Score 2: The retrieved context is somewhat related, but the generated answer is irrelevant to the query.\\n\"\n", " \"- Score 3: Both the retrieved context and generated answer are somewhat relevant to the query, but not precise.\\n\"\n", " \"- Score 4: The retrieved context and generated answer are mostly relevant to the query, with minor inaccuracies.\\n\"\n", " \"- Score 5: Both the retrieved context and generated answer are fully relevant and precisely aligned with the query.\\n\\n\"\n", " \"Important Notes:\\n\"\n", " \"- Only return the score (1, 2, 3, 4, or 5). Do not provide any additional text such as explanations, openings, or closings.\"\n", " )\n", " chat_template_contextual = tokenizer.apply_chat_template(\n", " [\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": f\"Query: {query}\\nContext: {context}\\nAnswer: {answer}\"}\n", " ],\n", " tokenize=False,\n", " add_generation_prompt=True\n", " )\n", " prompt_gen_answer = PromptTemplate(\n", " template=chat_template_contextual,\n", " input_variables=[\"system_prompt\", \"query\", \"context\", \"answer\"]\n", " )\n", " structured_check_content = llm.with_structured_output(Relevancy)\n", " chain_gen_answer = prompt_gen_answer | structured_check_content\n", " relevancy_result = chain_gen_answer.invoke({\n", " \"system_prompt\": system_prompt,\n", " \"query\": query,\n", " \"context\": context,\n", " \"answer\": answer\n", " }).relevancy_score\n", " \n", " return relevancy_result\n" ] }, { "cell_type": "markdown", "id": "7926b5c1-2342-476f-8bcd-99c1c2353128", "metadata": {}, "source": [ "### Execution" ] }, { "cell_type": "code", "execution_count": 1783, "id": "b3bdd014-d366-4d2d-bd7c-85d8459c48a4", "metadata": {}, "outputs": [], "source": [ "def run_evaluate(tasks):\n", " query, label, answer, context = tasks[0], tasks[1], tasks[2], tasks[3]\n", " try:\n", " corr = correctness_evaluation(query, answer, label)\n", " faith = faithfulness_evaluation(answer, context)\n", " rele = relevancy_score(query, context, answer)\n", " result = {\"Correctness\": corr, \"Faithfulness\": faith, \"Relevancy\":rele}\n", "\n", " return result\n", " except Exception as e:\n", " print(f\"Error occurred during processing question '{query}': {e}\")\n", " return None\n", " \n", " " ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" } }, "nbformat": 4, "nbformat_minor": 5 }