# RAG-evaluation / app.py
# pgurazada1's picture
# Update app.py
# 21b9413 verified
import os
import gradio as gr
from openai import AzureOpenAI
# Azure OpenAI client for the two LLM-as-judge calls below.
# NOTE: raises KeyError at import time if AZURE_OPENAI_KEY or
# AZURE_OPENAI_ENDPOINT is not set in the environment.
client = AzureOpenAI(
    api_key=os.environ["AZURE_OPENAI_KEY"],
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_version="2024-02-01"
)

# Azure deployment name used for both the groundedness and relevance raters.
model_name = "gpt-4o-mini"
# System prompt for the groundedness judge: scores 1-5 how strictly the
# answer sticks to the supplied context. (Fixes the "evaluaton" typo in the
# original prompt text.)
groundedness_rater_system_message = """
You are tasked with rating AI generated answers to questions posed by users.
You will be presented a question, context used by the AI system to generate the answer and an AI generated answer to the question.
In the input, the question will begin with ###Question, the context will begin with ###Context while the AI generated answer will begin with ###Answer.
Evaluation criteria:
The task is to judge the extent to which the metric is followed by the answer.
1 - The metric is not followed at all
2 - The metric is followed only to a limited extent
3 - The metric is followed to a good extent
4 - The metric is followed mostly
5 - The metric is followed completely
Metric:
The answer should be derived only from the information presented in the context
Instructions:
1. First write down the steps that are needed to evaluate the answer as per the metric.
2. Give a step-by-step explanation if the answer adheres to the metric considering the question and context as the input.
3. Next, evaluate the extent to which the metric is followed.
4. Use the previous information to rate the answer using the evaluation criteria and assign a score.
"""
# System prompt for the relevance judge: scores 1-5 how well the answer
# addresses the question given the context. Fixes two prompt defects:
# the "evaluaton" typo, and the instructions telling the model to evaluate
# and rate "the context" — the metric defined above rates the ANSWER, so
# the instructions now refer to the answer consistently.
relevance_rater_system_message = """
You are tasked with rating AI generated answers to questions posed by users.
You will be presented a question, context used by the AI system to generate the answer and an AI generated answer to the question.
In the input, the question will begin with ###Question, the context will begin with ###Context while the AI generated answer will begin with ###Answer.
Evaluation criteria:
The task is to judge the extent to which the metric is followed by the answer.
1 - The metric is not followed at all
2 - The metric is followed only to a limited extent
3 - The metric is followed to a good extent
4 - The metric is followed mostly
5 - The metric is followed completely
Metric:
Relevance measures how well the answer addresses the main aspects of the question, based on the context.
Consider whether all and only the important aspects are contained in the answer when evaluating relevance.
Instructions:
1. First write down the steps that are needed to evaluate the answer as per the metric.
2. Give a step-by-step explanation if the answer adheres to the metric considering the question and context as the input.
3. Next, evaluate the extent to which the metric is followed.
4. Use the previous information to rate the answer using the evaluation criteria and assign a score.
"""
# Shared user-message template for both judges; filled via .format() with
# the question, retrieved context, and generated answer.
user_message_template = (
    "\n"
    "###Question\n"
    "{question}\n"
    "###Context\n"
    "{context}\n"
    "###Answer\n"
    "{answer}\n"
)
def predict(rag_question, rag_context, rag_answer):
    """Rate a RAG answer on groundedness and relevance via two LLM-as-judge calls.

    Parameters:
        rag_question: the user's query.
        rag_context: the retrieved context shown to the answering model.
        rag_answer: the model-generated answer to evaluate.

    Returns:
        A single string with the groundedness rating, a '---' separator line,
        and the relevance rating; on API failure, an error message instead.
    """
    # Both judges receive the identical user message; build it once.
    user_message = user_message_template.format(
        question=rag_question,
        context=rag_context,
        answer=rag_answer,
    )
    try:
        groundedness_response = client.chat.completions.create(
            model=model_name,
            messages=[
                {'role': 'system', 'content': groundedness_rater_system_message},
                {'role': 'user', 'content': user_message},
            ],
            temperature=0,  # deterministic judging
        )
        groundedness_prediction = groundedness_response.choices[0].message.content
        relevance_response = client.chat.completions.create(
            model=model_name,
            messages=[
                {'role': 'system', 'content': relevance_rater_system_message},
                {'role': 'user', 'content': user_message},
            ],
            temperature=0,
        )
        relevance_prediction = relevance_response.choices[0].message.content
    except Exception as e:
        # Bug fix: the original assigned `prediction = e` and then fell
        # through to return unbound names, raising NameError on any API
        # failure. Return the error text to the UI instead.
        return f"Error: {e}"
    return groundedness_prediction + '\n' + '---' + '\n' + relevance_prediction
# Input components: query, retrieved context, and LLM answer.
rag_question = gr.Textbox(placeholder="Enter your query here", lines=6)
rag_context = gr.Textbox(placeholder="Enter the retrieved context here", lines=6)
rag_answer = gr.Textbox(placeholder="Enter the LLM response here", lines=6)

demo = gr.Interface(
    inputs=[rag_question, rag_context, rag_answer], fn=predict, outputs="text",
    title="Evaluate RAG output for groundedness and relevance",
    description="This web API presents an interface to evaluate RAG output for groundedness and relevance",
    # Bug fix: each example must be a flat list with one value per input
    # component; the original wrapped every field in its own extra list,
    # which would hand a list (not a string) to each Textbox.
    examples=[[
        "What was the increase in annual revenue in 2022 compared to 2021?",
        "Here are some documents that are relevant to the question mentioned below. In 2022, we recognized total revenues of $81.46 billion, respectively, representing an increase of $27.64 billion, compared to the prior year. We continue to ramp production, build new manufacturing capacity and expand our operations to enable increased deliveries and deployments of our products and further revenue growth.",
        "$27.64 billion.",
    ]],
    cache_examples=False,
    concurrency_limit=16
)

demo.queue()
# NOTE(review): if PASSWD is unset, os.getenv returns None and gr.launch's
# auth tuple will be ("demouser", None) — confirm PASSWD is always set in
# the Space's secrets.
demo.launch(auth=("demouser", os.getenv('PASSWD')))