# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod
from typing import List, Optional
from pandas import DataFrame
class BaseEvaluationRunResult(ABC):
    """
    Abstract interface for the results of an evaluation run.
    """

    @abstractmethod
    def to_pandas(self) -> "DataFrame":
        """
        Build a Pandas DataFrame holding the score of every metric for each input sample.

        :returns:
            Pandas DataFrame with the scores.
        """

    @abstractmethod
    def score_report(self) -> "DataFrame":
        """
        Build a Pandas DataFrame holding the aggregated score of each metric.

        :returns:
            Pandas DataFrame with the aggregated scores.
        """

    @abstractmethod
    def comparative_individual_scores_report(
        self, other: "BaseEvaluationRunResult", keep_columns: Optional[List[str]] = None
    ) -> "DataFrame":
        """
        Build a Pandas DataFrame comparing the per-metric scores of two evaluation runs.

        Both evaluation runs are assumed to have received the same inputs.

        :param other:
            Results of another evaluation run to compare with.
        :param keep_columns:
            Common input column names to retain from both evaluation runs in the comparison.
        :returns:
            Pandas DataFrame with the score comparison.
        """