File size: 2,503 Bytes
630fa04
 
 
f0d635f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
630fa04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import pandas as pd
import streamlit as st

def display_model_config(config, title="Model Config"):
    """
    Render a model-configuration summary in the Streamlit sidebar.

    Args:
        config (dict): Must contain the keys "model", "max_tokens",
            "temperature", and "available_models".
        title (str): Heading shown above the configuration table.
    """
    st.sidebar.subheader(title)

    # Core parameters rendered as a two-column Parameter/Value table.
    params_df = pd.DataFrame(
        {
            "Parameter": ["Model", "Max Tokens", "Temperature"],
            "Value": [config["model"], config["max_tokens"], config["temperature"]],
        }
    )
    st.sidebar.table(params_df)

    # Selectable models listed in their own single-column table.
    st.sidebar.markdown("### Available Models")
    models_df = pd.DataFrame({"Available Models": config["available_models"]})
    st.sidebar.table(models_df)


def display_metrics_as_table(metrics):
    """
    Render an sklearn-style classification report as two Streamlit tables.

    Args:
        metrics (dict): Report dict with one entry per class label plus the
            aggregate keys "accuracy", "macro avg", and "weighted avg".
    """
    aggregate_keys = ("accuracy", "macro avg", "weighted avg")

    # Per-class table: every entry except the aggregate summaries.
    per_class_rows = []
    for label, scores in metrics.items():
        if label in aggregate_keys:
            continue
        per_class_rows.append(
            {
                "Class": label,
                "Precision": scores["precision"],
                "Recall": scores["recall"],
                "F1-Score": scores["f1-score"],
                "Support": scores["support"],
            }
        )
    class_df = pd.DataFrame(per_class_rows)

    # Overall table: accuracy first, then macro/weighted averages.
    overall_rows = [{"Metric": "Accuracy", "Value": metrics["accuracy"]}]
    for avg_label in ("Macro Avg", "Weighted Avg"):
        avg_scores = metrics[avg_label.lower()]
        for display_name, score_key in (
            ("Precision", "precision"),
            ("Recall", "recall"),
            ("F1-Score", "f1-score"),
        ):
            overall_rows.append(
                {"Metric": f"{avg_label} {display_name}", "Value": avg_scores[score_key]}
            )
    overall_df = pd.DataFrame(overall_rows)

    # Display both tables in the main page area.
    st.subheader("Class-Specific Metrics")
    st.dataframe(class_df)

    st.subheader("Overall Metrics")
    st.dataframe(overall_df)