# NOTE(review): the lines below ("Spaces: / Sleeping / Sleeping") were page-scrape
# residue from a hosting UI, not source code; preserved here as a comment.
import pandas as pd
import streamlit as st
def display_model_config(config, title="Model Config"):
    """
    Render a model-configuration summary in the Streamlit sidebar.

    Args:
        config (dict): Must contain "model", "max_tokens", "temperature",
            and "available_models" (an iterable of model names).
        title (str): Heading shown above the configuration table.
    """
    st.sidebar.subheader(title)

    # Main parameters as a two-column table (parameter name -> value).
    params = pd.DataFrame(
        {
            "Parameter": ["Model", "Max Tokens", "Temperature"],
            "Value": [config["model"], config["max_tokens"], config["temperature"]],
        }
    )
    st.sidebar.table(params)

    # List every model the app exposes for selection.
    st.sidebar.markdown("### Available Models")
    st.sidebar.table(pd.DataFrame({"Available Models": config["available_models"]}))
def display_metrics_as_table(metrics):
    """
    Render classification-report-style metrics as two Streamlit tables:
    one with per-class scores, one with the aggregate summary rows.

    Args:
        metrics (dict): Mapping of class label -> {"precision", "recall",
            "f1-score", "support"}, plus the summary keys "accuracy",
            "macro avg", and "weighted avg".
    """
    summary_keys = ("accuracy", "macro avg", "weighted avg")

    # Per-class rows: every entry that is not one of the summary keys.
    per_class_rows = []
    for label, scores in metrics.items():
        if label in summary_keys:
            continue
        per_class_rows.append(
            {
                "Class": label,
                "Precision": scores["precision"],
                "Recall": scores["recall"],
                "F1-Score": scores["f1-score"],
                "Support": scores["support"],
            }
        )
    class_df = pd.DataFrame(per_class_rows)

    # Aggregate rows: overall accuracy plus macro/weighted averages.
    macro = metrics["macro avg"]
    weighted = metrics["weighted avg"]
    overall_df = pd.DataFrame(
        [
            {"Metric": "Accuracy", "Value": metrics["accuracy"]},
            {"Metric": "Macro Avg Precision", "Value": macro["precision"]},
            {"Metric": "Macro Avg Recall", "Value": macro["recall"]},
            {"Metric": "Macro Avg F1-Score", "Value": macro["f1-score"]},
            {"Metric": "Weighted Avg Precision", "Value": weighted["precision"]},
            {"Metric": "Weighted Avg Recall", "Value": weighted["recall"]},
            {"Metric": "Weighted Avg F1-Score", "Value": weighted["f1-score"]},
        ]
    )

    st.subheader("Class-Specific Metrics")
    st.dataframe(class_df)
    st.subheader("Overall Metrics")
    st.dataframe(overall_df)