# hhh-test / pages/step3.py
# Deployed via GitHub Actions by github-actions[bot]
# (commit eb2cb1538d89b3093b6b424824dd9aecfc99086b, short: cff1e0e)
# web/pages/step3.py
import streamlit as st
from pages.step3_left import render_step3_left
from pages.step3_right import render_step3_right
# web/pages/step3.py
def render_step3():
    """Render Step 3 of the wizard: metric selection and model choice.

    Draws the two metric panes side by side, then the Back / Start
    Evaluation navigation row. Navigation updates
    ``st.session_state.step`` and reruns the app.
    """
    st.header("Step 3: Select Your Metrics")
    st.markdown("Choose which metrics and models you want to use for evaluation.")

    # Seed the model-backend toggles on first visit (OpenAI on, HF off).
    st.session_state.setdefault("use_openai", True)
    st.session_state.setdefault("use_hf", False)

    pane_left, pane_right = st.columns([1, 1])
    with pane_left:
        # The left pane also persists its result into
        # st.session_state.selected_metrics.
        chosen_metrics = render_step3_left()
    with pane_right:
        render_step3_right()  # right pane manages the refined subset

    openai_enabled = st.session_state.get("use_openai", True)
    hf_enabled = st.session_state.get("use_hf", False)

    nav_back, nav_next = st.columns(2)
    with nav_back:
        if st.button("← Back", use_container_width=True):
            st.session_state.step = 2
            st.rerun()
    with nav_next:
        # Evaluation requires: at least one metric chosen, a conversation
        # uploaded, and at least one model backend enabled.
        ready = (
            bool(chosen_metrics)
            and st.session_state.get("conversation_uploaded", False)
            and (openai_enabled or hf_enabled)
        )
        if st.button("Start Evaluation →", type="primary", use_container_width=True, disabled=not ready):
            # Snapshot the refined metric subset for Step 4 before advancing.
            from core.workflow import filter_refined_metrics
            refined = st.session_state.get("refined")
            allowed = st.session_state.get("allowed_refined_metric_names", [])
            if refined:
                st.session_state.profile_refined_subset = filter_refined_metrics(refined, allowed)
            # Continue to the results step.
            st.session_state.step = 4
            st.rerun()