Update src/streamlit_app.py
Browse files- src/streamlit_app.py +32 -11
src/streamlit_app.py
CHANGED
|
@@ -1060,22 +1060,22 @@ with tabs[4]:
|
|
| 1060 |
if "llm_result" not in st.session_state:
|
| 1061 |
st.session_state["llm_result"] = None
|
| 1062 |
|
| 1063 |
-
if st.button("Get AI Recommendation (
|
| 1064 |
summary = st.session_state.get("automl_summary", {})
|
| 1065 |
if not summary:
|
| 1066 |
st.warning("Please run AutoML first to generate context.")
|
| 1067 |
st.stop()
|
| 1068 |
try:
|
| 1069 |
-
|
| 1070 |
-
st.info("
|
| 1071 |
|
| 1072 |
-
|
| 1073 |
-
|
| 1074 |
|
| 1075 |
prompt = f"""
|
| 1076 |
You are an ML model tuning advisor.
|
| 1077 |
-
Based on this AutoML summary, suggest 3 concise steps
|
| 1078 |
-
if overfitting, underfitting, or data-quality issues are
|
| 1079 |
|
| 1080 |
Use case: {summary.get('use_case')}
|
| 1081 |
Target: {summary.get('target')}
|
|
@@ -1084,11 +1084,32 @@ with tabs[4]:
|
|
| 1084 |
Leaderboard: {summary.get('leaderboard')}
|
| 1085 |
"""
|
| 1086 |
|
| 1087 |
-
|
| 1088 |
-
|
| 1089 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1090 |
except Exception as e:
|
| 1091 |
-
|
|
|
|
|
|
|
|
|
|
| 1092 |
|
| 1093 |
# Persist output even after rerun
|
| 1094 |
if st.session_state["llm_result"]:
|
|
|
|
# --- Tab 5: AI tuning recommendation via the Hugging Face Inference API ---
# Persist the last LLM answer across Streamlit reruns.
if "llm_result" not in st.session_state:
    st.session_state["llm_result"] = None

if st.button("Get AI Recommendation (via HF API)", key="ai_reco"):
    # Context produced by the AutoML tab; without it there is nothing to ask about.
    summary = st.session_state.get("automl_summary", {})
    if not summary:
        st.warning("Please run AutoML first to generate context.")
        st.stop()
    try:
        # Imported lazily so the tab renders even if `requests` is missing
        # until the button is actually pressed. One import per line (PEP 8).
        import json
        import requests

        st.info("Contacting Hugging Face Inference API (Mixtral-8x7B-Instruct)…")

        API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
        # HF token must be configured in .streamlit/secrets.toml; a missing key
        # raises KeyError, which the boundary below reports to the user.
        headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}

        # NOTE(review): the diff this was recovered from omits two prompt lines
        # (file lines 1082–1083, extra summary fields) — restore from the full file.
        prompt = f"""
You are an ML model tuning advisor.
Based on this AutoML summary, suggest 3 concise, actionable steps
to improve model performance if overfitting, underfitting, or data-quality issues are observed.

Use case: {summary.get('use_case')}
Target: {summary.get('target')}
Leaderboard: {summary.get('leaderboard')}
"""

        payload = {
            "inputs": prompt,
            "parameters": {"max_new_tokens": 200, "temperature": 0.7},
        }

        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()

        # The HF text-generation endpoint usually returns
        # [{"generated_text": ...}] or {"generated_text": ...}, but other shapes
        # (empty list, list of strings, error dicts) occur in practice.
        # Guard the element access so an unexpected-but-successful response
        # falls through to the raw JSON dump instead of raising IndexError/
        # TypeError and being misreported as a failed API call.
        if (
            isinstance(result, list)
            and result
            and isinstance(result[0], dict)
            and "generated_text" in result[0]
        ):
            text = result[0]["generated_text"]
        elif isinstance(result, dict) and "generated_text" in result:
            text = result["generated_text"]
        else:
            # Unknown payload shape: show it verbatim so the user can inspect it.
            text = json.dumps(result, indent=2)

        st.session_state["llm_result"] = text.strip()
        log("HF API recommendation generated successfully.")
        st.success("AI Recommendation (Mixtral-8x7B-Instruct):")
        st.markdown(st.session_state["llm_result"])

    except Exception as e:
        # Boundary handler: network errors, HTTP errors, missing secret, or
        # JSON decode failures all surface here; report and log, don't crash.
        err_msg = f"HF Inference API call failed: {e}"
        st.error(err_msg)
        log(err_msg)
|
| 1112 |
+
|
| 1113 |
|
| 1114 |
# Persist output even after rerun
|
| 1115 |
if st.session_state["llm_result"]:
|