singhn9 committed on
Commit
0604321
·
verified ·
1 Parent(s): a7bac14

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +29 -17
src/streamlit_app.py CHANGED
@@ -24,6 +24,12 @@ from sklearn.metrics import mean_squared_error, r2_score
24
 
25
  # SHAP
26
  import shap
 
 
 
 
 
 
27
 
28
 
29
  # -------------------------
@@ -954,7 +960,9 @@ with tabs[4]:
954
  tokenizer = AutoTokenizer.from_pretrained(tiny_llm_path)
955
  assistant = pipeline("text-generation", model=model, tokenizer=tokenizer)
956
  else:
957
- assistant = pipeline("text-generation", model="sshleifer/tiny-gpt2")
 
 
958
 
959
  llm_prompt = f"""
960
  You are a metallurgical process advisor working in a steel manufacturing unit.
@@ -964,7 +972,7 @@ with tabs[4]:
964
  {deltas.to_dict(orient='index')}
965
  Write a concise 3-line message to the operator suggesting what to adjust this shift.
966
  """
967
- resp = assistant(llm_prompt, max_new_tokens=80, do_sample=True, temperature=0.6)[0]["generated_text"]
968
  st.info(resp)
969
  log("Operator LLM advisory note generated successfully.")
970
  else:
@@ -1049,40 +1057,44 @@ with tabs[4]:
1049
  st.subheader("AI Recommendation Assistant (in-memory mode)")
1050
  st.caption("Generates quick local AI suggestions — no file writes required.")
1051
 
 
 
 
1052
  if st.button("Get AI Recommendation (tiny local LLM)", key="ai_reco"):
1053
  summary = st.session_state.get("automl_summary", {})
1054
  if not summary:
1055
  st.warning("Please run AutoML first to generate context.")
1056
  st.stop()
1057
-
1058
  try:
1059
  from transformers import pipeline
 
1060
 
1061
- st.info("Loading tiny model in-memory (ephemeral)...")
1062
- assistant = pipeline("text-generation", model="sshleifer/tiny-gpt2")
1063
 
1064
  prompt = f"""
1065
- You are an ML model tuning assistant.
1066
- Given this AutoML summary, provide 3 actionable steps for improvement
1067
- if overfitting, underfitting, or data quality issues are suspected.
1068
 
1069
  Use case: {summary.get('use_case')}
1070
  Target: {summary.get('target')}
1071
  Final R²: {summary.get('final_r2')}
1072
  Final RMSE: {summary.get('final_rmse')}
1073
  Leaderboard: {summary.get('leaderboard')}
1074
-
1075
- Respond in concise numbered steps.
1076
  """
1077
 
1078
- result = assistant(prompt, max_new_tokens=90, temperature=0.7, do_sample=True)[0]["generated_text"]
1079
- st.success("LLM Recommendation:")
1080
- st.markdown(result)
1081
- log("Tiny LLM in-memory advisory generated successfully.")
1082
-
1083
  except Exception as e:
1084
- st.error(f"LLM generation failed: {e}")
1085
- st.info("Make sure `transformers` is installed in your Space environment.")
 
 
 
 
 
1086
 
1087
  # ----- Target & Business Impact tab
1088
  with tabs[5]:
 
24
 
25
  # SHAP
26
  import shap
27
+ if "llm_result" not in st.session_state:
28
+ st.session_state["llm_result"] = None
29
+ if "automl_summary" not in st.session_state:
30
+ st.session_state["automl_summary"] = {}
31
+ if "shap_recommendations" not in st.session_state:
32
+ st.session_state["shap_recommendations"] = []
33
 
34
 
35
  # -------------------------
 
960
  tokenizer = AutoTokenizer.from_pretrained(tiny_llm_path)
961
  assistant = pipeline("text-generation", model=model, tokenizer=tokenizer)
962
  else:
963
+ assistant = pipeline("text2text-generation", model="google/flan-t5-small")
964
+
965
+
966
 
967
  llm_prompt = f"""
968
  You are a metallurgical process advisor working in a steel manufacturing unit.
 
972
  {deltas.to_dict(orient='index')}
973
  Write a concise 3-line message to the operator suggesting what to adjust this shift.
974
  """
975
+ resp = assistant(llm_prompt, max_new_tokens=120)[0]["generated_text"]
976
  st.info(resp)
977
  log("Operator LLM advisory note generated successfully.")
978
  else:
 
1057
  st.subheader("AI Recommendation Assistant (in-memory mode)")
1058
  st.caption("Generates quick local AI suggestions — no file writes required.")
1059
 
1060
+ if "llm_result" not in st.session_state:
1061
+ st.session_state["llm_result"] = None
1062
+
1063
  if st.button("Get AI Recommendation (tiny local LLM)", key="ai_reco"):
1064
  summary = st.session_state.get("automl_summary", {})
1065
  if not summary:
1066
  st.warning("Please run AutoML first to generate context.")
1067
  st.stop()
 
1068
  try:
1069
  from transformers import pipeline
1070
+ st.info("Loading compact instruction-tuned model (in-memory)...")
1071
 
1072
+
1073
+ assistant = pipeline("text2text-generation", model="google/flan-t5-small")
1074
 
1075
  prompt = f"""
1076
+ You are an ML model tuning advisor.
1077
+ Based on this AutoML summary, suggest 3 concise steps to improve performance
1078
+ if overfitting, underfitting, or data-quality issues are seen.
1079
 
1080
  Use case: {summary.get('use_case')}
1081
  Target: {summary.get('target')}
1082
  Final R²: {summary.get('final_r2')}
1083
  Final RMSE: {summary.get('final_rmse')}
1084
  Leaderboard: {summary.get('leaderboard')}
 
 
1085
  """
1086
 
1087
+ result = assistant(prompt, max_new_tokens=100)[0]["generated_text"]
1088
+ st.session_state["llm_result"] = result
1089
+ log("LLM in-memory recommendation generated successfully.")
 
 
1090
  except Exception as e:
1091
+ st.session_state["llm_result"] = f" LLM generation failed: {e}"
1092
+
1093
+ # Persist output even after rerun
1094
+ if st.session_state["llm_result"]:
1095
+ st.success("AI Recommendation (cached):")
1096
+ st.markdown(st.session_state["llm_result"])
1097
+
1098
 
1099
  # ----- Target & Business Impact tab
1100
  with tabs[5]: