OmarOmar91 committed on
Commit
610886f
·
verified ·
1 Parent(s): f061325

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +244 -24
app.py CHANGED
@@ -1,7 +1,134 @@
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  def respond(
6
  message,
7
  history: list[dict[str, str]],
@@ -12,59 +139,152 @@ def respond(
12
  hf_token: gr.OAuthToken,
13
  ):
14
  """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
16
  """
17
  client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
  messages = [{"role": "system", "content": system_message}]
20
-
21
  messages.extend(history)
22
-
23
  messages.append({"role": "user", "content": message})
24
 
25
  response = ""
26
-
27
- for message in client.chat_completion(
28
  messages,
29
  max_tokens=max_tokens,
30
  stream=True,
31
  temperature=temperature,
32
  top_p=top_p,
33
  ):
34
- choices = message.choices
35
  token = ""
36
  if len(choices) and choices[0].delta.content:
37
  token = choices[0].delta.content
38
-
39
  response += token
40
  yield response
41
 
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
  respond,
48
  type="messages",
49
  additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
 
51
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
  ],
61
  )
62
 
63
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  with gr.Sidebar():
 
65
  gr.LoginButton()
66
- chatbot.render()
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
1
+ # app.py — Self-Sensing Concrete (Prediction + Literature Q&A)
2
+ # --------------------------------------------------------------------------------------
3
+ # Tab 1: Your Stress GF predictor UI (unchanged core logic)
4
+ # Tab 2: Original Gradio ChatInterface template (HF Inference API)
5
+ # --------------------------------------------------------------------------------------
6
+
7
+ import os
8
+ import joblib
9
+ import numpy as np
10
+ import pandas as pd
11
  import gradio as gr
12
  from huggingface_hub import InferenceClient
13
 
14
# ========================= Predictor (your code) =========================
# Column schema shared by the form UI, the "Fill Example" helper, and the
# trained pipeline.  Feature order in MAIN_VARIABLES must match training
# exactly: the click handler zips widget values against this list.
CF_COL = "Conductive Filler Conc. (wt%)"   # primary filler dosage column
TARGET_COL = "Stress GF (MPa-1)"           # regression target name

MAIN_VARIABLES = [
    "Filler 1 Type",
    "Filler 1 Diameter (µm)",
    "Filler 1 Length (mm)",
    CF_COL,
    "Filler 1 Dimensionality",
    "Filler 2 Type",
    "Filler 2 Diameter (µm)",
    "Filler 2 Length (mm)",
    "Filler 2 Dimensionality",
    "Specimen Volume (mm3)",
    "Probe Count",
    "Probe Material",
    "W/B",
    "S/B",
    "Gauge Length (mm)",
    "Curing Condition",
    "Number of Fillers",
    "Drying Temperature (°C)",
    "Drying Duration (hr)",
    "Loading Rate (MPa/s)",
    "Modulus of Elasticity (GPa)",
    "Current Type",
    "Applied Voltage (V)",
]

# Columns coerced to float before prediction (blank/unparseable -> NaN).
NUMERIC_COLS = {
    "Filler 1 Diameter (µm)",
    "Filler 1 Length (mm)",
    CF_COL,
    "Filler 2 Diameter (µm)",
    "Filler 2 Length (mm)",
    "Specimen Volume (mm3)",
    "Probe Count",
    "W/B",
    "S/B",
    "Gauge Length (mm)",
    "Number of Fillers",
    "Drying Temperature (°C)",
    "Drying Duration (hr)",
    "Loading Rate (MPa/s)",
    "Modulus of Elasticity (GPa)",
    "Applied Voltage (V)",
}

# Free-text / dropdown columns passed through as (possibly empty) strings.
CATEGORICAL_COLS = {
    "Filler 1 Type",
    "Filler 1 Dimensionality",
    "Filler 2 Type",
    "Filler 2 Dimensionality",
    "Probe Material",
    "Curing Condition",
    "Current Type",
}

DIM_CHOICES = ["0D", "1D", "2D", "3D", "NA"]   # filler dimensionality options
CURRENT_CHOICES = ["DC", "AC", "NA"]           # electrical current type options

# Paths probed, in order, for the trained joblib pipeline.
MODEL_CANDIDATES = [
    "stress_gf_xgb.joblib",
    "models/stress_gf_xgb.joblib",
    "/home/user/app/stress_gf_xgb.joblib",
]
46
+
47
def _load_model_or_error():
    """Return the fitted pipeline, or a human-readable error string.

    Probes MODEL_CANDIDATES in order; the first existing path wins.  Callers
    (predict_fn) detect failure by checking ``isinstance(result, str)``.
    """
    for candidate in MODEL_CANDIDATES:
        if not os.path.exists(candidate):
            continue
        try:
            return joblib.load(candidate)
        except Exception as exc:
            # An existing-but-unreadable file is reported, not skipped.
            return f"Could not load model from {candidate}: {exc}"
    return ("Model file not found. Upload your trained pipeline as "
            "`stress_gf_xgb.joblib` (or put it in `models/`).")
56
+
57
def _coerce_to_row(form_dict: dict) -> pd.DataFrame:
    """Convert raw form values into the single-row DataFrame the pipeline expects.

    Numeric columns: blank/None/unparseable values become NaN so the pipeline's
    imputers handle them.  Categorical columns: None and the "NA" sentinel
    become "" so the imputer/OneHotEncoder treats them as missing.

    Args:
        form_dict: mapping of column name -> raw widget value (may be partial).

    Returns:
        A one-row DataFrame with columns in MAIN_VARIABLES order.
    """
    row = {}
    for col in MAIN_VARIABLES:
        v = form_dict.get(col)
        if col in NUMERIC_COLS:
            if v in ("", None):
                row[col] = np.nan
            else:
                try:
                    row[col] = float(v)
                except (TypeError, ValueError):
                    # FIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt; float() only raises these two.
                    row[col] = np.nan
        else:
            # treat "NA" as empty category so imputers/OneHot handle it
            row[col] = "" if v in (None, "NA") else str(v).strip()
    return pd.DataFrame([row], columns=MAIN_VARIABLES)
73
+
74
def predict_fn(**kwargs):
    """Predict Stress GF (MPa^-1) from keyword-style form values.

    Returns a float on success, or an error-message string on failure
    (missing model file, load failure, or prediction failure).

    NOTE(review): kwargs keys are full column names with spaces/units;
    calling with ``**dict`` of non-identifier keys works in CPython.
    """
    model = _load_model_or_error()
    if isinstance(model, str):
        # Loader reported an error message instead of a pipeline.
        return model

    features = _coerce_to_row(kwargs)
    try:
        log_pred = model.predict(features)        # pipeline was trained on log1p(target)
        value = float(np.expm1(log_pred)[0])      # invert to original MPa^-1 scale
        if -1e-10 < value < 0:
            value = 0.0                           # clip tiny negative numeric noise
        return value
    except Exception as exc:
        return f"Prediction error: {exc}"
88
+
89
# One plausible CNT-based mix, used by the "Fill Example" button.
EXAMPLE = {
    # --- primary conductive filler ---
    "Filler 1 Type": "CNT",
    "Filler 1 Dimensionality": "1D",
    "Filler 1 Diameter (µm)": 0.02,
    "Filler 1 Length (mm)": 1.2,
    CF_COL: 0.5,
    # --- secondary filler (none in this example) ---
    "Filler 2 Type": "",
    "Filler 2 Dimensionality": "NA",
    "Filler 2 Diameter (µm)": None,
    "Filler 2 Length (mm)": None,
    # --- specimen, probes & mix design ---
    "Specimen Volume (mm3)": 1000,
    "Probe Count": 2,
    "Probe Material": "Copper",
    "W/B": 0.4,
    "S/B": 2.5,
    "Gauge Length (mm)": 20,
    "Curing Condition": "28d water, 20°C",
    "Number of Fillers": 1,
    # --- processing ---
    "Drying Temperature (°C)": 60,
    "Drying Duration (hr)": 24,
    # --- mechanical & electrical loading ---
    "Loading Rate (MPa/s)": 0.1,
    "Modulus of Elasticity (GPa)": 25,
    "Current Type": "DC",
    "Applied Voltage (V)": 5.0,
}
114
+
115
def _fill_example():
    """Return example values for every input widget, in MAIN_VARIABLES order."""
    return [EXAMPLE.get(name) for name in MAIN_VARIABLES]
117
 
118
def _clear_all():
    """Return reset values for every input widget, in MAIN_VARIABLES order."""
    # Dropdowns reset to their "NA" sentinel rather than to an empty string.
    na_dropdowns = {"Filler 1 Dimensionality", "Filler 2 Dimensionality", "Current Type"}
    cleared = []
    for col in MAIN_VARIABLES:
        if col in NUMERIC_COLS:
            cleared.append(None)   # empty gr.Number
        elif col in na_dropdowns:
            cleared.append("NA")   # dropdown default choice
        else:
            cleared.append("")     # free-text fields
    return cleared
130
+
131
+ # ========================= Chat (original template) =========================
132
  def respond(
133
  message,
134
  history: list[dict[str, str]],
 
139
  hf_token: gr.OAuthToken,
140
  ):
141
  """
142
+ Unmodified template logic; you can swap the model below to your preferred one
143
+ (or rewrite to call your own RAG stack instead of Inference API).
144
  """
145
  client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
 
146
  messages = [{"role": "system", "content": system_message}]
 
147
  messages.extend(history)
 
148
  messages.append({"role": "user", "content": message})
149
 
150
  response = ""
151
+ for msg in client.chat_completion(
 
152
  messages,
153
  max_tokens=max_tokens,
154
  stream=True,
155
  temperature=temperature,
156
  top_p=top_p,
157
  ):
158
+ choices = msg.choices
159
  token = ""
160
  if len(choices) and choices[0].delta.content:
161
  token = choices[0].delta.content
 
162
  response += token
163
  yield response
164
 
165
# Literature Q&A chat interface; rendered inside Tab 2 of the combined UI.
chat_ui = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        # System prompt, visible to and editable by the user.
        gr.Textbox(
            value="You are a materials-science assistant for self-sensing composites. Cite sources when possible.",
            label="System message",
        ),
        # Sampling controls forwarded to respond().
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
176
 
177
# ========================= Combined UI =========================
# Custom CSS: dark gradient page background plus a translucent "card" style,
# applied via elem_classes=["card"] on the accordions/groups below.
CSS = """
.gradio-container {background: linear-gradient(135deg,#0f172a 0%, #1f2937 60%, #334155 100%);}
* {font-family: ui-sans-serif, system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial;}
.card {background: rgba(255,255,255,0.04) !important; border: 1px solid rgba(255,255,255,0.08);}
label.svelte-1ipelgc {color: #e5e7eb !important;}
"""
# NOTE(review): `label.svelte-1ipelgc` targets a generated Svelte class name;
# it may stop matching after a Gradio upgrade — confirm when bumping versions.

# Dark slate theme tuned to match the CSS gradient above.
theme = gr.themes.Soft(primary_hue="blue", neutral_hue="slate").set(
    body_background_fill="#0f172a",
    body_text_color="#e5e7eb",
    input_background_fill="#111827",
    input_border_color="#1f2937",
    button_primary_background_fill="#3b82f6",
    button_primary_text_color="#ffffff",
    button_secondary_background_fill="#111827",
)
194
+
195
# Combined application layout: sidebar login, header, and two tabs
# (prediction form + literature chat).
with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("### Sign in for Inference API")
        # Supplies the gr.OAuthToken that respond() receives as hf_token.
        gr.LoginButton()

    gr.Markdown(
        "<h1 style='margin:0'>Self-Sensing Materials Assistant</h1>"
        "<p style='opacity:.9'>Predict stress gauge factor or ask literature questions.</p>"
    )

    with gr.Tabs():
        # ---------------- Tab 1: Prediction ----------------
        with gr.Tab("Predict Stress Gauge Factor"):
            with gr.Row():
                # Left column: grouped input widgets, one per MAIN_VARIABLES entry.
                with gr.Column(scale=7):
                    with gr.Accordion("Primary conductive filler", open=True, elem_classes=["card"]):
                        f1_type = gr.Textbox(label="Filler 1 Type", placeholder="e.g., CNT, Graphite, Steel fiber")
                        f1_diam = gr.Number(label="Filler 1 Diameter (µm)")
                        f1_len = gr.Number(label="Filler 1 Length (mm)")
                        cf_conc = gr.Number(label=f"{CF_COL}", info="Weight percent of total binder")
                        f1_dim = gr.Dropdown(DIM_CHOICES, value="NA", label="Filler 1 Dimensionality")

                    with gr.Accordion("➕ Secondary filler (optional)", open=False, elem_classes=["card"]):
                        f2_type = gr.Textbox(label="Filler 2 Type", placeholder="Optional")
                        f2_diam = gr.Number(label="Filler 2 Diameter (µm)")
                        f2_len = gr.Number(label="Filler 2 Length (mm)")
                        f2_dim = gr.Dropdown(DIM_CHOICES, value="NA", label="Filler 2 Dimensionality")

                    with gr.Accordion("Mix design & specimen", open=False, elem_classes=["card"]):
                        spec_vol = gr.Number(label="Specimen Volume (mm3)")
                        probe_cnt = gr.Number(label="Probe Count")
                        probe_mat = gr.Textbox(label="Probe Material", placeholder="e.g., Copper, Silver paste")
                        wb = gr.Number(label="W/B")
                        sb = gr.Number(label="S/B")
                        gauge_len = gr.Number(label="Gauge Length (mm)")
                        curing = gr.Textbox(label="Curing Condition", placeholder="e.g., 28d water, 20°C")
                        n_fillers = gr.Number(label="Number of Fillers")

                    with gr.Accordion("Processing", open=False, elem_classes=["card"]):
                        dry_temp = gr.Number(label="Drying Temperature (°C)")
                        dry_hrs = gr.Number(label="Drying Duration (hr)")

                    with gr.Accordion("Mechanical & electrical loading", open=False, elem_classes=["card"]):
                        load_rate = gr.Number(label="Loading Rate (MPa/s)")
                        E_mod = gr.Number(label="Modulus of Elasticity (GPa)")
                        current = gr.Dropdown(CURRENT_CHOICES, value="NA", label="Current Type")
                        voltage = gr.Number(label="Applied Voltage (V)")

                # Right column: prediction output, action buttons, model notes.
                with gr.Column(scale=5):
                    with gr.Group(elem_classes=["card"]):
                        out_pred = gr.Number(label="Predicted Stress GF (MPa-1)", precision=6)
                        with gr.Row():
                            btn_pred = gr.Button("Predict", variant="primary")
                            btn_clear = gr.Button("Clear")
                            btn_demo = gr.Button("Fill Example")

                    with gr.Accordion("About this model", open=False, elem_classes=["card"]):
                        gr.Markdown(
                            "- Pipeline: ColumnTransformer → (RobustScaler + OneHot) → XGBoost\n"
                            "- Target: Stress GF (MPa⁻¹) on original scale (trained with log1p).\n"
                            "- Missing values are safely imputed per-feature.\n"
                            f"- Trained columns: `{', '.join(MAIN_VARIABLES)}`"
                        )

            # IMPORTANT: inputs order must match MAIN_VARIABLES exactly:
            # _predict_wrapper zips these positionally against MAIN_VARIABLES.
            inputs_in_order = [
                # MAIN_VARIABLES order:
                # "Filler 1 Type","Filler 1 Diameter (µm)","Filler 1 Length (mm)", CF_COL,
                # "Filler 1 Dimensionality","Filler 2 Type","Filler 2 Diameter (µm)","Filler 2 Length (mm)",
                # "Filler 2 Dimensionality","Specimen Volume (mm3)","Probe Count","Probe Material",
                # "W/B","S/B","Gauge Length (mm)","Curing Condition","Number of Fillers",
                # "Drying Temperature (°C)","Drying Duration (hr)","Loading Rate (MPa/s)",
                # "Modulus of Elasticity (GPa)","Current Type","Applied Voltage (V)"
                f1_type, f1_diam, f1_len, cf_conc,
                f1_dim, f2_type, f2_diam, f2_len,
                f2_dim, spec_vol, probe_cnt, probe_mat,
                wb, sb, gauge_len, curing, n_fillers,
                dry_temp, dry_hrs, load_rate,
                E_mod, current, voltage
            ]

            def _predict_wrapper(*vals):
                """Adapt positional widget values into the keyword form predict_fn expects."""
                data = {k: v for k, v in zip(MAIN_VARIABLES, vals)}
                return predict_fn(**data)

            # Wire the three buttons to prediction / reset / example fill.
            btn_pred.click(_predict_wrapper, inputs=inputs_in_order, outputs=out_pred)
            btn_clear.click(lambda: _clear_all(), inputs=None, outputs=inputs_in_order)
            btn_demo.click(lambda: _fill_example(), inputs=None, outputs=inputs_in_order)

        # ---------------- Tab 2: Literature Q&A (template) ----------------
        with gr.Tab("Ask the Literature (Chat)"):
            chat_ui.render()

# ------------- Launch -------------
if __name__ == "__main__":
    # queue() enables streaming/concurrency for the chat generator.
    demo.queue().launch()