# Source: app.py uploaded by mrob937 via huggingface_hub (commit ec1108b, verified).
import math
import gradio
import pandas
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Model
# Small instruction-tuned model (135M params) — loaded once at import time so the
# Gradio app reuses a single pipeline instance across requests.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline(
task="text-generation",
model=AutoModelForCausalLM.from_pretrained(MODEL_ID),
tokenizer=tokenizer
)
# --- Core calculation ---
def conduction_calc(L_m: float, A_m2: float, k_WmK: float, dT_C: float) -> dict:
    """
    Steady one-dimensional heat conduction through a flat wall (Fourier's law).

    Args:
        L_m: wall thickness [m]; must be strictly positive.
        A_m2: wall area [m²]; must be strictly positive.
        k_WmK: thermal conductivity [W/m·K]; must be strictly positive.
        dT_C: temperature difference across the wall [°C or K].

    Returns:
        dict with a single "results" key mapping to:
            "q_W"     – heat transfer rate [W]
            "q_flux"  – heat flux [W/m²]
            "R_KperW" – thermal resistance [K/W]

    Raises:
        ValueError: if L_m, A_m2, or k_WmK is not strictly positive — these
            would otherwise cause a bare ZeroDivisionError or produce
            physically meaningless results.
    """
    if L_m <= 0 or A_m2 <= 0 or k_WmK <= 0:
        raise ValueError("L_m, A_m2, and k_WmK must all be strictly positive.")
    q_W = k_WmK * A_m2 * dT_C / L_m  # Fourier's law: q = k·A·ΔT / L
    q_flux = k_WmK * dT_C / L_m      # per unit area: q'' = k·ΔT / L
    R = L_m / (k_WmK * A_m2)         # thermal resistance: R = L / (k·A)
    return dict(
        results={
            "q_W": q_W,
            "q_flux": q_flux,
            "R_KperW": R,
        }
    )
# Chat formatting helper
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """
    Render a system + user message pair into the model's chat prompt format.

    Args:
        system_prompt: instructions controlling the assistant's persona/style.
        user_prompt: the actual question or task text.

    Returns:
        The untokenized prompt string with the generation prompt appended,
        ready to feed to the text-generation pipeline.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # The previous version fetched tokenizer.chat_template into an unused
    # local; apply_chat_template already consults it internally.
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """
    Run the text-generation pipeline on *prompt* and return only the newly
    generated text (the prompt itself is excluded via return_full_text=False).
    """
    generation_kwargs = {
        "max_new_tokens": max_tokens,
        "do_sample": True,
        "temperature": 0.5,
        "return_full_text": False,
    }
    outputs = pipe(prompt, **generation_kwargs)
    first_candidate = outputs[0]
    return first_candidate["generated_text"]
# --- LLM Explanation ---
def llm_explain(results: dict, inputs: list) -> str:
    """
    Ask the LLM for a single friendly, analogy-based sentence explaining the
    conduction results.

    Args:
        results: dict as returned by conduction_calc (with a "results" key).
        inputs: [L_m, A_m2, k_WmK, dT_C] — the numeric inputs, in that order.

    Returns:
        The raw generated explanation text.
    """
    thickness, area, conductivity, delta_t = inputs
    numbers = results["results"]
    sys_msg = (
        "You explain engineering to a smart 5-year-old. "
        "Use food-based analogies to support the explanation. "
        "Always return CONCISE one-sentence answers."
    )
    usr_msg = (
        f"A wall of thickness {thickness:.3f} m, area {area:.2f} m², "
        f"and thermal conductivity {conductivity:.2f} W/m·K has a temperature difference of {delta_t:.1f} °C.\n"
        f"The heat transfer rate is {numbers['q_W']:.2f} W, "
        f"the heat flux is {numbers['q_flux']:.2f} W/m², "
        f"and the thermal resistance is {numbers['R_KperW']:.5f} K/W.\n"
        "Explain these results in ONE friendly analogy-based sentence for a non-expert."
    )
    prompt = _format_chat(sys_msg, usr_msg)
    return _llm_generate(prompt, max_tokens=128)
# --- Runner ---
def run_once(L_m, A_m2, k_WmK, dT_C):
    """
    Gradio click handler: compute conduction results for one set of inputs.

    Args:
        L_m, A_m2, k_WmK, dT_C: values from the UI Number components; coerced
            to float here so downstream code can rely on numerics.

    Returns:
        (DataFrame with the three rounded results, one-line narrative string).
    """
    # Coerce once up front: the previous version passed the RAW values to
    # llm_explain, whose float format specs (e.g. {:.3f}) would crash on a
    # string input while conduction_calc got converted copies.
    L_m, A_m2, k_WmK, dT_C = float(L_m), float(A_m2), float(k_WmK), float(dT_C)
    inputs = [L_m, A_m2, k_WmK, dT_C]
    d = conduction_calc(L_m=L_m, A_m2=A_m2, k_WmK=k_WmK, dT_C=dT_C)
    df = pandas.DataFrame([{
        "q [W]": round(d["results"]["q_W"], 3),
        "q'' [W/m²]": round(d["results"]["q_flux"], 3),
        "R_th [K/W]": round(d["results"]["R_KperW"], 5),
    }])
    # Keep only the first generated line so the UI shows exactly one sentence.
    narrative = llm_explain(d, inputs).split("\n")[0]
    return df, narrative
# --- UI ---
# Build the Gradio UI: four numeric inputs, a compute button, a results table,
# a markdown explanation area, and three cached-off representative examples.
with gradio.Blocks() as demo:
    gradio.Markdown("# Heat Conduction Calculator")
    gradio.Markdown("This app computes steady heat conduction through a flat wall and explains the results in simple terms.")
    with gradio.Row():
        L_m = gradio.Number(value=0.2, label="Wall thickness [m]")
        A_m2 = gradio.Number(value=1.0, label="Wall area [m²]")
        k_WmK = gradio.Number(value=0.8, label="Thermal conductivity [W/m·K]")
        dT_C = gradio.Number(value=30.0, label="Temperature difference [°C]")
    run_btn = gradio.Button("Compute")
    results_df = gradio.Dataframe(label="Numerical results", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")
    # Wire the button to the runner; outputs map 1:1 to (DataFrame, narrative).
    run_btn.click(fn=run_once, inputs=[L_m, A_m2, k_WmK, dT_C], outputs=[results_df, explain_md])
    gradio.Examples(
        examples=[
            [0.2, 1.0, 0.8, 30.0],    # Brick wall
            [0.05, 0.5, 0.04, 20.0],  # Styrofoam insulation
            [0.01, 0.2, 205.0, 50.0]  # Aluminum sheet
        ],
        inputs=[L_m, A_m2, k_WmK, dT_C],
        label="Representative cases",
        examples_per_page=3,
        # Caching disabled: each example would otherwise trigger an LLM call at startup.
        cache_examples=False,
    )
# Launch the app only when run as a script (not when imported, e.g. by tests).
if __name__ == "__main__":
    demo.launch(debug=True)