import gradio
import pandas
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Small instruct model used to turn the numeric results into a short explanation.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(MODEL_ID),
    tokenizer=tokenizer,
)
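
# Note: SmolLM2-135M-Instruct is a tiny (~135M-parameter) instruct model, so its
# wording can be rough; it is assumed here to be small enough to run on CPU.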


def expansion_calc(L0: float, alpha: float, Temp0: float, Temp1: float, Clearance: float) -> dict:
    """
    1D thermal expansion. Inputs: L0 = original rod length [m];
    alpha = coefficient of thermal expansion [10^-6/°C];
    Temp0 = initial temperature [°C]; Temp1 = final temperature [°C];
    Clearance = length the rod can expand before failure [mm].
    Returns the final rod length and whether the expansion is safely within clearance.
    """
    # Validate inputs; raising (instead of printing and returning None) keeps
    # callers such as run_once from crashing on a missing result.
    if Temp1 < Temp0:
        raise ValueError("Final temperature must not be lower than initial temperature.")
    if L0 < 0:
        raise ValueError("Initial length must be non-negative.")
    if alpha < 0:
        raise ValueError("Coefficient of thermal expansion must be non-negative.")
    if Clearance < 0:
        raise ValueError("Clearance must be non-negative.")

    # Linear expansion: dL = alpha * L0 * dT, with alpha supplied in 10^-6/°C
    # and the clearance supplied in mm.
    dT = Temp1 - Temp0
    alpha_per_degC = alpha * 1e-6
    clearance_m = Clearance * 1e-3
    dL = L0 * alpha_per_degC * dT
    L1 = L0 + dL
    expansion_ok = dL < clearance_m

    return dict(
        results={
            "Final_Length": L1,
            "Change_in_Length": dL,
        },
        verdict={
            "passes_expansion": bool(expansion_ok),
            "service_message": (
                "OK: expansion is within the clearance limit" if expansion_ok
                else "Not OK: expansion exceeds the clearance limit"
            ),
        },
    )
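
# Worked example (using the app's default inputs): a 2.0 m rod with
# alpha = 23 x 10^-6/°C heated from 150 °C to 200 °C expands by
# 2.0 * 23e-6 * 50 = 0.0023 m = 2.3 mm, which sits within the 3 mm clearance.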


def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render system and user messages with the tokenizer's chat template."""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
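
# With tokenize=False, apply_chat_template returns a plain prompt string, and
# add_generation_prompt=True appends the assistant-turn marker so generation
# starts at the model's reply.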


def _llm_generate(prompt: str, max_tokens: int) -> str:
    out = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.5,
        return_full_text=False,  # return only the completion, not the prompt
    )
    return out[0]["generated_text"]
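
# Quick sanity check (illustrative only; uncomment to try):
# print(_llm_generate(_format_chat("Be terse.", "Say hello."), max_tokens=16))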


def llm_explain(results: dict, inputs: list) -> str:
    L0, alpha, Temp0, Temp1, Clearance = inputs
    r = results["results"]
    v = results["verdict"]

    system_prompt = (
        "You explain engineering to a smart 5-year-old. "
        "You always return CONCISE responses, only one sentence."
    )

    user_prompt = (
        f"The one-dimensional rod is initially {L0:g} m long.\n"
        f"The material has a coefficient of thermal expansion of {alpha:g} x 10^-6/°C.\n"
        f"The initial temperature of the rod is {Temp0:g} degrees Celsius, and the final temperature of the rod is {Temp1:g} degrees Celsius.\n"
        f"The allowed clearance of the rod is {Clearance:g} mm.\n"
        f"The final length of the rod is {r['Final_Length']:.4f} m, and the total expansion is {r['Change_in_Length'] * 1e3:.2f} mm.\n"
        f"The verdict is: {v['service_message']}. "
        "Explain this verdict in ONE friendly sentence for a non-expert."
    )

    formatted = _format_chat(system_prompt, user_prompt)
    return _llm_generate(formatted, max_tokens=128)


def run_once(L0, alpha, Temp0, Temp1, Clearance):
    inputs = [L0, alpha, Temp0, Temp1, Clearance]
    d = expansion_calc(
        L0=float(L0),
        alpha=float(alpha),
        Temp0=float(Temp0),
        Temp1=float(Temp1),
        Clearance=float(Clearance),
    )

    df = pandas.DataFrame([{
        "Final Length [m]": round(d["results"]["Final_Length"], 4),
        "Total Expansion [mm]": round(d["results"]["Change_in_Length"] * 1e3, 2),
        "Thermal Expansion Verdict": d["verdict"]["service_message"],
    }])

    # Keep only the first line of the reply so the UI shows a single sentence.
    narrative = llm_explain(d, inputs).split("\n")[0]
    return df, narrative
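
# Example: run_once(2.0, 23.0, 150.0, 200.0, 3.0) yields a one-row table
# (final length ~2.0023 m, expansion ~2.30 mm, verdict OK) and a one-sentence
# narrative from the model.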


with gradio.Blocks() as demo:
    gradio.Markdown(
        "# Run and Explain 1D Thermal Expansion Calcs"
    )
    gradio.Markdown(
        "This app runs simple calculations for one-dimensional thermal expansion and "
        "returns a natural-language description of the results. "
        "It assumes the rod is homogeneous and isotropic, with a constant coefficient "
        "of thermal expansion."
    )

    with gradio.Row():
        L0 = gradio.Number(value=2.0, label="Initial Rod Length [m]")
        alpha = gradio.Number(value=23.0, label="Coefficient of Thermal Expansion α [10^-6/°C]")

    with gradio.Row():
        Temp0 = gradio.Number(value=150.0, label="Initial Temperature [°C]")
        Temp1 = gradio.Number(value=200.0, label="Final Temperature [°C]")
        Clearance = gradio.Number(value=3.0, label="Clearance [mm]")

    run_btn = gradio.Button("Compute")

    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")

    run_btn.click(fn=run_once, inputs=[L0, alpha, Temp0, Temp1, Clearance], outputs=[results_df, explain_md])

    gradio.Examples(
        examples=[
            [2.0, 23.0, 150.0, 200.0, 3.0],
            [4.5, 10.8, 320.0, 450.0, 5.5],
            [3.0, 8.0, 40.0, 50.0, 0.5],
        ],
        inputs=[L0, alpha, Temp0, Temp1, Clearance],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,
    )


if __name__ == "__main__":
    demo.launch(debug=True)