# HW3Part3 / app.py
# Author: emkessle — commit 3ac5798 (verified): "Update app.py"
import math # For access to infinity
import gradio # For building the interface
import pandas # For working with tables
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline # For LLMS
# Instantiate the model that we'll be calling. This is a tiny one!
# SmolLM2-135M-Instruct is a small instruction-tuned model, so the app can
# run on modest (CPU-only) hardware.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
# The tokenizer is loaded separately because it is also used directly for
# chat templating in _format_chat below.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Text-generation pipeline wrapping the model + tokenizer pair; used by
# _llm_generate to produce explanations.
pipe = pipeline(
task="text-generation",
model=AutoModelForCausalLM.from_pretrained(
MODEL_ID,
),
tokenizer=tokenizer
)
# Create a function to do the calculations
def expansion_calc(L0: float, alpha: float, Temp0: float, Temp1: float, Clearance: float) -> dict:
    """
    1D thermal expansion of a homogeneous rod: dL = L0 * alpha * dT.

    Parameters
    ----------
    L0 : original length of the rod [m].
    alpha : coefficient of thermal expansion [10^-6 / degC].
    Temp0 : initial temperature [degC].
    Temp1 : final temperature [degC].
    Clearance : length the rod may expand before failure [mm].

    Returns
    -------
    dict with two sub-dicts:
      "results": Final_Length and Change_in_Length (both in m).
      "verdict": passes_expansion flag plus a human-readable message.

    Raises
    ------
    ValueError
        If validation fails (Temp1 < Temp0, or a negative length,
        coefficient, or clearance).
    """
    # Input validation. The original printed an error and returned None,
    # which made run_once crash with a TypeError when it subscripted the
    # result; raising instead surfaces the message to the Gradio UI.
    if Temp1 < Temp0:
        raise ValueError("Error: Final temperature is lower than initial temperature.")
    if L0 < 0:
        raise ValueError("Error: Initial Length must be greater than 0")
    if alpha < 0:
        raise ValueError("Error: Coefficient of thermal expansion must be greater than 0")
    if Clearance < 0:
        raise ValueError("Error: Clearance must be greater than 0")
    # Calculations — convert inputs to SI units first.
    dT = Temp1 - Temp0
    convert_alpha = 1e-6 * alpha  # [10^-6/degC] -> [1/degC]
    convert_Clearance = Clearance * 1e-3  # [mm] -> [m]
    dL = L0 * convert_alpha * dT
    L1 = L0 + dL
    # Strictly-less-than: expansion exactly equal to the clearance fails.
    expansion_ok = dL < convert_Clearance
    return dict(
        results={
            "Final_Length": L1,
            "Change_in_Length": dL,
        },
        verdict={
            "passes_expansion": bool(expansion_ok),
            "service_message": (
                "OK: deflection is less than limit" if expansion_ok
                else "Not OK: deflection is greater than limit"
            ),
        },
    )
# This helper function applies a chat format to help the LLM understand what
# is going on
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """
    Render a system + user message pair through the tokenizer's chat
    template so the instruct model sees the prompt format it was tuned on.

    Returns the templated prompt string with the generation prompt appended.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # The original fetched tokenizer.chat_template into an unused local;
    # apply_chat_template already consults that attribute internally.
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
# This functoin uses hte LLM to generate a response.
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Run the generation pipeline on *prompt* and return only the newly
    generated text (return_full_text=False strips the prompt itself)."""
    generations = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.5,
        return_full_text=False,
    )
    first_candidate = generations[0]
    return first_candidate["generated_text"]
# This function generates an explanation of the results
def llm_explain(results: dict, inputs: list) -> str:
    """Ask the LLM for a one-sentence, layperson-friendly explanation of the
    thermal-expansion results produced by expansion_calc.

    results -- the dict returned by expansion_calc ("results" / "verdict").
    inputs  -- the raw [L0, alpha, Temp0, Temp1, Clearance] values.
    """
    length0, coeff, temp_start, temp_end, clearance_mm = inputs
    numbers = results["results"]
    verdict = results["verdict"]
    system_prompt = (
        "You explain engineering to a smart 5-year-old. "
        "You always return CONCISE responses, only one sentence."
    )
    # Spell out every input and output so the tiny model has full context.
    user_prompt = (
        f"The one-dimensional beam is initially {length0:g} m long.\n"
        f"The material has a coefficient of thermal expansion of {coeff:g} *10^-6/℃.\n"
        f"The initial temperature of the beam is {temp_start:g} degrees celsius, and the final temperature of the beam is {temp_end:g} degrees celsius.\n"
        f"The allowed clearance of the beam is {clearance_mm:g}.\n "
        f"The final length of the beam is {numbers['Final_Length']:.2f} m, and the total expansion is {numbers['Change_in_Length']:.2f} m.\n"
        f"This means that the thermal expansion is {verdict['service_message']}; "
        "Explain the verdict of whether or not the thermal expansion is OK in ONE friendly sentence for a non-expert"
    )
    return _llm_generate(_format_chat(system_prompt, user_prompt), max_tokens=128)
# This function ties everythign together (evaluation, LLM explanaation, output)
# And will be out main entry point for teh GUI
def run_once(L0, alpha, Temp0, Temp1, Clearance):
    """GUI entry point: run the deterministic calculation, ask the LLM for a
    one-line narrative, and return (DataFrame, narrative) for display."""
    raw_inputs = [L0, alpha, Temp0, Temp1, Clearance]
    calc = expansion_calc(
        L0=float(L0),
        alpha=float(alpha),
        Temp0=float(Temp0),
        Temp1=float(Temp1),
        Clearance=float(Clearance)
    )
    numbers = calc["results"]
    table = pandas.DataFrame([{
        "Final Length [m]": round(numbers["Final_Length"], 3),
        "Total Expansion [m]": round(numbers["Change_in_Length"], 3),
        "Thermal Expansion Verdict": calc["verdict"]["service_message"],
    }])
    # Keep only the first line of the LLM output to enforce brevity.
    narrative = llm_explain(calc, raw_inputs).split("\n")[0]
    return table, narrative
# Last but not least, here's the UI!
with gradio.Blocks() as demo:
    # Title and introduction.
    gradio.Markdown(
        "# Run and Explain 1D Thermal Expansion Calcs"
    )
    # BUG FIX: the two description literals were concatenated with no
    # separator, rendering as "...of the resultsThis assumes...".
    gradio.Markdown(
        "This app runs some simple calculations for 1 dimensional thermal expansion and returns a natural language description of the results. "
        "This assumes the rod is homogeneous and isotropic, and a constant coefficient of thermal expansion."
    )
    # This row contains the rod's physical parameters.
    # BUG FIX: the alpha label contained mojibake ("Ξ±") for the Greek letter.
    with gradio.Row():
        L0 = gradio.Number(value=2.0, label="Initial Rod length [m]")
        alpha = gradio.Number(value=23.0, label="Coefficient of Thermal Expansion α [10^-6/℃]")
    # This row contains the temperatures and the allowed clearance.
    with gradio.Row():
        Temp0 = gradio.Number(value=150.0, label="Initial Temperature [℃]")
        Temp1 = gradio.Number(value=200.0, label="Final Temperature [℃]")
        Clearance = gradio.Number(value=3.0, label="Clearance [mm]")
    # Button that triggers the computation.
    run_btn = gradio.Button("Compute")
    # Outputs: a dataframe for the deterministic numbers and a markdown box
    # for the LLM's narrative explanation.
    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")
    # Run the calculations when the button is clicked.
    run_btn.click(fn=run_once, inputs=[L0, alpha, Temp0, Temp1, Clearance], outputs=[results_df, explain_md])
    # A few representative example cases (not cached: output involves an LLM).
    gradio.Examples(
        examples=[
            [2.0, 23.0, 150.0, 200.0, 3.0],
            [4.5, 10.8, 320.0, 450.0, 5.5],
            [3.0, 8.0, 40.0, 50.0, 0.5],
        ],
        inputs=[L0, alpha, Temp0, Temp1, Clearance],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,
    )

# debug=True keeps tracebacks visible while developing the Space.
if __name__ == "__main__":
    demo.launch(debug=True)