File size: 7,435 Bytes
6b36e09 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 |
# ================================================================
# HW3: Bolt Torque Calculator with Gradio
#
# Author: Sebastian Andreu
# Course: 24679 - Designing and Deploying AI/ML Systems
# Dataset/Inputs: User-selected bolt geometry, preload, material, lubrication
# Task: Deterministic first-principles calculation of bolt torque with natural language explanation,
# deployed via Hugging Face Space
#
# Acknowledgments:
# - Torque calculation formulas based on standard ISO metric bolt theory
# - Deployment scaffold and documentation supported with AI assistance (ChatGPT, OpenAI)
# - Reference: Class-provided notebook "LLMs for explanability.ipynb"
# ================================================================
import math # For trigonometry
import gradio # For building the interface
import pandas # For working with tables
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline # For LLMs
# Instantiate the model that we'll be calling. This is a tiny one!
# NOTE(review): both from_pretrained calls download weights/config from the
# Hugging Face Hub at import time -- the Space needs network access on startup.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
# Tokenizer also supplies the chat template consumed by apply_chat_template below.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# One shared text-generation pipeline, reused for every user request.
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
    ),
    tokenizer=tokenizer
)
# Coulomb friction coefficients for the bolt calculation.
# Access pattern: friction_table[material][lubrication] -> coefficient.
# Stored compactly as one tuple of coefficients per material, expanded
# into nested dicts keyed by lubrication condition.
_LUBRICATION_CONDITIONS = ("Dry", "Oil", "Grease", "Zinc/Anti-seize")
_MATERIAL_COEFFICIENTS = {
    "Steel": (0.15, 0.10, 0.08, 0.05),
    "Aluminum": (0.20, 0.15, 0.12, 0.10),
    "Brass": (0.18, 0.13, 0.10, 0.08),
    "Titanium": (0.25, 0.20, 0.18, 0.15),
}
friction_table = {
    material: dict(zip(_LUBRICATION_CONDITIONS, coefficients))
    for material, coefficients in _MATERIAL_COEFFICIENTS.items()
}
# Function to calculate bolt torque
def bolt_calc(
d_mm: float,
F_N: float,
p_mm: float,
mu_t: float,
mu_n: float,
d_head_mm: float = None
) -> dict:
"""
Calculates required torque for a standard ISO metric bolt.
Returns torque [Nm] and a message.
"""
# Thread half-angle (ISO metric)
beta_rad = math.radians(30)
# Lead angle
phi_rad = math.atan(p_mm / (math.pi * d_mm))
# If head diameter not provided, assume 1.5 * bolt diameter
if d_head_mm is None:
d_head_mm = 1.5 * d_mm
# Friction angle
rho_rad = math.atan(mu_t / math.cos(beta_rad))
# Torque calculation
T_Nm = F_N * (d_mm / 2 * math.tan(phi_rad + rho_rad) + mu_n * d_head_mm / 2)
return dict(
results={
"torque_Nm": T_Nm,
},
verdict={
"strength_message": "Torque calculated successfully",
},
)
# Helper for chat formatting
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """
    Render a system + user message pair into the model's chat format.

    Delegates to the tokenizer's chat template and appends the generation
    prompt so the model continues as the assistant.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # NOTE: a previous version fetched tokenizer.chat_template into an
    # unused local; apply_chat_template consults the template itself, so
    # that lookup was dead code and has been removed.
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
# LLM text generation
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Sample one completion from the shared pipeline and return its text."""
    generation_kwargs = {
        "max_new_tokens": max_tokens,
        "do_sample": True,
        "temperature": 0.5,
        "return_full_text": False,
    }
    completions = pipe(prompt, **generation_kwargs)
    first_completion = completions[0]
    return first_completion["generated_text"]
# Generate natural language explanation
def llm_explain(results: dict, inputs: list) -> str:
    """
    Turn the numeric torque result into a one-sentence lay explanation.

    Args:
        results: dict returned by bolt_calc; reads results["results"]["torque_Nm"].
        inputs: [d_mm, F_N, p_mm, mu_t, mu_n, d_head_mm] as floats.

    Returns:
        Text generated by the LLM.
    """
    d_mm, F_N, p_mm, mu_t, mu_n, d_head_mm = inputs
    r = results["results"]
    # NOTE: a previous version also bound results["verdict"] to an unused
    # local; removed as dead code.
    system_prompt = (
        "You explain engineering to a smart 5-year-old. "
        "Use simple analogies like screwing a jar lid or tightening a bike seat. "
        "You always return CONCISE responses, only one sentence."
    )
    user_prompt = (
        f"For a bolt of diameter {d_mm:g} mm with a target preload of {F_N:g} N, "
        f"thread pitch {p_mm:g} mm, thread friction {mu_t:g}, "
        f"nut/under-head friction {mu_n:g}, and head diameter {d_head_mm:g} mm:\n"
        f"The required torque is {r['torque_Nm']:.2f} Nm; "
        "Explain this torque in ONE friendly sentence for a non-expert."
    )
    formatted = _format_chat(system_prompt, user_prompt)
    return _llm_generate(formatted, max_tokens=128)
# Run everything together
def run_once(d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm):
    """Resolve friction coefficients, compute the torque, and return the results table plus an LLM narrative."""
    # Map dropdown selections to friction coefficients
    mu_t = friction_table[thread_material][thread_lubrication]
    mu_n = friction_table[head_material][head_lubrication]
    diameter = float(d_mm)
    preload = float(F_N)
    pitch = float(p_mm)
    # Optional head diameter: fall back to the 1.5*d rule when left blank.
    head_diameter = diameter * 1.5 if d_head_mm is None else float(d_head_mm)
    outcome = bolt_calc(
        d_mm=diameter,
        F_N=preload,
        p_mm=pitch,
        mu_t=mu_t,
        mu_n=mu_n,
        d_head_mm=head_diameter
    )
    # Deterministic numeric output, rounded for display.
    table = pandas.DataFrame([{
        "Torque [Nm]": round(outcome["results"]["torque_Nm"], 3),
        "Verdict": outcome["verdict"]["strength_message"],
    }])
    resolved_inputs = [diameter, preload, pitch, mu_t, mu_n, head_diameter]
    # Keep only the first line of the LLM output to enforce conciseness.
    narrative = llm_explain(outcome, resolved_inputs).split("\n")[0]
    return table, narrative
# Build the Gradio interface.
# Material choices are derived from the friction table so the UI and the
# coefficient lookup cannot drift out of sync; a stray trailing "|" token
# after demo.launch(...) was also removed.
_MATERIAL_CHOICES = list(friction_table)
_LUBRICATION_CHOICES = ["Dry", "Oil", "Grease", "Zinc/Anti-seize"]
with gradio.Blocks() as demo:
    gradio.Markdown(
        "# Bolt Torque Calculator"
    )
    gradio.Markdown(
        "Compute the torque needed to tighten a bolt to a target preload with material and lubrication selection."
    )
    # Bolt geometry and load
    with gradio.Row():
        d_mm = gradio.Number(value=10.0, label="Bolt diameter [mm]")
        F_N = gradio.Number(value=5000.0, label="Target preload [N]")
        p_mm = gradio.Number(value=1.5, label="Thread pitch [mm]")
    # Thread and under-head friction selection
    with gradio.Row():
        thread_material = gradio.Dropdown(
            choices=_MATERIAL_CHOICES,
            value="Steel",
            label="Bolt material"
        )
        thread_lubrication = gradio.Dropdown(
            choices=_LUBRICATION_CHOICES,
            value="Dry",
            label="Thread lubrication"
        )
        head_material = gradio.Dropdown(
            choices=_MATERIAL_CHOICES,
            value="Steel",
            label="Nut/Head material"
        )
        head_lubrication = gradio.Dropdown(
            choices=_LUBRICATION_CHOICES,
            value="Dry",
            label="Nut/Head lubrication"
        )
    # Leave blank to use the 1.5 * diameter default inside the calculation.
    d_head_mm = gradio.Number(value=None, label="Head diameter [mm] (optional)")
    run_btn = gradio.Button("Compute")
    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")
    # Wire the button: deterministic table plus LLM narrative.
    run_btn.click(
        fn=run_once,
        inputs=[d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm],
        outputs=[results_df, explain_md]
    )
    gradio.Examples(
        examples=[
            [10.0, 5000.0, 1.5, "Steel", "Dry", "Steel", "Dry", None],
            [12.0, 8000.0, 1.75, "Aluminum", "Oil", "Steel", "Grease", None],
            [8.0, 2000.0, 1.25, "Titanium", "Grease", "Aluminum", "Oil", None],
        ],
        inputs=[d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,
    )

if __name__ == "__main__":
    # debug=True surfaces tracebacks in the Space logs.
    demo.launch(debug=True)