|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import math |
|
|
import gradio |
|
|
import pandas |
|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
|
|
|
|
|
|
# Small instruction-tuned chat model (135M params) — downloaded from the
# Hugging Face Hub on first run, then cached locally.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"


# Tokenizer is module-level because it is shared by the pipeline below and
# by _format_chat (chat-template rendering).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)


# Text-generation pipeline built once at import time; loading here (rather
# than per-request) keeps UI callbacks fast after startup.
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
    ),
    tokenizer=tokenizer
)
|
|
|
|
|
|
|
|
|
|
|
# Friction coefficients mu [-] keyed by contact material, then lubrication
# state. Used for both the thread interface (mu_t) and the under-head / nut
# face interface (mu_n) in run_once. Dry contact gives the highest friction,
# zinc/anti-seize the lowest.
# NOTE(review): values look like typical handbook figures — confirm source
# before relying on them for safety-critical joints.
friction_table = {
    "Steel": {"Dry": 0.15, "Oil": 0.10, "Grease": 0.08, "Zinc/Anti-seize": 0.05},
    "Aluminum": {"Dry": 0.20, "Oil": 0.15, "Grease": 0.12, "Zinc/Anti-seize": 0.10},
    "Brass": {"Dry": 0.18, "Oil": 0.13, "Grease": 0.10, "Zinc/Anti-seize": 0.08},
    "Titanium": {"Dry": 0.25, "Oil": 0.20, "Grease": 0.18, "Zinc/Anti-seize": 0.15},
}
|
|
|
|
|
|
|
|
def bolt_calc( |
|
|
d_mm: float, |
|
|
F_N: float, |
|
|
p_mm: float, |
|
|
mu_t: float, |
|
|
mu_n: float, |
|
|
d_head_mm: float = None |
|
|
) -> dict: |
|
|
""" |
|
|
Calculates required torque for a standard ISO metric bolt. |
|
|
Returns torque [Nm] and a message. |
|
|
""" |
|
|
|
|
|
|
|
|
beta_rad = math.radians(30) |
|
|
|
|
|
|
|
|
phi_rad = math.atan(p_mm / (math.pi * d_mm)) |
|
|
|
|
|
|
|
|
if d_head_mm is None: |
|
|
d_head_mm = 1.5 * d_mm |
|
|
|
|
|
|
|
|
rho_rad = math.atan(mu_t / math.cos(beta_rad)) |
|
|
|
|
|
|
|
|
T_Nm = F_N * (d_mm / 2 * math.tan(phi_rad + rho_rad) + mu_n * d_head_mm / 2) |
|
|
|
|
|
return dict( |
|
|
results={ |
|
|
"torque_Nm": T_Nm, |
|
|
}, |
|
|
verdict={ |
|
|
"strength_message": "Torque calculated successfully", |
|
|
}, |
|
|
) |
|
|
|
|
|
|
|
|
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render a system+user exchange into the model's chat prompt string.

    Args:
        system_prompt: Instructions that set the assistant's behavior.
        user_prompt: The user's actual request.

    Returns:
        A single prompt string produced by the tokenizer's chat template,
        with the generation prompt appended so the model continues as the
        assistant.
    """
    # NOTE: removed an unused `template = getattr(tokenizer, "chat_template", None)`
    # local — it was never read.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # tokenize=False returns the formatted string rather than token ids;
    # add_generation_prompt=True opens the assistant turn for generation.
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
|
|
|
|
|
|
|
|
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Run the text-generation pipeline on an already-formatted prompt.

    Returns only the newly generated text — the prompt itself is excluded
    via return_full_text=False.
    """
    completions = pipe(
        prompt,
        return_full_text=False,
        do_sample=True,
        temperature=0.5,
        max_new_tokens=max_tokens,
    )
    first_completion = completions[0]
    return first_completion["generated_text"]
|
|
|
|
|
|
|
|
def llm_explain(results: dict, inputs: list) -> str:
    """Ask the LLM for a one-sentence lay explanation of the computed torque.

    Args:
        results: Output dict from bolt_calc; reads results["results"]["torque_Nm"].
        inputs: [d_mm, F_N, p_mm, mu_t, mu_n, d_head_mm], matching bolt_calc's
            arguments.

    Returns:
        The raw LLM completion (callers may trim trailing lines).
    """
    # NOTE: removed an unused `v = results["verdict"]` local — it was never read.
    d_mm, F_N, p_mm, mu_t, mu_n, d_head_mm = inputs
    r = results["results"]

    # Persona: friendly, analogy-based, strictly one sentence.
    system_prompt = (
        "You explain engineering to a smart 5-year-old. "
        "Use simple analogies like screwing a jar lid or tightening a bike seat. "
        "You always return CONCISE responses, only one sentence."
    )

    user_prompt = (
        f"For a bolt of diameter {d_mm:g} mm with a target preload of {F_N:g} N, "
        f"thread pitch {p_mm:g} mm, thread friction {mu_t:g}, "
        f"nut/under-head friction {mu_n:g}, and head diameter {d_head_mm:g} mm:\n"
        f"The required torque is {r['torque_Nm']:.2f} Nm; "
        "Explain this torque in ONE friendly sentence for a non-expert."
    )

    formatted = _format_chat(system_prompt, user_prompt)
    return _llm_generate(formatted, max_tokens=128)
|
|
|
|
|
|
|
|
def run_once(d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm):
    """Gradio callback: compute the torque, tabulate it, and fetch an LLM explanation.

    Returns a (DataFrame, str) pair for the results table and the narrative
    markdown output.
    """
    # Look up friction coefficients for both contact interfaces.
    mu_t = friction_table[thread_material][thread_lubrication]
    mu_n = friction_table[head_material][head_lubrication]

    # The optional Number input arrives as None when left blank; apply the
    # same 1.5*d rule of thumb used by bolt_calc.
    if d_head_mm is None:
        d_head_mm = float(d_mm) * 1.5

    diameter = float(d_mm)
    preload = float(F_N)
    pitch = float(p_mm)
    head = float(d_head_mm)

    inputs = [diameter, preload, pitch, mu_t, mu_n, head]
    outcome = bolt_calc(
        d_mm=diameter,
        F_N=preload,
        p_mm=pitch,
        mu_t=mu_t,
        mu_n=mu_n,
        d_head_mm=head
    )

    # One-row table with the deterministic numbers.
    df = pandas.DataFrame([{
        "Torque [Nm]": round(outcome["results"]["torque_Nm"], 3),
        "Verdict": outcome["verdict"]["strength_message"],
    }])

    # Keep only the first line of the LLM output — the prompt asks for a
    # single sentence.
    narrative = llm_explain(outcome, inputs).split("\n")[0]
    return df, narrative
|
|
|
|
|
|
|
|
# UI definition. Statement order inside Blocks is the render order, so the
# layout below is behavior, not just style.
with gradio.Blocks() as demo:

    # --- Header ---
    gradio.Markdown(
        "# Bolt Torque Calculator"
    )
    gradio.Markdown(
        "Compute the torque needed to tighten a bolt to a target preload with material and lubrication selection."
    )

    # --- Numeric inputs: geometry and target preload ---
    with gradio.Row():
        d_mm = gradio.Number(value=10.0, label="Bolt diameter [mm]")
        F_N = gradio.Number(value=5000.0, label="Target preload [N]")
        p_mm = gradio.Number(value=1.5, label="Thread pitch [mm]")

    # --- Material / lubrication selectors ---
    # Dropdown choices must match the keys of friction_table exactly;
    # run_once indexes that table with these strings.
    with gradio.Row():
        thread_material = gradio.Dropdown(
            choices=["Steel", "Aluminum", "Brass", "Titanium"],
            value="Steel",
            label="Bolt material"
        )
        thread_lubrication = gradio.Dropdown(
            choices=["Dry", "Oil", "Grease", "Zinc/Anti-seize"],
            value="Dry",
            label="Thread lubrication"
        )
        head_material = gradio.Dropdown(
            choices=["Steel", "Aluminum", "Brass", "Titanium"],
            value="Steel",
            label="Nut/Head material"
        )
        head_lubrication = gradio.Dropdown(
            choices=["Dry", "Oil", "Grease", "Zinc/Anti-seize"],
            value="Dry",
            label="Nut/Head lubrication"
        )
        # Left blank (None) -> run_once falls back to 1.5 * bolt diameter.
        d_head_mm = gradio.Number(value=None, label="Head diameter [mm] (optional)")

    run_btn = gradio.Button("Compute")

    # --- Outputs: deterministic table + LLM narrative ---
    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")

    # Wire the button to the callback; input order must match run_once's
    # parameter order.
    run_btn.click(
        fn=run_once,
        inputs=[d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm],
        outputs=[results_df, explain_md]
    )

    # Clickable example rows that pre-fill the inputs (not cached, so the
    # model is not invoked at startup).
    gradio.Examples(
        examples=[
            [10.0, 5000.0, 1.5, "Steel", "Dry", "Steel", "Dry", None],
            [12.0, 8000.0, 1.75, "Aluminum", "Oil", "Steel", "Grease", None],
            [8.0, 2000.0, 1.25, "Titanium", "Grease", "Aluminum", "Oil", None],
        ],
        inputs=[d_mm, F_N, p_mm, thread_material, thread_lubrication, head_material, head_lubrication, d_head_mm],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,
    )
|
|
|
|
|
# Script entry point: start the Gradio server when run directly (not when
# imported). debug=True surfaces tracebacks during development.
if __name__ == "__main__":
    demo.launch(debug=True)