# bernoulliapp / app.py
# Author: ecopus — "Update app.py" (commit 43ec587, verified)
# app.py
import math
from typing import Dict, Any
import gradio as gr
from transformers import AutoTokenizer, pipeline
def darcy_weisbach_head_loss(f: float, L: float, D: float, V: float) -> float:
    """Major (frictional) head loss h_f = f * (L/D) * V^2 / (2g), in meters.

    f: Darcy friction factor; L: pipe length [m]; D: diameter [m]; V: velocity [m/s].
    """
    standard_gravity = 9.80665  # m/s^2
    velocity_head = V * V / (2.0 * standard_gravity)
    return f * (L / D) * velocity_head
def reynolds_number(V: float, D: float, nu: float) -> float:
    """Reynolds number Re = V*D/nu (dimensionless), with nu the kinematic viscosity [m^2/s]."""
    inertial_scale = V * D
    return inertial_scale / nu
def default_friction_factor(Re: float, roughness: float, D: float) -> float:
    """Estimate the Darcy friction factor.

    Returns NaN for Re == 0 (no flow), the laminar value 64/Re below the
    transition threshold, and the Haaland (1983) explicit approximation to
    the Colebrook-White equation otherwise.
    """
    laminar_limit = 2300
    if Re == 0:
        return float('nan')
    if Re < laminar_limit:
        return 64.0 / Re
    # Haaland correlation: 1/sqrt(f) = -1.8 log10[(eps/(3.7 D))^1.11 + 6.9/Re]
    inv_sqrt_f = -1.8 * math.log10((roughness / (3.7 * D)) ** 1.11 + 6.9 / Re)
    return 1.0 / (inv_sqrt_f * inv_sqrt_f)
def bernoulli_pipe_flow(P1, P2, V1, V2, z1=0.0, z2=0.0, rho=1000.0, mu=0.001,
                        roughness=1.5e-6, D=0.1, L=1.0, friction_factor=None,
                        use_darcy=True, g=9.80665) -> Dict[str, Any]:
    """Solve the extended Bernoulli equation between two pipe sections.

    Computes the pump head h_pump needed to satisfy
        P1/(rho g) + V1^2/(2g) + z1 + h_pump = P2/(rho g) + V2^2/(2g) + z2 + h_f
    where h_f is the Darcy-Weisbach frictional loss (optional).

    Parameters: pressures P1/P2 [Pa], velocities V1/V2 [m/s], elevations
    z1/z2 [m], density rho [kg/m^3], dynamic viscosity mu [Pa.s], absolute
    roughness [m], diameter D [m], length L [m]. friction_factor overrides
    the correlation-based estimate when given; use_darcy toggles h_f.

    Returns a dict with 'ok', and on success 'inputs', 'intermediate',
    'computed', 'summary', 'explanations'; on failure 'errors'.
    """
    errors = []
    if rho <= 0: errors.append('rho must be > 0')
    if D <= 0: errors.append('D must be > 0')
    if L < 0: errors.append('L must be >= 0')
    # mu == 0 would make nu = 0 and divide by zero in the Reynolds number;
    # negative mu is unphysical — reject both up front instead of crashing.
    if mu <= 0: errors.append('mu must be > 0')
    # roughness is included here (it was previously unvalidated).
    for val in [P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness]:
        if not (isinstance(val, (int, float)) and math.isfinite(val)):
            errors.append('Inputs must be finite numbers')
            break
    if errors:
        return {'ok': False, 'errors': errors}
    nu = mu / rho  # kinematic viscosity [m^2/s]
    Re1 = reynolds_number(V1, D, nu)
    Re2 = reynolds_number(V2, D, nu)
    # Use the worse (higher-Re) section when estimating f from the correlation.
    f = friction_factor if friction_factor is not None else default_friction_factor(max(Re1, Re2), roughness, D)
    # Head loss is evaluated at the mean section velocity.
    h_f = darcy_weisbach_head_loss(f, L, D, (V1 + V2) / 2.0) if use_darcy else 0.0
    left = P1 / (rho * g) + V1**2 / (2 * g) + z1
    right = P2 / (rho * g) + V2**2 / (2 * g) + z2
    h_pump = left - right + h_f
    results = {
        'ok': True,
        'inputs': {'P1': P1, 'P2': P2, 'V1': V1, 'V2': V2, 'z1': z1, 'z2': z2,
                   'rho': rho, 'mu': mu, 'D': D, 'L': L, 'roughness': roughness},
        'intermediate': {'g': g, 'nu': nu, 'Re1': Re1, 'Re2': Re2,
                         'friction_factor': f, 'head_loss': h_f},
        'computed': {'lhs': left, 'rhs': right, 'pump_head': h_pump},
        'summary': f"Pump head required = {h_pump:.3f} m, f = {f:.4f}, h_f = {h_f:.3f} m.",
        'explanations': [f"ν = {nu:.3e}, Re1={Re1:.1f}, Re2={Re2:.1f}",
                         f"f = {f:.4f}", f"h_f = {h_f:.3f} m",
                         f"Left={left:.3f}, Right={right:.3f}, h_pump={h_pump:.3f}"]
    }
    return results
def deterministic_explainer(record: dict) -> str:
    """Render a fixed-format, human-readable report from a solver result record.

    Failure records yield a single 'Errors: ...' line; success records get a
    header, the summary, then the inputs, intermediates, and step list.
    """
    if not record.get('ok', False):
        return 'Errors: ' + '; '.join(record.get('errors', ['Unknown error']))
    parts = ['--- EXPLANATION (deterministic) ---', str(record.get('summary', ''))]
    parts.append('\nInputs:')
    parts.extend(f" - {key}: {value}" for key, value in record['inputs'].items())
    parts.append('\nIntermediate:')
    parts.extend(f" - {key}: {value}" for key, value in record['intermediate'].items())
    parts.append('\nSteps:')
    parts.extend(' * ' + step for step in record.get('explanations', []))
    return '\n'.join(parts)
# Small instruction-tuned chat model used by the optional 'local_llm' explanation mode.
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
# Loaded eagerly at module import time, so the Space pays the download/load
# cost on startup rather than on the first user request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline("text-generation", model=MODEL_ID, tokenizer=tokenizer)
def _fmt_num(x, sig=4):
"""Safe, short formatting for numbers (returns string)."""
try:
if x is None:
return "N/A"
if isinstance(x, (int,)):
return str(x)
if isinstance(x, float):
# use general format with sig significant digits
return f"{x:.{sig}g}"
return str(x)
except Exception:
return str(x)
def _llm_generate(prompt: str, max_tokens: int = 300) -> str:
    """
    Run the local pipeline, then strip any echoed prompt and common instruction text.
    If the model echoes instructions, do one gentle retry with a simplified prompt.

    Returns the cleaned completion, or a bracketed failure message if both
    attempts produce nothing usable.
    """

    def _run(p: str) -> str:
        # Deterministic (no sampling) is usually safer for engineering text.
        try:
            out = pipe(
                p,
                max_new_tokens=max_tokens,
                do_sample=False,
                temperature=0.0,
                return_full_text=True,
            )
        except Exception:
            # Fallback: some models/pipelines reject return_full_text=True.
            out = pipe(
                p,
                max_new_tokens=max_tokens,
                do_sample=False,
                temperature=0.0,
                return_full_text=False,
            )
        # Handle both pipeline output variants; the original retry path did not
        # guard the list/dict shape and could raise on unusual outputs.
        text = ""
        if isinstance(out, list) and len(out) > 0:
            text = out[0].get("generated_text", "") or out[0].get("text", "") or ""
        text = text or ""
        # If the model returned prompt + output, strip the prompt if present.
        if text.startswith(p):
            text = text[len(p):]
        return text.strip()

    # Heuristics for output that merely repeats the instructions.
    low_quality_indicators = [
        "Use bullet points", "Be sure to include", "Do not", "Do NOT",
        "Now produce", "System:", "User:", "Instruction:"
    ]

    def _usable(t: str) -> bool:
        # Unified threshold (>= 10 chars); the original used >= 10 for the
        # first attempt but > 10 for the retry.
        return bool(t) and len(t) >= 10 and not any(ind in t for ind in low_quality_indicators)

    text = _run(prompt)
    if _usable(text):
        return text
    # Simple short retry prompt asking for only the final answer.
    simple_prompt = prompt + "\n\nNow produce ONLY the requested explanation below (no re-statement of the prompt or instructions):\n"
    retry_text = _run(simple_prompt)
    if _usable(retry_text):
        return retry_text
    # final fallback:
    return "[LLM failed to generate a usable explanation — try a different model or reduce the prompt size]"
def llm_explain(record: dict) -> str:
    """Summarize a successful solver record using the local LLM.

    Failure records return the joined error messages instead of invoking
    the model.
    """
    if not record.get("ok", False):
        return "Errors: " + "; ".join(record.get("errors", []))
    prompt = (
        "Summarize these Bernoulli pipe flow results clearly and factually, "
        "without inventing extra details.\n"
        "Mention the proximity of the pump and frictional head loss to each other and if net head gain is present. \n"
        f"Summary: {record.get('summary','')}\n"
        f"Intermediate: {record.get('intermediate',{})}\n"
        f"Computed: {record.get('computed',{})}\n"
        "Explanation:"
    )
    # Route through the hardened helper (prompt stripping, output-shape guards,
    # retry) instead of calling the pipeline directly and slicing by raw
    # prompt length, which breaks when the model does not echo the prompt.
    return _llm_generate(prompt, max_tokens=150)
def compute_and_explain(P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness, use_darcy, mode):
    """Gradio callback: run the solver, then render the explanation chosen by `mode`.

    Returns (record dict, explanation string).
    """
    record = bernoulli_pipe_flow(P1, P2, V1, V2, z1, z2, rho, mu, roughness, D, L, None, use_darcy)
    if not record.get('ok'):
        return record, 'Errors: ' + '; '.join(record.get('errors', []))
    # Dispatch table instead of an if/elif chain.
    explainers = {
        'deterministic': deterministic_explainer,
        'local_llm': llm_explain,
    }
    explainer = explainers.get(mode)
    explanation = explainer(record) if explainer is not None else "Unknown explanation mode."
    return record, explanation
# --- Gradio UI: input form on the left, JSON result + explanation on the right ---
with gr.Blocks() as demo:
    gr.Markdown("# Bernoulli Pipe Flow Calculator")
    gr.Markdown("This Space hosts a Bernoulli pipe flow calculator for calculating the pump head loss for an internal flow system through a pipe. An example of one such pipe system is shown below. To utilize this calculator, simply fill in the required system metrics and hit 'compute'.")
    gr.Image("bernoulli.png", label="Bernoulli Diagram", show_label=True)
    gr.Markdown("NOTE: Roughness (ε) in pump head loss calculations, measured in meters, represents the average height of internal surface irregularities (like ribs or grooves) in a pipe, which contributes to friction and thus head loss. This absolute roughness is used with the pipe's diameter (D) to calculate relative roughness (ε/D), a key factor, along with the Reynolds number, in determining the Darcy friction factor (f, an output of this calculator) needed for the Darcy-Weisbach equation to find major frictional head losses in a pipe system. ")
    gr.Markdown("\n To simplify calculations, this calculator also requires a value for 'mu', or the dynamic viscosity of the fluid (Pa*s)")
    with gr.Row():
        with gr.Column(scale=2):
            # Inputs mirror the bernoulli_pipe_flow() parameters (SI units).
            P1 = gr.Number(value=101325, label='P1 [Pa]')
            P2 = gr.Number(value=101325, label='P2 [Pa]')
            V1 = gr.Number(value=1.0, label='V1 [m/s]')
            V2 = gr.Number(value=1.0, label='V2 [m/s]')
            z1 = gr.Number(value=0.0, label='z1 [m]')
            z2 = gr.Number(value=0.0, label='z2 [m]')
            rho = gr.Number(value=1000.0, label='rho [kg/m^3]')
            mu = gr.Number(value=0.001, label='mu [Pa.s]')
            D = gr.Number(value=0.1, label='D [m]')
            L = gr.Number(value=10.0, label='L [m]')
            roughness = gr.Number(value=1.5e-6, label='roughness [m]')
            use_darcy = gr.Checkbox(value=True, label='Use Darcy–Weisbach')
            run_btn = gr.Button('Compute')
        with gr.Column(scale=3):
            # Raw solver record plus the rendered explanation text.
            numeric_out = gr.JSON(label='Numeric result (JSON)')
            explain_mode = gr.Radio(['deterministic', 'local_llm'], value='deterministic', label='Explanation mode')
            explanation_out = gr.Textbox(lines=15, label='Explanation')
    # Wire the button to the callback; input order must match the
    # compute_and_explain() parameter order.
    run_btn.click(compute_and_explain,
                  inputs=[P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness, use_darcy, explain_mode],
                  outputs=[numeric_out, explanation_out])
if __name__ == "__main__":
    # Enable Gradio's request queue before launching the app.
    demo.queue().launch()