ecopus commited on
Commit
fe08240
·
verified ·
1 Parent(s): 0647a5e

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -0
app.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import math
3
+ from typing import Dict, Any
4
+ import gradio as gr
5
+ from transformers import AutoTokenizer, pipeline
6
+
7
def darcy_weisbach_head_loss(f: float, L: float, D: float, V: float) -> float:
    """Return the friction head loss h_f = f * (L/D) * V^2 / (2 g) in metres.

    f: Darcy friction factor, L: pipe length [m], D: diameter [m],
    V: mean velocity [m/s]. Uses standard gravity g = 9.80665 m/s^2.
    """
    gravity = 9.80665
    slenderness = L / D  # length-to-diameter ratio of the pipe
    return f * slenderness * V**2 / (2 * gravity)
10
+
11
def reynolds_number(V: float, D: float, nu: float) -> float:
    """Return the pipe-flow Reynolds number Re = V * D / nu.

    V: mean velocity [m/s], D: pipe diameter [m],
    nu: kinematic viscosity [m^2/s].
    """
    inertial_scale = V * D
    return inertial_scale / nu
13
+
14
def default_friction_factor(Re: float, roughness: float, D: float) -> float:
    """Estimate the Darcy friction factor for pipe flow.

    Laminar regime (0 < Re < 2300): f = 64 / Re.
    Otherwise: Haaland explicit approximation to the Colebrook equation,
    f = (-1.8 * log10((eps/(3.7 D))^1.11 + 6.9/Re))^-2.

    Re: Reynolds number, roughness: absolute roughness eps [m],
    D: pipe diameter [m].

    Returns NaN when Re <= 0: no physical friction factor exists there.
    (Fix: the original guarded only Re == 0, so a negative Re fell into
    the laminar branch and produced a negative friction factor.)
    """
    if Re <= 0:
        return float('nan')
    if Re < 2300:
        return 64.0 / Re
    # Haaland (1983) correlation for the turbulent regime.
    return (-1.8 * math.log10((roughness / (3.7 * D))**1.11 + 6.9 / Re))**-2
20
+
21
def bernoulli_pipe_flow(P1, P2, V1, V2, z1=0.0, z2=0.0, rho=1000.0, mu=0.001,
                        roughness=1.5e-6, D=0.1, L=1.0, friction_factor=None,
                        use_darcy=True, g=9.80665) -> Dict[str, Any]:
    """Solve the extended Bernoulli equation between two pipe sections.

    Computes the pump head h_pump required so that
        P1/(rho g) + V1^2/(2g) + z1 + h_pump
            = P2/(rho g) + V2^2/(2g) + z2 + h_f
    where h_f is the Darcy–Weisbach friction loss (zero if use_darcy is False).

    Parameters
    ----------
    P1, P2 : pressures [Pa];  V1, V2 : velocities [m/s];  z1, z2 : elevations [m]
    rho : density [kg/m^3], must be > 0
    mu : dynamic viscosity [Pa.s], must be > 0
    roughness : absolute pipe roughness [m];  D : diameter [m] > 0;  L : length [m] >= 0
    friction_factor : explicit Darcy f, or None to estimate it from Re
    use_darcy : include friction head loss when True
    g : gravitational acceleration [m/s^2]

    Returns
    -------
    On success: {'ok': True, 'inputs', 'intermediate', 'computed', 'summary',
    'explanations'} with 'pump_head' under 'computed'.
    On invalid input: {'ok': False, 'errors': [...]} (never raises for bad values).
    """
    errors = []
    # Type/finiteness check FIRST so the range comparisons below cannot raise
    # TypeError on non-numeric input. (Fixes: original checked ranges first,
    # omitted roughness from the finite check, and never validated mu — a
    # mu of 0 caused a ZeroDivisionError when computing the Reynolds number.)
    for val in [P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness]:
        if not (isinstance(val, (int, float)) and math.isfinite(val)):
            errors.append('Inputs must be finite numbers')
            break
    if not errors:
        if rho <= 0: errors.append('rho must be > 0')
        if mu <= 0: errors.append('mu must be > 0')
        if D <= 0: errors.append('D must be > 0')
        if L < 0: errors.append('L must be >= 0')
    if errors:
        return {'ok': False, 'errors': errors}

    nu = mu / rho  # kinematic viscosity [m^2/s]
    Re1 = reynolds_number(V1, D, nu)
    Re2 = reynolds_number(V2, D, nu)

    # Estimate f from the larger (worst-case) Reynolds number unless the
    # caller supplied one explicitly.
    f = friction_factor if friction_factor is not None else default_friction_factor(max(Re1, Re2), roughness, D)
    # Friction loss evaluated at the mean of the two section velocities.
    h_f = darcy_weisbach_head_loss(f, L, D, (V1 + V2) / 2.0) if use_darcy else 0.0

    left = P1 / (rho * g) + V1**2 / (2 * g) + z1    # upstream total head [m]
    right = P2 / (rho * g) + V2**2 / (2 * g) + z2   # downstream total head [m]
    h_pump = left - right + h_f                     # head the pump must supply

    results = {
        'ok': True,
        'inputs': {'P1': P1, 'P2': P2, 'V1': V1, 'V2': V2, 'z1': z1, 'z2': z2,
                   'rho': rho, 'mu': mu, 'D': D, 'L': L, 'roughness': roughness},
        'intermediate': {'g': g, 'nu': nu, 'Re1': Re1, 'Re2': Re2,
                         'friction_factor': f, 'head_loss': h_f},
        'computed': {'lhs': left, 'rhs': right, 'pump_head': h_pump},
        'summary': f"Pump head required = {h_pump:.3f} m, f = {f:.4f}, h_f = {h_f:.3f} m.",
        'explanations': [f"ν = {nu:.3e}, Re1={Re1:.1f}, Re2={Re2:.1f}",
                         f"f = {f:.4f}", f"h_f = {h_f:.3f} m",
                         f"Left={left:.3f}, Right={right:.3f}, h_pump={h_pump:.3f}"]
    }
    return results
59
+
60
def deterministic_explainer(record: dict) -> str:
    """Render a bernoulli_pipe_flow result dict as a plain-text report.

    Failed records produce a single 'Errors: ...' line; successful ones get
    a summary followed by inputs, intermediates, and step-by-step notes.
    """
    if not record.get('ok', False):
        return 'Errors: ' + '; '.join(record.get('errors', ['Unknown error']))

    report = ['--- EXPLANATION (deterministic) ---', str(record.get('summary', ''))]
    report.append('\nInputs:')
    report.extend(f" - {key}: {value}" for key, value in record['inputs'].items())
    report.append('\nIntermediate:')
    report.extend(f" - {key}: {value}" for key, value in record['intermediate'].items())
    report.append('\nSteps:')
    report.extend(' * ' + step for step in record.get('explanations', []))
    return '\n'.join(report)
76
+
77
# Local LLM setup: tokenizer and text-generation pipeline are loaded at
# import time (fetched from the Hugging Face Hub on first run).
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# NOTE(review): `pipe` is built here but is never referenced by llm_explain()
# below, which formats its answer without calling the model — confirm whether
# this (slow, memory-heavy) pipeline load is actually needed.
pipe = pipeline("text-generation", model=MODEL_ID, tokenizer=tokenizer)
80
+
81
def llm_explain(record: dict) -> str:
    """Produce a one-line summary of a bernoulli_pipe_flow result.

    Despite the name, this formats the record deterministically and does not
    call the loaded text-generation pipeline. Failed records yield an
    'Errors: ...' string instead.
    """
    if not record.get("ok", False):
        return "Errors: " + "; ".join(record.get("errors", []))

    computed = record["computed"]
    intermediate = record["intermediate"]
    mean_re = (intermediate["Re1"] + intermediate["Re2"]) / 2
    if mean_re < 2300:
        regime = "laminar"
    else:
        regime = "turbulent"
    return (
        f"Pump head {computed['pump_head']:.3f} m, "
        f"f={intermediate['friction_factor']:.4f}, "
        f"h_f={intermediate['head_loss']:.3f} m. Flow regime: {regime}."
    )
89
+
90
def compute_and_explain(P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness,
                        use_darcy, _=None, mode='deterministic'):
    """Gradio callback: run the solver and build an explanation string.

    Parameters mirror the UI components in order. `_` is an unused
    placeholder slot kept for wiring compatibility (now defaulted, so the
    callback also works if the dummy input is dropped); `mode` selects the
    explanation style ('deterministic' or 'local_llm').

    Returns (record_dict, explanation_text).
    """
    record = bernoulli_pipe_flow(P1, P2, V1, V2, z1, z2, rho, mu,
                                 roughness, D, L, None, use_darcy)
    if not record.get('ok'):
        return record, 'Errors: ' + '; '.join(record.get('errors', []))
    if mode == 'deterministic':
        explanation = deterministic_explainer(record)
    elif mode == 'local_llm':
        explanation = llm_explain(record)
    else:
        explanation = "Unknown explanation mode."
    return record, explanation
101
+
102
# UI layout: inputs on the left, JSON result + explanation on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Bernoulli Pipe Flow Calculator")
    with gr.Row():
        with gr.Column(scale=2):
            P1 = gr.Number(value=101325, label='P1 [Pa]')
            P2 = gr.Number(value=101325, label='P2 [Pa]')
            V1 = gr.Number(value=1.0, label='V1 [m/s]')
            V2 = gr.Number(value=1.0, label='V2 [m/s]')
            z1 = gr.Number(value=0.0, label='z1 [m]')
            z2 = gr.Number(value=0.0, label='z2 [m]')
            rho = gr.Number(value=1000.0, label='rho [kg/m^3]')
            mu = gr.Number(value=0.001, label='mu [Pa.s]')
            D = gr.Number(value=0.1, label='D [m]')
            L = gr.Number(value=10.0, label='L [m]')
            roughness = gr.Number(value=1.5e-6, label='roughness [m]')
            use_darcy = gr.Checkbox(value=True, label='Use Darcy–Weisbach')
            run_btn = gr.Button('Compute')
        with gr.Column(scale=3):
            numeric_out = gr.JSON(label='Numeric result (JSON)')
            explain_mode = gr.Radio(['deterministic', 'local_llm'], value='deterministic', label='Explanation mode')
            explanation_out = gr.Textbox(lines=15, label='Explanation')
    # BUG FIX: the original passed a literal None inside `inputs`, which
    # Gradio rejects (every entry must be a component). A gr.State() holding
    # None feeds the unused `_` placeholder argument of compute_and_explain.
    run_btn.click(compute_and_explain,
                  inputs=[P1, P2, V1, V2, z1, z2, rho, mu, D, L, roughness,
                          use_darcy, gr.State(), explain_mode],
                  outputs=[numeric_out, explanation_out])

if __name__ == "__main__":
    demo.queue().launch()