Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- README.md +32 -20
- app.py +150 -203
- requirements.txt +171 -4
README.md
CHANGED
|
@@ -1,35 +1,47 @@
|
|
| 1 |
---
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
sdk: gradio
|
| 7 |
-
app_file: app.py
|
| 8 |
-
pinned: false
|
| 9 |
---
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
|
| 13 |
-
#
|
| 14 |
|
| 15 |
-
|
| 16 |
|
| 17 |
## How to Use
|
| 18 |
|
| 19 |
-
1.
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
-
##
|
| 24 |
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
| 26 |
|
| 27 |
-
|
| 28 |
|
| 29 |
-
|
|
|
|
|
|
|
| 30 |
|
| 31 |
-
|
| 32 |
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
|
|
|
| 1 |
---
|
| 2 |
+
tags:
|
| 3 |
+
- gradio
|
| 4 |
+
- machine-learning
|
| 5 |
+
- engineering
|
|
|
|
|
|
|
|
|
|
| 6 |
---
|
| 7 |
+
# Reynolds Number Calculator
|
| 8 |
|
| 9 |
+
This is a simple web application built with Gradio that calculates the Reynolds number for fluid flow in a circular pipe and determines the flow regime (laminar, transitional, or turbulent). It also provides a plain-English explanation of the results generated by a small language model.
|
| 10 |
|
| 11 |
+
## Purpose
|
| 12 |
|
| 13 |
+
The application serves as a demonstration of wrapping a deterministic engineering calculation with a user-friendly interface using Gradio and augmenting the numerical output with an LLM-generated explanation for broader understanding.
|
| 14 |
|
| 15 |
## How to Use
|
| 16 |
|
| 17 |
+
1. Enter the required fluid and pipe properties in the input fields:
|
| 18 |
+
* **Fluid density [kg/m³]:** The mass density of the fluid.
|
| 19 |
+
* **Fluid velocity [m/s]:** The average velocity of the fluid flow.
|
| 20 |
+
* **Pipe diameter [m]:** The inner diameter of the circular pipe.
|
| 21 |
+
* **Dynamic viscosity [Pa*s]:** The dynamic viscosity of the fluid.
|
| 22 |
+
2. Click the "Compute Reynolds Number" button.
|
| 23 |
+
3. The "Calculation Results" panel will display the calculated Reynolds number and the determined flow regime.
|
| 24 |
+
4. The "Explanation" panel will show a simple, LLM-generated explanation of the results.
|
| 25 |
|
| 26 |
+
## Inputs
|
| 27 |
|
| 28 |
+
* `rho`: Fluid density in kilograms per cubic meter (kg/m³).
|
| 29 |
+
* `v`: Fluid velocity in meters per second (m/s).
|
| 30 |
+
* `D`: Pipe diameter in meters (m).
|
| 31 |
+
* `mu`: Dynamic viscosity in Pascal-seconds (Pa*s).
|
| 32 |
|
| 33 |
+
## Outputs
|
| 34 |
|
| 35 |
+
* **Reynolds Number [-]:** The dimensionless Reynolds number.
|
| 36 |
+
* **Flow Regime:** The classification of the flow as "Laminar" (Re < 2100), "Transitional" (2100 <= Re <= 4000), or "Turbulent" (Re > 4000).
|
| 37 |
+
* **Explanation:** A natural language explanation of the results provided by an instruction-tuned language model.
|
| 38 |
|
| 39 |
+
## Libraries Used
|
| 40 |
|
| 41 |
+
* Gradio: For building the web interface.
|
| 42 |
+
* Transformers: For accessing and using the language model.
|
| 43 |
+
* Torch: A dependency for the Transformers library.
|
| 44 |
+
* Hugging Face Hub: For loading the pre-trained language model.
|
| 45 |
+
* Pandas: For formatting the numerical results into a table.
|
| 46 |
|
| 47 |
+
The LLM is used to make the engineering results more accessible and understandable to a non-expert audience by providing a concise, analogy-based explanation.
|
app.py
CHANGED
|
@@ -1,219 +1,166 @@
|
|
| 1 |
-
import
|
| 2 |
-
import
|
| 3 |
-
import
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
#
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
from huggingface_hub import delete_repo
|
| 31 |
-
try:
|
| 32 |
-
# Use the current MODEL_REPO_ID for deletion
|
| 33 |
-
delete_repo(MODEL_REPO_ID, repo_type="model", token=HF_TOKEN)
|
| 34 |
-
except Exception as e:
|
| 35 |
-
print(f"Could not delete repo from cache (may not exist or unauthorized): {e}")
|
| 36 |
-
|
| 37 |
-
CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
| 38 |
-
local_zip = huggingface_hub.hf_hub_download(
|
| 39 |
-
repo_id=MODEL_REPO_ID,
|
| 40 |
-
filename=ZIP_FILENAME,
|
| 41 |
-
repo_type="model",
|
| 42 |
-
token=HF_TOKEN,
|
| 43 |
-
local_dir=str(CACHE_DIR),
|
| 44 |
-
local_dir_use_symlinks=False,
|
| 45 |
)
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
# Attempt to convert to integer first, then use get for safety
|
| 80 |
-
ci = int(c)
|
| 81 |
-
return CLASS_LABELS.get(ci, str(c))
|
| 82 |
-
except (ValueError, TypeError):
|
| 83 |
-
# If conversion fails, try getting directly by key
|
| 84 |
-
return CLASS_LABELS.get(c, str(c))
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
# Function to preprocess image and return processed image - Keep as is for now
|
| 88 |
-
def preprocess_image_for_display(pil_img: PIL.Image.Image):
|
| 89 |
-
if pil_img is None:
|
| 90 |
-
return None
|
| 91 |
-
|
| 92 |
-
# AutoGluon preprocessing (simplified, actual preprocessing is done internally by the predictor)
|
| 93 |
-
# Here we resize for display purposes to show a consistent "processed" image
|
| 94 |
-
processed_img = pil_img.resize((224, 224)) # Example size, adjust as needed
|
| 95 |
-
return processed_img
|
| 96 |
-
|
| 97 |
-
# Do the prediction! - Adjusting outputs to match the new model's likely output
|
| 98 |
-
def do_predict(pil_img: PIL.Image.Image):
|
| 99 |
-
# Make sure there's actually an image to work with and predictor is loaded
|
| 100 |
-
if pil_img is None:
|
| 101 |
-
# Returning None for the processed image output when input is None
|
| 102 |
-
# Adjusting return values to match expected outputs: status, probabilities, processed image
|
| 103 |
-
return "No image provided.", {}, None
|
| 104 |
-
if PREDICTOR is None:
|
| 105 |
-
# Returning None for the processed image output when predictor is not loaded
|
| 106 |
-
# Adjusting return values to match expected outputs: status, probabilities, processed image
|
| 107 |
-
return "Predictor not loaded. Please check the logs for errors.", {}, None
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
# Basic validation (file type is handled by Gradio, checking size)
|
| 111 |
-
# This is a placeholder; real size checks would be on file upload before PIL
|
| 112 |
-
# For now, we'll just check if the image object is valid
|
| 113 |
-
try:
|
| 114 |
-
pil_img.verify()
|
| 115 |
-
except Exception:
|
| 116 |
-
# Returning None for the processed image output for invalid image
|
| 117 |
-
# Adjusting return values to match expected outputs: status, probabilities, processed image
|
| 118 |
-
return "Invalid image file.", {}, None
|
| 119 |
-
|
| 120 |
-
# IF we have something to work with, save it and prepare the input
|
| 121 |
-
tmpdir = pathlib.Path(tempfile.mkdtemp())
|
| 122 |
-
img_path = tmpdir / "input.png"
|
| 123 |
-
pil_img.save(img_path)
|
| 124 |
-
|
| 125 |
-
df = pandas.DataFrame({"image": [str(img_path)]}) # For AutoGluon expected input format
|
| 126 |
-
|
| 127 |
-
# For class probabilities
|
| 128 |
-
# Assuming predict_proba returns a DataFrame where columns are class labels
|
| 129 |
-
proba_output = PREDICTOR.predict_proba(df)
|
| 130 |
-
print(f"Type of proba_output: {type(proba_output)}")
|
| 131 |
-
print(f"Content of proba_output: {proba_output}")
|
| 132 |
-
|
| 133 |
-
# Assuming proba_output is a pandas DataFrame with class probabilities
|
| 134 |
-
if not proba_output.empty:
|
| 135 |
-
# Get probabilities for the first (and likely only) row
|
| 136 |
-
proba_series = proba_output.iloc[0]
|
| 137 |
-
# Convert to dictionary, mapping original labels to probabilities
|
| 138 |
-
proba_dict = proba_series.to_dict()
|
| 139 |
-
|
| 140 |
-
# For user-friendly column names
|
| 141 |
-
# Map the original labels (keys in proba_dict) to human-friendly labels
|
| 142 |
-
pretty_dict = {
|
| 143 |
-
_human_label(k): float(v) for k, v in proba_dict.items()
|
| 144 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
else:
|
| 146 |
-
|
| 147 |
-
pretty_dict = {}
|
| 148 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
| 152 |
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
return "Prediction Complete", pretty_dict, processed_img_display
|
| 156 |
|
| 157 |
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 165 |
|
| 166 |
-
|
| 167 |
-
|
| 168 |
|
| 169 |
-
# Provide an introduction - Updated for Playing Cards
|
| 170 |
-
gradio.Markdown("# Playing Card Detection")
|
| 171 |
-
gradio.Markdown("""
|
| 172 |
-
This is a simple app that demonstrates how to use an autogluon multimodal
|
| 173 |
-
predictor in a gradio space to predict the type of playing card in a picture. To use,
|
| 174 |
-
just upload a photo using the options below. The original and preprocessed
|
| 175 |
-
images will be displayed, and the prediction results will appear automatically.
|
| 176 |
-
""")
|
| 177 |
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
# Adjusting num_top_classes if the model has more classes
|
| 187 |
-
proba_pretty = gradio.Label(num_top_classes=len(CLASS_LABELS), label="Class probabilities")
|
| 188 |
-
prediction_status = gradio.Textbox(label="Prediction Status") # Added Textbox for status
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
# Expose key inference parameters (placeholder)
|
| 192 |
-
# gradio.Markdown("## Inference Parameters")
|
| 193 |
-
# with gradio.Row():
|
| 194 |
-
# Add parameters here if needed, e.e., a confidence threshold slider
|
| 195 |
-
# confidence_threshold = gradio.Slider(minimum=0, maximum=1, value=0.5, label="Confidence Threshold")
|
| 196 |
-
|
| 197 |
-
# Whenever a new image is uploaded, trigger the prediction directly
|
| 198 |
-
# Wrap do_predict in a lambda to ensure only the image input is passed
|
| 199 |
-
# Ensure outputs match the return values of do_predict
|
| 200 |
-
image_in.change(
|
| 201 |
-
fn=lambda img: do_predict(img),
|
| 202 |
-
inputs=[image_in],
|
| 203 |
-
outputs=[prediction_status, proba_pretty, image_processed_out]
|
| 204 |
)
|
| 205 |
|
|
|
|
|
|
|
| 206 |
|
| 207 |
-
# For clickable example images - ADDED BACK
|
| 208 |
-
if EXAMPLES: # Only show examples if any were successfully fetched
|
| 209 |
-
gradio.Examples(
|
| 210 |
-
examples=EXAMPLES,
|
| 211 |
-
inputs=[image_in],
|
| 212 |
-
label="Representative examples",
|
| 213 |
-
examples_per_page=8,
|
| 214 |
-
cache_examples=False,
|
| 215 |
-
)
|
| 216 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 217 |
|
| 218 |
-
|
| 219 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
import gradio
import pandas

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Instantiate the model that we'll be calling. This is a tiny one!
# SmolLM2-135M-Instruct is small enough to run on the free CPU tier.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
    ),
    tokenizer=tokenizer,
)
| 17 |
+
|
| 18 |
+
# This helper function applies a chat format to help the LLM understand what
# is going on
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render a system+user message pair with the tokenizer's chat template.

    Returns the formatted prompt as a plain string (tokenize=False), with
    the generation prompt appended so the model knows it should respond.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # NOTE: the original fetched tokenizer.chat_template into an unused
    # local; apply_chat_template uses the template internally, so the
    # lookup was dead code and has been removed.
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
|
| 31 |
+
|
| 32 |
+
# This function uses the LLM to generate a response.
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Generate up to max_tokens new tokens for the given prompt.

    Sampling is enabled with a moderate temperature so explanations vary
    slightly between runs. return_full_text=False means only the newly
    generated continuation is returned, not the prompt.
    """
    out = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.5,
        return_full_text=False,
    )
    return out[0]["generated_text"]
|
| 42 |
+
|
| 43 |
+
# Create a function to do the Reynolds number calculation
def reynolds_number_calc(rho: float, v: float, D: float, mu: float) -> dict:
    """
    Calculates the Reynolds number for flow in a circular pipe.
    Determines the flow regime (laminar, transitional, or turbulent).
    Inputs:
        rho: fluid density [kg/m^3]
        v: fluid velocity [m/s]
        D: pipe diameter [m]
        mu: dynamic viscosity [Pa*s] or [kg/(m*s)]

    Returns:
        A dict with:
        reynolds_number: the calculated Reynolds number, or None on bad input
        flow_regime: the determined flow regime, or an error message
    """

    # Input validation: density, diameter, and viscosity must be strictly
    # positive; velocity may be zero (stationary fluid) but not negative.
    if rho <= 0 or v < 0 or D <= 0 or mu <= 0:
        return {
            "reynolds_number": None,
            "flow_regime": "Invalid input: Density, velocity, diameter, and viscosity must be positive (velocity can be zero).",
        }

    # Re = rho * v * D / mu (dimensionless)
    reynolds_number = (rho * v * D) / mu

    # Classic pipe-flow thresholds: Re < 2100 laminar, 2100-4000
    # transitional, > 4000 turbulent. The elif's lower bound is implied by
    # the first branch failing, so only the upper bound is checked.
    if reynolds_number < 2100:
        flow_regime = "Laminar"
    elif reynolds_number <= 4000:
        flow_regime = "Transitional"
    else:
        flow_regime = "Turbulent"

    return {
        "reynolds_number": reynolds_number,
        "flow_regime": flow_regime,
    }
|
| 79 |
|
| 80 |
+
# This function generates an explanation of the results for Reynolds number
def llm_explain_reynolds(results: dict, inputs: list) -> str:
    """Ask the LLM for a one-sentence, layperson explanation of the results.

    results: dict from reynolds_number_calc (reynolds_number, flow_regime)
    inputs:  [rho, v, D, mu] as entered by the user

    If the calculation failed (reynolds_number is None), the error message
    stored in flow_regime is returned unchanged instead of calling the LLM.
    """
    rho, v, D, mu = inputs
    reynolds_number = results.get("reynolds_number")
    flow_regime = results.get("flow_regime")

    if reynolds_number is None:
        return flow_regime  # Return the error message from the calculation

    system_prompt = (
        "You explain fluid mechanics to a smart 5-year-old. "
        # Fixed: the second fragment previously lacked a trailing space, so
        # the concatenated prompt read "...explanation.You always...".
        "Use food-based analogies to support the explanation. "
        "You always return CONCISE responses, only one sentence."
    )

    user_prompt = (
        f"For a fluid with density {rho:g} kg/m³ and viscosity {mu:g} Pa*s, "
        f"flowing at {v:g} m/s through a pipe with diameter {D:g} m, "
        f"the Reynolds number is {reynolds_number:.2f}. "
        f"The flow regime is {flow_regime}. "
        "Explain the Reynolds number and the flow regime in ONE friendly sentence for a non-expert."
    )

    formatted = _format_chat(system_prompt, user_prompt)
    return _llm_generate(formatted, max_tokens=128)
|
| 106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
|
| 108 |
+
# This function ties everything together for the Reynolds number calculator
def run_reynolds_once(rho, v, D, mu):
    """Gradio callback: compute Re, build a results table, add an LLM note.

    Returns a (DataFrame, str) pair matching the two output components
    wired up in the UI. On invalid input the DataFrame is empty and the
    string carries the validation error message instead of an explanation.
    """
    inputs = [rho, v, D, mu]
    # Coerce to float: gradio.Number may deliver ints or strings.
    d = reynolds_number_calc(
        rho=float(rho),
        v=float(v),
        D=float(D),
        mu=float(mu),
    )

    if d.get("reynolds_number") is None:
        return pandas.DataFrame(), d["flow_regime"]

    df = pandas.DataFrame([{
        "Reynolds Number [-]": round(d["reynolds_number"], 3),
        "Flow Regime": d["flow_regime"],
    }])

    # Keep only the first line of the LLM output so the panel stays concise.
    narrative = llm_explain_reynolds(d, inputs).split("\n")[0]
    return df, narrative
|
| 129 |
+
|
| 130 |
+
# Now, let's create the Gradio interface for the Reynolds number calculator
with gradio.Blocks() as reynolds_demo:

    gradio.Markdown(
        "# Reynolds Number Calculator"
    )
    gradio.Markdown(
        "This app calculates the Reynolds number for flow in a circular pipe and determines the flow regime (laminar, transitional, or turbulent)."
    )

    # Input fields, defaulting to water-like properties.
    with gradio.Row():
        rho = gradio.Number(value=1000.0, label="Fluid density [kg/m³]")
        v = gradio.Number(value=1.0, label="Fluid velocity [m/s]")
        D = gradio.Number(value=0.1, label="Pipe diameter [m]")
        mu = gradio.Number(value=0.001, label="Dynamic viscosity [Pa*s]")

    run_btn_reynolds = gradio.Button("Compute Reynolds Number")

    results_df_reynolds = gradio.Dataframe(label="Calculation Results", interactive=False)

    # Add an explicit title for the explanation panel
    gradio.Markdown("## Explanation")
    explain_md_reynolds = gradio.Markdown(label="Explanation")

    run_btn_reynolds.click(
        fn=run_reynolds_once,
        inputs=[rho, v, D, mu],
        outputs=[results_df_reynolds, explain_md_reynolds],
    )

    # Example values chosen so each row actually lands in the regime its
    # comment claims (Re = rho*v*D/mu). The originals were mislabeled:
    # e.g. [1000, 0.1, 0.05, 0.001] gives Re = 5000 (Turbulent), not Laminar.
    gradio.Examples(
        examples=[
            [1000.0, 0.02, 0.1, 0.001],  # Re = 2000 -> Laminar
            [1000.0, 0.03, 0.1, 0.001],  # Re = 3000 -> Transitional
            [1000.0, 2.0, 0.1, 0.001],   # Re = 200000 -> Turbulent
        ],
        inputs=[rho, v, D, mu],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,
    )

# Launch when run as a script (Hugging Face Spaces executes app.py directly).
if __name__ == "__main__":
    reynolds_demo.launch()
|
requirements.txt
CHANGED
|
@@ -1,5 +1,172 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
| 3 |
huggingface_hub
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
transformers
|
| 2 |
+
gradio
|
| 3 |
+
torch
|
| 4 |
huggingface_hub
|
| 5 |
+
|
| 6 |
+
# NOTE(review): stray Jupyter cell magic "%%writefile app.py" — this line and the duplicated app.py source that follows were accidentally committed into requirements.txt and will break `pip install -r requirements.txt`; requirements.txt should contain only the package names above (plus pandas, which app.py imports).
|
| 7 |
+
import math
|
| 8 |
+
import gradio
|
| 9 |
+
import pandas
|
| 10 |
+
|
| 11 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 12 |
+
|
| 13 |
+
# Instantiate the model that we'll be calling. This is a tiny one!
|
| 14 |
+
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
|
| 15 |
+
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
|
| 16 |
+
pipe = pipeline(
|
| 17 |
+
task="text-generation",
|
| 18 |
+
model=AutoModelForCausalLM.from_pretrained(
|
| 19 |
+
MODEL_ID,
|
| 20 |
+
),
|
| 21 |
+
tokenizer=tokenizer
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
# This helper function applies a chat format to help the LLM understand what
|
| 25 |
+
# is going on
|
| 26 |
+
def _format_chat(system_prompt: str, user_prompt: str) -> str:
|
| 27 |
+
messages = [
|
| 28 |
+
{"role": "system", "content": system_prompt},
|
| 29 |
+
{"role": "user", "content": user_prompt},
|
| 30 |
+
]
|
| 31 |
+
template = getattr(tokenizer, "chat_template", None)
|
| 32 |
+
return tokenizer.apply_chat_template(
|
| 33 |
+
messages,
|
| 34 |
+
tokenize=False,
|
| 35 |
+
add_generation_prompt=True
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
# This functoin uses hte LLM to generate a response.
|
| 39 |
+
def _llm_generate(prompt: str, max_tokens: int) -> str:
|
| 40 |
+
out = pipe(
|
| 41 |
+
prompt,
|
| 42 |
+
max_new_tokens=max_tokens,
|
| 43 |
+
do_sample=True,
|
| 44 |
+
temperature=0.5,
|
| 45 |
+
return_full_text=False,
|
| 46 |
+
)
|
| 47 |
+
return out[0]["generated_text"]
|
| 48 |
+
|
| 49 |
+
# Create a function to do the Reynolds number calculation
|
| 50 |
+
def reynolds_number_calc(rho: float, v: float, D: float, mu: float) -> dict:
|
| 51 |
+
"""
|
| 52 |
+
Calculates the Reynolds number for flow in a circular pipe.
|
| 53 |
+
Determines the flow regime (laminar, transitional, or turbulent).
|
| 54 |
+
Inputs:
|
| 55 |
+
rho: fluid density [kg/m^3]
|
| 56 |
+
v: fluid velocity [m/s]
|
| 57 |
+
D: pipe diameter [m]
|
| 58 |
+
mu: dynamic viscosity [Pa*s] or [kg/(m*s)]
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
reynolds_number: The calculated Reynolds number
|
| 62 |
+
flow_regime: The determined flow regime
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
# Input validation
|
| 66 |
+
if rho <= 0 or v < 0 or D <= 0 or mu <= 0:
|
| 67 |
+
return {
|
| 68 |
+
"reynolds_number": None,
|
| 69 |
+
"flow_regime": "Invalid input: Density, velocity, diameter, and viscosity must be positive (velocity can be zero).",
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
reynolds_number = (rho * v * D) / mu
|
| 73 |
+
|
| 74 |
+
if reynolds_number < 2100:
|
| 75 |
+
flow_regime = "Laminar"
|
| 76 |
+
elif 2100 <= reynolds_number <= 4000:
|
| 77 |
+
flow_regime = "Transitional"
|
| 78 |
+
else:
|
| 79 |
+
flow_regime = "Turbulent"
|
| 80 |
+
|
| 81 |
+
return {
|
| 82 |
+
"reynolds_number": reynolds_number,
|
| 83 |
+
"flow_regime": flow_regime,
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
# This function generates an explanation of the results for Reynolds number
|
| 87 |
+
def llm_explain_reynolds(results: dict, inputs: list) -> str:
|
| 88 |
+
rho, v, D, mu = inputs
|
| 89 |
+
reynolds_number = results.get("reynolds_number")
|
| 90 |
+
flow_regime = results.get("flow_regime")
|
| 91 |
+
|
| 92 |
+
if reynolds_number is None:
|
| 93 |
+
return flow_regime # Return the error message from the calculation
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
system_prompt = (
|
| 97 |
+
"You explain fluid mechanics to a smart 5-year-old. "
|
| 98 |
+
"Use food-based analogies to support the explanation."
|
| 99 |
+
"You always return CONCISE responses, only one sentence."
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
user_prompt = (
|
| 103 |
+
f"For a fluid with density {rho:g} kg/m³ and viscosity {mu:g} Pa*s, "
|
| 104 |
+
f"flowing at {v:g} m/s through a pipe with diameter {D:g} m, "
|
| 105 |
+
f"the Reynolds number is {reynolds_number:.2f}. "
|
| 106 |
+
f"The flow regime is {flow_regime}. "
|
| 107 |
+
"Explain the Reynolds number and the flow regime in ONE friendly sentence for a non-expert."
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
formatted = _format_chat(system_prompt, user_prompt)
|
| 111 |
+
return _llm_generate(formatted, max_tokens=128)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# This function ties everything together for the Reynolds number calculator
|
| 115 |
+
def run_reynolds_once(rho, v, D, mu):
|
| 116 |
+
inputs = [rho, v, D, mu]
|
| 117 |
+
d = reynolds_number_calc(
|
| 118 |
+
rho=float(rho),
|
| 119 |
+
v=float(v),
|
| 120 |
+
D=float(D),
|
| 121 |
+
mu=float(mu)
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
if d.get("reynolds_number") is None:
|
| 125 |
+
return pandas.DataFrame(), d["flow_regime"]
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
df = pandas.DataFrame([{
|
| 129 |
+
"Reynolds Number [-]": round(d["reynolds_number"], 3),
|
| 130 |
+
"Flow Regime": d["flow_regime"],
|
| 131 |
+
}])
|
| 132 |
+
|
| 133 |
+
narrative = llm_explain_reynolds(d, inputs).split("\n")[0]
|
| 134 |
+
return df, narrative
|
| 135 |
+
|
| 136 |
+
# Now, let's create the Gradio interface for the Reynolds number calculator
|
| 137 |
+
with gradio.Blocks() as reynolds_demo:
|
| 138 |
+
|
| 139 |
+
gradio.Markdown(
|
| 140 |
+
"# Reynolds Number Calculator"
|
| 141 |
+
)
|
| 142 |
+
gradio.Markdown(
|
| 143 |
+
"This app calculates the Reynolds number for flow in a circular pipe and determines the flow regime (laminar, transitional, or turbulent)."
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
with gradio.Row():
|
| 147 |
+
rho = gradio.Number(value=1000.0, label="Fluid density [kg/m³]")
|
| 148 |
+
v = gradio.Number(value=1.0, label="Fluid velocity [m/s]")
|
| 149 |
+
D = gradio.Number(value=0.1, label="Pipe diameter [m]")
|
| 150 |
+
mu = gradio.Number(value=0.001, label="Dynamic viscosity [Pa*s]")
|
| 151 |
+
|
| 152 |
+
run_btn_reynolds = gradio.Button("Compute Reynolds Number")
|
| 153 |
+
|
| 154 |
+
results_df_reynolds = gradio.Dataframe(label="Calculation Results", interactive=False)
|
| 155 |
+
|
| 156 |
+
# Add an explicit title for the explanation panel
|
| 157 |
+
gradio.Markdown("## Explanation")
|
| 158 |
+
explain_md_reynolds = gradio.Markdown(label="Explanation")
|
| 159 |
+
|
| 160 |
+
run_btn_reynolds.click(fn=run_reynolds_once, inputs=[rho, v, D, mu], outputs=[results_df_reynolds, explain_md_reynolds])
|
| 161 |
+
|
| 162 |
+
gradio.Examples(
|
| 163 |
+
examples=[
|
| 164 |
+
[1000.0, 0.1, 0.05, 0.001], # Laminar example
|
| 165 |
+
[1000.0, 0.5, 0.1, 0.001], # Transitional example
|
| 166 |
+
[1000.0, 2.0, 0.1, 0.001], # Turbulent example
|
| 167 |
+
],
|
| 168 |
+
inputs=[rho, v, D, mu],
|
| 169 |
+
label="Representative cases",
|
| 170 |
+
examples_per_page=3,
|
| 171 |
+
cache_examples=False,
|
| 172 |
+
)
|