Spaces:
Sleeping
Sleeping
File size: 9,147 Bytes
35918c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 |
import math  # Retained from the original file (not referenced below).
import gradio as gr  # Web UI framework.
import pandas as pd  # Tabular results for the UI.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline  # LLM loading.

# --- Model setup ---
# A single small instruct model powers the natural-language explanations.
# It is loaded once at import time so every request reuses the same pipeline.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(MODEL_ID),
    tokenizer=tokenizer,
)
# --- NOTE on model loading ---
# The 'pipe' and 'tokenizer' objects used by the LLM helper functions below
# are created at the top of this file from MODEL_ID
# ("HuggingFaceTB/SmolLM2-135M-Instruct"). To swap in a different model,
# replace that setup block — for example:
#
# from transformers import pipeline, AutoTokenizer
# import torch
#
# model_id = "microsoft/phi-2"
# tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# pipe = pipeline(
#     "text-generation",
#     model=model_id,
#     tokenizer=tokenizer,
#     torch_dtype=torch.bfloat16,
#     device_map="auto",
#     trust_remote_code=True
# )
#
# If 'pipe' and 'tokenizer' are removed without a replacement, the LLM
# helpers raise a NameError, which the UI callback catches and reports.
# --- Core Calculation Logic ---
def size_ac_motor_complex(
    required_power_hp: float = None,
    required_torque_lb_ft: float = None,
    speed_rpm: float = None,
    voltage_v: float = None,
    efficiency: float = 0.85,
    service_factor: float = 1.15,
    power_factor: float = 0.8,
    motor_type: str = "induction"
) -> dict:
    """
    Calculate the required motor nameplate power based on load requirements.

    The load may be given either directly as power in HP, or as a
    torque/speed pair, using HP = (T [lb-ft] * N [rpm]) / 5252.

    Args:
        required_power_hp: Load power in horsepower (option 1).
        required_torque_lb_ft: Load torque in lb-ft (option 2, with speed_rpm).
        speed_rpm: Shaft speed in rpm (required alongside torque).
        voltage_v: Supply voltage; echoed in the result, not used in sizing.
        efficiency: Motor efficiency in (0, 1]; scales the electrical draw.
        service_factor: Safety multiplier applied to the mechanical rating.
        power_factor: Echoed in the result; not used in the sizing math here.
        motor_type: Free-form label included in the returned notes.

    Returns:
        dict of sizing results, or {"error": <message>} on invalid input.
    """
    # Fail fast: reject invalid ratings before doing any arithmetic with them.
    if efficiency <= 0 or power_factor <= 0 or service_factor <= 0:
        return {"error": "Efficiency, Power Factor, and Service Factor must be positive values."}

    # Resolve the load to watts from whichever inputs were supplied.
    if required_power_hp is not None:
        required_load_power_watts = required_power_hp * 745.7  # 1 HP = 745.7 W
    elif required_torque_lb_ft is not None and speed_rpm is not None:
        hp_from_torque = (required_torque_lb_ft * speed_rpm) / 5252.0
        required_load_power_watts = hp_from_torque * 745.7
        required_power_hp = hp_from_torque  # report the derived HP as well
    else:
        return {"error": "Either 'Required Power (HP)' or both 'Required Torque (lb-ft)' and 'Speed (rpm)' must be provided."}

    # Nameplate rating carries the service-factor margin; electrical draw
    # accounts for motor losses via efficiency.
    sized_mechanical_power_watts = required_load_power_watts * service_factor
    electrical_power_drawn_watts = required_load_power_watts / efficiency
    sized_mechanical_power_kw = sized_mechanical_power_watts / 1000
    electrical_power_drawn_kw = electrical_power_drawn_watts / 1000

    notes = f"Sizing calculation for a standard {motor_type} motor."
    return {
        "calculated_load_hp": required_power_hp,
        "required_torque_lb_ft": required_torque_lb_ft,
        "speed_rpm": speed_rpm,
        "voltage_v": voltage_v,
        "sized_mechanical_power_kw": sized_mechanical_power_kw,
        "electrical_power_drawn_kw": electrical_power_drawn_kw,
        "efficiency": efficiency,
        "service_factor": service_factor,
        "power_factor": power_factor,
        "motor_type": motor_type,
        "notes": notes
    }
# --- LLM Helper Functions (Now require a live model) ---
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render a system/user message pair with the model's chat template.

    Relies on the module-level ``tokenizer`` created at the top of the file.
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    return tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Run the shared text-generation pipeline and return only the new text.

    Samples at temperature 0.5; relies on the module-level ``pipe``.
    """
    generations = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.5,
        return_full_text=False,
    )
    first_candidate = generations[0]
    return first_candidate["generated_text"]
def llm_explain(results: dict) -> str:
    """Produce a one-sentence, kid-friendly explanation of the sizing result.

    When ``results`` carries an "error" key, the error is echoed back
    instead of calling the model.
    """
    if "error" in results:
        return f"Error: {results['error']}"
    persona = (
        "You explain engineering to a smart 5-year-old. "
        "Use food-based analogies to support the explanation. "
        "You always return CONCISE responses, only one sentence."
    )
    load_hp = results['calculated_load_hp']
    sf = results['service_factor']
    rated_kw = results['sized_mechanical_power_kw']
    question = (
        f"My machine needs {load_hp:.2f} horsepower to run. "
        f"Considering a service factor of {sf}, the calculated sized motor power is {rated_kw:.2f} kW. "
        f"Explain what the 'sized motor power' means in one friendly sentence using a food-based analogy for a non-expert."
    )
    return _llm_generate(_format_chat(persona, question), max_tokens=128)
# --- Gradio Interface Function ---
def run_calculation_interface(required_power_hp, required_torque_lb_ft, speed_rpm, voltage_v, efficiency, service_factor, power_factor, motor_type):
    """Connect the Gradio UI to the sizing logic and the LLM explainer.

    Returns a (DataFrame, narrative) pair matching the UI's two outputs.
    """

    def _opt_float(value):
        # Empty gr.Number fields arrive as None; treat 0/"" as "not provided"
        # too (same truthiness-based coercion as the original, applied once).
        return float(value) if value else None

    results = size_ac_motor_complex(
        required_power_hp=_opt_float(required_power_hp),
        required_torque_lb_ft=_opt_float(required_torque_lb_ft),
        speed_rpm=_opt_float(speed_rpm),
        voltage_v=_opt_float(voltage_v),
        efficiency=float(efficiency),
        service_factor=float(service_factor),
        power_factor=float(power_factor),
        motor_type=motor_type
    )

    # The LLM call can fail independently of the calculation (e.g. the model
    # objects were removed); degrade to a message instead of crashing the UI.
    try:
        narrative = llm_explain(results)
    except NameError:
        narrative = "LLM Error: The 'pipe' and 'tokenizer' objects are not defined. Please load a model."
    except Exception as e:
        narrative = f"An unexpected error occurred with the LLM: {e}"

    # On a calculation error, show an empty table; the narrative carries
    # the error text.
    if "error" in results:
        df = pd.DataFrame()
    else:
        df = pd.DataFrame([{
            "Required Load [HP]": f"{results['calculated_load_hp']:.2f}",
            "Sized Motor Rating [kW]": f"{results['sized_mechanical_power_kw']:.2f}",
            "Estimated Electrical Draw [kW]": f"{results['electrical_power_drawn_kw']:.2f}",
            "Service Factor": results["service_factor"],
            "Efficiency": results["efficiency"],
        }])
    return df, narrative
# --- Gradio User Interface Definition ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Advanced AC Motor Sizing Calculator")
    gr.Markdown("Size an AC motor based on your load requirements. Provide either the required power directly, or the torque and speed for the application.")
    # Input area: load specification on the left, electrical/motor
    # parameters on the right.
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("### Load Requirements")
            with gr.Row():
                # Either Option 1 (power) or Option 2 (torque + speed)
                # must be filled in; empty fields arrive as None.
                required_power_hp = gr.Number(label="Required Power [HP] (Option 1)")
                required_torque_lb_ft = gr.Number(label="Required Torque [lb-ft] (Option 2)")
                speed_rpm = gr.Number(label="Speed [rpm] (with Torque)")
        with gr.Column(scale=1):
            gr.Markdown("### Electrical & Motor Parameters")
            voltage_v = gr.Number(label="Voltage [V] (Optional)")
            motor_type = gr.Dropdown(choices=["induction", "synchronous", "servo"], value="induction", label="Motor Type")
    # Slider bounds cover typical real-world ranges for each rating.
    with gr.Row():
        efficiency = gr.Slider(minimum=0.5, maximum=0.99, value=0.85, step=0.01, label="Motor Efficiency")
        service_factor = gr.Slider(minimum=1.0, maximum=2.0, value=1.15, step=0.05, label="Service Factor")
        power_factor = gr.Slider(minimum=0.5, maximum=1.0, value=0.80, step=0.01, label="Power Factor")
    run_btn = gr.Button("Calculate Motor Size", variant="primary")
    gr.Markdown("---")
    gr.Markdown("### Results")
    # Output components: a summary table plus the LLM-generated narrative.
    results_df = gr.Dataframe(label="Summary", interactive=False)
    explain_md = gr.Markdown(label="Explanation")
    # Wire the button to the backend; the inputs/outputs lists must match
    # run_calculation_interface's parameter order and return values.
    run_btn.click(
        fn=run_calculation_interface,
        inputs=[required_power_hp, required_torque_lb_ft, speed_rpm, voltage_v, efficiency, service_factor, power_factor, motor_type],
        outputs=[results_df, explain_md]
    )
    # Clickable example rows that pre-fill all eight inputs.
    gr.Examples(
        examples=[
            [5.0, None, None, 480, 0.90, 1.25, 0.85, "induction"],
            [None, 10.0, 1750, 230, 0.88, 1.15, 0.82, "induction"],
            [1.0, None, None, 208, 0.85, 1.0, 0.75, "synchronous"],
        ],
        inputs=[required_power_hp, required_torque_lb_ft, speed_rpm, voltage_v, efficiency, service_factor, power_factor, motor_type],
        label="Example Scenarios",
    )
if __name__ == "__main__":
    # 'pipe' and 'tokenizer' are created at the top of this file, so the
    # app is self-contained when run directly. Launch the Gradio server.
    demo.launch()
|