# -*- coding: utf-8 -*-
"""tempx.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1Vf5N8mlJ4efrplevzTY2qQCIEhCvd1jy
"""
| import math # For access to infinity | |
| import gradio # For building the interface | |
| import pandas # For working with tables | |
| from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline # For LLMS | |
# Instantiate the model that we'll be calling. This is a tiny one!
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"

# Tokenizer and weights are fetched from the Hugging Face hub on first run.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# A text-generation pipeline bundles model + tokenizer behind a single call.
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
)
# Core deterministic calculation for the two-layer wall.
def calculate_heat_flow(T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in):
    """Calculate heat flux and temperatures through a two-layer wall.

    Models the wall as four thermal resistances in series (outdoor film,
    layer 1 conduction, layer 2 conduction, indoor film) over a unit area,
    then walks the temperature drop across each resistance from outside in.

    Args:
        T_out: Outdoor temperature (Celsius).
        h_out: Outdoor convection coefficient (W/(m^2*K)).
        thickness1: Thickness of layer 1 (m).
        k1: Thermal conductivity of layer 1 (W/(m*K)).
        thickness2: Thickness of layer 2 (m).
        k2: Thermal conductivity of layer 2 (W/(m*K)).
        T_in: Indoor temperature (Celsius).
        h_in: Indoor convection coefficient (W/(m^2*K)).

    Returns:
        A tuple (Q, T_outer_surface, T_interface, T_inner_surface) where Q
        is the total heat flux (W/m^2) and the remaining entries are the
        temperatures (Celsius) at the outer wall surface, the interface
        between the two layers, and the inner wall surface.
    """
    unit_area = 1  # all resistances are per square metre of wall (A = 1 m^2)

    # Series thermal resistances, ordered from outdoors to indoors.
    resistances = [
        1 / (h_out * unit_area),        # outdoor convective film
        thickness1 / (k1 * unit_area),  # conduction through layer 1
        thickness2 / (k2 * unit_area),  # conduction through layer 2
        1 / (h_in * unit_area),         # indoor convective film
    ]

    # Ohm's-law analogue: flux driven by the overall temperature difference.
    flux = (T_out - T_in) / sum(resistances)

    # Drop the temperature across each resistance in turn.
    outer_surface = T_out - flux * resistances[0]
    interface = outer_surface - flux * resistances[1]
    inner_surface = interface - flux * resistances[2]

    return flux, outer_surface, interface, inner_surface
def calculate_heat_flow_gr(T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in):
    """Calculate heat flux and temperatures through a two-layer wall for Gradio output.

    Thin wrapper over calculate_heat_flow that reshapes the tuple result
    into a two-column table the Gradio Dataframe component can display.

    Args:
        T_out: Outdoor temperature (Celsius).
        h_out: Outdoor convection coefficient (W/(m^2*K)).
        thickness1: Thickness of layer 1 (m).
        k1: Thermal conductivity of layer 1 (W/(m*K)).
        thickness2: Thickness of layer 2 (m).
        k2: Thermal conductivity of layer 2 (W/(m*K)).
        T_in: Indoor temperature (Celsius).
        h_in: Indoor convection coefficient (W/(m^2*K)).

    Returns:
        A pandas DataFrame with "Metric" and "Value" columns containing the
        calculated results (llm_explain relies on this row ordering).
    """
    Q, T_outer_surface, T_interface, T_inner_surface = calculate_heat_flow(T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in)
    results = {
        "Metric": ["Total Heat Flux (W/m^2)", "Outer Surface Temperature (°C)", "Interface Temperature (°C)", "Inner Surface Temperature (°C)"],
        "Value": [Q, T_outer_surface, T_interface, T_inner_surface]
    }
    # BUG FIX: the file imports the library as `pandas`, never as `pd`, so the
    # original `pd.DataFrame(results)` raised NameError on every call.
    return pandas.DataFrame(results)
# This helper function applies a chat format to help the LLM understand what
# is going on.
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render a system + user message pair with the tokenizer's chat template.

    Args:
        system_prompt: Instructions describing the assistant's role.
        user_prompt: The actual request for this conversation turn.

    Returns:
        The fully formatted prompt string, ending with the assistant cue so
        generation continues as the assistant's reply.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # NOTE: the original fetched tokenizer.chat_template into an unused local;
    # apply_chat_template consults the template itself, so that dead code is removed.
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
# This function asks the LLM pipeline for a completion of the given prompt.
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Generate a response from the pipeline and return only the new text.

    Args:
        prompt: Fully formatted chat prompt (see _format_chat).
        max_tokens: Cap on the number of newly generated tokens.

    Returns:
        The generated continuation, without the echoed prompt.
    """
    generation_kwargs = {
        "max_new_tokens": max_tokens,
        "do_sample": True,
        "temperature": 0.8,        # higher temperature for more varied output
        "return_full_text": False,  # strip the prompt from the returned text
    }
    completions = pipe(prompt, **generation_kwargs)
    return completions[0]["generated_text"]
# This function generates an explanation of the results.
def llm_explain(results: dict, inputs: list) -> str:
    """Ask the LLM for a one-sentence plain-language explanation of the results.

    Args:
        results: Mapping (or DataFrame — run_once passes one) whose "Value"
            column is ordered [Q, T_outer_surface, T_interface,
            T_inner_surface], as produced by calculate_heat_flow_gr.
        inputs: The eight raw inputs in run_once's parameter order.

    Returns:
        The LLM-generated explanation text.
    """
    T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in = inputs
    Q = results["Value"][0]
    T_outer_surface = results["Value"][1]
    T_interface = results["Value"][2]
    T_inner_surface = results["Value"][3]
    # BUG FIX: adjacent string literals concatenate with NO separator, so the
    # original prompt ran sentences together ("...sentence.Focus on the...").
    # Trailing spaces (and terminal periods) keep the instructions readable.
    system_prompt = (
        "You are a friendly and simple assistant that explains heat transfer in one concise sentence. "
        "Focus on the direction of heat flow and the main factor influencing it (like temperature difference or insulation). "
        "Explain if the heat is flowing into the indoor space or not, keep it simple. "
        "Also explain how the insulation choice impacts this with simple topics. "
        "Avoid technical jargon and complex formulas."
    )
    user_prompt = (
        f"Given an outdoor temperature of {T_out}°C and an indoor temperature of {T_in}°C,\n"
        f"and a wall with layers having thermal conductivities {k1} W/(m*K) and {k2} W/(m*K),\n"
        f"the total heat flux through the wall is {Q:.2f} W/m^2.\n"
        "Explain this result in one simple sentence."
    )
    formatted = _format_chat(system_prompt, user_prompt)
    # 128 new tokens keeps the answer concise and the CPU-only call quick.
    return _llm_generate(formatted, max_tokens=128)
# This function ties everything together (evaluation, LLM explanation, output)
# and is the main entry point for the GUI.
def run_once(T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in):
    """Compute the heat-flow table and an LLM narrative for one set of inputs.

    Returns:
        (DataFrame of numeric results, narrative explanation string).
    """
    raw_inputs = [T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in]
    # Coerce every widget value to float before doing any arithmetic.
    table = calculate_heat_flow_gr(*[float(value) for value in raw_inputs])
    # Keep the LLM's full explanation rather than just its first line.
    narrative = llm_explain(table, raw_inputs)
    return table, narrative
# Last but not least, here's the UI!
# (Fix applied: corrected user-visible typos in the Markdown text —
# "tranfer", "incosistent", "choise", "couln't".)
with gradio.Blocks() as demo:
    # Title and introduction.
    gradio.Markdown(
        "# Run and Explain Heat Flow Calcs"
    )
    gradio.Markdown(
        """
        This app runs a basic heat transfer calculation between two spaces with two walls. Users can adjust indoor and outdoor
        temperatures and heat transfer coefficients along with wall materials and thicknesses.
        The calculations work well, but the LLM has issues. I played with the prompting for a while but the large number of inputs
        and small model size made outputs very inconsistent. Sometimes they explain the importance of the thickness and material choice and
        other times it will just display the outputs. I couldn't fix this, to improve this I would use a larger model with GPU processing.
        **Goals:**
        * Simulate heat transfer through a composite wall.
        * Calculate heat flux and temperatures at different points in the wall.
        * Provide a simple interface to adjust parameters and see the impact on heat flow.
        **Assumptions:**
        * One-dimensional steady-state heat transfer.
        * Constant thermal properties of materials.
        * Uniform temperatures and convection coefficients on the surfaces.
        * No internal heat generation.
        """
    )
    # Representative thermal conductivities (W/(m*K)) for the material dropdowns.
    material_k = {
        "Wood": 0.12,        # Example k value for wood
        "Brick": 0.72,       # Example k value for brick
        "Insulation": 0.04   # Example k value for insulation
    }
    # Dropdown choices as (label, value) tuples so each label shows its k value.
    material_choices = [(f"{name}: k = {k}", k) for name, k in material_k.items()]
    # Row for outdoor conditions.
    with gradio.Row():
        T_out = gradio.Number(value=0, label="Outdoor Temperature (°C)")
        h_out = gradio.Number(value=25, label="Outdoor Convection Coefficient (W/(m^2*K))")
    # Rows for wall conditions.
    with gradio.Row():
        thickness1 = gradio.Number(value=0.1, label="Thickness of Layer 1 (m)")
        k1 = gradio.Dropdown(material_choices, label="Thermal Conductivity of Layer 1 (W/(m*K))", value=material_k["Wood"])
    with gradio.Row():
        thickness2 = gradio.Number(value=0.1, label="Thickness of Layer 2 (m)")
        k2 = gradio.Dropdown(material_choices, label="Thermal Conductivity of Layer 2 (W/(m*K))", value=material_k["Wood"])
    # Row for indoor conditions.
    with gradio.Row():
        T_in = gradio.Number(value=20, label="Indoor Temperature (°C)")
        h_in = gradio.Number(value=5, label="Indoor Convection Coefficient (W/(m^2*K))")
    # Button that triggers the computation.
    run_btn = gradio.Button("Compute")
    # Outputs: a dataframe for the deterministic numbers and a markdown box
    # for the LLM's narrative.
    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")
    # Run the calculations when the button is clicked.
    run_btn.click(fn=run_once, inputs=[T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in], outputs=[results_df, explain_md])
    # Finally, add a few representative example configurations.
    gradio.Examples(
        examples=[
            [0, 25, 0.1, material_k["Wood"], 0.1, material_k["Wood"], 20, 5],
            [10, 10, 0.2, material_k["Brick"], 0.05, material_k["Insulation"], 22, 8],
            [-5, 30, 0.05, material_k["Insulation"], 0.15, material_k["Brick"], 18, 3],
        ],
        inputs=[T_out, h_out, thickness1, k1, thickness2, k2, T_in, h_in],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,  # recompute on click; LLM output is not cacheable anyway
    )
# Launch the Gradio app when run as a script (debug=True surfaces tracebacks in the UI).
if __name__ == "__main__":
    demo.launch(debug=True)