Update app.py
Browse files
app.py
CHANGED
|
@@ -1,22 +1,27 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
import tempfile
|
| 3 |
-
import os
|
| 4 |
import wntr
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
|
|
|
|
| 7 |
@st.cache_resource
|
| 8 |
def load_llm():
|
| 9 |
model_name = "Qwen/Qwen2.5-7B-Instruct-1M"
|
| 10 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 11 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
|
| 12 |
-
return pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
st.title("💧 EPANET + LLM (via WNTR + Qwen LLM)")
|
| 15 |
|
| 16 |
# Upload .inp file
|
| 17 |
uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])
|
| 18 |
|
| 19 |
-
# Initialize the model
|
| 20 |
wn = None
|
| 21 |
if uploaded_file:
|
| 22 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
|
|
@@ -24,32 +29,32 @@ if uploaded_file:
|
|
| 24 |
inp_path = tmp_file.name
|
| 25 |
wn = wntr.network.WaterNetworkModel(inp_path)
|
| 26 |
|
| 27 |
-
# Ask question
|
| 28 |
user_question = st.text_input("Ask a question about your water network model")
|
| 29 |
|
| 30 |
if st.button("Generate Code") and wn and user_question:
|
| 31 |
-
# Prompt to LLM
|
| 32 |
prompt = f"""You are a Python expert using the WNTR library. Given a water network model `wn`, answer this question:
|
| 33 |
Question: {user_question}
|
| 34 |
Generate a Python function called `answer()` that computes and returns the result as a variable named `result`.
|
| 35 |
Only include valid Python code. Do not include markdown, explanations, or text outside the function."""
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
st.markdown("### 🧠 Generated Code")
|
| 42 |
st.code(generated_code, language="python")
|
| 43 |
|
| 44 |
-
# Execute the function in a controlled environment
|
| 45 |
local_env = {"wn": wn}
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
st.error(f"Error in generated code: {e}")
|
| 54 |
-
else:
|
| 55 |
-
st.error("Failed to extract Python function from LLM response.")
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import tempfile
|
|
|
|
| 3 |
import wntr
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 7 |
|
| 8 |
+
# Load model once
|
| 9 |
@st.cache_resource
def load_llm():
    """Build and cache a text-generation pipeline for the Qwen instruct model.

    Decorated with ``st.cache_resource`` so the (large) model is loaded at
    most once per Streamlit server process and shared across reruns.

    Returns:
        A Hugging Face ``text-generation`` pipeline capped at 512 new tokens.
    """
    checkpoint = "Qwen/Qwen2.5-7B-Instruct-1M"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")
    return pipeline(
        "text-generation",
        model=lm,
        tokenizer=tok,
        max_new_tokens=512,
    )
|
| 15 |
+
|
| 16 |
+
# Load LLM (cached by @st.cache_resource, so this is cheap on reruns)
llm = load_llm()

st.title("💧 EPANET + LLM (via WNTR + Qwen LLM)")

# Upload .inp file
uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])

# Initialize the WNTR model from the uploaded EPANET input file.
# WNTR only accepts a filesystem path, so the upload is spooled to a
# temporary file first. delete=False keeps the file alive after the
# `with` block closes it; we remove it explicitly afterwards — the
# original version leaked one temp file per upload.
wn = None
if uploaded_file:
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
        # NOTE(review): this line was collapsed out of the diff view;
        # reconstructed as the obvious spool-to-disk step — confirm
        # against the original file.
        tmp_file.write(uploaded_file.read())
        inp_path = tmp_file.name
    try:
        wn = wntr.network.WaterNetworkModel(inp_path)
    finally:
        # Fix: clean up the temp file (the `os` import was previously unused).
        os.remove(inp_path)
|
| 31 |
|
| 32 |
+
# Ask user question
user_question = st.text_input("Ask a question about your water network model")

if st.button("Generate Code") and wn and user_question:
    prompt = f"""You are a Python expert using the WNTR library. Given a water network model `wn`, answer this question:
Question: {user_question}
Generate a Python function called `answer()` that computes and returns the result as a variable named `result`.
Only include valid Python code. Do not include markdown, explanations, or text outside the function."""

    try:
        response = llm(prompt)[0]["generated_text"]
        # The pipeline echoes the prompt and the model may wrap the code in
        # prose, so keep only the text from the function definition onward.
        if "def answer" in response:
            response = response.split("def answer", 1)[-1]
            generated_code = "def answer" + response
        else:
            raise ValueError("Function definition not found in LLM output.")

        st.markdown("### 🧠 Generated Code")
        st.code(generated_code, language="python")

        # SECURITY: exec() runs arbitrary LLM-generated code with full
        # interpreter privileges. That is tolerable only for a trusted,
        # single-user local tool; before any shared deployment this must be
        # sandboxed (separate process, restricted globals, timeout).
        local_env = {"wn": wn}
        exec(generated_code, local_env)
        # Fix: verify the code actually defined answer(); previously a bare
        # KeyError('answer') leaked into the user-facing error message.
        if "answer" not in local_env:
            raise ValueError("Generated code did not define an `answer()` function.")
        result = local_env["answer"]()
        st.markdown("### 📤 Output")
        st.success(result)
    except Exception as e:
        st.markdown("### 📤 Output")
        st.error(f"Error running generated code: {e}")
|
|
|
|
|
|
|
|
|