Update app.py
Browse files
app.py
CHANGED
|
@@ -1,78 +1,64 @@
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import wntr
|
| 3 |
import tempfile
|
| 4 |
-
import os
|
| 5 |
-
import re
|
| 6 |
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
| 7 |
|
| 8 |
@st.cache_resource
|
| 9 |
-
def load_llm(
|
|
|
|
| 10 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 11 |
-
model = AutoModelForCausalLM.from_pretrained(
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
def extract_code(response):
    """Salvage a `def answer(): ... return ...` snippet from raw LLM text.

    Returns the first such snippet found, or a stub error function string
    when the response contains no recognizable `answer()` definition.
    """
    pattern = r"def answer\(\):[\s\S]+?return.+"
    found = re.search(pattern, response)
    if found:
        return found.group(0)
    return "def answer():\n return 'Error: No valid function found'"
|
| 20 |
-
|
| 21 |
-
def generate_python_code(pipe, prompt):
    """Ask the LLM pipeline for wntr code answering *prompt*.

    Prefers a fenced ```python block in the model response; otherwise
    falls back to extract_code() to salvage a bare `def answer():` snippet.
    """
    full_prompt = f"""You are a Python expert using the `wntr` library.
The user asks: "{prompt}"

You have the following variables:
- `wn`: a `WaterNetworkModel` object from the EPANET `.inp` file.
- `results`: simulation output from `wntr.sim`.

Write a single function named `answer()` that returns the answer as a Python object (int, float, str, or list).

Only return code, no explanations. Example:
```python
def answer():
    # Your code
    return result
```
"""
    response = pipe(full_prompt, max_new_tokens=300, do_sample=True)[0]["generated_text"]
    fenced = re.search(r"```python(.*?)```", response, re.DOTALL)
    if fenced is None:
        return extract_code(response)
    return fenced.group(1).strip()
|
| 44 |
-
|
| 45 |
-
def run_answer_function(code, wn, results):
    """Execute LLM-generated code defining `answer()` and return its result.

    Parameters
    ----------
    code : str
        Python source expected to define a zero-argument function `answer()`.
    wn : object
        Water network model, exposed to the generated code as `wn`.
    results : object
        Simulation results, exposed to the generated code as `results`.

    Returns
    -------
    object
        Whatever `answer()` returns, or an error string on any failure.
    """
    # SECURITY: exec of model-generated code is inherently unsafe; only run
    # this in a trusted or sandboxed environment.
    namespace = {"wn": wn, "results": results}
    try:
        # Pass one namespace as globals. The previous form
        # exec(code, {}, local_vars) defined answer() with EMPTY globals,
        # so any reference to wn/results inside it raised NameError and
        # every real query came back as an error string.
        exec(code, namespace)
        return namespace["answer"]()
    except Exception as e:
        return f"⚠️ Error in generated code: {e}"
|
| 52 |
-
|
| 53 |
-
st.set_page_config(page_title="EPANET + LLM", layout="wide")
|
| 54 |
st.title("💧 EPANET + LLM (via WNTR + Falcon LLM)")
|
| 55 |
|
| 56 |
uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])
|
| 57 |
-
question = st.text_input("Ask a question about your water network model")
|
| 58 |
|
| 59 |
-
if uploaded_file
|
| 60 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp:
|
| 61 |
tmp.write(uploaded_file.read())
|
| 62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
-
|
| 65 |
-
sim = wntr.sim.EpanetSimulator(wn)
|
| 66 |
-
results = sim.run_sim()
|
| 67 |
|
| 68 |
-
|
| 69 |
-
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
-
|
| 72 |
-
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
-
os.remove(tmp_path)
|
|
|
|
| 1 |
+
|
| 2 |
import streamlit as st
|
| 3 |
import wntr
|
| 4 |
import tempfile
|
|
|
|
|
|
|
| 5 |
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
| 6 |
|
| 7 |
@st.cache_resource
def load_llm(model_name: str = "tiiuae/falcon-7b-instruct"):
    """Build and cache a text-generation pipeline for the LLM.

    Cached by Streamlit so the multi-GB model loads only once per process.

    Parameters
    ----------
    model_name : str
        Hugging Face model id. Defaults to Falcon-7B-Instruct (previously
        hard-coded); parameterized so callers can swap models without
        touching this function.

    Returns
    -------
    transformers.Pipeline
        A ready-to-use "text-generation" pipeline.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # device_map="auto" lets accelerate spread weights across available
    # GPUs/CPU — presumably required for a 7B model; confirm accelerate
    # is installed in the deployment environment.
    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


pipe = load_llm()
|
| 15 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
st.title("💧 EPANET + LLM (via WNTR + Falcon LLM)")

uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])

if uploaded_file:
    # Persist the upload to disk: WaterNetworkModel needs a file path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp:
        tmp.write(uploaded_file.read())
        inp_path = tmp.name
    try:
        wn = wntr.network.WaterNetworkModel(inp_path)
    finally:
        # delete=False means nobody else removes the temp copy; the old
        # version of this script called os.remove and that cleanup was
        # lost in the rewrite.
        import os
        os.remove(inp_path)

    question = st.text_input("Ask a question about your water network model")

    if question:
        with st.spinner("Generating response..."):
            prompt = f'''
You are a Python assistant helping with water distribution modeling using WNTR.
The user has loaded an EPANET model into the variable `wn` using:
wn = wntr.network.WaterNetworkModel('model.inp')

Now the user is asking this question:
{question}

Write a Python function `def answer():` that computes and returns the result as a variable named `result`.

Use:
- `wn.pipe_name_list` to access pipe data.
- `len(wn.pipe_name_list)` for total pipes.
- Assign output to `result`.

Do not include anything outside the function.

Begin:
'''
            llm_output = pipe(prompt, max_new_tokens=200)[0]['generated_text']
            code_start = llm_output.find("def answer():")
            if code_start == -1:
                # Guard: str.find returns -1 when absent; slicing with -1
                # would silently take the last character as "code".
                st.error("The model did not generate a `def answer():` function. Please try again.")
            else:
                code = llm_output[code_start:].strip()

                st.markdown("### 🧠 Generated Code")
                st.code(code, language="python")

                st.markdown("### 📤 Output")
                local_vars = {}
                try:
                    # SECURITY: executing LLM-generated code is unsafe;
                    # acceptable only for trusted, local use.
                    exec(code, {"wn": wn}, local_vars)
                    if "result" in local_vars:
                        output = local_vars["result"]
                    elif callable(local_vars.get("answer")):
                        # The prompt asks for a function; the previous code
                        # never CALLED it, so `result` was almost never set
                        # and the UI always showed "No result returned."
                        output = local_vars["answer"]()
                    else:
                        output = "No result returned."
                except Exception as e:
                    output = f"Error in generated code: {e}"
                st.success(output)
|
| 64 |
|
|
|