|
|
import os
import tempfile

import streamlit as st
import wntr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
|
|
|
|
@st.cache_resource
def load_llm():
    """Build a text-generation pipeline around the DeepSeek coder model.

    Decorated with ``st.cache_resource`` so the (multi-GB) model is loaded
    exactly once per Streamlit server process and shared across reruns.

    Returns:
        A Hugging Face ``text-generation`` pipeline capped at 512 new tokens.
    """
    checkpoint = "deepseek-ai/deepseek-coder-6.7b-instruct"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    # device_map="auto" lets accelerate place the weights on available GPUs/CPU.
    lm = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        device_map="auto",
        trust_remote_code=True,
    )
    return pipeline(
        "text-generation",
        model=lm,
        tokenizer=tok,
        max_new_tokens=512,
    )
|
|
|
|
|
# Load the cached LLM pipeline once at app startup (cache_resource makes
# repeat calls on rerun effectively free).
llm = load_llm()

st.title("💧 EPANET + WNTR + DeepSeek LLM Assistant")

uploaded_file = st.file_uploader("Upload your EPANET .inp file", type=["inp"])

# Parsed WNTR network model; stays None until a valid .inp file is uploaded.
wn = None
|
|
if uploaded_file:
    # Spool the upload to disk: WNTR's parser wants a real file path,
    # not Streamlit's in-memory file-like object.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
        tmp_file.write(uploaded_file.read())
        inp_path = tmp_file.name
    try:
        wn = wntr.network.WaterNetworkModel(inp_path)
    finally:
        # delete=False means the OS will not clean this up for us; remove the
        # temp file once WNTR has parsed it (fixes a leak on every upload/rerun).
        os.unlink(inp_path)
    st.success("Water network model loaded successfully.")

question = st.text_input("Ask a question about your water network model")
|
|
|
|
|
# Runs only when the button is clicked AND a model is loaded AND a question
# was entered (short-circuit order: the button click gates everything).
if st.button("Generate Python Code") and wn and question:
    prompt = f"""
You are a Python expert using the WNTR library for water network simulations.
Given a WNTR water network model `wn`, generate a Python function called `answer()` that answers this question:

Question: {question}

The function must use the `wn` model, store the final answer in a variable called `result`, and return it.
Only output valid Python code. Do not include markdown or explanations.
"""

    try:
        # text-generation pipelines echo the prompt before the completion,
        # so search the full text for the generated function definition.
        response = llm(prompt)[0]["generated_text"]
        code_start = response.find("def answer")
        if code_start != -1:
            code = response[code_start:]
            # Robustness: despite the instructions, instruct models often close
            # their answer with a markdown fence — drop it and anything after.
            code = code.split("```", 1)[0]
            st.subheader("🧠 Generated Code")
            st.code(code, language="python")

            # SECURITY: exec() runs arbitrary LLM-generated code with full
            # interpreter privileges. This is acceptable only for trusted,
            # local use — never expose this app to untrusted users.
            local_vars = {"wn": wn}
            try:
                exec(code, local_vars)
                # The generated function must be named `answer` per the prompt;
                # a KeyError here (model disobeyed) is surfaced below.
                result = local_vars["answer"]()
                st.subheader("📤 Output")
                st.success(result)
            except Exception as e:
                st.subheader("📤 Output")
                st.error(f"Error executing function: {e}")
        else:
            st.error("Could not extract Python function from LLM response.")
    except Exception as e:
        # Boundary handler: model download/inference failures end up here.
        st.error(f"Error querying DeepSeek model: {e}")
|
|
|