File size: 2,326 Bytes
969c22c
ed00ea6
ba3e802
59222ae
dfcfe24
e66696e
 
59222ae
e66696e
59222ae
dfcfe24
 
 
ed00ea6
59222ae
8a43de5
59222ae
969c22c
ba3e802
 
 
 
 
 
59222ae
 
 
 
 
 
 
 
969c22c
59222ae
ba3e802
59222ae
 
 
dfcfe24
 
 
59222ae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dfcfe24
59222ae
dfcfe24
59222ae
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import os
import tempfile

import streamlit as st
import wntr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

@st.cache_resource
def load_llm():
    """Build and cache a text-generation pipeline for the DeepSeek coder model.

    Decorated with ``st.cache_resource`` so the (large) model and tokenizer
    are loaded once per server process instead of on every Streamlit rerun.

    Returns:
        A ``transformers`` text-generation pipeline capped at 512 new tokens.
    """
    checkpoint = "deepseek-ai/deepseek-coder-6.7b-instruct"  # You can swap this with another DeepSeek model
    tok = AutoTokenizer.from_pretrained(checkpoint)
    # device_map="auto" lets Accelerate place weights on available devices;
    # trust_remote_code is required by the DeepSeek model repo.
    mdl = AutoModelForCausalLM.from_pretrained(
        checkpoint, device_map="auto", trust_remote_code=True
    )
    return pipeline("text-generation", model=mdl, tokenizer=tok, max_new_tokens=512)

llm = load_llm()

st.title("💧 EPANET + WNTR + DeepSeek LLM Assistant")

uploaded_file = st.file_uploader("Upload your EPANET .inp file", type=["inp"])

wn = None
if uploaded_file:
    # WNTR parses from a filesystem path, so persist the upload to a temp
    # file first. delete=False is required because the file must outlive
    # the `with` block on platforms that lock open temp files (Windows).
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
        tmp_file.write(uploaded_file.read())
        inp_path = tmp_file.name
    try:
        wn = wntr.network.WaterNetworkModel(inp_path)
    finally:
        # Fix: the original never removed this file, leaking one temp file
        # per upload/rerun. Unlink it once the model has been parsed.
        os.unlink(inp_path)
    st.success("Water network model loaded successfully.")

question = st.text_input("Ask a question about your water network model")

if st.button("Generate Python Code") and wn and question:
    prompt = f"""
You are a Python expert using the WNTR library for water network simulations.
Given a WNTR water network model `wn`, generate a Python function called `answer()` that answers this question:

Question: {question}

The function must use the `wn` model, store the final answer in a variable called `result`, and return it.
Only output valid Python code. Do not include markdown or explanations.
"""

    try:
        # The text-generation pipeline echoes the prompt; locate the start
        # of the generated function and keep only that tail.
        response = llm(prompt)[0]["generated_text"]
        code_start = response.find("def answer")
        if code_start != -1:
            code = response[code_start:]
            st.subheader("🧠 Generated Code")
            st.code(code, language="python")

            # SECURITY: exec() runs arbitrary LLM-generated code with the
            # full privileges of this process. Acceptable only for trusted,
            # local use — for anything user-facing, run the snippet in a
            # sandbox (separate subprocess, restricted globals, timeouts).
            local_vars = {"wn": wn}
            try:
                exec(code, local_vars)
                result = local_vars["answer"]()
                st.subheader("📤 Output")
                st.success(result)
            except Exception as e:
                st.subheader("📤 Output")
                st.error(f"Error executing function: {e}")
        else:
            st.error("Could not extract Python function from LLM response.")
    except Exception as e:
        st.error(f"Error querying DeepSeek model: {e}")