razaali10 commited on
Commit
ba3e802
·
verified ·
1 Parent(s): 2af17f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -81
app.py CHANGED
@@ -1,91 +1,51 @@
1
  import streamlit as st
2
- import wntr
3
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
  import tempfile
5
  import os
 
 
6
 
7
@st.cache_resource
def load_llm(model_name="mistralai/Mistral-7B-Instruct-v0.2"):
    """Build and return a cached text-generation pipeline for *model_name*.

    Streamlit's ``cache_resource`` ensures the tokenizer/model pair is
    loaded only once per server process.
    """
    tok = AutoTokenizer.from_pretrained(model_name)
    lm = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype="auto", device_map="auto"
    )
    return pipeline("text-generation", model=lm, tokenizer=tok)
15
-
16
def simulate_epanet(inp_path):
    """Load the EPANET model at *inp_path* and run a WNTR simulation.

    Returns a ``(wn, results)`` pair: the ``WaterNetworkModel`` and the
    simulation output object.
    """
    network = wntr.network.WaterNetworkModel(inp_path)
    simulation_results = wntr.sim.WNTRSimulator(network).run_sim()
    return network, simulation_results
21
-
22
def generate_python_code(pipe, prompt):
    """Ask the LLM pipeline to write an ``answer()`` function for *prompt*.

    Builds an instruction prompt describing the in-scope ``wn`` and
    ``results`` variables, samples a completion, and returns only the
    extracted function source (see ``extract_code``).
    """
    llm_prompt = f"""You are a Python expert using the `wntr` library.
Here is the user's question: "{prompt}"

You have these variables:
- `wn`: the EPANET WaterNetworkModel
- `results`: the simulation output

Write a Python function named `answer()` that answers the question and returns a single result.

Begin your function with:
def answer():
"""
    generated = pipe(llm_prompt, max_new_tokens=300, do_sample=True)[0]["generated_text"]
    return extract_code(generated)
37
-
38
def extract_code(text):
    """Pull the ``answer()`` function out of raw LLM output.

    Collects everything from the first line whose stripped form starts
    with ``def answer(`` up to and including the first blank line that
    follows (or the end of the text). Returns "" when no such function
    is present.
    """
    collected = []
    capturing = False
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith("def answer("):
            capturing = True
        if not capturing:
            continue
        collected.append(raw_line)
        if stripped == "":
            # Blank line ends the function body; it is kept, matching
            # the inclusive-append-then-break behavior.
            break
    return "\n".join(collected)
50
-
51
def run_answer_function(code, wn, results):
    """Execute generated *code* defining ``answer()`` and return its result.

    Parameters:
        code: Python source expected to define a zero-argument ``answer()``.
        wn: the WaterNetworkModel made visible to the generated code.
        results: the simulation results made visible to the generated code.

    Bug fix: the previous version called ``exec(code, {}, local_vars)``.
    A function defined by ``exec`` captures the *globals* mapping, so
    ``wn``/``results`` — which lived only in the locals dict — raised
    ``NameError`` inside ``answer()``. Using a single namespace dict as
    globals makes both names visible to the generated function.

    SECURITY: ``exec`` runs arbitrary LLM-generated code; only use on
    output you trust or inside a sandbox.
    """
    namespace = {"wn": wn, "results": results}
    exec(code, namespace)  # deliberate: executes the generated function definition
    return namespace["answer"]()
55
-
56
# --- Streamlit UI ---
st.title("💧 EPANET + LLM with Streamlit")

uploaded_file = st.file_uploader("Upload your EPANET .inp file", type=["inp"])
user_prompt = st.text_input("Ask a question (e.g., 'What's the max pressure at node 101?')")

if uploaded_file and user_prompt:
    # Persist the upload to disk so wntr can parse it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp:
        tmp.write(uploaded_file.read())
        temp_path = tmp.name

    with st.spinner("Simulating EPANET model..."):
        try:
            wn, results = simulate_epanet(temp_path)
        except Exception as e:
            st.error(f"Failed to simulate EPANET model: {e}")
            st.stop()

    with st.spinner("Loading model..."):
        pipe = load_llm()

    with st.spinner("Generating Python code..."):
        code = generate_python_code(pipe, user_prompt)
        st.code(code, language="python")

    with st.spinner("Running generated code..."):
        try:
            output = run_answer_function(code, wn, results)
            st.success("Answer:")
            st.write(output)
        except Exception as e:
            st.error(f"Error running generated code: {e}")

    # Clean up the temporary upload once we are done with it.
    os.remove(temp_path)
 
1
import streamlit as st
import tempfile
import os
import wntr
from transformers import pipeline

# Load LLM
# NOTE(review): loaded at module level, so Streamlit re-runs reload it on
# every interaction — consider wrapping in @st.cache_resource.
llm = pipeline("text-generation", model="tiiuae/falcon-7b-instruct", device_map="auto", max_new_tokens=200)

st.title("💧 EPANET + LLM (via WNTR + Falcon LLM)")

# Upload .inp file
uploaded_file = st.file_uploader("Upload EPANET .inp file", type=["inp"])

# Initialize the model globally
wn = None
if uploaded_file:
    # Persist the upload so wntr can parse it from a real path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".inp") as tmp_file:
        tmp_file.write(uploaded_file.read())
        inp_path = tmp_file.name
    try:
        wn = wntr.network.WaterNetworkModel(inp_path)
    finally:
        # Fix: the temp file was previously never deleted (os was imported
        # but unused), leaking one file per upload.
        os.remove(inp_path)

# Ask question
user_question = st.text_input("Ask a question about your water network model")

if st.button("Generate Code") and wn and user_question:
    # Prompt to LLM
    prompt = f"""You are a Python expert using the WNTR library. Given a water network model `wn`, answer this question:
Question: {user_question}
Generate a Python function called `answer()` that computes and returns the result as a variable named `result`.
Only include valid Python code. Do not include markdown, explanations, or text outside the function."""

    llm_response = llm(prompt)[0]["generated_text"]
    # The pipeline echoes the prompt; keep only the generated function.
    code_start = llm_response.find("def answer")
    if code_start != -1:
        generated_code = llm_response[code_start:]
        st.markdown("### 🧠 Generated Code")
        st.code(generated_code, language="python")

        # Execute the function in a controlled environment
        # SECURITY: exec() runs arbitrary LLM-generated code — sandbox or
        # review before using this beyond a trusted demo.
        local_env = {"wn": wn}
        try:
            exec(generated_code, local_env)
            result = local_env["answer"]()
            st.markdown("### 📤 Output")
            st.success(result)
        except Exception as e:
            st.markdown("### 📤 Output")
            st.error(f"Error in generated code: {e}")
    else:
        st.error("Failed to extract Python function from LLM response.")