# Hugging Face Space: Multi-Agent Energy Auditor (Streamlit app)
import os

import streamlit as st
from openai import OpenAI
from pypdf import PdfReader
# --- PAGE CONFIGURATION ---
# Must run before any other Streamlit call in the script.
st.set_page_config(page_title="Multi-Agent Energy Auditor", page_icon="⚡", layout="wide")
# --- SIDEBAR: SETTINGS ---
# Collects the user's OpenAI key (module-level `api_key`, read by the button
# handler below) and explains the two agent personas.
with st.sidebar:
    st.header("⚙️ Configuration")
    # `type="password"` masks the key in the UI; it is never persisted.
    api_key = st.text_input("OpenAI API Key", type="password", help="Enter your OpenAI API key here.")
    st.markdown("---")
    st.markdown("### 🤖 Agent Roles")
    st.info(
        "**EngineerGPT:** Proposes aggressive savings measures.\n\n"
        "**VerifierGPT:** Skeptical QA that checks math and assumptions."
    )
| # --- CLASS DEFINITIONS (Refactored from Notebook) --- | |
class DebateAgent:
    """One LLM participant in the proposal/critique debate loop.

    Keeps its own running chat transcript so each call to
    ``generate_response`` continues the same conversation.
    """

    def __init__(self, client, name, role, system_prompt):
        self.client = client
        self.name = name
        self.role = role
        self.system_prompt = system_prompt
        # Seed the transcript with the persona-defining system message.
        self.conversation_history = [
            {"role": "system", "content": f"You are {name}, {role}. {system_prompt}"}
        ]

    def _compose_turn(self, user_input, prev_agent_response):
        """Build one user-turn string, embedding the other agent's draft when given."""
        if not prev_agent_response:
            return user_input
        return (
            f"{user_input}\n\n"
            f"REVIEW MATERIAL: Here is the proposal from the other agent:\n"
            f"```\n{prev_agent_response}\n```\n"
            f"Critique this proposal strictly based on the facts provided. "
            f"If you find errors, point them out efficiently."
        )

    def generate_response(self, user_input, prev_agent_response=None):
        """Send one turn to the model and return its reply.

        On any API failure the error is returned as an ``"Error: ..."``
        string (the Streamlit UI renders it inline rather than crashing).
        The user turn stays in the transcript even when the call fails.
        """
        turn = self._compose_turn(user_input, prev_agent_response)
        self.conversation_history.append({"role": "user", "content": turn})
        try:
            # gpt-4o at low temperature for consistent numeric reasoning.
            reply = self.client.chat.completions.create(
                model="gpt-4o",
                messages=self.conversation_history,
                temperature=0.2,
            ).choices[0].message.content
        except Exception as e:
            return f"Error: {e}"
        self.conversation_history.append({"role": "assistant", "content": reply})
        return reply
| # --- HELPER FUNCTIONS --- | |
def extract_text_from_pdf(uploaded_file):
    """Return the concatenated text of every page of *uploaded_file*.

    Accepts anything ``pypdf.PdfReader`` accepts (Streamlit's UploadedFile
    is a file-like object). On any failure returns an ``"Error reading
    PDF: ..."`` string instead of raising, matching the app's string-based
    error convention.
    """
    try:
        pdf_reader = PdfReader(uploaded_file)
        # BUG FIX: extract_text() may return None/"" for image-only or
        # unextractable pages; the old `text += page.extract_text()` would
        # raise TypeError on None. Guard with `or ""` and join in one pass.
        return "".join(page.extract_text() or "" for page in pdf_reader.pages)
    except Exception as e:
        return f"Error reading PDF: {e}"
# --- MAIN APP LAYOUT ---
st.title("⚡ Multi-Agent Energy Auditor")
st.markdown("""
This tool uses a **Multi-Agent Debate System** to audit building data. 
One agent proposes an Energy Conservation Measure (ECM), and a second "Skeptic" agent attempts to find errors in the logic before the final report is generated.
""")
# 1. INPUT SECTION — either source may be empty; the button handler below
# validates that at least one was provided.
st.header("1. Upload Audit Data")
uploaded_file = st.file_uploader("Upload an Audit PDF", type="pdf")
manual_text = st.text_area("Or paste building data text here:", height=150)
# Main workflow: runs only on the button click. Validates inputs, ingests the
# audit data, then drives a 3-round debate (propose -> critique -> correct).
if st.button("Start AI Audit Process", type="primary"):
    if not api_key:
        st.error("Please enter your OpenAI API Key in the sidebar.")
    elif not uploaded_file and not manual_text:
        st.warning("Please provide audit data via PDF upload or text entry.")
    else:
        # Initialize the OpenAI client with the user-supplied key.
        client = OpenAI(api_key=api_key)
        # Ingest data: PDF upload takes precedence over pasted text.
        with st.spinner("Ingesting Audit Data..."):
            if uploaded_file:
                audit_context = extract_text_from_pdf(uploaded_file)
            else:
                audit_context = manual_text
        # Show a snippet of what was ingested (first 2000 chars only).
        with st.expander("View Ingested Data Context"):
            st.code(audit_context[:2000] + "...", language="text")
        # Initialize the two debating agents with opposing personas.
        engineer_agent = DebateAgent(
            client=client,
            name="EngineerGPT",
            role="Energy Auditor",
            system_prompt="You are an experienced energy auditor. You propose Energy Conservation Measures (ECMs) based on building data. You are aggressive with savings estimates. Focus on the single highest-impact opportunity."
        )
        verifier_agent = DebateAgent(
            client=client,
            name="VerifierGPT",
            role="Senior QA Engineer",
            system_prompt="You are a skeptical Senior Engineer. Your job is to catch errors in assumptions, specifically regarding operating hours, rates, and feasibility. You do not generate ideas, you only verify them."
        )
        # --- ROUND 1: PROPOSAL --- EngineerGPT proposes an ECM from the data.
        st.header("2. The Debate Loop")
        col1, col2 = st.columns([1, 1])
        with col1:
            st.subheader("Round 1: Initial Proposal")
            with st.spinner("EngineerGPT is calculating..."):
                task_prompt = f"Based on the Building Data below, identify and propose the best retrofit ECM. Estimate the annual savings in kWh and $. \n\nData: {audit_context}"
                proposal_text = engineer_agent.generate_response(task_prompt)
            st.success("Proposal Generated")
            with st.container(border=True):
                st.markdown(proposal_text)
        # --- ROUND 2: CRITIQUE --- VerifierGPT reviews the proposal.
        with col2:
            st.subheader("Round 2: QA Critique")
            with st.spinner("VerifierGPT is reviewing logic..."):
                critique_text = verifier_agent.generate_response(
                    "Please review the following ECM proposal for accuracy.",
                    prev_agent_response=proposal_text
                )
            st.warning("Critique Generated")
            with st.container(border=True):
                st.markdown(critique_text)
        # --- ROUND 3: CORRECTION --- EngineerGPT revises using the critique
        # (its own transcript already contains the original proposal).
        st.subheader("Round 3: Final Verified Proposal")
        with st.spinner("EngineerGPT is applying corrections..."):
            final_text = engineer_agent.generate_response(
                "Here is feedback from the Senior QA Engineer. Please update your calculation and proposal strictly addressing their concerns.",
                prev_agent_response=critique_text
            )
        st.info("Final Report Generated")
        st.markdown("### 📝 Final Verified ECM Report")
        st.markdown(final_text)
        # Offer the final revised report as a markdown download.
        st.download_button(
            label="Download Report",
            data=final_text,
            file_name="verified_ecm_report.md",
            mime="text/markdown"
        )