AlexKurian committed on
Commit
97cc89a
·
1 Parent(s): 98060ad

Added pdf download

Browse files
emission_forecast.py CHANGED
@@ -548,7 +548,10 @@ def register_emission_routes(app):
548
  applied_policies.append({
549
  'id': policy['id'],
550
  'name': policy['name'],
551
- 'icon': policy['icon']
 
 
 
552
  })
553
 
554
  # Calculate yearly totals
@@ -745,6 +748,42 @@ Data Points: {len(year_df)} days
745
  import traceback
746
  traceback.print_exc()
747
  return jsonify({'error': str(e)}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
748
 
749
  if __name__ == "__main__":
750
  print("Training XGBoost Emission Model...")
 
548
  applied_policies.append({
549
  'id': policy['id'],
550
  'name': policy['name'],
551
+ 'icon': policy['icon'],
552
+ 'description': policy.get('description', ''),
553
+ 'category': policy.get('category', 'General'),
554
+ 'details': policy.get('details', {})
555
  })
556
 
557
  # Calculate yearly totals
 
748
  import traceback
749
  traceback.print_exc()
750
  return jsonify({'error': str(e)}), 500
751
+
752
+ @app.route('/api/policies/chat', methods=['POST'])
753
+ def policy_chat():
754
+ """
755
+ Chat with Gemini about selected policies.
756
+ """
757
+ try:
758
+ from policy_chat import PolicyChat
759
+ from policies_data import get_policy_by_id
760
+
761
+ data = request.json or {}
762
+ policy_ids = data.get('policy_ids', [])
763
+ question = data.get('question', '')
764
+
765
+ if not question:
766
+ return jsonify({'error': 'Question is required'}), 400
767
+
768
+ # Get full policy details
769
+ policies = []
770
+ for pid in policy_ids:
771
+ p = get_policy_by_id(pid)
772
+ if p:
773
+ policies.append(p)
774
+
775
+ chat_engine = PolicyChat()
776
+ answer = chat_engine.chat(policies, question)
777
+
778
+ return jsonify({
779
+ 'status': 'success',
780
+ 'answer': answer,
781
+ 'timestamp': datetime.now().isoformat()
782
+ })
783
+ except Exception as e:
784
+ import traceback
785
+ traceback.print_exc()
786
+ return jsonify({'error': str(e)}), 500
787
 
788
  if __name__ == "__main__":
789
  print("Training XGBoost Emission Model...")
policy_chat.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
import config


class PolicyChat:
    """Gemini-backed assistant (via LangChain) that answers questions about
    the urban policies currently selected in the simulator."""

    def __init__(self):
        # Warn loudly rather than raise so the app can still boot without a
        # key; the actual API call will fail later if the key is missing.
        if not config.GEMINI_API_KEY:
            print("WARNING: GEMINI_API_KEY is not set for PolicyChat.")

        self.llm = ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=0.5,
            google_api_key=config.GEMINI_API_KEY
        )

        self.prompt = ChatPromptTemplate.from_template("""
You are an AI policy expert for an Urban Digital Twin platform.
Your goal is to explain specific urban policies to stakeholders (city planners, citizens).

The user is asking about the following policies which are currently selected in the simulator:
{policy_context}

Context about these policies:
- These are used to reduce CO2 emissions and improve Air Quality (AQI).
- The user may ask about implementation details, costs, side effects, or effectiveness.

User Question: {question}

Provide a clear, concise, and helpful answer. validation: If the user asks about something unrelated to the policies or urban planning, politely steer them back to the topic.
""")

    def chat(self, policies, question):
        """Answer *question* about the given policies.

        policies: list of policy dicts — expects 'name', 'id', 'description',
                  and optionally a 'details' dict with 'implementation_cost'
                  and 'public_acceptance'.
        question: the user's question as a string.
        Returns the model's answer as plain text.
        """
        # Build a bullet-list context block describing each selected policy.
        # (join over a list instead of repeated string += concatenation)
        parts = []
        for p in policies:
            parts.append(f"- {p.get('name')} ({p.get('id')}): {p.get('description')}\n")
            if 'details' in p:
                details = p['details']
                parts.append(
                    f" Cost: {details.get('implementation_cost')}, "
                    f"Public Acceptance: {details.get('public_acceptance')}\n"
                )
        policy_context = "".join(parts)

        # Pass the template variables through invoke() directly instead of
        # capturing them in input-ignoring lambdas — same result, idiomatic
        # LCEL, and the chain no longer closes over per-call state.
        chain = self.prompt | self.llm | StrOutputParser()
        return chain.invoke({"policy_context": policy_context, "question": question})
static/aqi_map_hotspots_2026.png CHANGED

Git LFS Details

  • SHA256: f8f6f636ff56f419eda40c189a5e8359250041a12727f1f90c6b32515f178e92
  • Pointer size: 131 Bytes
  • Size of remote file: 446 kB

Git LFS Details

  • SHA256: 5124caba9b9df3f6179fcb64d039f6f39d4d69374bebc5adb80762611d5985a1
  • Pointer size: 131 Bytes
  • Size of remote file: 441 kB
static/aqi_map_hotspots_2027.png CHANGED

Git LFS Details

  • SHA256: 6177b027de64d6d6aa45b034374325fe6a9522751c572333298d05c1f3a3a268
  • Pointer size: 131 Bytes
  • Size of remote file: 446 kB

Git LFS Details

  • SHA256: d18c43cd9f67c434af92c34afdb4f880175c82cd33110302bb43eca651a38176
  • Pointer size: 131 Bytes
  • Size of remote file: 441 kB