CodeNine committed on
Commit
4d132d8
Β·
verified Β·
1 Parent(s): 378a9bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -47
app.py CHANGED
@@ -1,58 +1,127 @@
1
  import gradio as gr
2
  import os
3
  import requests
 
 
4
 
5
- # Groq API configuration
6
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
7
- GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
8
- MODEL = "llama3-70b-8192" # Safe, currently supported model from Groq
9
 
10
- def get_flood_risk(location, water_level, rainfall):
 
11
  prompt = f"""
12
- Analyze flood risk based on:
13
  - Location: {location}
14
- - Current water level: {water_level} meters
15
- - 24-hour rainfall: {rainfall} mm
16
- - Historical flood data for area
17
-
18
- Provide output in this exact format:
19
- "RISK: <HIGH/MEDIUM/LOW> | ALERT: <Warning message> | ACTION: <Recommended action>"
 
 
 
 
20
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- headers = {
23
- "Authorization": f"Bearer {GROQ_API_KEY}",
24
- "Content-Type": "application/json"
25
- }
26
-
27
- data = {
28
- "model": MODEL,
29
- "messages": [
30
- {"role": "system", "content": "You are a flood risk analysis expert."},
31
- {"role": "user", "content": prompt}
32
- ]
33
- }
34
-
35
- response = requests.post(GROQ_API_URL, headers=headers, json=data)
36
-
37
- if response.status_code == 200:
38
- result = response.json()
39
- message = result["choices"][0]["message"]["content"].strip()
40
- return message
41
- else:
42
- return f"❌ Error: {response.status_code} - {response.text}"
43
-
44
- # Gradio UI
45
- iface = gr.Interface(
46
- fn=get_flood_risk,
47
- inputs=[
48
- gr.Textbox(label="Location"),
49
- gr.Number(label="Current Water Level (m)"),
50
- gr.Number(label="24-hour Rainfall (mm)")
51
- ],
52
- outputs="text",
53
- title="🌊 Flood Risk Predictor",
54
- description="Enter location, water level and rainfall to get flood risk prediction using Groq AI"
55
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- if __name__ == "__main__":
58
- iface.launch()
 
1
  import gradio as gr
2
  import os
3
  import requests
4
+ import json
5
+ from datetime import datetime
6
 
7
# Configuration — the Groq API key is read from the environment; on
# Hugging Face Spaces, set it as a repository secret named GROQ_API_KEY.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
# Chat model requested from Groq (also shown in the UI footer).
MODEL = "llama3-70b-8192"
# Groq's OpenAI-compatible chat-completions endpoint.
API_URL = "https://api.groq.com/openai/v1/chat/completions"
11
 
12
def analyze_flood(location, water_level, rainfall, historical_data):
    """Query the Groq chat-completions API for a flood-risk assessment.

    Args:
        location: Free-text city/region name.
        water_level: Current water level in meters.
        rainfall: 24-hour rainfall in millimeters.
        historical_data: Truthy when historical flood data should be
            mentioned in the prompt (no data is actually fetched here —
            it only toggles one prompt line).

    Returns:
        dict: Display-ready assessment fields, or a single
        ``{"Error": ...}`` entry on failure (same contract the UI's
        JSON output expects).
    """
    # Fail fast with a clear message instead of sending "Bearer None"
    # when the secret is missing.
    if not GROQ_API_KEY:
        return {"Error": "API Failure: GROQ_API_KEY is not set (add it to your environment/secrets)"}

    prompt = f"""
As FloodAI Expert, analyze:
- Location: {location}
- Water Level: {water_level}m
- Rainfall: {rainfall}mm
- Historical Data: {'Available' if historical_data else 'Unavailable'}

Respond in JSON with:
- risk_level (HIGH/MEDIUM/LOW)
- confidence (0-100)
- alert_message
- actions (3 bullet points)
- emergency (boolean)
"""

    try:
        response = requests.post(
            API_URL,
            headers={
                "Authorization": f"Bearer {GROQ_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "model": MODEL,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are FloodAI. Respond in valid JSON only.",
                    },
                    {
                        "role": "user",
                        "content": prompt,
                    },
                ],
                # Forces the model to emit a parseable JSON object.
                "response_format": {"type": "json_object"},
            },
            timeout=10,
        )
        response.raise_for_status()
        content = response.json()["choices"][0]["message"]["content"]
        result = json.loads(content)
    except requests.RequestException as e:
        # Network / HTTP-level failure (timeout, DNS, non-2xx status, ...).
        return {"Error": f"API Failure: {str(e)}"}
    except (KeyError, IndexError, ValueError) as e:
        # Unexpected response envelope, or model output that is not valid
        # JSON (json.JSONDecodeError is a ValueError).
        return {"Error": f"API Failure: {str(e)}"}

    # Format for Gradio's JSON output component; .get() defaults keep the
    # UI usable even if the model omits a field.
    return {
        "Risk Level": result.get("risk_level", "UNKNOWN"),
        "Confidence": f"{result.get('confidence', 0)}%",
        "Alert": result.get("alert_message", "No alert generated"),
        "Recommended Actions": "\n".join(f"• {action}" for action in result.get("actions", [])),
        "Emergency": "🚨 EVACUATE" if result.get("emergency") else "⚠️ Monitor",
    }
66
 
67
# Hugging Face optimized interface. Layout is declared top-to-bottom inside
# the Blocks context; `app` is launched at import time, as HF Spaces runs
# this file directly as the entry point.
with gr.Blocks(theme=gr.themes.Soft(), title="FloodAI Pro") as app:
    # Header banner.
    gr.Markdown("""
<div style='text-align: center'>
<h1>🌊 FloodAI Pro</h1>
<p>Real-time Flood Risk Assessment powered by Groq AI</p>
</div>
""")

    with gr.Row():
        # Left column: user inputs.
        with gr.Column():
            gr.Markdown("### 📍 Location Data")
            loc_box = gr.Textbox(label="City/Region", placeholder="e.g. Karachi, Pakistan")
            level_slider = gr.Slider(0, 15, step=0.1, label="Water Level (meters)")
            rain_slider = gr.Slider(0, 500, step=5, label="24h Rainfall (mm)")
            hist_check = gr.Checkbox(label="Include historical flood data")
            analyze_btn = gr.Button("Analyze Risk", variant="primary")

        # Right column: analysis results and static safety guidance.
        with gr.Column():
            gr.Markdown("### 📊 Risk Assessment")
            results_view = gr.JSON(label="Analysis Results")

            with gr.Accordion("🛡️ Safety Recommendations", open=False):
                gr.Markdown("""
- Move to higher ground if risk is HIGH
- Prepare emergency supplies
- Monitor local authorities' instructions
""")

    # Preset scenarios the user can load with one click.
    gr.Markdown("### 🧪 Try Example Scenarios")
    gr.Examples(
        examples=[
            ["Dhaka, Bangladesh", 4.5, 200, True],
            ["Lahore, Pakistan", 2.1, 80, False],
            ["Mumbai, India", 3.8, 300, True],
        ],
        inputs=[loc_box, level_slider, rain_slider, hist_check],
        label="Click any example to load",
    )

    # Footer. NOTE(review): the timestamp is rendered once at build time,
    # not per request.
    gr.Markdown(f"""
<div style='text-align: center; color: #666'>
<p>Last Updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
<p>Powered by Groq LPU • Model: {MODEL}</p>
</div>
""")

    # Wire the button to the analysis function.
    analyze_btn.click(
        fn=analyze_flood,
        inputs=[loc_box, level_slider, rain_slider, hist_check],
        outputs=results_view,
    )

# Required for Hugging Face Spaces
app.launch(debug=True)