# Flood Risk Predictor — Gradio app (Hugging Face Space) backed by the Groq API.
| import gradio as gr | |
| import os | |
| import requests | |
# --- Groq API configuration ---
# The key is read from the environment so the secret never lives in source control.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# OpenAI-compatible chat-completions endpoint hosted by Groq.
GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"

# Groq-hosted Llama 3 70B (8192-token context) model identifier.
MODEL = "llama3-70b-8192"
def get_flood_risk(location, water_level, rainfall):
    """Ask the Groq chat-completions API for a flood-risk assessment.

    Parameters
    ----------
    location : str
        Human-readable place name typed by the user.
    water_level : float
        Current water level in meters.
    rainfall : float
        Rainfall over the last 24 hours in millimeters.

    Returns
    -------
    str
        The model's "RISK: ... | ALERT: ... | ACTION: ..." line on success,
        or a human-readable error message on any failure (never raises,
        so the Gradio UI always gets text to display).
    """
    # Fail fast with an actionable message instead of a cryptic 401 from the API.
    if not GROQ_API_KEY:
        return "❌ Error: GROQ_API_KEY environment variable is not set."

    prompt = f"""
Analyze flood risk based on:
- Location: {location}
- Current water level: {water_level} meters
- 24-hour rainfall: {rainfall} mm
- Historical flood data for area
Provide output in this exact format:
"RISK: <HIGH/MEDIUM/LOW> | ALERT: <Warning message> | ACTION: <Recommended action>"
"""
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json",
    }
    data = {
        "model": MODEL,
        "messages": [
            {"role": "system", "content": "You are a flood risk analysis expert."},
            {"role": "user", "content": prompt},
        ],
    }

    try:
        # timeout prevents the request from hanging the Space indefinitely
        # if the API is unreachable or slow.
        response = requests.post(GROQ_API_URL, headers=headers, json=data, timeout=30)
    except requests.RequestException as exc:
        # Network failures (DNS, connection reset, timeout) previously crashed
        # the handler; surface them to the UI instead.
        return f"❌ Error: request failed - {exc}"

    if response.status_code == 200:
        result = response.json()
        return result["choices"][0]["message"]["content"].strip()
    # Non-200: show status and body so the user can diagnose (bad key, quota, ...).
    # NOTE(review): original prefix "β" was mojibake; normalized to "❌".
    return f"❌ Error: {response.status_code} - {response.text}"
# ---- Gradio user interface ----
# Three inputs mirror the get_flood_risk(location, water_level, rainfall) signature.
input_widgets = [
    gr.Textbox(label="Location"),
    gr.Number(label="Current Water Level (m)"),
    gr.Number(label="24-hour Rainfall (mm)"),
]

iface = gr.Interface(
    fn=get_flood_risk,
    inputs=input_widgets,
    outputs="text",
    title="π Flood Risk Predictor",
    description="Enter location, water level and rainfall to get flood risk prediction using Groq AI",
)

# Launch the web server only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()