agentsay committed on
Commit
de0f1b1
·
verified ·
1 Parent(s): e1f71c5

Upload 12 files

Browse files
Files changed (12) hide show
  1. .python-version +1 -0
  2. Dockerfile +13 -0
  3. README.md +0 -11
  4. SETUP.md +47 -0
  5. app.py +293 -0
  6. config.py +6 -0
  7. engine.py +427 -0
  8. futureWeather.py +231 -0
  9. pyproject.toml +28 -0
  10. requirements.txt +20 -0
  11. setup.bat +2 -0
  12. uv.lock +0 -0
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11
Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Base image: Python 3.11 to match .python-version and the PEP 604
# ("str | None") annotations in app.py, which require Python >= 3.10.
FROM python:3.11

# Run as an unprivileged user (required by Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Install dependencies first so Docker layer caching survives code-only edits.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
# Hugging Face Spaces expects the app to listen on port 7860.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Augosure FastAPI Services
3
- emoji: 🌍
4
- colorFrom: red
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- short_description: Agrosure FastAPI Services
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
SETUP.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python Backend Setup Guide
2
+
3
+ ## Environment Variables Setup
4
+
5
+ The Python backend requires Cloudinary credentials to be configured. You can set these up in two ways:
6
+
7
+ ### Option 1: Environment Variables
8
+ Set the following environment variables in your system:
9
+
10
+ ```bash
11
+ export CLOUDINARY_CLOUD_NAME=your_cloudinary_cloud_name
12
+ export CLOUDINARY_API_KEY=your_cloudinary_api_key
13
+ export CLOUDINARY_API_SECRET=your_cloudinary_api_secret
14
+ ```
15
+
16
+ ### Option 2: .env File
17
+ Create a `.env` file in the `python_backend` directory with the following content:
18
+
19
+ ```
20
+ CLOUDINARY_CLOUD_NAME=your_cloudinary_cloud_name
21
+ CLOUDINARY_API_KEY=your_cloudinary_api_key
22
+ CLOUDINARY_API_SECRET=your_cloudinary_api_secret
23
+ ```
24
+
25
+ Replace `your_cloudinary_cloud_name`, `your_cloudinary_api_key`, and `your_cloudinary_api_secret` with your actual Cloudinary credentials.
26
+
27
+ ## Installation
28
+
29
+ 1. Install the required dependencies:
30
+ ```bash
31
+ pip install -r requirements.txt
32
+ ```
33
+
34
+ 2. Run the FastAPI server:
35
+ ```bash
36
+ python app.py
37
+ ```
38
+
39
+ The server will start on `http://localhost:5001`
40
+
41
+ ## API Endpoints
42
+
43
+ - `POST /api/exif_metadata` - Extract EXIF metadata from images
44
+ - `POST /api/damage_detection` - Detect crop damage from images
45
+ - `POST /api/crop_type` - Identify crop type from images
46
+ - `POST /predictForCrop` - Predict crop yield
47
+ - `POST /futureWeatherPrediction` - Get weather predictions and claim recommendations
app.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, Query
2
+ from fastapi.responses import JSONResponse
3
+ from pydantic import BaseModel
4
+ import os
5
+ import requests
6
+ import time
7
+ import cloudinary
8
+ import cloudinary.utils
9
+ import engine
10
+ import config
11
+ import futureWeather
12
+ import warnings
13
+ import re
14
+ from geopy.geocoders import Nominatim
15
+ from geopy.exc import GeocoderTimedOut
16
+ import pandas as pd
17
+
18
+ # Load environment variables from .env file
19
+ try:
20
+ from dotenv import load_dotenv
21
+ load_dotenv()
22
+ except ImportError:
23
+ print("Warning: python-dotenv not installed. Using system environment variables only.")
24
+
25
+ warnings.filterwarnings("ignore")
26
+
27
+ app = FastAPI()
28
+
29
# Configure Cloudinary using environment variables
# (values come from config.py; see SETUP.md for how to provide them).
cloudinary_config = {
    'cloud_name': config.CLOUDINARY_CLOUD_NAME,
    'api_key': config.CLOUDINARY_API_KEY,
    'api_secret': config.CLOUDINARY_API_SECRET
}

# Validate that all required Cloudinary credentials are present.
# Startup continues with a warning; downloads will fail until they are set.
if not all(cloudinary_config.values()):
    print("Warning: Some Cloudinary environment variables are missing!")
    missing = [k for k, v in cloudinary_config.items() if not v]
    print(f"Missing: {missing}")

cloudinary.config(**cloudinary_config)

# Ensure upload directory exists
# Temporary download target for assets fetched from Cloudinary.
UPLOAD_FOLDER = 'Uploads'
if not os.path.exists(UPLOAD_FOLDER):
    os.makedirs(UPLOAD_FOLDER)
49
+ # Pydantic models for request validation
50
class ImageRequest(BaseModel):
    """Reference to an image previously uploaded to Cloudinary."""
    # Cloudinary public ID of the asset to download.
    publicId: str
    # MIME type of the upload; the literal 'raw' selects Cloudinary's raw resource type.
    fileType: str
    # Original filename; when present it names the temporary local copy.
    # NOTE(review): `str | None` needs Python >= 3.10 — confirm the runtime matches.
    originalName: str | None = None
54
+
55
class CropYieldRequest(BaseModel):
    """Request body for /predictForCrop."""
    # Crop name; upper-cased by the handler before matching dataset columns.
    cropName: str
    # WGS84 coordinates of the farm location.
    locationLat: float
    locationLong: float
59
+
60
class WeatherPredictionRequest(BaseModel):
    """Request body for /futureWeatherPrediction."""
    # WGS84 coordinates of the location to forecast.
    locationLat: float
    locationLong: float
    # Language passed through to the Gemini summary generator.
    language: str
64
+
65
+ # Generate signed URL for Cloudinary
66
def get_signed_url(public_id: str, resource_type: str = 'image', expires_in: int = 300) -> str:
    """Return a signed Cloudinary delivery URL valid for ``expires_in`` seconds."""
    deadline = int(time.time()) + expires_in
    signed_url, _options = cloudinary.utils.cloudinary_url(
        public_id,
        resource_type=resource_type,
        type="authenticated",
        sign_url=True,
        expires_at=deadline,
    )
    return signed_url
76
+
77
+ # Download from Cloudinary and save to local file
78
def download_file(public_id: str, save_path: str, file_type: str = 'image/jpeg') -> bool:
    """Download a Cloudinary asset to a local path.

    Args:
        public_id: Cloudinary public ID of the asset.
        save_path: Local filesystem destination.
        file_type: Original MIME type; the literal 'raw' selects the raw resource type.

    Returns:
        True when the download succeeded (HTTP 200), False otherwise.
    """
    resource_type = 'raw' if file_type == 'raw' else 'image'
    url = get_signed_url(public_id, resource_type=resource_type)
    # timeout prevents a hung upstream request from blocking the worker forever.
    response = requests.get(url, headers={'Content-Type': file_type}, timeout=30)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            f.write(response.content)
        return True
    return False
87
+
88
+ # --- FastAPI Routes ---
89
@app.get("/")
async def root():
    """Health-check endpoint listing the available routes."""
    endpoints = {
        "exif_metadata": "/api/exif_metadata",
        "damage_detection": "/api/damage_detection",
        "crop_type": "/api/crop_type",
        "crop_yield_prediction": "/predictForCrop",
        "weather_prediction": "/futureWeatherPrediction",
    }
    return {
        "message": "Agrosure API is running!",
        "status": "healthy",
        "endpoints": endpoints,
        "docs": "/docs",
        "redoc": "/redoc",
    }
104
+
105
@app.post("/api/exif_metadata")
async def exif_metadata(image_request: ImageRequest):
    """Download the referenced image from Cloudinary and return its EXIF analysis.

    Raises:
        HTTPException(500): when the Cloudinary download fails.
    """
    filename = image_request.originalName or f"{image_request.publicId.split('/')[-1]}.jpg"
    filepath = os.path.join(UPLOAD_FOLDER, filename)

    if not download_file(image_request.publicId, filepath, image_request.fileType):
        raise HTTPException(status_code=500, detail=f"Failed to download image from Cloudinary: {image_request.publicId}")

    try:
        return engine.get_exif_data(filepath)
    finally:
        # Always remove the temp file, even if the analysis raises
        # (previously a failure here leaked the downloaded file).
        if os.path.exists(filepath):
            os.remove(filepath)
116
+
117
@app.post("/api/damage_detection")
async def damage_detection(image_request: ImageRequest):
    """Download the referenced image and run the crop-damage classifier on it.

    Raises:
        HTTPException(500): when the Cloudinary download fails.
    """
    print(f"Received damage detection request: {image_request}")
    filename = image_request.originalName or f"{image_request.publicId.split('/')[-1]}.jpg"
    filepath = os.path.join(UPLOAD_FOLDER, filename)

    if not download_file(image_request.publicId, filepath, image_request.fileType):
        raise HTTPException(status_code=500, detail=f"Failed to download image from Cloudinary: {image_request.publicId}")

    try:
        return engine.predict_damage(filepath)
    finally:
        # Always remove the temp file, even if prediction raises
        # (previously a failure here leaked the downloaded file).
        if os.path.exists(filepath):
            os.remove(filepath)
129
+
130
@app.post("/api/crop_type")
async def crop_type(image_request: ImageRequest):
    """Download the referenced image and run the crop-type classifier on it.

    Raises:
        HTTPException(500): when the Cloudinary download fails.
    """
    filename = image_request.originalName or f"{image_request.publicId.split('/')[-1]}.jpg"
    filepath = os.path.join(UPLOAD_FOLDER, filename)

    if not download_file(image_request.publicId, filepath, image_request.fileType):
        raise HTTPException(status_code=500, detail=f"Failed to download image from Cloudinary: {image_request.publicId}")

    try:
        return engine.predict_crop(filepath)
    finally:
        # Always remove the temp file, even if prediction raises
        # (previously a failure here leaked the downloaded file).
        if os.path.exists(filepath):
            os.remove(filepath)
141
+
142
@app.post("/predictForCrop")
async def predict_crop_yield(data: CropYieldRequest):
    """Predict yield for the requested crop at the given coordinates."""
    # Reject coordinates outside the valid WGS84 ranges up front.
    lat_ok = -90 <= data.locationLat <= 90
    lon_ok = -180 <= data.locationLong <= 180
    if not (lat_ok and lon_ok):
        raise HTTPException(status_code=400, detail="Invalid latitude or longitude values")

    try:
        return engine.predict_crop_yield_from_location(
            crop_input=data.cropName.upper(),
            lat=data.locationLat,
            lon=data.locationLong,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid numeric input: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
158
+
159
+
160
@app.post("/futureWeatherPrediction")
async def future_weather_prediction(data: WeatherPredictionRequest):
    """Forecast-based claim recommendation for a location, in the requested language."""
    if not (-90 <= data.locationLat <= 90) or not (-180 <= data.locationLong <= 180):
        raise HTTPException(status_code=400, detail="Invalid latitude or longitude values")

    try:
        # Prefer tomorrow.io; fall back to Open-Meteo when it is unavailable
        # or returns fewer than 7 daily entries.
        tom = futureWeather.fetch_tomorrow(data.locationLat, data.locationLong)
        daily = tom.get("timelines", {}).get("daily", []) if tom else []
        if len(daily) < 7:
            weather_data = futureWeather.fetch_open_meteo(data.locationLat, data.locationLong)
            source = "open-meteo"
        else:
            weather_data, source = tom, "tomorrow"

        summary, score, should_claim, flags = futureWeather.extract_and_calc(weather_data, source)
        ai_text = futureWeather.invoke_gemini(summary, score, should_claim, flags, data.language)

        return {
            "claim_recommendation": {
                "should_claim": should_claim,
                "weather_trend_risk_score": round(score, 2),
                "forecast_summary": summary,
                "language": data.language,
                "gemini_response": ai_text,
            }
        }
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid numeric input: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
188
+
189
+
190
+
191
+ ## MADE BY UDDALAK MUKHERJEE
192
# Load and clean crop data once on startup
# Path to the 30-year ICRISAT district-level dataset (relative to the CWD).
CROP_DATA_PATH = "data/ICRISAT-District_Level_Data_30_Years.csv"
df_crop = pd.read_csv(CROP_DATA_PATH)
# Drop identifier columns so they don't pollute the per-district means.
df_crop_clean = df_crop.drop(columns=['State Code', 'Year', 'State Name'], errors='ignore')
# Mean of every numeric column per district; the index is 'Dist Name'.
mean_crop_by_district = df_crop_clean.groupby('Dist Name').mean(numeric_only=True)
197
+
198
def get_district_from_coordinates(lat, lon):
    """Reverse-geocode (lat, lon) into a district name; raises on failure."""
    geocoder = Nominatim(user_agent="agrisure-ai")
    try:
        place = geocoder.reverse((lat, lon), exactly_one=True)
    except GeocoderTimedOut:
        raise Exception("Reverse geocoding service timed out.")
    except Exception as e:
        raise Exception(f"Geocoding error: {str(e)}")

    if not place:
        raise ValueError("Could not get district from coordinates.")

    # Handle potential async/coroutine response with type ignoring
    try:
        address = place.raw.get('address', {})  # type: ignore
    except (AttributeError, TypeError):
        try:
            # Fallback: derive something usable from the formatted address string.
            formatted = str(place.address)  # type: ignore
            address = {'display_name': formatted}
        except (AttributeError, TypeError):
            raise ValueError("Could not parse location data.")

    if not address:
        raise ValueError("Could not get district from coordinates.")

    # Nominatim varies in which key carries the district name.
    district = (
        address.get('district')
        or address.get('state_district')
        or address.get('county')
    )
    if district and 'district' in district.lower():
        district = district.replace("District", "").strip()
    return district
233
+
234
def clean_district_name(district):
    """Normalise a geocoded district name to the dataset's naming scheme."""
    if not isinstance(district, str):
        # Pass through None / non-string values untouched.
        return district
    # Strip trailing subdivision suffixes such as " - II" or " - 3".
    suffix_pattern = re.compile(
        r"\s*[-\u2013]\s*(I{1,3}|IV|V|VI|VII|VIII|IX|X|\d+)$", re.IGNORECASE
    )
    name = suffix_pattern.sub("", district)
    name = name.replace("District", "").strip()
    # Renamed / split districts mapped onto the names used in the CSV data.
    alias_map = {
        "Purba Bardhaman": "Burdwan",
        "Paschim Bardhaman": "Burdwan",
        "Bardhaman": "Burdwan",
        "Kalna": "Burdwan",
        "Kalyani": "Nadia",
        "Raiganj": "Uttar Dinajpur",
        "Kolkata": "North 24 Parganas",
    }
    return alias_map.get(name, name)
249
+
250
@app.get("/top-crops")
async def get_top_5_crops(
    lat: float = Query(..., description="Latitude of the location"),
    lon: float = Query(..., description="Longitude of the location")
):
    """Return the five highest-yielding crops for the district at (lat, lon)."""
    try:
        district_name = get_district_from_coordinates(lat, lon)
        if not district_name:
            return JSONResponse(status_code=404, content={"error": "Could not resolve district from coordinates."})

        district_name = clean_district_name(district_name)

        # Case-insensitive lookup against the dataset's district index.
        matched_district = next(
            (dist for dist in mean_crop_by_district.index
             if dist.strip().lower() == district_name.lower()),
            None,
        )
        if not matched_district:
            return JSONResponse(status_code=404, content={"error": f"District '{district_name}' not found in dataset."})

        top_crops = mean_crop_by_district.loc[matched_district].sort_values(ascending=False).head(5)

        print(top_crops)

        # Strip the measurement suffixes so only the crop name remains.
        crop_names = [
            crop.replace(" (Kg per ha)", "").replace("YIELD", "").strip()
            for crop in top_crops.index
        ]
        return {
            "district": matched_district,
            "top_5_crops": crop_names
        }

    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
285
+
286
+
287
if __name__ == "__main__":
    # Development entry point: run the server directly on port 5001.
    # (Production uses the Dockerfile's uvicorn CMD on port 7860 instead.)
    import uvicorn
    print("Starting FastAPI server...")
    print("Server will be available at:")
    print(" - http://localhost:5001")
    print("\nPress CTRL+C to stop the server")
    # reload=True enables auto-restart on code changes (development only).
    uvicorn.run("app:app", host="0.0.0.0", port=5001, reload=True)
config.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""Central configuration.

All secrets are read from the environment (callers load a .env file via
python-dotenv before importing this module) instead of being hard-coded.

NOTE(review): the previous revision committed live API keys in this file;
those credentials are permanently exposed in git history and MUST be rotated.
"""
import os

# Cloudinary credentials (see SETUP.md for how to provide them).
CLOUDINARY_CLOUD_NAME = os.getenv("CLOUDINARY_CLOUD_NAME")
CLOUDINARY_API_KEY = os.getenv("CLOUDINARY_API_KEY")
CLOUDINARY_API_SECRET = os.getenv("CLOUDINARY_API_SECRET")

# Third-party service keys.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
TOMORROW_API_KEY = os.getenv("TOMORROW_API_KEY")
OPENWEATHER_API = os.getenv("OPENWEATHER_API")
engine.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from PIL import Image
3
+ import piexif
4
+ import cv2
5
+ import numpy as np
6
+ from geopy.geocoders import Nominatim
7
+ from geopy.exc import GeocoderTimedOut
8
+ import torch
9
+ import timm
10
+ from torchvision import transforms
11
+ import torch.nn.functional as F
12
+ import pandas as pd
13
+ import re
14
+ from prophet import Prophet
15
+ from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
16
+ import requests
17
+ import json
18
+ import config
19
+ # Load environment variables from .env file
20
+ try:
21
+ from dotenv import load_dotenv
22
+ load_dotenv()
23
+ except ImportError:
24
+ print("Warning: python-dotenv not installed. Using system environment variables only.")
25
+
26
+ # --- EXIF Metadata Extraction ---
27
def get_exif_data(image_path):
    """Analyze an image's EXIF metadata and estimate its authenticity.

    Checks GPS, device model, timestamp and editing-software tags, then runs
    a simple Error Level Analysis (ELA) pass. Each missing or suspicious
    signal deducts points from a 100-point authenticity score.

    Args:
        image_path: Path to a local JPEG file.

    Returns:
        Dict with device/timestamp/GPS info, reverse-geocoded address,
        authenticity_score (0-100) and suspicious_reasons — or {"error": ...}.
    """
    if not os.path.exists(image_path):
        return {"error": f"File not found at path {image_path}"}

    suspicious_reasons = []
    authenticity_score = 100

    try:
        exif_dict = piexif.load(image_path)
        gps_info = exif_dict.get('GPS', {})

        def _convert_to_degrees(value):
            # EXIF stores D/M/S as (numerator, denominator) rational pairs.
            d, m, s = value
            return d[0]/d[1] + (m[0]/m[1])/60 + (s[0]/s[1])/3600

        lat = lon = None
        if gps_info:
            try:
                lat = round(_convert_to_degrees(gps_info[2]), 6)
                lon = round(_convert_to_degrees(gps_info[4]), 6)
                # Southern / western hemispheres are encoded as reference bytes.
                if gps_info[1] == b'S': lat *= -1
                if gps_info[3] == b'W': lon *= -1
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            except Exception:
                lat, lon = None, None
                suspicious_reasons.append("GPS data could not be parsed correctly.")
        else:
            suspicious_reasons.append("GPS metadata missing.")
            authenticity_score -= 30

        address = None
        if lat and lon:
            try:
                geolocator = Nominatim(user_agent="agrisure_exif_reader")
                location = geolocator.reverse((lat, lon))
                address = location.address if location else None  # type: ignore
            # Network/geocoder failures should not fail the whole analysis.
            except Exception:
                address = "Geocoder error"

        model = exif_dict['0th'].get(piexif.ImageIFD.Model, b"").decode('utf-8', errors='ignore')
        timestamp = exif_dict['Exif'].get(piexif.ExifIFD.DateTimeOriginal, b"").decode('utf-8', errors='ignore')
        software = exif_dict['0th'].get(piexif.ImageIFD.Software, b"").decode('utf-8', errors='ignore')

        if not model:
            suspicious_reasons.append("Device model missing.")
            authenticity_score -= 10
        if not timestamp:
            suspicious_reasons.append("Timestamp missing.")
            authenticity_score -= 20
        if software:
            # Any Software tag implies some form of post-processing.
            suspicious_reasons.append(f"Image was edited using software: {software}")
            authenticity_score -= 25

        try:
            # Error Level Analysis: re-save at quality 90 and compare noise levels.
            ela_path = image_path.replace(".jpg", "_ela.jpg")
            original = Image.open(image_path).convert('RGB')
            original.save(ela_path, 'JPEG', quality=90)
            ela_image = Image.open(ela_path)
            # NOTE(review): alpha=10 is outside PIL's documented [0, 1] blend
            # range (it extrapolates); confirm this amplification is intended.
            ela = Image.blend(original, ela_image, alpha=10)
            ela_cv = np.array(ela)
            std_dev = np.std(ela_cv)
            if std_dev > 25:
                suspicious_reasons.append("High ELA deviation — possible image tampering.")
                authenticity_score -= 15
            os.remove(ela_path)
        # Narrowed from a bare `except:`; ELA is best-effort.
        except Exception:
            suspicious_reasons.append("ELA check failed.")
            authenticity_score -= 5

        return {
            "verifier": "exif_metadata_reader",
            "device_model": model or "N/A",
            "timestamp": timestamp or "N/A",
            "gps_latitude": lat,
            "gps_longitude": lon,
            "address": address,
            "authenticity_score": max(0, authenticity_score),
            "suspicious_reasons": suspicious_reasons or ["None"]
        }
    except Exception as e:
        return {"error": f"Failed to analyze image: {str(e)}"}
107
+
108
# --- Crop Damage Detection ---
# Pick GPU when available; model weights and input tensors share this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Validation-time preprocessing: 384x384 input with ImageNet mean/std.
val_transform = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Binary damaged / non-damaged classifier; weights loaded from a local checkpoint.
model_damage = timm.create_model('efficientnetv2_rw_m', pretrained=False, num_classes=2)
model_damage.load_state_dict(torch.load("models/efficientnetv2_rw_m_crop_damage.pt", map_location=device))
model_damage.to(device)
model_damage.eval()
# Index order assumed to match the training label order — TODO confirm.
class_names = ['damaged', 'non_damaged']
120
+
121
def predict_damage(image_path):
    """Classify an image as crop 'damaged' vs 'non_damaged'.

    Returns a success dict with prediction and confidence, or a
    {"status": "error", ...} dict on any failure.
    """
    if not os.path.exists(image_path):
        return {"status": "error", "message": f"File not found: {image_path}"}

    try:
        rgb_image = Image.open(image_path).convert('RGB')
        batch = val_transform(rgb_image).unsqueeze(0).to(device)
        # Inference only — no gradient bookkeeping needed.
        with torch.no_grad():
            logits = model_damage(batch)
            scores = torch.softmax(logits, dim=1)
            label_idx = int(torch.argmax(scores, dim=1).item())
            score = float(scores[0][label_idx].item())
        return {
            "verifier": "crop_damage_classifier",
            "model": "efficientnetv2_rw_m",
            "prediction": class_names[label_idx],
            "confidence": round(score * 100, 2),
            "class_names": class_names,
            "status": "success"
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}
144
+
145
# --- Crop Type Detection ---
# Preprocessing for the ConvNeXt-Tiny classifier (224x224, ImageNet stats).
val_transforms_crop = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Class-index -> label mapping; assumed to match training order — TODO confirm.
idx_to_class = {
    0: 'Coffee-plant', 1: 'Cucumber', 2: 'Fox_nut(Makhana)', 3: 'Lemon', 4: 'Olive-tree',
    5: 'Pearl_millet(bajra)', 6: 'Tobacco-plant', 7: 'almond', 8: 'banana', 9: 'cardamom',
    10: 'cherry', 11: 'chilli', 12: 'clove', 13: 'coconut', 14: 'cotton', 15: 'gram',
    16: 'jowar', 17: 'jute', 18: 'maize', 19: 'mustard-oil', 20: 'papaya', 21: 'pineapple',
    22: 'rice', 23: 'soyabean', 24: 'sugarcane', 25: 'sunflower', 26: 'tea', 27: 'tomato',
    28: 'vigna-radiati(Mung)', 29: 'wheat'
}
# 30-class crop classifier; weights loaded from a local checkpoint.
model_crop = timm.create_model('convnext_tiny', pretrained=False, num_classes=30)
model_crop.load_state_dict(torch.load('models/crop_type_detection_model.pth', map_location=device))
model_crop.to(device)
model_crop.eval()
163
+
164
def predict_crop(image_path):
    """Identify the crop species in an image (30-class ConvNeXt classifier)."""
    if not os.path.exists(image_path):
        return {"status": "error", "message": f"File not found: {image_path}"}

    try:
        rgb_image = Image.open(image_path).convert('RGB')
        batch = val_transforms_crop(rgb_image).unsqueeze(0).to(device)
        # Inference only: disable autograd bookkeeping.
        with torch.no_grad():
            logits = model_crop(batch)
            probabilities = F.softmax(logits, dim=1)
            top_score, top_idx = torch.max(probabilities, 1)
        return {
            "status": "success",
            "predicted_class": idx_to_class[top_idx.item()],
            "confidence_percent": round(float(top_score.item()) * 100, 2)
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}
184
+
185
+ # --- Crop Yield Prediction Utilities ---
186
def get_district_from_coordinates(lat, lon):
    """Reverse-geocode a coordinate to an Indian district name.

    Returns:
        (district, place_name, error) — on success ``error`` is None and
        ``place_name`` mirrors the district; on failure the first two are None.
    """
    geocoder = Nominatim(user_agent="agrisure-ai")
    try:
        place = geocoder.reverse((lat, lon))
    except GeocoderTimedOut:
        return None, None, "Reverse geocoding service timed out."
    except Exception as e:
        return None, None, f"Geocoding error: {str(e)}"

    if not place:
        return None, None, "Could not get district from coordinates."

    try:
        address = place.raw.get('address', {})  # type: ignore
    except (AttributeError, TypeError):
        return None, None, "Could not parse location data."

    if not address:
        return None, None, "Could not get district from coordinates."

    # Nominatim is inconsistent about which key carries the district name.
    district = (
        address.get('district')
        or address.get('state_district')
        or address.get('county')
    )
    if not district:
        return None, None, "District not found in address data."
    if 'district' in district.lower():
        district = district.replace("District", "").strip()
    # place_name currently mirrors the district name.
    return district, district, None
216
+
217
def clean_district_name(district):
    """Canonicalise a geocoded district name to the dataset's spelling."""
    if not isinstance(district, str):
        return district
    # Drop trailing subdivision markers like " - II" or " - 4".
    without_suffix = re.sub(
        r"\s*[-\u2013]\s*(I{1,3}|IV|V|VI|VII|VIII|IX|X|\d+)$",
        "",
        district,
        flags=re.IGNORECASE,
    )
    name = without_suffix.replace("District", "").strip()
    # Renamed / split districts mapped back to the names the CSVs use.
    aliases = {
        "Purba Bardhaman": "Burdwan",
        "Paschim Bardhaman": "Burdwan",
        "Bardhaman": "Burdwan",
        "Kalna": "Burdwan",
        "Kalyani": "Nadia",
        "Raiganj": "Uttar Dinajpur",
        "Kolkata": "North 24 Parganas",
    }
    return aliases.get(name, name)
232
+
233
def get_soil_category(score):
    """Map a numeric soil-health score to a descriptive category label."""
    # Zero is a sentinel meaning no data was recorded for the district.
    if score == 0:
        return "No Soil Health Data"
    # Thresholds checked from highest to lowest.
    for threshold, label in (
        (4.5, "Very Excellent Soil Health"),
        (4, "Excellent Soil Health"),
        (3, "Good Soil Health"),
        (2, "Poor Soil Health"),
    ):
        if score >= threshold:
            return label
    return "Very Poor Soil Health"
246
+
247
def calculate_dynamic_climate_score(predicted_yield, soil_score, max_yield=8000, max_soil=5.0):
    """Blend normalised yield (60%) and soil health (40%) into a 0-100 score."""
    # Exponents soften/sharpen the normalised components before weighting.
    yield_component = (predicted_yield / max_yield) ** 0.8
    soil_component = (soil_score / max_soil) ** 1.2
    combined = 0.6 * yield_component + 0.4 * soil_component
    return round(combined * 100, 2)
251
+
252
def forecast_yield(ts_data):
    """Fit a flat-growth Prophet model and return next year's yield forecast."""
    prophet_model = Prophet(yearly_seasonality='auto', growth='flat')
    prophet_model.fit(ts_data)
    horizon = prophet_model.make_future_dataframe(periods=1, freq='YS')
    prediction = prophet_model.predict(horizon)
    # Clamp to zero: a negative yield forecast is not physically meaningful.
    return max(prediction.iloc[-1]['yhat'], 0)
257
+
258
def forecast_yield_with_accuracy(ts_data):
    """Forecast next year's yield and report in-sample accuracy.

    Args:
        ts_data: DataFrame with Prophet columns 'ds' (datetime) and 'y' (yield).

    Returns:
        (predicted_yield, mae, mape) — mae/mape are None when the in-sample
        comparison cannot be computed.
    """
    model = Prophet(yearly_seasonality='auto', growth='flat')
    model.fit(ts_data)
    future = model.make_future_dataframe(periods=1, freq='YS')
    forecast = model.predict(future)
    # Clamp to zero: negative yields are not physically meaningful.
    predicted_yield = max(forecast.iloc[-1]['yhat'], 0)

    try:
        # Compare fitted values against history to estimate accuracy.
        past = forecast[forecast['ds'] < ts_data['ds'].max()]
        merged = ts_data.merge(past[['ds', 'yhat']], on='ds')
        mae = mean_absolute_error(merged['y'], merged['yhat'])
        mape = mean_absolute_percentage_error(merged['y'], merged['yhat']) * 100
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate;
    # accuracy is best-effort, so fall back to "unknown" instead of failing.
    except Exception:
        mae, mape = None, None

    return predicted_yield, mae, mape
274
+
275
def get_crop_priority_list(district_yield, base_crop_names):
    """Rank crops for a district by forecast yield, best first."""
    ranked = []
    for crop_name, yield_column in base_crop_names.items():
        history = district_yield[['Year', yield_column]].dropna()
        history.columns = ['ds', 'y']
        history['ds'] = pd.to_datetime(history['ds'], format='%Y')
        # Require a minimum amount of history for a sensible forecast.
        if len(history) >= 5:
            ranked.append((crop_name, forecast_yield(history)))
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked
285
+
286
def get_weather_data(lat, lon):
    """Fetch current weather for a coordinate via weatherapi.com.

    Returns:
        Dict with temp_c/humidity/condition/wind_kph on success, or an
        {"error": ...} dict on any failure (this function never raises).
    """
    try:
        # NOTE(review): the key is stored under OPENWEATHER_API but the
        # request goes to weatherapi.com — confirm which service it is for.
        weather_api_key = config.OPENWEATHER_API
        if weather_api_key and weather_api_key != "your_openweather_api_key_here":
            url = f"https://api.weatherapi.com/v1/current.json?key={weather_api_key}&q={lat},{lon}"
            # timeout prevents a hung request from blocking the caller forever.
            response = requests.get(url, timeout=15)
            # Surface HTTP errors explicitly instead of failing on missing keys.
            response.raise_for_status()
            data = response.json()
            return {
                "temp_c": data['current']['temp_c'],
                "humidity": data['current']['humidity'],
                "condition": data['current']['condition']['text'],
                "wind_kph": data['current']['wind_kph']
            }
        else:
            return {"error": "Weather API key not configured or placeholder value"}
    except Exception as e:
        return {"error": "Weather fetch failed", "details": str(e)}
304
+
305
def _categorize_yield(yield_value):
    """Map a predicted yield (kg/ha) to the recommendation category label."""
    if yield_value > 1000:
        return "Highly Recommended Crop"
    if yield_value > 500:
        return "Good Crop"
    if yield_value > 200:
        return "Poor Crop"
    return "Very Poor Crop"


def predict_crop_yield_from_location(crop_input, lat, lon):
    """Predict the yield of ``crop_input`` at a coordinate and rank alternatives.

    Args:
        crop_input: Crop name matching the ICRISAT column prefixes (e.g. 'RICE').
        lat, lon: WGS84 coordinates used to resolve the district.

    Returns:
        Nested dict with location, input-crop analysis, soil health, climate
        score, current weather and a ranked crop priority list — or
        {"error": ...} on any recoverable failure.
    """
    district, place_name, error = get_district_from_coordinates(lat, lon)
    if error:
        return {"error": error}

    if district is None:
        return {"error": "Could not determine district from coordinates"}

    district_input = clean_district_name(district)

    try:
        # Forward slashes work on every OS — the previous Windows-style
        # backslash raw-string paths failed inside the Linux Docker image.
        yield_df = pd.read_csv("data/ICRISAT-District_Level_Data_30_Years.csv")
        soil_df = pd.read_csv("data/SoilHealthScores_by_District_2.csv")
    except Exception as e:
        return {"error": f"Failed to read data files: {str(e)}"}

    soil_df['Soil_Category'] = soil_df['SoilHealthScore'].apply(get_soil_category)
    yield_columns = [col for col in yield_df.columns if 'YIELD (Kg per ha)' in col]
    base_crop_names = {col.split(' YIELD')[0]: col for col in yield_columns}

    if crop_input not in base_crop_names:
        return {"error": f"'{crop_input}' not found in crop list."}

    yield_col = base_crop_names[crop_input]

    # Ensure district_input is not None before using lower()
    if district_input is None:
        return {"error": "Could not determine district name"}

    district_yield = yield_df[yield_df['Dist Name'].str.lower() == district_input.lower()]
    district_soil = soil_df[soil_df['Dist Name'].str.lower() == district_input.lower()]

    if district_yield.empty or district_soil.empty:
        return {"error": f"Data for district '{district_input}' not found."}

    ts_data = district_yield[['Year', yield_col]].dropna()
    ts_data.columns = ['ds', 'y']
    ts_data['ds'] = pd.to_datetime(ts_data['ds'], format='%Y')
    ts_data['year'] = ts_data['ds'].dt.year

    # With too few non-zero observations the forecaster is unreliable:
    # fall back to the historical mean and report accuracy as unavailable.
    valid_data = ts_data[ts_data['y'] > 0]
    if len(valid_data) < 6:
        predicted_yield = ts_data['y'].mean()
        mae, mape = None, None
    else:
        predicted_yield, mae, mape = forecast_yield_with_accuracy(valid_data)

    yield_cat = _categorize_yield(predicted_yield)

    soil_score = district_soil['SoilHealthScore'].values[0]
    soil_cat = district_soil['Soil_Category'].values[0]
    climate_score = calculate_dynamic_climate_score(predicted_yield, soil_score)

    sorted_crops = get_crop_priority_list(district_yield, base_crop_names)
    best_crop = sorted_crops[0][0] if sorted_crops else None
    best_yield = sorted_crops[0][1] if sorted_crops else None

    weather_data = get_weather_data(lat, lon)

    crop_priority_list = []
    for c, y in sorted_crops:
        crop_priority_list.append({
            "crop": c,
            "predicted_yield": {
                "kg_per_ha": round(y, 2),
                # 1 hectare = 2.47105 acres
                "kg_per_acre": round(y / 2.47105, 2)
            },
            "yield_category": _categorize_yield(y),
            "climate_score": calculate_dynamic_climate_score(y, soil_score)
        })

    return {
        "location": {
            "input_coordinates": {"lat": lat, "lon": lon},
            "place_name": place_name,
            "detected_district": district,
        },
        "input_crop_analysis": {
            "crop": crop_input,
            "predicted_yield": {
                "kg_per_ha": round(predicted_yield, 2),
                "kg_per_acre": round(predicted_yield / 2.47105, 2)
            },
            "yield_category": yield_cat,
            "prediction_accuracy": {
                "mae": round(mae, 2) if mae is not None else "Not enough data",
                "mape_percent": round(mape, 2) if mape is not None else "Not enough data",
                "accuracy_score": round(100 - mape, 2) if mape is not None else "Not enough data"
            }
        },
        "soil_health": {
            "score": soil_score,
            "category": soil_cat
        },
        "climate_score": climate_score,
        "weather_now": weather_data,
        "best_crop": {
            "name": best_crop,
            "predicted_yield": {
                "kg_per_ha": round(best_yield, 2) if best_crop and best_yield is not None else None,
                "kg_per_acre": round(best_yield / 2.47105, 2) if best_crop and best_yield is not None else None,
            }
        },
        "crop_priority_list": crop_priority_list
    }
futureWeather.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import requests
3
+ import json
4
+ import numpy as np
5
+ import os
6
+
7
# Load environment variables from .env file.
# python-dotenv is an optional dependency: without it, only variables already
# present in the process environment are visible to os.getenv below.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    print("Warning: python-dotenv not installed. Using system environment variables only.")

# The Gemini SDK is also optional; when it is missing, invoke_gemini() falls
# back to a canned text response instead of calling the model.
try:
    import google.generativeai as genai
    GENAI_AVAILABLE = True
except ImportError:
    print("Warning: google.generativeai not available")
    genai = None
    GENAI_AVAILABLE = False
21
+
22
# --- CONFIG ---
# API credentials are read from the environment (optionally populated from a
# .env file above). Either key may be None; downstream code checks for that.
TOMORROW_API_KEY = os.getenv('TOMORROW_API_KEY')
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')

# Configure Gemini AI if available. A failed configure() flips
# GENAI_AVAILABLE off so later calls degrade gracefully to fallback text.
if GENAI_AVAILABLE and genai and GEMINI_API_KEY:
    try:
        genai.configure(api_key=GEMINI_API_KEY)  # type: ignore
    except Exception as e:
        print(f"Warning: Failed to configure Gemini AI: {e}")
        GENAI_AVAILABLE = False
33
+
34
# --- Ideal Ranges ---
# Comfort bands consumed by normalized_risk(); values outside a band
# contribute risk. Units follow the summary built in extract_and_calc:
# rain in mm (total over the forecast window), temperature in °C,
# humidity in %, wind in km/h.
ideal_ranges = {
    "rain": {"ideal_min": 10, "ideal_max": 100},
    "temperature": {"ideal_min": 20, "ideal_max": 35},
    "humidity": {"ideal_min": 40, "ideal_max": 80},
    "wind": {"ideal_min": 0, "ideal_max": 20}
}

# --- Weather Risk Weights ---
# Each factor carries a relative weight in the overall risk score (weights
# sum to 1.0) and a "value" slot initialized to 0.
weather_factors = {
    "rain_risk": {"weight": 0.4, "value": 0},
    "heat_risk": {"weight": 0.3, "value": 0},
    "humidity_risk": {"weight": 0.2, "value": 0},
    "wind_risk": {"weight": 0.1, "value": 0}
}
49
+
50
# --- Normalize Risk ---
def normalized_risk(actual, ideal_min, ideal_max):
    """Return a 0..1 risk for how far ``actual`` falls outside the ideal band.

    Args:
        actual: Observed value, in the same unit as the range bounds.
        ideal_min: Lower bound of the ideal range.
        ideal_max: Upper bound of the ideal range.

    Returns:
        0 when ``ideal_min <= actual <= ideal_max``; otherwise the deviation
        from the violated boundary, scaled by that boundary's magnitude and
        capped at 1.0. A zero boundary (e.g. ``ideal_min == 0`` for wind)
        is treated as maximal risk rather than raising ZeroDivisionError.
    """
    if ideal_min <= actual <= ideal_max:
        return 0
    # Which edge of the ideal band was violated?
    boundary = ideal_min if actual < ideal_min else ideal_max
    if boundary == 0:
        # Bug fix: the original divided by zero when the violated boundary
        # was 0 (possible for the wind range, whose ideal_min is 0).
        return 1.0
    return min(1.0, abs(actual - boundary) / boundary)
55
+
56
+ # --- Localize Flags ---
57
+ def localize_flags(flags, lang):
58
+ translations = {
59
+ "Unusual rainfall": {
60
+ "Bengali": "Rainfall is unusually high or low",
61
+ "Hindi": "Rainfall is unusually high or low",
62
+ "English": "Rainfall is unusually high or low"
63
+ },
64
+ "Heat stress": {
65
+ "Bengali": "High temperatures may cause crop stress",
66
+ "Hindi": "High temperatures may cause crop stress",
67
+ "English": "High temperatures may cause crop stress"
68
+ },
69
+ "High humidity": {
70
+ "Bengali": "High humidity may cause fungal diseases",
71
+ "Hindi": "High humidity may cause fungal diseases",
72
+ "English": "High humidity may cause fungal diseases"
73
+ },
74
+ "High wind": {
75
+ "Bengali": "Strong winds may damage crops",
76
+ "Hindi": "Strong winds may damage crops",
77
+ "English": "Strong winds may damage crops"
78
+ }
79
+ }
80
+ # Filter out None values and ensure all returned values are strings
81
+ result = []
82
+ for f in flags:
83
+ if f is not None:
84
+ translated = translations.get(f, {}).get(lang, f)
85
+ if translated is not None:
86
+ result.append(str(translated))
87
+ return result
88
+
89
# --- API Fetches ---
def fetch_tomorrow(lat, lon):
    """Fetch a daily (1d timestep) forecast from Tomorrow.io.

    Returns:
        The decoded JSON payload on HTTP 200; otherwise ``None`` (including
        when the API key is missing, the request errors, or decoding fails).
    """
    if not TOMORROW_API_KEY:
        print("Warning: TOMORROW_API_KEY not found in environment variables")
        return None

    endpoint = (
        "https://api.tomorrow.io/v4/weather/forecast"
        f"?location={lat},{lon}&timesteps=1d&apikey={TOMORROW_API_KEY}"
    )
    try:
        resp = requests.get(endpoint, timeout=10)
        if resp.status_code != 200:
            return None
        return resp.json()
    except Exception as e:
        print(f"Error fetching Tomorrow.io data: {e}")
        return None
102
+
103
def fetch_open_meteo(lat, lon):
    """Fetch a 16-day daily forecast from the Open-Meteo API.

    Returns:
        The decoded JSON payload on HTTP 200; otherwise ``None``.
    """
    query = (
        f"latitude={lat}&longitude={lon}"
        "&daily=temperature_2m_max,temperature_2m_mean,precipitation_sum,"
        "relative_humidity_2m_mean,wind_speed_10m_mean"
        "&forecast_days=16&timezone=auto"
    )
    url = "https://api.open-meteo.com/v1/forecast?" + query
    try:
        resp = requests.get(url, timeout=10)
        if resp.status_code != 200:
            return None
        return resp.json()
    except Exception as e:
        print(f"Error fetching Open-Meteo data: {e}")
        return None
116
+
117
# --- Weather Trend Analysis ---
def extract_and_calc(data, source):
    """Summarize a multi-day forecast and derive a claim-worthiness signal.

    Args:
        data: Raw JSON payload from Tomorrow.io or Open-Meteo.
        source: "tomorrow" for Tomorrow.io payloads; any other value is
            treated as Open-Meteo.

    Returns:
        Tuple ``(summary, risk_score, should_claim, flags)``:
        - summary: dict of aggregate metrics (temps °C, rain mm, humidity %,
          wind km/h, dry-day count, days used, source).
        - risk_score: weighted 0..1 risk across rain/heat/humidity/wind.
        - should_claim: True when risk_score >= 0.5.
        - flags: raw (untranslated) risk labels whose factor exceeded 0.3.
        On a malformed payload, returns neutral default values with an
        "error" key in the summary.
    """
    try:
        if source == "tomorrow":
            if not data or "timelines" not in data or "daily" not in data["timelines"]:
                raise ValueError("Invalid Tomorrow.io data structure")
            arr = data["timelines"]["daily"]
            days = len(arr)
            rain = [v["values"].get("precipitationSum", 0) for v in arr]
            temp_avg = [v["values"].get("temperatureAvg", 0) for v in arr]
            temp_max = [v["values"].get("temperatureMax", 0) for v in arr]
            humidity = [v["values"].get("humidityAvg", 0) for v in arr]
            wind = [v["values"].get("windSpeedAvg", 0) for v in arr]
        else:  # open-meteo
            if not data or "daily" not in data:
                raise ValueError("Invalid Open-Meteo data structure")
            d = data["daily"]
            days = len(d["time"])
            rain = d.get("precipitation_sum", [0] * days)
            temp_avg = d.get("temperature_2m_mean", [0] * days)
            temp_max = d.get("temperature_2m_max", [0] * days)
            humidity = d.get("relative_humidity_2m_mean", [0] * days)
            wind = d.get("wind_speed_10m_mean", [0] * days)

        # APIs may report missing readings as null; coerce to 0 so the
        # numpy aggregations below cannot fail on None.
        rain = [r if r is not None else 0 for r in rain]
        temp_avg = [t if t is not None else 0 for t in temp_avg]
        temp_max = [t if t is not None else 0 for t in temp_max]
        humidity = [h if h is not None else 0 for h in humidity]
        wind = [w if w is not None else 0 for w in wind]

        total_rain = float(np.sum(rain))
        avg_temp = float(np.mean(temp_avg))
        max_temp = float(np.max(temp_max))
        avg_humidity = float(np.mean(humidity))
        avg_wind = float(np.mean(wind))
        dry_days = int(sum(1 for r in rain if r < 1))  # days with < 1 mm rain
    except Exception as e:
        print(f"Error processing weather data: {e}")
        # Return neutral default values so callers still get a usable shape.
        return {
            "avg_temp_c": 25.0,
            "max_temp_c": 30.0,
            "total_rainfall_mm": 50.0,
            "dry_days": 3,
            "avg_humidity_percent": 60.0,
            "avg_wind_speed_kmph": 10.0,
            "forecast_days_used": 7,
            "source": source,
            "error": str(e)
        }, 0.3, False, []

    # Bug fix: compute per-factor risks in a local dict instead of mutating
    # the module-level `weather_factors` entries — that shared mutable state
    # leaked per-call values between invocations. Weights are still read
    # (read-only) from the module constant.
    risks = {
        "rain_risk": normalized_risk(total_rain, **ideal_ranges["rain"]),
        "heat_risk": normalized_risk(avg_temp, **ideal_ranges["temperature"]),
        "humidity_risk": normalized_risk(avg_humidity, **ideal_ranges["humidity"]),
        "wind_risk": normalized_risk(avg_wind, **ideal_ranges["wind"]),
    }

    risk_score = float(sum(risks[name] * weather_factors[name]["weight"] for name in risks))
    should_claim = bool(risk_score >= 0.5)

    # A factor contributes a human-readable flag once its risk exceeds 0.3.
    flag_labels = (
        ("rain_risk", "Unusual rainfall"),
        ("heat_risk", "Heat stress"),
        ("humidity_risk", "High humidity"),
        ("wind_risk", "High wind"),
    )
    flags = [label for name, label in flag_labels if risks[name] > 0.3]

    summary = {
        "avg_temp_c": round(avg_temp, 2),
        "max_temp_c": round(max_temp, 2),
        "total_rainfall_mm": round(total_rain, 2),
        "dry_days": dry_days,
        "avg_humidity_percent": round(avg_humidity, 2),
        "avg_wind_speed_kmph": round(avg_wind, 2),
        "forecast_days_used": days,
        "source": source
    }

    return summary, risk_score, should_claim, flags
199
+
200
# --- Gemini AI Interpretation ---
def invoke_gemini(summary, score, should_claim, flags, lang):
    """Generate a localized crop-insurance recommendation via Gemini.

    Args:
        summary: Aggregate weather metrics dict as produced by
            extract_and_calc (total_rainfall_mm, avg_temp_c, max_temp_c,
            avg_humidity_percent, avg_wind_speed_kmph, dry_days).
        score: Overall weighted risk score. NOTE(review): not referenced in
            the prompt or fallbacks below — confirm whether it should be.
        should_claim: Precomputed claim decision, used for the fallback text.
        flags: Raw risk labels; localized before being put into the prompt.
        lang: Target language name for the response (e.g. "English").

    Returns:
        The model's text response, or a plain-text fallback string when the
        Gemini SDK is unavailable or the API call fails.
    """
    # Degrade gracefully when the SDK never imported/configured.
    if not GENAI_AVAILABLE or not genai:
        return f"AI service unavailable. Based on weather analysis: {'Claim recommended' if should_claim else 'No claim needed'}"

    localized_flags = localize_flags(flags, lang)
    prompt = f"""
    You are a crop insurance assistant. Respond ONLY in {lang}.

    Weather Summary:
    - Total Rainfall: {summary['total_rainfall_mm']} mm
    - Avg Temperature: {summary['avg_temp_c']} °C
    - Max Temperature: {summary['max_temp_c']} °C
    - Avg Humidity: {summary['avg_humidity_percent']} %
    - Avg Wind Speed: {summary['avg_wind_speed_kmph']} km/h
    - Dry Days: {summary['dry_days']} days

    Risks Observed:
    - {'; '.join(localized_flags) if localized_flags else 'No major weather risks observed.'}

    Final Output:
    - Bullet points for why claim is or is not needed.
    - A brief interpretation about whether to claim crop insurance or not.
    """

    try:
        model = genai.GenerativeModel("gemini-2.0-flash")  # type: ignore
        response = model.generate_content(prompt)
        return response.text.strip()
    except Exception as e:
        print(f"Error calling Gemini AI: {e}")
        return f"AI service error. Based on weather analysis: {'Claim recommended' if should_claim else 'No claim needed'}"
pyproject.toml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "python-backend"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ "cloudinary>=1.44.1",
9
+ "cupy-cuda102>=12.3.0",
10
+ "fastapi>=0.116.0",
11
+ "flask==2.2.5",
12
+ "geopy==2.4.1",
13
+ "google-generativeai>=0.8.5",
14
+ "numpy==1.24.3",
15
+ "ollama>=0.5.1",
16
+ "opencv-python==4.8.0.76",
17
+ "pandas",
18
+ "piexif==1.1.3",
19
+ "pillow==9.5.0",
20
+ "plotly>=6.2.0",
21
+ "prophet>=1.1.7",
22
+ "python-dotenv>=1.1.1",
23
+ "scikit-learn>=1.7.0",
24
+ "sentence-transformers>=4.1.0",
25
+ "timm==0.9.2",
26
+ "torch==2.0.1",
27
+ "uvicorn>=0.35.0",
28
+ ]
requirements.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cloudinary==1.44.1
2
+ fastapi==0.116.1
3
+ geopy==2.3.0
4
+ numpy==2.3.2
5
+ opencv_contrib_python==4.10.0.84
6
+ opencv_python==4.10.0.84
7
+ opencv_python_headless==4.10.0.84
8
+ pandas==2.3.1
9
+ piexif==1.1.3
10
+ Pillow==11.3.0
11
+ prophet==1.1.6
12
+ protobuf==6.31.1
13
+ pydantic==2.11.7
14
+ python-dotenv==1.1.1
15
+ Requests==2.32.4
16
+ scikit_learn==1.7.1
17
+ timm==1.0.19
18
+ torch==2.7.1+cu128
19
+ torchvision==0.22.1+cu128
20
+ uvicorn==0.35.0
setup.bat ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ uv add -r requirements.txt
2
+ uv run app.py
uv.lock ADDED
The diff for this file is too large to render. See raw diff