|
|
from flask import Flask, render_template, request |
|
|
import joblib |
|
|
import pandas as pd |
|
|
import google.generativeai as genai |
|
|
from openai import OpenAI |
|
|
import os |
|
|
import time |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load environment-variable configuration (API keys) from a local .env file.
load_dotenv()


app = Flask(__name__)


# Pre-trained random-forest models loaded once at import time:
#  - rf_ferti_name:  predicts the encoded fertilizer name (decoded via
#    fertilizer_name_encodings in the route handler)
#  - rf_ferti_value: predicts the recommended fertilizer quantity
# NOTE(review): paths are relative to the process working directory —
# confirm the .pkl files sit next to the app when deploying.
rf_ferti_name = joblib.load('rf_ferti_name.pkl')


rf_ferti_value = joblib.load('rf_ferti_value.pkl')
|
|
|
|
|
|
|
|
# Label encodings used when the models were trained. The integer codes
# follow the alphabetical order of the category names, so the mappings
# are generated from ordered name lists instead of hand-written pairs.
soil_type_encodings = {
    name: code
    for code, name in enumerate(['Black', 'Clayey', 'Loamy', 'Red', 'Sandy'])
}

crop_type_encodings = {
    name: code
    for code, name in enumerate([
        'Barley', 'Cotton', 'Ground Nuts', 'Maize', 'Millets', 'Oil seeds',
        'Other Variety', 'Paddy', 'Pulses', 'Sugarcane', 'Tobacco', 'Wheat',
    ])
}

fertilizer_name_encodings = {
    name: code
    for code, name in enumerate([
        '10-26-26', '14-35-14', '15-15-15', '17-17-17', '20-20',
        '20-20-20', '28-28', 'Ammonium sulfate',
        'Biofertilizer (e.g., Rhizobium)', 'Calcium nitrate', 'DAP',
        'Ferrous sulfate', 'Magnesium sulfate',
        'Potassium chloride/Muriate of potash (MOP)',
        'Potassium sulfate/Sulfate of potash (SOP)', 'Rock phosphate (RP)',
        'Single superphosphate (SSP)', 'Triple superphosphate (TSP)',
        'Urea', 'Zinc sulfate',
    ])
}
|
|
|
|
|
|
|
|
# Provider API keys. Each key is optional; a provider whose key is absent
# is skipped entirely by the generation pipeline below.
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')


NVIDIA_API_KEY = os.getenv('NVIDIA_API_KEY')


# Configure the Gemini SDK only when a key was supplied.
if GEMINI_API_KEY:

    genai.configure(api_key=GEMINI_API_KEY)


# Gemini models to try, in priority order (first success wins).
# NOTE(review): only "name" and "max_retries" are read by the request
# helpers; "timeout" and "description" are informational — confirm
# before relying on a timeout being enforced.
GEMINI_MODELS = [

    {"name": "gemini-2.0-flash-exp", "max_retries": 2, "timeout": 30, "description": "Latest experimental"},

    {"name": "gemini-1.5-pro-latest", "max_retries": 2, "timeout": 45, "description": "Most capable"},

    {"name": "gemini-1.5-flash", "max_retries": 3, "timeout": 20, "description": "Fast and reliable"},

    {"name": "gemini-1.5-flash-8b", "max_retries": 3, "timeout": 15, "description": "Lightweight"},

]


# NVIDIA-hosted fallback models, tried only after every Gemini model fails.
NVIDIA_MODELS = [

    {"name": "meta/llama-3.2-90b-vision-instruct", "max_retries": 2, "timeout": 40, "description": "High capability"},

    {"name": "meta/llama-3.2-11b-vision-instruct", "max_retries": 2, "timeout": 30, "description": "Balanced"},

]
|
|
|
|
|
|
|
|
def retry_with_backoff(func, max_retries=3, initial_delay=1):
    """Call *func* repeatedly until it succeeds, with exponential backoff.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts; must be >= 1.
        initial_delay: Seconds to wait before the second attempt; the
            delay doubles after each subsequent failure (1s, 2s, 4s, ...).

    Returns:
        Whatever *func* returns on its first successful call.

    Raises:
        ValueError: If ``max_retries`` is less than 1.
        Exception: The last exception raised by *func* once all attempts
            are exhausted.
    """
    # BUG FIX: previously a non-positive max_retries skipped the loop and
    # silently returned None; fail loudly instead.
    if max_retries < 1:
        raise ValueError("max_retries must be >= 1")

    for attempt in range(max_retries):
        try:
            return func()
        except Exception as e:
            # Last attempt: propagate the failure to the caller.
            if attempt == max_retries - 1:
                raise
            delay = initial_delay * (2 ** attempt)
            print(f" >> Retry {attempt + 1}/{max_retries} after {delay}s (Error: {type(e).__name__})")
            time.sleep(delay)
|
|
|
|
|
|
|
|
def generate_with_gemini(prompt, model_config):
    """Ask a single Gemini model for a completion, retrying on failure.

    Args:
        prompt: The text prompt to send.
        model_config: Dict carrying the model "name" and an optional
            "max_retries" count (default 2).

    Returns:
        The response text, or None when every attempt failed.
    """
    name = model_config["name"]
    retries = model_config.get("max_retries", 2)

    def _call_model():
        print(f" >> Attempting Gemini: {name}")
        reply = genai.GenerativeModel(name).generate_content(prompt)
        # A missing or blank reply counts as a failure worth retrying.
        if reply and reply.text:
            return reply.text
        raise ValueError("Empty response from model")

    try:
        return retry_with_backoff(_call_model, max_retries=retries)
    except Exception as exc:
        print(f" >> FAILED {name}: {type(exc).__name__}")
        return None
|
|
|
|
|
|
|
|
def generate_with_nvidia(prompt, model_config):
    """Generate text via NVIDIA's OpenAI-compatible API with retry logic.

    Args:
        prompt: User prompt, sent as a single chat message.
        model_config: Dict carrying the model "name" and an optional
            "max_retries" count (default 2).

    Returns:
        The completion text, or None when the API key is missing or all
        attempts fail.
    """
    if not NVIDIA_API_KEY:
        # No key configured: skip this provider quietly.
        return None

    model_name = model_config["name"]
    max_retries = model_config.get("max_retries", 2)

    # The client is retry-invariant configuration; build it once instead
    # of reconstructing it on every attempt.
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=NVIDIA_API_KEY
    )

    def _attempt():
        print(f" >> Attempting NVIDIA: {model_name}")
        completion = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=500,
            temperature=0.7
        )

        response_text = completion.choices[0].message.content
        # Treat a blank completion as a retryable failure.
        if not response_text:
            raise ValueError("Empty response from NVIDIA")

        return response_text

    try:
        return retry_with_backoff(_attempt, max_retries=max_retries)
    except Exception as e:
        print(f" >> FAILED NVIDIA {model_name}: {type(e).__name__}")
        return None
|
|
|
|
|
|
|
|
def generate_ai_suggestions(pred_fertilizer_name):
    """Produce short usage notes for a fertilizer via LLMs with fallback.

    Tries each Gemini model in priority order, then each NVIDIA model,
    and finally falls back to canned static text, so the caller always
    receives a non-empty string.

    Args:
        pred_fertilizer_name: Human-readable fertilizer name to describe.

    Returns:
        A few sentences of advisory text (never None).
    """
    print("\n" + "=" * 60)
    print(f"π± GENERATING AI SUGGESTIONS FOR: {pred_fertilizer_name}")
    print("=" * 60)

    # BUG FIX: the two fragments previously concatenated with no separator
    # ("...thats all okayText should..."); a trailing space restores the
    # intended sentence boundary in the prompt.
    prompt = (
        f"For {pred_fertilizer_name} fertilizer, generate 3-4 Short Informative sentences each on a new line. Content should not be very big max to max 4 sentence thats all okay "
        f"Text should be justified and should not contain any special characters."
    )

    response_text = None
    used_model = "None"

    # Phase 1: preferred provider (Gemini); first success wins.
    if GEMINI_API_KEY:
        print("\n--- PHASE 1: Trying Gemini Models ---")
        for idx, model_config in enumerate(GEMINI_MODELS, 1):
            print(f"[{idx}/{len(GEMINI_MODELS)}] Testing {model_config['name']}...")
            response_text = generate_with_gemini(prompt, model_config)

            if response_text:
                used_model = f"Gemini-{model_config['name']}"
                print(f" β SUCCESS with {used_model}")
                break

    # Phase 2: NVIDIA fallback, only when Gemini produced nothing.
    if not response_text and NVIDIA_API_KEY:
        print("\n--- PHASE 2: Trying NVIDIA Models (Fallback) ---")
        for idx, model_config in enumerate(NVIDIA_MODELS, 1):
            print(f"[{idx}/{len(NVIDIA_MODELS)}] Testing {model_config['name']}...")
            response_text = generate_with_nvidia(prompt, model_config)

            if response_text:
                used_model = f"NVIDIA-{model_config['name']}"
                print(f" β SUCCESS with {used_model}")
                break

    # Phase 3: static text so the page never renders without guidance.
    if not response_text:
        print("\nβ All LLM providers failed. Using fallback text.")
        response_text = (
            f"{pred_fertilizer_name} is a commonly used fertilizer in agriculture. "
            f"It provides essential nutrients to crops. "
            f"Follow recommended dosage for best results. "
            f"Consult local agricultural experts for specific guidance."
        )
        used_model = "Fallback"

    # BUG FIX: this print previously spanned two source lines inside a
    # single-quoted f-string, which is a SyntaxError; keep it on one line.
    print(f"\nβ Generated using: {used_model}")
    print("=" * 60 + "\n")

    return response_text
|
|
|
|
|
|
|
|
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the input form (GET) or run a fertilizer prediction (POST).

    POST expects form fields: temperature, humidity, moisture, soil_type,
    crop_type, nitrogen, potassium, phosphorous. On success, re-renders
    index.html with the predicted fertilizer name, quantity, and
    AI-generated usage notes.
    """
    if request.method != 'POST':
        return render_template('index.html', prediction=False)

    # Numeric sensor/nutrient readings from the form.
    # NOTE(review): malformed input raises ValueError here and surfaces
    # as a 500; consider validating and re-rendering with an error message.
    temperature = float(request.form['temperature'])
    humidity = float(request.form['humidity'])
    moisture = float(request.form['moisture'])
    soil_type = request.form['soil_type']
    crop_type = request.form['crop_type']
    nitrogen = float(request.form['nitrogen'])
    potassium = float(request.form['potassium'])
    phosphorous = float(request.form['phosphorous'])

    # Map categorical fields to the training-time integer codes; -1 flags
    # an unrecognized category instead of raising.
    soil_type_encoded = soil_type_encodings.get(soil_type, -1)
    crop_type_encoded = crop_type_encodings.get(crop_type, -1)

    # Column names/order must match the frame the models were fitted on.
    user_input = pd.DataFrame({
        'Temperature': [temperature],
        'Humidity': [humidity],
        'Moisture': [moisture],
        'Nitrogen': [nitrogen],
        'Potassium': [potassium],
        'Phosphorous': [phosphorous],
        'Soil Type': [soil_type_encoded],
        'Crop Type': [crop_type_encoded]
    })

    # The classifier outputs an encoded label; decode it via an inverse
    # mapping (O(1) lookup instead of the previous per-request linear scan).
    pred_code = rf_ferti_name.predict(user_input)[0]
    code_to_name = {value: name for name, value in fertilizer_name_encodings.items()}
    pred_fertilizer_name = code_to_name[pred_code]

    pred_fertilizer_qty = rf_ferti_value.predict(user_input)[0]

    pred_info = generate_ai_suggestions(pred_fertilizer_name)

    return render_template('index.html', prediction=True, fertilizer_name=pred_fertilizer_name,
                           fertilizer_qty=pred_fertilizer_qty, optimal_usage=pred_fertilizer_qty, pred_info=pred_info)
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # Startup banner summarising which LLM providers are usable.
    banner = "=" * 60
    gemini_status = 'β Configured' if GEMINI_API_KEY else 'β Not configured'
    nvidia_status = 'β Configured' if NVIDIA_API_KEY else 'β Not configured'

    print("\n" + banner)
    print("π Starting Fertilizer Recommendation App")
    print(banner)
    print(f"π Configuration:")
    print(f" - Gemini API: {gemini_status}")
    print(f" - NVIDIA API: {nvidia_status}")
    print(f" - Gemini Models: {len(GEMINI_MODELS)}")
    print(f" - NVIDIA Models: {len(NVIDIA_MODELS)}")
    print(banner + "\n")

    # Bind on all interfaces so the app is reachable inside a container.
    app.run(port=7860, host='0.0.0.0')
|
|
|