# NOTE: the three lines below were Hugging Face upload-page residue
# ("krushimitravit's picture / Upload 13 files / b2501a8 verified")
# accidentally pasted into the source; kept as comments so the module parses.
from flask import Flask, render_template, request, jsonify
import joblib
import google.generativeai as genai
from openai import OpenAI
import os
import base64
from PIL import Image
import io
# Initialize the Flask app
app = Flask(__name__)

# Load the trained gradient-boosting crop classifier (pickled with joblib).
gbm_model = joblib.load('gbm_model.pkl')

# API Keys — read exclusively from the environment.
# SECURITY: a live NVIDIA API key was previously hard-coded here as a fallback.
# Never commit credentials to source control; the leaked key must be revoked.
GEMINI_API_KEY = os.getenv('GEMINI_API', '')
NVIDIA_API_KEY = os.getenv('NVIDIA_API_KEY', '')

# Model configurations for TEXT generation (tried in order; first success wins).
NVIDIA_TEXT_MODELS = [
    "nvidia/llama-3.1-nemotron-70b-instruct",
    "meta/llama-3.1-405b-instruct",
    "meta/llama-3.1-70b-instruct",
    "mistralai/mixtral-8x7b-instruct-v0.1"
]

# Model configurations for VISION (image analysis), also tried in order.
NVIDIA_VISION_MODELS = [
    "meta/llama-3.2-90b-vision-instruct",
    "meta/llama-3.2-11b-vision-instruct",
    "microsoft/phi-3-vision-128k-instruct",
    "nvidia/neva-22b"
]

# Gemini models serve as the fallback tier for both text and vision.
GEMINI_MODELS = [
    "gemini-2.0-flash-exp",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-1.5-pro"
]

# Maps the classifier's integer output back to a crop name.
class_mapping = {
    0: 'BANANA', 1: 'BLACKGRAM', 2: 'CHICKPEA', 3: 'COCONUT', 4: 'COFFEE',
    5: 'COTTON', 6: 'JUTE', 7: 'KIDNEYBEANS', 8: 'LENTIL', 9: 'MAIZE',
    10: 'MANGO', 11: 'MOTHBEANS', 12: 'MUNGBEAN', 13: 'MUSKMELON',
    14: 'ORANGE', 15: 'PAPAYA', 16: 'PIGEONPEAS', 17: 'POMEGRANATE',
    18: 'RICE', 19: 'WATERMELON'
}
# ============================================================================
# TEXT GENERATION FUNCTIONS
# ============================================================================
def try_nvidia_text_model(model_name, prompt):
    """Attempt one text completion against the NVIDIA inference endpoint.

    Args:
        model_name: NVIDIA-hosted model identifier to query.
        prompt: User prompt sent as a single chat message.

    Returns:
        A ``(success, text)`` pair: ``(True, response_text)`` when the call
        succeeds, ``(False, None)`` on any failure.
    """
    try:
        print(f"🔄 Trying NVIDIA text model: {model_name}")
        nvidia_client = OpenAI(
            api_key=NVIDIA_API_KEY,
            base_url="https://integrate.api.nvidia.com/v1"
        )
        chat_messages = [{"role": "user", "content": prompt}]
        result = nvidia_client.chat.completions.create(
            model=model_name,
            messages=chat_messages,
            temperature=0.2,
            max_tokens=1024,
            stream=False
        )
        answer = result.choices[0].message.content
        print(f"✅ Success with NVIDIA text model: {model_name}")
        return True, answer
    except Exception as exc:
        print(f"❌ NVIDIA text model {model_name} failed: {str(exc)}")
        return False, None
def try_gemini_text_model(model_name, prompt):
    """Attempt one text generation with a Google Gemini model.

    Args:
        model_name: Gemini model identifier.
        prompt: Prompt string passed straight to ``generate_content``.

    Returns:
        ``(True, response_text)`` on success, ``(False, None)`` when the key
        is missing or the API call raises.
    """
    try:
        print(f"🔄 Trying Gemini text model: {model_name}")
        if not GEMINI_API_KEY:
            print("❌ Gemini API key not set")
            return False, None
        genai.configure(api_key=GEMINI_API_KEY)
        generated = genai.GenerativeModel(model_name).generate_content(prompt)
        print(f"✅ Success with Gemini text model: {model_name}")
        return True, generated.text
    except Exception as exc:
        print(f"❌ Gemini text model {model_name} failed: {str(exc)}")
        return False, None
def generate_ai_suggestions_with_fallback(pred_crop_name, parameters):
    """Generate AI crop suggestions, falling back across providers.

    Tries every NVIDIA text model in order, then every Gemini model, and
    finally returns a static message if all models fail.

    Args:
        pred_crop_name: Crop name predicted by the classifier.
        parameters: Dict of the soil/climate inputs, interpolated into the prompt.

    Returns:
        The first successful model response, or a hard-coded fallback string.
    """
    # Fixed: the original prompt contained many typos ("descritpion",
    # "sparagraph", "recokemnd foru", "parpameeters", "numbvered") that
    # degraded LLM output quality.
    prompt = (
        f"For the crop {pred_crop_name} based on the input parameters {parameters}, "
        f"give a description of the provided crop in a justified 3-4 line paragraph. "
        f"After that, leave a spacing of one to two lines. "
        f"**In the next line** recommend four other crops based on the parameters as "
        f"Other recommended crops : crop names in numbered order. "
        f"Do not include any special characters, no bold, no italic."
    )
    print("\n" + "="*50)
    print("🚀 Starting AI Suggestion Generation with Fallback")
    print("="*50)
    # Phase 1: Try NVIDIA text models
    print("\n🚀 PHASE 1: Trying NVIDIA Text Models")
    for model in NVIDIA_TEXT_MODELS:
        success, response = try_nvidia_text_model(model, prompt)
        if success:
            print(f"\n✅ Successfully generated suggestions with NVIDIA model: {model}")
            return response
    # Phase 2: Try Gemini models
    print("\n🚀 PHASE 2: Trying Gemini Models (Fallback)")
    for model in GEMINI_MODELS:
        success, response = try_gemini_text_model(model, prompt)
        if success:
            print(f"\n✅ Successfully generated suggestions with Gemini model: {model}")
            return response
    # If all models fail, return a fallback message
    print("\n❌ All models failed. Returning fallback message.")
    return (
        f"{pred_crop_name} is a suitable crop for the given soil and climate conditions.\n\n"
        f"Other recommended crops:\n"
        f"1. RICE\n"
        f"2. WHEAT\n"
        f"3. MAIZE\n"
        f"4. COTTON\n\n"
        f"Note: AI suggestions are temporarily unavailable. Please try again later."
    )
# ============================================================================
# IMAGE ANALYSIS FUNCTIONS
# ============================================================================
def encode_image_to_base64(image_file):
    """Read a file-like object and return its contents as a base64 string.

    Args:
        image_file: Binary file-like object positioned at the desired start.

    Returns:
        The base64-encoded payload as ``str``, or ``None`` if reading or
        encoding raises.
    """
    try:
        raw = image_file.read()
        encoded = base64.b64encode(raw)
        return encoded.decode('utf-8')
    except Exception as exc:
        print(f"❌ Error encoding image: {str(exc)}")
        return None
def try_nvidia_vision_model(model_name, base64_image, prompt):
    """Attempt one image analysis with an NVIDIA vision model.

    Args:
        model_name: NVIDIA-hosted vision model identifier.
        base64_image: Base64-encoded image payload (sent as a data URL).
        prompt: Text instruction paired with the image.

    Returns:
        ``(True, response_text)`` on success, ``(False, None)`` on failure.
    """
    try:
        print(f"🔄 Trying NVIDIA vision model: {model_name}")
        nvidia_client = OpenAI(
            api_key=NVIDIA_API_KEY,
            base_url="https://integrate.api.nvidia.com/v1"
        )
        # Multimodal message: one text part plus one inline data-URL image.
        content_parts = [
            {"type": "text", "text": prompt},
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/png;base64,{base64_image}"
                }
            }
        ]
        result = nvidia_client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": content_parts}],
            temperature=0.2,
            max_tokens=1024,
            stream=False
        )
        answer = result.choices[0].message.content
        print(f"✅ Success with NVIDIA vision model: {model_name}")
        return True, answer
    except Exception as exc:
        print(f"❌ NVIDIA vision model {model_name} failed: {str(exc)}")
        return False, None
def try_gemini_vision_model(model_name, image_bytes, prompt):
    """Attempt one image analysis with a Google Gemini vision model.

    Args:
        model_name: Gemini model identifier.
        image_bytes: Raw image bytes (decoded via PIL before sending).
        prompt: Text instruction paired with the image.

    Returns:
        ``(True, response_text)`` on success, ``(False, None)`` when the key
        is missing or the call raises.
    """
    try:
        print(f"🔄 Trying Gemini vision model: {model_name}")
        if not GEMINI_API_KEY:
            print("❌ Gemini API key not set")
            return False, None
        genai.configure(api_key=GEMINI_API_KEY)
        vision_model = genai.GenerativeModel(model_name)
        # Gemini accepts a PIL image object directly alongside the prompt.
        pil_image = Image.open(io.BytesIO(image_bytes))
        generated = vision_model.generate_content([prompt, pil_image])
        print(f"✅ Success with Gemini vision model: {model_name}")
        return True, generated.text
    except Exception as exc:
        print(f"❌ Gemini vision model {model_name} failed: {str(exc)}")
        return False, None
def analyze_image_with_fallback(image_file, prompt="Analyze this agricultural image and provide detailed insights about the crop, soil condition, and any visible issues or recommendations."):
    """Analyze an uploaded image, falling back across vision providers.

    Tries every NVIDIA vision model, then every Gemini model, and returns a
    static troubleshooting message if all of them fail.

    Args:
        image_file: File-like object holding the uploaded image.
        prompt: Instruction passed to the vision models.

    Returns:
        The first successful model response, an error string if the image
        could not be encoded, or a hard-coded fallback message.
    """
    print("\n" + "="*50)
    print("🖼️ Starting Image Analysis with Fallback")
    print("="*50)
    # Read image bytes for Gemini
    image_file.seek(0)
    raw_bytes = image_file.read()
    # Encode image for NVIDIA
    image_file.seek(0)
    encoded = encode_image_to_base64(image_file)
    if not encoded:
        return "Error: Could not process image file."
    # Phase 1: Try NVIDIA vision models
    print("\n🚀 PHASE 1: Trying NVIDIA Vision Models")
    for candidate in NVIDIA_VISION_MODELS:
        ok, answer = try_nvidia_vision_model(candidate, encoded, prompt)
        if ok:
            print(f"\n✅ Successfully analyzed image with NVIDIA model: {candidate}")
            return answer
    # Phase 2: Try Gemini vision models
    print("\n🚀 PHASE 2: Trying Gemini Vision Models (Fallback)")
    for candidate in GEMINI_MODELS:
        ok, answer = try_gemini_vision_model(candidate, raw_bytes, prompt)
        if ok:
            print(f"\n✅ Successfully analyzed image with Gemini model: {candidate}")
            return answer
    # If all models fail
    print("\n❌ All vision models failed. Returning fallback message.")
    return (
        "Image analysis is temporarily unavailable. Please try again later.\n\n"
        "For best results, ensure:\n"
        "1. Image is clear and well-lit\n"
        "2. Crop/soil is clearly visible\n"
        "3. Image format is supported (JPG, PNG)\n"
        "4. Image size is reasonable (< 10MB)"
    )
# ============================================================================
# FLASK ROUTES
# ============================================================================
@app.route('/')
def index():
    """Serve the main application page."""
    return render_template('index.html')
@app.route('/test')
def test_api():
    """Serve the interactive API testing page."""
    return render_template('test_api.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Crop prediction endpoint with AI suggestions.

    Expects form fields: nitrogen, phosphorus, potassium, temperature,
    humidity, ph, rainfall (numeric) and location (string). Returns JSON
    with the predicted crop, AI-generated suggestions, and the location;
    responds 500 with error details on any failure.
    """
    try:
        form = request.form
        # Feature order must match the order the model was trained with.
        numeric_fields = ('nitrogen', 'phosphorus', 'potassium',
                          'temperature', 'humidity', 'ph', 'rainfall')
        values = {name: float(form[name]) for name in numeric_fields}
        location = form['location']
        # Prepare the features for the model
        feature_row = [values[name] for name in numeric_fields]
        encoded_label = gbm_model.predict([feature_row])[0]
        predicted_crop = class_mapping[encoded_label]
        # Get AI suggestions with fallback
        parameters = {
            "Nitrogen": values['nitrogen'], "Phosphorus": values['phosphorus'],
            "Potassium": values['potassium'], "Temperature": values['temperature'],
            "Humidity": values['humidity'], "pH": values['ph'],
            "Rainfall": values['rainfall'], "Location": location
        }
        ai_suggestions = generate_ai_suggestions_with_fallback(predicted_crop, parameters)
        return jsonify({
            'predicted_crop': predicted_crop,
            'ai_suggestions': ai_suggestions,
            'location': location
        })
    except Exception as exc:
        print(f"❌ Error in prediction: {str(exc)}")
        return jsonify({
            'error': 'An error occurred during prediction. Please try again.',
            'details': str(exc)
        }), 500
@app.route('/analyze-image', methods=['POST'])
def analyze_image():
    """Image analysis endpoint backed by the multi-model vision fallback.

    Expects an ``image`` file upload and an optional ``prompt`` form field.
    Returns JSON with the analysis text and the original filename; responds
    400 for a missing/empty upload and 500 on unexpected errors.
    """
    try:
        # Guard clause: reject requests without an image part.
        if 'image' not in request.files:
            return jsonify({
                'error': 'No image file provided',
                'details': 'Please upload an image file'
            }), 400
        upload = request.files['image']
        # Guard clause: reject an empty file selection.
        if upload.filename == '':
            return jsonify({
                'error': 'Empty filename',
                'details': 'Please select a valid image file'
            }), 400
        # Get custom prompt if provided
        user_prompt = request.form.get('prompt',
            "Analyze this agricultural image and provide detailed insights about the crop, "
            "soil condition, plant health, and any visible issues or recommendations."
        )
        # Analyze image with fallback
        result_text = analyze_image_with_fallback(upload, user_prompt)
        return jsonify({
            'analysis': result_text,
            'filename': upload.filename
        })
    except Exception as exc:
        print(f"❌ Error in image analysis: {str(exc)}")
        return jsonify({
            'error': 'An error occurred during image analysis. Please try again.',
            'details': str(exc)
        }), 500
@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint reporting API-key and model availability."""
    status_payload = {
        'status': 'healthy',
        'nvidia_api_configured': bool(NVIDIA_API_KEY),
        'gemini_api_configured': bool(GEMINI_API_KEY),
        'text_models_available': len(NVIDIA_TEXT_MODELS) + len(GEMINI_MODELS),
        'vision_models_available': len(NVIDIA_VISION_MODELS) + len(GEMINI_MODELS)
    }
    return jsonify(status_payload)
if __name__ == '__main__':
    print("\n" + "="*60)
    print("🌾 Crop Recommendation System with Multi-Model AI")
    print("="*60)
    print(f"📊 Text Models: {len(NVIDIA_TEXT_MODELS)} NVIDIA + {len(GEMINI_MODELS)} Gemini")
    print(f"🖼️ Vision Models: {len(NVIDIA_VISION_MODELS)} NVIDIA + {len(GEMINI_MODELS)} Gemini")
    print(f"🔑 NVIDIA API: {'✅ Configured' if NVIDIA_API_KEY else '❌ Not Set'}")
    print(f"🔑 Gemini API: {'✅ Configured' if GEMINI_API_KEY else '❌ Not Set'}")
    print("="*60)
    print("🚀 Starting server on http://0.0.0.0:7860")
    print("="*60 + "\n")
    # SECURITY: the server binds to 0.0.0.0; running with debug=True exposes
    # the Werkzeug interactive debugger (arbitrary code execution) to anyone
    # who can reach the port. Debug mode is now opt-in via FLASK_DEBUG.
    debug_mode = os.getenv('FLASK_DEBUG', '').lower() in ('1', 'true', 'yes')
    app.run(port=7860, host='0.0.0.0', debug=debug_mode)