| from flask import Flask, render_template, request, jsonify
|
| import joblib
|
| import google.generativeai as genai
|
| from openai import OpenAI
|
| import os
|
| import base64
|
| from PIL import Image
|
| import io
|
|
|
|
|
# Flask application instance serving the UI pages and the JSON API endpoints.
app = Flask(__name__)

# Pre-trained gradient-boosting crop classifier; the .pkl file must sit next
# to this script. Loading at import time means startup fails fast if missing.
gbm_model = joblib.load('gbm_model.pkl')

# API keys come from the environment only. SECURITY FIX: the NVIDIA key was
# previously hardcoded as a fallback default — never commit secrets to source;
# the leaked key must be rotated.
GEMINI_API_KEY = os.getenv('GEMINI_API', '')
NVIDIA_API_KEY = os.getenv('NVIDIA_API_KEY', '')
|
|
|
|
|
# NVIDIA-hosted chat models, attempted in order until one succeeds.
NVIDIA_TEXT_MODELS = [
    "nvidia/llama-3.1-nemotron-70b-instruct",
    "meta/llama-3.1-405b-instruct",
    "meta/llama-3.1-70b-instruct",
    "mistralai/mixtral-8x7b-instruct-v0.1",
]

# NVIDIA-hosted multimodal (vision) models, attempted in order.
NVIDIA_VISION_MODELS = [
    "meta/llama-3.2-90b-vision-instruct",
    "meta/llama-3.2-11b-vision-instruct",
    "microsoft/phi-3-vision-128k-instruct",
    "nvidia/neva-22b",
]

# Google Gemini models used as the fallback tier for both text and vision.
GEMINI_MODELS = [
    "gemini-2.0-flash-exp",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-1.5-pro",
]

# Maps the classifier's integer label to its crop name. The list order matches
# the label encoding (index 0 -> BANANA ... index 19 -> WATERMELON).
class_mapping = dict(enumerate([
    'BANANA', 'BLACKGRAM', 'CHICKPEA', 'COCONUT', 'COFFEE',
    'COTTON', 'JUTE', 'KIDNEYBEANS', 'LENTIL', 'MAIZE',
    'MANGO', 'MOTHBEANS', 'MUNGBEAN', 'MUSKMELON',
    'ORANGE', 'PAPAYA', 'PIGEONPEAS', 'POMEGRANATE',
    'RICE', 'WATERMELON',
]))
|
|
|
|
|
|
|
|
|
|
|
def try_nvidia_text_model(model_name, prompt):
    """Ask one NVIDIA-hosted chat model to answer ``prompt``.

    Args:
        model_name: NVIDIA model identifier, e.g. "meta/llama-3.1-70b-instruct".
        prompt: User prompt sent as a single chat message.

    Returns:
        (True, response_text) on success, or (False, None) on any failure so
        the caller can fall through to the next model in its list.
    """
    try:
        print(f"[TRY] NVIDIA text model: {model_name}")

        # NVIDIA's hosted inference endpoint is OpenAI-API compatible.
        client = OpenAI(
            base_url="https://integrate.api.nvidia.com/v1",
            api_key=NVIDIA_API_KEY,
        )

        completion = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1024,
            temperature=0.2,  # low temperature for stable, factual answers
            stream=False,
        )

        response_text = completion.choices[0].message.content
        print(f"[OK] Success with NVIDIA text model: {model_name}")
        return True, response_text

    except Exception as e:
        # Deliberate broad catch: any failure just means "try the next model".
        print(f"[FAIL] NVIDIA text model {model_name} failed: {e}")
        return False, None
|
|
|
def try_gemini_text_model(model_name, prompt):
    """Ask one Google Gemini model to answer ``prompt``.

    Args:
        model_name: Gemini model identifier, e.g. "gemini-1.5-flash".
        prompt: Plain-text prompt passed to generate_content.

    Returns:
        (True, response_text) on success, or (False, None) on any failure
        (including a missing API key) so the caller can try the next model.
    """
    try:
        print(f"[TRY] Gemini text model: {model_name}")

        if not GEMINI_API_KEY:
            print("[FAIL] Gemini API key not set")
            return False, None

        genai.configure(api_key=GEMINI_API_KEY)
        model = genai.GenerativeModel(model_name)

        response = model.generate_content(prompt)
        response_text = response.text

        print(f"[OK] Success with Gemini text model: {model_name}")
        return True, response_text

    except Exception as e:
        # Deliberate broad catch: any failure just means "try the next model".
        print(f"[FAIL] Gemini text model {model_name} failed: {e}")
        return False, None
|
|
|
def generate_ai_suggestions_with_fallback(pred_crop_name, parameters):
    """Generate AI advice text for a predicted crop with multi-model fallback.

    Tries every NVIDIA text model first, then every Gemini model, returning
    the first successful response. If all models fail, a static human-readable
    fallback message is returned instead, so this never returns None.

    Args:
        pred_crop_name: Crop label predicted by the classifier (e.g. 'RICE').
        parameters: Dict of the soil/climate inputs used for the prediction.

    Returns:
        A plain-text suggestion string.
    """
    # Prompt rewritten from the original (which contained heavy typos such as
    # "descritpion", "recokemnd foru", "parpameeters") — same instructions,
    # now legible to the model.
    prompt = (
        f"For the crop {pred_crop_name} based on the input parameters {parameters}, "
        f"give a description of the provided crop in a justified 3-4 line paragraph. "
        f"After that, leave one to two blank lines. "
        f"On the next line, recommend four other crops based on the parameters, "
        f"formatted as 'Other recommended crops:' followed by the crop names in "
        f"numbered order. Do not include any special characters, bold, or italics."
    )

    print("\n" + "=" * 50)
    print("Starting AI Suggestion Generation with Fallback")
    print("=" * 50)

    print("\nPHASE 1: Trying NVIDIA Text Models")
    for model in NVIDIA_TEXT_MODELS:
        success, response = try_nvidia_text_model(model, prompt)
        if success:
            print(f"\n[OK] Successfully generated suggestions with NVIDIA model: {model}")
            return response

    print("\nPHASE 2: Trying Gemini Models (Fallback)")
    for model in GEMINI_MODELS:
        success, response = try_gemini_text_model(model, prompt)
        if success:
            print(f"\n[OK] Successfully generated suggestions with Gemini model: {model}")
            return response

    # Static last-resort message so the API still returns something useful.
    print("\n[FAIL] All models failed. Returning fallback message.")
    return (
        f"{pred_crop_name} is a suitable crop for the given soil and climate conditions.\n\n"
        f"Other recommended crops:\n"
        f"1. RICE\n"
        f"2. WHEAT\n"
        f"3. MAIZE\n"
        f"4. COTTON\n\n"
        f"Note: AI suggestions are temporarily unavailable. Please try again later."
    )
|
|
|
|
|
|
|
|
|
|
|
def encode_image_to_base64(image_file):
    """Read a file-like object and return its content base64-encoded as str.

    Args:
        image_file: Binary file-like object positioned at the data to encode.

    Returns:
        The base64 string, or None when the object cannot be read/encoded
        (callers treat None as "could not process image").
    """
    try:
        image_bytes = image_file.read()
        return base64.b64encode(image_bytes).decode('utf-8')
    except Exception as e:
        print(f"[FAIL] Error encoding image: {e}")
        return None
|
|
|
def try_nvidia_vision_model(model_name, base64_image, prompt):
    """Ask one NVIDIA-hosted vision model to analyze a base64-encoded image.

    Args:
        model_name: NVIDIA vision model identifier.
        base64_image: Image content encoded as a base64 string.
        prompt: Text instruction sent alongside the image.

    Returns:
        (True, response_text) on success, or (False, None) on any failure so
        the caller can fall through to the next model in its list.
    """
    try:
        print(f"[TRY] NVIDIA vision model: {model_name}")

        # NVIDIA's hosted inference endpoint is OpenAI-API compatible.
        client = OpenAI(
            base_url="https://integrate.api.nvidia.com/v1",
            api_key=NVIDIA_API_KEY,
        )

        completion = client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                # Declared as PNG in the data URI; the upload
                                # is not converted, so non-PNG input relies on
                                # the endpoint being lenient — TODO confirm.
                                "url": f"data:image/png;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ],
            max_tokens=1024,
            temperature=0.2,
            stream=False,
        )

        response_text = completion.choices[0].message.content
        print(f"[OK] Success with NVIDIA vision model: {model_name}")
        return True, response_text

    except Exception as e:
        # Deliberate broad catch: any failure just means "try the next model".
        print(f"[FAIL] NVIDIA vision model {model_name} failed: {e}")
        return False, None
|
|
|
def try_gemini_vision_model(model_name, image_bytes, prompt):
    """Ask one Gemini vision-capable model to analyze raw image bytes.

    Args:
        model_name: Gemini model identifier.
        image_bytes: Raw image file content (not base64).
        prompt: Text instruction sent alongside the image.

    Returns:
        (True, response_text) on success, or (False, None) on any failure
        (including a missing API key) so the caller can try the next model.
    """
    try:
        print(f"[TRY] Gemini vision model: {model_name}")

        if not GEMINI_API_KEY:
            print("[FAIL] Gemini API key not set")
            return False, None

        genai.configure(api_key=GEMINI_API_KEY)
        model = genai.GenerativeModel(model_name)

        # Gemini takes a PIL image object rather than a base64 data URI.
        img = Image.open(io.BytesIO(image_bytes))

        response = model.generate_content([prompt, img])
        response_text = response.text

        print(f"[OK] Success with Gemini vision model: {model_name}")
        return True, response_text

    except Exception as e:
        # Deliberate broad catch: any failure just means "try the next model".
        print(f"[FAIL] Gemini vision model {model_name} failed: {e}")
        return False, None
|
|
|
def analyze_image_with_fallback(image_file, prompt="Analyze this agricultural image and provide detailed insights about the crop, soil condition, and any visible issues or recommendations."):
    """Analyze an uploaded image with vision models, falling back across tiers.

    NVIDIA vision models receive the image base64-encoded; Gemini models
    receive the raw bytes. The first successful analysis wins. If every model
    fails (or the file cannot be read) a static guidance message is returned,
    so this never returns None.

    Args:
        image_file: File-like object (e.g. Werkzeug FileStorage) with the image.
        prompt: Instruction sent alongside the image.

    Returns:
        A plain-text analysis string.
    """
    print("\n" + "=" * 50)
    print("Starting Image Analysis with Fallback")
    print("=" * 50)

    # Read the raw bytes once (for the Gemini tier), then rewind so the
    # base64 encoder sees the full stream again.
    image_file.seek(0)
    image_bytes = image_file.read()

    image_file.seek(0)
    base64_image = encode_image_to_base64(image_file)

    if not base64_image:
        return "Error: Could not process image file."

    print("\nPHASE 1: Trying NVIDIA Vision Models")
    for model in NVIDIA_VISION_MODELS:
        success, response = try_nvidia_vision_model(model, base64_image, prompt)
        if success:
            print(f"\n[OK] Successfully analyzed image with NVIDIA model: {model}")
            return response

    print("\nPHASE 2: Trying Gemini Vision Models (Fallback)")
    for model in GEMINI_MODELS:
        success, response = try_gemini_vision_model(model, image_bytes, prompt)
        if success:
            print(f"\n[OK] Successfully analyzed image with Gemini model: {model}")
            return response

    # Static last-resort message so the endpoint still returns guidance.
    print("\n[FAIL] All vision models failed. Returning fallback message.")
    return (
        "Image analysis is temporarily unavailable. Please try again later.\n\n"
        "For best results, ensure:\n"
        "1. Image is clear and well-lit\n"
        "2. Crop/soil is clearly visible\n"
        "3. Image format is supported (JPG, PNG)\n"
        "4. Image size is reasonable (< 10MB)"
    )
|
|
|
|
|
|
|
|
|
|
|
@app.route('/')
def index():
    """Render and return the main landing page."""
    page = render_template('index.html')
    return page
|
|
|
@app.route('/test')
def test_api():
    """Render and return the API testing page."""
    page = render_template('test_api.html')
    return page
|
|
|
@app.route('/predict', methods=['POST'])
def predict():
    """Predict the best crop from soil/climate form fields and add AI advice.

    Expects POST form fields: nitrogen, phosphorus, potassium, temperature,
    humidity, ph, rainfall (numeric) and location (free text).

    Returns:
        JSON {predicted_crop, ai_suggestions, location} on success;
        HTTP 400 for missing/non-numeric input; HTTP 500 for unexpected errors.
    """
    try:
        try:
            nitrogen = float(request.form['nitrogen'])
            phosphorus = float(request.form['phosphorus'])
            potassium = float(request.form['potassium'])
            temperature = float(request.form['temperature'])
            humidity = float(request.form['humidity'])
            ph = float(request.form['ph'])
            rainfall = float(request.form['rainfall'])
            location = request.form['location']
        except (KeyError, ValueError) as e:
            # Client-side problem: a field is missing or not a number.
            # Previously this fell into the generic 500 handler.
            return jsonify({
                'error': 'Invalid or missing input field.',
                'details': str(e)
            }), 400

        # Feature order must match the order used when the model was trained.
        features = [[nitrogen, phosphorus, potassium, temperature, humidity, ph, rainfall]]
        predicted_crop_encoded = gbm_model.predict(features)[0]
        predicted_crop = class_mapping[predicted_crop_encoded]

        parameters = {
            "Nitrogen": nitrogen, "Phosphorus": phosphorus, "Potassium": potassium,
            "Temperature": temperature, "Humidity": humidity, "pH": ph, "Rainfall": rainfall,
            "Location": location
        }
        ai_suggestions = generate_ai_suggestions_with_fallback(predicted_crop, parameters)

        return jsonify({
            'predicted_crop': predicted_crop,
            'ai_suggestions': ai_suggestions,
            'location': location
        })

    except Exception as e:
        print(f"[FAIL] Error in prediction: {e}")
        return jsonify({
            'error': 'An error occurred during prediction. Please try again.',
            'details': str(e)
        }), 500
|
|
|
@app.route('/analyze-image', methods=['POST'])
def analyze_image():
    """Analyze an uploaded agricultural image via the vision-model fallback.

    Expects a multipart POST with an 'image' file part and an optional
    'prompt' form field overriding the default instruction.

    Returns:
        JSON {analysis, filename} on success; HTTP 400 when no usable file is
        supplied; HTTP 500 for unexpected errors.
    """
    try:
        if 'image' not in request.files:
            return jsonify({
                'error': 'No image file provided',
                'details': 'Please upload an image file'
            }), 400

        image_file = request.files['image']

        # Browsers submit an empty-filename part when no file was chosen.
        if image_file.filename == '':
            return jsonify({
                'error': 'Empty filename',
                'details': 'Please select a valid image file'
            }), 400

        custom_prompt = request.form.get('prompt',
            "Analyze this agricultural image and provide detailed insights about the crop, "
            "soil condition, plant health, and any visible issues or recommendations."
        )

        analysis_result = analyze_image_with_fallback(image_file, custom_prompt)

        return jsonify({
            'analysis': analysis_result,
            'filename': image_file.filename
        })

    except Exception as e:
        print(f"[FAIL] Error in image analysis: {e}")
        return jsonify({
            'error': 'An error occurred during image analysis. Please try again.',
            'details': str(e)
        }), 500
|
|
|
@app.route('/health', methods=['GET'])
def health_check():
    """Report service status and which AI back-ends are configured."""
    payload = {
        'status': 'healthy',
        'nvidia_api_configured': bool(NVIDIA_API_KEY),
        'gemini_api_configured': bool(GEMINI_API_KEY),
        'text_models_available': len(NVIDIA_TEXT_MODELS) + len(GEMINI_MODELS),
        'vision_models_available': len(NVIDIA_VISION_MODELS) + len(GEMINI_MODELS),
    }
    return jsonify(payload)
|
|
|
if __name__ == '__main__':
    # Startup banner summarising the configured model pool and API keys.
    print("\n" + "=" * 60)
    print("Crop Recommendation System with Multi-Model AI")
    print("=" * 60)
    print(f"Text Models: {len(NVIDIA_TEXT_MODELS)} NVIDIA + {len(GEMINI_MODELS)} Gemini")
    print(f"Vision Models: {len(NVIDIA_VISION_MODELS)} NVIDIA + {len(GEMINI_MODELS)} Gemini")
    print(f"NVIDIA API: {'Configured' if NVIDIA_API_KEY else 'Not Set'}")
    print(f"Gemini API: {'Configured' if GEMINI_API_KEY else 'Not Set'}")
    print("=" * 60)
    print("Starting server on http://0.0.0.0:7860")
    print("=" * 60 + "\n")

    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # auto-reload — convenient in development, unsafe in production. Gate it
    # behind an environment flag before deploying.
    app.run(port=7860, host='0.0.0.0', debug=True)
|
|
|