krushimitravit committed on
Commit
9a08088
·
verified ·
1 Parent(s): f6af546

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +231 -231
app.py CHANGED
@@ -1,231 +1,231 @@
1
- from flask import Flask, render_template, request
2
- import joblib
3
- import pandas as pd
4
- import google.generativeai as genai
5
- from openai import OpenAI
6
- import os
7
- import time
8
- from dotenv import load_dotenv
9
-
10
- # Load environment variables from .env file
11
- load_dotenv()
12
-
13
- app = Flask(__name__)
14
-
15
- # Load the trained Random Forest models
16
- rf_ferti_name = joblib.load('rf_ferti_name.pkl')
17
- rf_ferti_value = joblib.load('rf_ferti_value.pkl')
18
-
19
- # Manually define the encodings based on the provided dictionaries
20
- soil_type_encodings = {'Black': 0, 'Clayey': 1, 'Loamy': 2, 'Red': 3, 'Sandy': 4}
21
- crop_type_encodings = {'Barley': 0, 'Cotton': 1, 'Ground Nuts': 2, 'Maize': 3, 'Millets': 4,
22
- 'Oil seeds': 5, 'Other Variety': 6, 'Paddy': 7, 'Pulses': 8, 'Sugarcane': 9,
23
- 'Tobacco': 10, 'Wheat': 11}
24
- fertilizer_name_encodings = {'10-26-26': 0, '14-35-14': 1, '15-15-15': 2, '17-17-17': 3, '20-20': 4,
25
- '20-20-20': 5, '28-28': 6, 'Ammonium sulfate': 7, 'Biofertilizer (e.g., Rhizobium)': 8,
26
- 'Calcium nitrate': 9, 'DAP': 10, 'Ferrous sulfate': 11, 'Magnesium sulfate': 12,
27
- 'Potassium chloride/Muriate of potash (MOP)': 13, 'Potassium sulfate/Sulfate of potash (SOP)': 14,
28
- 'Rock phosphate (RP)': 15, 'Single superphosphate (SSP)': 16, 'Triple superphosphate (TSP)': 17,
29
- 'Urea': 18, 'Zinc sulfate': 19}
30
-
31
- # --- ENHANCED LLM CONFIGURATION ---
32
- GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
33
- NVIDIA_API_KEY = os.getenv('NVIDIA_API_KEY')
34
-
35
- if GEMINI_API_KEY:
36
- genai.configure(api_key=GEMINI_API_KEY)
37
-
38
- # Model configurations with retry settings
39
- GEMINI_MODELS = [
40
- {"name": "gemini-2.0-flash-exp", "max_retries": 2, "timeout": 30, "description": "Latest experimental"},
41
- {"name": "gemini-1.5-pro-latest", "max_retries": 2, "timeout": 45, "description": "Most capable"},
42
- {"name": "gemini-1.5-flash", "max_retries": 3, "timeout": 20, "description": "Fast and reliable"},
43
- {"name": "gemini-1.5-flash-8b", "max_retries": 3, "timeout": 15, "description": "Lightweight"},
44
- ]
45
-
46
- NVIDIA_MODELS = [
47
- {"name": "meta/llama-3.2-90b-vision-instruct", "max_retries": 2, "timeout": 40, "description": "High capability"},
48
- {"name": "meta/llama-3.2-11b-vision-instruct", "max_retries": 2, "timeout": 30, "description": "Balanced"},
49
- ]
50
-
51
-
52
- def retry_with_backoff(func, max_retries=3, initial_delay=1):
53
- """Retry a function with exponential backoff."""
54
- for attempt in range(max_retries):
55
- try:
56
- return func()
57
- except Exception as e:
58
- if attempt == max_retries - 1:
59
- raise
60
- delay = initial_delay * (2 ** attempt)
61
- print(f" >> Retry {attempt + 1}/{max_retries} after {delay}s (Error: {type(e).__name__})")
62
- time.sleep(delay)
63
-
64
-
65
- def generate_with_gemini(prompt, model_config):
66
- """Generate text using a specific Gemini model with retry logic."""
67
- model_name = model_config["name"]
68
- max_retries = model_config.get("max_retries", 2)
69
-
70
- def _attempt():
71
- print(f" >> Attempting Gemini: {model_name}")
72
- model = genai.GenerativeModel(model_name)
73
- response = model.generate_content(prompt)
74
-
75
- if not response or not response.text:
76
- raise ValueError("Empty response from model")
77
-
78
- return response.text
79
-
80
- try:
81
- return retry_with_backoff(_attempt, max_retries=max_retries)
82
- except Exception as e:
83
- print(f" >> FAILED {model_name}: {type(e).__name__}")
84
- return None
85
-
86
-
87
- def generate_with_nvidia(prompt, model_config):
88
- """Generate text using NVIDIA API with retry logic."""
89
- if not NVIDIA_API_KEY:
90
- return None
91
-
92
- model_name = model_config["name"]
93
- max_retries = model_config.get("max_retries", 2)
94
-
95
- def _attempt():
96
- print(f" >> Attempting NVIDIA: {model_name}")
97
- client = OpenAI(
98
- base_url="https://integrate.api.nvidia.com/v1",
99
- api_key=NVIDIA_API_KEY
100
- )
101
-
102
- completion = client.chat.completions.create(
103
- model=model_name,
104
- messages=[{"role": "user", "content": prompt}],
105
- max_tokens=500,
106
- temperature=0.7
107
- )
108
-
109
- response_text = completion.choices[0].message.content
110
- if not response_text:
111
- raise ValueError("Empty response from NVIDIA")
112
-
113
- return response_text
114
-
115
- try:
116
- return retry_with_backoff(_attempt, max_retries=max_retries)
117
- except Exception as e:
118
- print(f" >> FAILED NVIDIA {model_name}: {type(e).__name__}")
119
- return None
120
-
121
-
122
- def generate_ai_suggestions(pred_fertilizer_name):
123
- """Generate AI suggestions with enhanced fallback system."""
124
- print("\n" + "=" * 60)
125
- print(f"🌱 GENERATING AI SUGGESTIONS FOR: {pred_fertilizer_name}")
126
- print("=" * 60)
127
-
128
- prompt = (
129
- f"For {pred_fertilizer_name} fertilizer, generate 3-4 sentences each on a new line. "
130
- f"Text should be justified and should not contain any special characters."
131
- )
132
-
133
- response_text = None
134
- used_model = "None"
135
-
136
- # PHASE 1: Try Gemini models
137
- if GEMINI_API_KEY:
138
- print("\n--- PHASE 1: Trying Gemini Models ---")
139
- for idx, model_config in enumerate(GEMINI_MODELS, 1):
140
- print(f"[{idx}/{len(GEMINI_MODELS)}] Testing {model_config['name']}...")
141
- response_text = generate_with_gemini(prompt, model_config)
142
-
143
- if response_text:
144
- used_model = f"Gemini-{model_config['name']}"
145
- print(f" βœ“ SUCCESS with {used_model}")
146
- break
147
-
148
- # PHASE 2: Try NVIDIA models (fallback)
149
- if not response_text and NVIDIA_API_KEY:
150
- print("\n--- PHASE 2: Trying NVIDIA Models (Fallback) ---")
151
- for idx, model_config in enumerate(NVIDIA_MODELS, 1):
152
- print(f"[{idx}/{len(NVIDIA_MODELS)}] Testing {model_config['name']}...")
153
- response_text = generate_with_nvidia(prompt, model_config)
154
-
155
- if response_text:
156
- used_model = f"NVIDIA-{model_config['name']}"
157
- print(f" βœ“ SUCCESS with {used_model}")
158
- break
159
-
160
- # PHASE 3: Final fallback
161
- if not response_text:
162
- print("\n❌ All LLM providers failed. Using fallback text.")
163
- response_text = (
164
- f"{pred_fertilizer_name} is a commonly used fertilizer in agriculture. "
165
- f"It provides essential nutrients to crops. "
166
- f"Follow recommended dosage for best results. "
167
- f"Consult local agricultural experts for specific guidance."
168
- )
169
- used_model = "Fallback"
170
-
171
- print(f"\nβœ… Generated using: {used_model}")
172
- print("=" * 60 + "\n")
173
-
174
- return response_text
175
-
176
-
177
- @app.route('/', methods=['GET', 'POST'])
178
- def index():
179
- if request.method == 'POST':
180
- # Retrieve form data
181
- temperature = float(request.form['temperature'])
182
- humidity = float(request.form['humidity'])
183
- moisture = float(request.form['moisture'])
184
- soil_type = request.form['soil_type']
185
- crop_type = request.form['crop_type']
186
- nitrogen = float(request.form['nitrogen'])
187
- potassium = float(request.form['potassium'])
188
- phosphorous = float(request.form['phosphorous'])
189
-
190
- # Encode categorical data
191
- soil_type_encoded = soil_type_encodings.get(soil_type, -1)
192
- crop_type_encoded = crop_type_encodings.get(crop_type, -1)
193
-
194
- # Create a DataFrame for the input
195
- user_input = pd.DataFrame({
196
- 'Temperature': [temperature],
197
- 'Humidity': [humidity],
198
- 'Moisture': [moisture],
199
- 'Nitrogen': [nitrogen],
200
- 'Potassium': [potassium],
201
- 'Phosphorous': [phosphorous],
202
- 'Soil Type': [soil_type_encoded],
203
- 'Crop Type': [crop_type_encoded]
204
- })
205
-
206
- # Predict Fertilizer Name
207
- pred_fertilizer_name = rf_ferti_name.predict(user_input)[0]
208
- pred_fertilizer_name = [name for name, value in fertilizer_name_encodings.items() if value == pred_fertilizer_name][0]
209
-
210
- # Predict Fertilizer Quantity
211
- pred_fertilizer_qty = rf_ferti_value.predict(user_input)[0]
212
-
213
- # Generate AI suggestions with fallback system
214
- pred_info = generate_ai_suggestions(pred_fertilizer_name)
215
-
216
- return render_template('index.html', prediction=True, fertilizer_name=pred_fertilizer_name,
217
- fertilizer_qty=pred_fertilizer_qty, optimal_usage=pred_fertilizer_qty, pred_info=pred_info)
218
- return render_template('index.html', prediction=False)
219
-
220
-
221
- if __name__ == '__main__':
222
- print("\n" + "=" * 60)
223
- print("πŸš€ Starting Fertilizer Recommendation App")
224
- print("=" * 60)
225
- print(f"πŸ“Š Configuration:")
226
- print(f" - Gemini API: {'βœ“ Configured' if GEMINI_API_KEY else 'βœ— Not configured'}")
227
- print(f" - NVIDIA API: {'βœ“ Configured' if NVIDIA_API_KEY else 'βœ— Not configured'}")
228
- print(f" - Gemini Models: {len(GEMINI_MODELS)}")
229
- print(f" - NVIDIA Models: {len(NVIDIA_MODELS)}")
230
- print("=" * 60 + "\n")
231
- app.run(port=7860, host='0.0.0.0')
 
1
+ from flask import Flask, render_template, request
2
+ import joblib
3
+ import pandas as pd
4
+ import google.generativeai as genai
5
+ from openai import OpenAI
6
+ import os
7
+ import time
8
+ from dotenv import load_dotenv
9
+
10
# Load environment variables (API keys, etc.) from a local .env file.
load_dotenv()

app = Flask(__name__)

# Pre-trained Random Forest models: a classifier for the fertilizer name
# and a regressor for the recommended quantity.
rf_ferti_name = joblib.load('rf_ferti_name.pkl')
rf_ferti_value = joblib.load('rf_ferti_value.pkl')

# Hand-maintained label encodings; these must stay in sync with the
# values the models were trained on.
soil_type_encodings = {
    'Black': 0, 'Clayey': 1, 'Loamy': 2, 'Red': 3, 'Sandy': 4,
}
crop_type_encodings = {
    'Barley': 0, 'Cotton': 1, 'Ground Nuts': 2, 'Maize': 3,
    'Millets': 4, 'Oil seeds': 5, 'Other Variety': 6, 'Paddy': 7,
    'Pulses': 8, 'Sugarcane': 9, 'Tobacco': 10, 'Wheat': 11,
}
fertilizer_name_encodings = {
    '10-26-26': 0,
    '14-35-14': 1,
    '15-15-15': 2,
    '17-17-17': 3,
    '20-20': 4,
    '20-20-20': 5,
    '28-28': 6,
    'Ammonium sulfate': 7,
    'Biofertilizer (e.g., Rhizobium)': 8,
    'Calcium nitrate': 9,
    'DAP': 10,
    'Ferrous sulfate': 11,
    'Magnesium sulfate': 12,
    'Potassium chloride/Muriate of potash (MOP)': 13,
    'Potassium sulfate/Sulfate of potash (SOP)': 14,
    'Rock phosphate (RP)': 15,
    'Single superphosphate (SSP)': 16,
    'Triple superphosphate (TSP)': 17,
    'Urea': 18,
    'Zinc sulfate': 19,
}

# --- ENHANCED LLM CONFIGURATION ---
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
NVIDIA_API_KEY = os.getenv('NVIDIA_API_KEY')

# Configure the Gemini SDK only when a key is actually available.
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)

# Candidate models, tried in listed order; each entry carries its own
# retry budget and an advisory timeout (timeout is informational here).
GEMINI_MODELS = [
    {"name": "gemini-2.0-flash-exp", "max_retries": 2, "timeout": 30, "description": "Latest experimental"},
    {"name": "gemini-1.5-pro-latest", "max_retries": 2, "timeout": 45, "description": "Most capable"},
    {"name": "gemini-1.5-flash", "max_retries": 3, "timeout": 20, "description": "Fast and reliable"},
    {"name": "gemini-1.5-flash-8b", "max_retries": 3, "timeout": 15, "description": "Lightweight"},
]

NVIDIA_MODELS = [
    {"name": "meta/llama-3.2-90b-vision-instruct", "max_retries": 2, "timeout": 40, "description": "High capability"},
    {"name": "meta/llama-3.2-11b-vision-instruct", "max_retries": 2, "timeout": 30, "description": "Balanced"},
]
50
+
51
+
52
def retry_with_backoff(func, max_retries=3, initial_delay=1):
    """Call *func*, retrying failures with exponential backoff.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts before giving up.
        initial_delay: Base delay in seconds; doubles after each failure.

    Returns:
        Whatever *func* returns on its first successful attempt (None if
        max_retries is 0, since no attempt is made).

    Raises:
        The exception from the final attempt once the budget is spent.
    """
    # Count down remaining attempts so the exhaustion check is explicit.
    for remaining in range(max_retries - 1, -1, -1):
        try:
            return func()
        except Exception as exc:
            if remaining == 0:
                # Budget exhausted: surface the last failure to the caller.
                raise
            attempt_no = max_retries - remaining
            wait = initial_delay * 2 ** (attempt_no - 1)
            print(f" >> Retry {attempt_no}/{max_retries} after {wait}s (Error: {type(exc).__name__})")
            time.sleep(wait)
63
+
64
+
65
def generate_with_gemini(prompt, model_config):
    """Ask one Gemini model for *prompt*; return its text or None.

    Args:
        prompt: Text prompt to send to the model.
        model_config: Dict with a "name" key and an optional
            "max_retries" attempt budget (defaults to 2).

    Returns:
        The generated text, or None when every retry attempt failed.
    """
    model_name = model_config["name"]
    retries = model_config.get("max_retries", 2)

    def _call_model():
        # One attempt: create the model handle, generate, validate.
        print(f" >> Attempting Gemini: {model_name}")
        response = genai.GenerativeModel(model_name).generate_content(prompt)
        if not response or not response.text:
            raise ValueError("Empty response from model")
        return response.text

    try:
        return retry_with_backoff(_call_model, max_retries=retries)
    except Exception as exc:
        # Swallow the failure so the caller can fall through to the
        # next provider in its fallback chain.
        print(f" >> FAILED {model_name}: {type(exc).__name__}")
        return None
85
+
86
+
87
def generate_with_nvidia(prompt, model_config):
    """Ask one NVIDIA-hosted model for *prompt*; return its text or None.

    Args:
        prompt: Text prompt to send as a single user message.
        model_config: Dict with a "name" key and an optional
            "max_retries" attempt budget (defaults to 2).

    Returns:
        The generated text, or None when no key is configured or every
        retry attempt failed.
    """
    # Without a key there is nothing to try; let the caller fall back.
    if not NVIDIA_API_KEY:
        return None

    model_name = model_config["name"]
    retries = model_config.get("max_retries", 2)

    def _call_model():
        print(f" >> Attempting NVIDIA: {model_name}")
        client = OpenAI(
            base_url="https://integrate.api.nvidia.com/v1",
            api_key=NVIDIA_API_KEY,
        )
        completion = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=500,
            temperature=0.7,
        )
        text = completion.choices[0].message.content
        if not text:
            raise ValueError("Empty response from NVIDIA")
        return text

    try:
        return retry_with_backoff(_call_model, max_retries=retries)
    except Exception as exc:
        # Report and return None so the fallback chain continues.
        print(f" >> FAILED NVIDIA {model_name}: {type(exc).__name__}")
        return None
120
+
121
+
122
def generate_ai_suggestions(pred_fertilizer_name):
    """Generate short usage suggestions for a fertilizer via LLMs.

    Tries every configured Gemini model in priority order, then the
    NVIDIA models, and finally falls back to a static template so the
    caller always receives text.

    Args:
        pred_fertilizer_name: Human-readable fertilizer name (e.g. "Urea").

    Returns:
        A few sentences of advice as a single string (never None).
    """
    print("\n" + "=" * 60)
    print(f"🌱 GENERATING AI SUGGESTIONS FOR: {pred_fertilizer_name}")
    print("=" * 60)

    # BUGFIX: the two adjacent f-strings are implicitly concatenated; the
    # original was missing a separator, producing "...thats all okayText
    # should be...". The ". " keeps the two instructions as sentences.
    prompt = (
        f"For {pred_fertilizer_name} fertilizer, generate 3-4 Short Informative sentences each on a new line. Content should not be very big max to max 4 sentence thats all okay. "
        f"Text should be justified and should not contain any special characters."
    )

    response_text = None
    used_model = "None"

    # PHASE 1: Gemini models, in priority order.
    if GEMINI_API_KEY:
        print("\n--- PHASE 1: Trying Gemini Models ---")
        for idx, model_config in enumerate(GEMINI_MODELS, 1):
            print(f"[{idx}/{len(GEMINI_MODELS)}] Testing {model_config['name']}...")
            response_text = generate_with_gemini(prompt, model_config)

            if response_text:
                used_model = f"Gemini-{model_config['name']}"
                print(f" ✓ SUCCESS with {used_model}")
                break

    # PHASE 2: NVIDIA models, only if Gemini produced nothing.
    if not response_text and NVIDIA_API_KEY:
        print("\n--- PHASE 2: Trying NVIDIA Models (Fallback) ---")
        for idx, model_config in enumerate(NVIDIA_MODELS, 1):
            print(f"[{idx}/{len(NVIDIA_MODELS)}] Testing {model_config['name']}...")
            response_text = generate_with_nvidia(prompt, model_config)

            if response_text:
                used_model = f"NVIDIA-{model_config['name']}"
                print(f" ✓ SUCCESS with {used_model}")
                break

    # PHASE 3: static fallback so the UI never shows an empty box.
    if not response_text:
        print("\n❌ All LLM providers failed. Using fallback text.")
        response_text = (
            f"{pred_fertilizer_name} is a commonly used fertilizer in agriculture. "
            f"It provides essential nutrients to crops. "
            f"Follow recommended dosage for best results. "
            f"Consult local agricultural experts for specific guidance."
        )
        used_model = "Fallback"

    print(f"\n✅ Generated using: {used_model}")
    print("=" * 60 + "\n")

    return response_text
175
+
176
+
177
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the input form (GET) or run the prediction pipeline (POST).

    POST expects form fields: temperature, humidity, moisture, soil_type,
    crop_type, nitrogen, potassium, phosphorous. Renders index.html with
    the predicted fertilizer name, quantity, and AI-generated advice.
    """
    if request.method == 'POST':
        # Numeric features straight from the form (raises ValueError on
        # non-numeric input — presumably the form enforces numbers;
        # TODO confirm against the template).
        temperature = float(request.form['temperature'])
        humidity = float(request.form['humidity'])
        moisture = float(request.form['moisture'])
        soil_type = request.form['soil_type']
        crop_type = request.form['crop_type']
        nitrogen = float(request.form['nitrogen'])
        potassium = float(request.form['potassium'])
        phosphorous = float(request.form['phosphorous'])

        # Encode categoricals; -1 marks an unknown category instead of
        # failing the request outright.
        soil_type_encoded = soil_type_encodings.get(soil_type, -1)
        crop_type_encoded = crop_type_encodings.get(crop_type, -1)

        # Column names and order must match the training data.
        user_input = pd.DataFrame({
            'Temperature': [temperature],
            'Humidity': [humidity],
            'Moisture': [moisture],
            'Nitrogen': [nitrogen],
            'Potassium': [potassium],
            'Phosphorous': [phosphorous],
            'Soil Type': [soil_type_encoded],
            'Crop Type': [crop_type_encoded]
        })

        # Predict the encoded fertilizer label, then map it back to its
        # name via a reverse dict — O(1) lookup instead of scanning the
        # encodings with a list comprehension on every request.
        pred_code = rf_ferti_name.predict(user_input)[0]
        name_by_code = {code: name for name, code in fertilizer_name_encodings.items()}
        pred_fertilizer_name = name_by_code[pred_code]

        # Predict the recommended fertilizer quantity.
        pred_fertilizer_qty = rf_ferti_value.predict(user_input)[0]

        # LLM-backed suggestions (always returns text via fallback).
        pred_info = generate_ai_suggestions(pred_fertilizer_name)

        return render_template('index.html', prediction=True, fertilizer_name=pred_fertilizer_name,
                               fertilizer_qty=pred_fertilizer_qty, optimal_usage=pred_fertilizer_qty, pred_info=pred_info)
    return render_template('index.html', prediction=False)
219
+
220
+
221
if __name__ == '__main__':
    # Startup banner summarising which LLM providers are usable.
    banner = "=" * 60
    print("\n" + banner)
    print("🚀 Starting Fertilizer Recommendation App")
    print(banner)
    print("📊 Configuration:")
    print(f" - Gemini API: {'✓ Configured' if GEMINI_API_KEY else '✗ Not configured'}")
    print(f" - NVIDIA API: {'✓ Configured' if NVIDIA_API_KEY else '✗ Not configured'}")
    print(f" - Gemini Models: {len(GEMINI_MODELS)}")
    print(f" - NVIDIA Models: {len(NVIDIA_MODELS)}")
    print(banner + "\n")
    # Bind on all interfaces; 7860 is the conventional HF Spaces port.
    app.run(port=7860, host='0.0.0.0')