zynt31 committed on
Commit
a760cc2
·
verified ·
1 Parent(s): a68c8b8

Updated app.py with recommendation

Browse files
Files changed (1) hide show
  1. app.py +18 -49
app.py CHANGED
@@ -11,11 +11,20 @@ from contextlib import asynccontextmanager # Add this import
11
  from models.rice_model import RiceDiseaseCNN
12
  from models.rice_leaf_validator import RiceLeafValidator, is_rice_leaf
13
  from utils.image_processing import process_image
 
14
  import requests
15
  import json
16
  import os
17
  from typing import Dict, Any
18
 
 
 
 
 
 
 
 
 
19
  # Configure logging
20
  logging.basicConfig(
21
  level=logging.INFO,
@@ -87,56 +96,16 @@ RECOMMENDATIONS = {
87
  }
88
  }
89
 
90
def get_ollama_recommendation(disease_name: str, confidence: float) -> Dict[str, Any]:
    """Get AI-generated recommendation using Ollama"""
    try:
        logging.info(f"Requesting Ollama recommendation for {disease_name} with {confidence:.2f}% confidence")

        # Build the expert prompt sent to the local model.
        prompt = f"""
As an agricultural expert specializing in rice diseases in the Philippines:

Our AI system has detected {disease_display_names.get(disease_name, disease_name)} with {confidence:.2f}% confidence in a rice plant image.

Please provide:
1. A detailed analysis of how this disease affects rice plants
2. Specific recommendations for treating this disease infection
3. Preventive measures farmers should take to avoid future infections

Format your response in clear sections that can be easily read by farmers.
"""

        # Query the locally running Ollama server (non-streaming response).
        resp = requests.post(
            'http://localhost:11434/api/generate',
            json={"model": "mistral:7b-instruct", "prompt": prompt, "stream": False},
            timeout=60,  # 60-second timeout
        )

        # Guard clause: any non-200 answer falls back to the static table.
        if resp.status_code != 200:
            logging.error(f"Ollama API error: {resp.status_code}")
            return RECOMMENDATIONS.get(disease_name, {})

        payload = resp.json()
        ai_text = payload.get("response", "")

        # Log a short preview of the model output for debugging.
        logging.info(f"Ollama response for {disease_name} (first 100 chars): {ai_text[:100]}...")

        return {
            "recommendation": ai_text,
            "source": "ai",
            "details": f"AI-generated recommendation for {disease_name}",
        }

    except Exception as e:
        # Network failure, bad JSON, etc. — fall back to static recommendation.
        logging.error(f"Error calling Ollama: {e}")
        return RECOMMENDATIONS.get(disease_name, {})
140
 
141
  def load_model():
142
  """Load the PyTorch models"""
 
11
  from models.rice_model import RiceDiseaseCNN
12
  from models.rice_leaf_validator import RiceLeafValidator, is_rice_leaf
13
  from utils.image_processing import process_image
14
+
15
  import requests
16
  import json
17
  import os
18
  from typing import Dict, Any
19
 
20
# Hugging Face transformers pipeline for text recommendations.
# NOTE: flan-t5 is an encoder-decoder (seq2seq) model, so the correct
# pipeline task is "text2text-generation" — the "text-generation" task is
# for causal LMs and does not fit T5-family checkpoints. The output dicts
# of both tasks use the "generated_text" key, so callers are unaffected.
try:
    from transformers import pipeline
    recommender = pipeline("text2text-generation", model="google/flan-t5-small")
except Exception as e:
    # Degrade gracefully: the /recommend/ endpoint checks for None.
    recommender = None
    logging.warning(f"Transformers pipeline not loaded: {e}")
28
  # Configure logging
29
  logging.basicConfig(
30
  level=logging.INFO,
 
96
  }
97
  }
98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
 
100
# New Hugging Face recommendation endpoint
@app.post("/recommend/")
async def recommend(disease: str):
    """Generate farming recommendations for a rice disease using Hugging Face LLM.

    Args:
        disease: Human-readable rice disease name to ask the model about.

    Returns:
        A dict with a "recommendation" string on success, or an "error"
        message when the text-generation pipeline failed to load at startup.
    """
    if recommender is None:
        return {"error": "Text generation pipeline not available."}
    prompt = f"Give 2-3 short farming recommendations for managing {disease} in rice plants."
    # max_new_tokens bounds only the generated continuation; the deprecated
    # max_length also counts prompt tokens and can silently truncate output.
    response = recommender(prompt, max_new_tokens=100, num_return_sequences=1)
    return {"recommendation": response[0]["generated_text"]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
  def load_model():
111
  """Load the PyTorch models"""