Prathamesh Sable committed on
Commit
9b92ec5
·
1 Parent(s): b37d30d

Working on ingredient IDs task and logging changes

Browse files
db/repositories.py CHANGED
@@ -1,6 +1,8 @@
1
  from sqlalchemy.orm import Session
2
  from sqlalchemy import cast, or_, String
3
  from sqlalchemy.dialects.postgresql import JSONB
 
 
4
  from . import models
5
  from interfaces.ingredientModels import IngredientAnalysisResult
6
  from interfaces.productModels import ProductCreate
@@ -14,6 +16,7 @@ class IngredientRepository:
14
  exact_match = self.db.query(models.Ingredient).filter(models.Ingredient.name.ilike(name)).first()
15
 
16
  if exact_match:
 
17
  return exact_match
18
 
19
  # If no exact match, try searching in alternate names
@@ -23,10 +26,12 @@ class IngredientRepository:
23
  models.Ingredient.alternate_names.cast(JSONB).op('?')(name)
24
  ).first()
25
 
 
 
 
26
  return alternate_match
27
  except Exception as e:
28
- from logger_manager import logger
29
- logger.error(f"Error searching alternate names: {e}")
30
  return None
31
 
32
  def get_all_ingredients(self, skip: int = 0, limit: int = 100):
 
1
  from sqlalchemy.orm import Session
2
  from sqlalchemy import cast, or_, String
3
  from sqlalchemy.dialects.postgresql import JSONB
4
+
5
+ from logger_manager import log_debug, log_error
6
  from . import models
7
  from interfaces.ingredientModels import IngredientAnalysisResult
8
  from interfaces.productModels import ProductCreate
 
16
  exact_match = self.db.query(models.Ingredient).filter(models.Ingredient.name.ilike(name)).first()
17
 
18
  if exact_match:
19
+ log_debug(f"Exact match found for ingredient: {name}")
20
  return exact_match
21
 
22
  # If no exact match, try searching in alternate names
 
26
  models.Ingredient.alternate_names.cast(JSONB).op('?')(name)
27
  ).first()
28
 
29
+ if alternate_match:
30
+ log_debug(f"Alternate match found for ingredient: {name}")
31
+
32
  return alternate_match
33
  except Exception as e:
34
+ log_error(f"Error searching alternate names: {e}",e)
 
35
  return None
36
 
37
  def get_all_ingredients(self, skip: int = 0, limit: int = 100):
interfaces/ingredientModels.py CHANGED
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
5
  # Define a structured output model
6
  class IngredientAnalysisResult(BaseModel):
7
  name: str
 
8
  alternate_names: List[str] = Field(default_factory=list)
9
  is_found: bool = False
10
  safety_rating: int = 5
 
5
  # Define a structured output model
6
  class IngredientAnalysisResult(BaseModel):
7
  name: str
8
+ id: int
9
  alternate_names: List[str] = Field(default_factory=list)
10
  is_found: bool = False
11
  safety_rating: int = 5
logger_manager.py CHANGED
@@ -14,7 +14,7 @@ console_handler = logging.StreamHandler()
14
  console_handler.setLevel(logging.ERROR)
15
 
16
  # Create a formatter and set it for both handlers
17
- formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
18
  file_handler.setFormatter(formatter)
19
  console_handler.setFormatter(formatter)
20
 
@@ -24,21 +24,18 @@ logger.addHandler(console_handler)
24
 
25
  def log_debug(message: str):
26
  logger.debug(message)
27
- log_info(f"Debug: {message}")
28
- log_error(f"Debug: {message}")
29
 
30
  def log_info(message: str):
31
  logger.info(message)
32
 
33
  def log_warning(message: str):
34
  logger.warning(message)
35
- log_info(f"Warning: {message}")
36
- log_error(f"Warning: {message}")
37
 
38
- def log_error(message: str):
39
- logger.error(message)
 
 
 
40
 
41
  def log_critical(message: str):
42
  logger.critical(message)
43
- log_info(f"Critical: {message}")
44
- log_error(f"Critical: {message}")
 
14
console_handler.setLevel(logging.ERROR)

# One shared formatter for both handlers: timestamp, logger name, level,
# message, and the call site (path:line) that emitted the record.
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(pathname)s:%(lineno)d"
formatter = logging.Formatter(log_format)
for handler in (file_handler, console_handler):
    handler.setFormatter(formatter)
20
 
 
24
 
25
def log_debug(message: str) -> None:
    """Log *message* at DEBUG level, attributing the record to the caller."""
    # stacklevel=2 makes the formatter's %(pathname)s:%(lineno)d point at the
    # code that called this wrapper, not at this module.
    logger.debug(message, stacklevel=2)


def log_info(message: str) -> None:
    """Log *message* at INFO level, attributing the record to the caller."""
    logger.info(message, stacklevel=2)


def log_warning(message: str) -> None:
    """Log *message* at WARNING level, attributing the record to the caller."""
    logger.warning(message, stacklevel=2)


def log_error(message: str, exc: "Exception | None" = None) -> None:
    """Log *message* at ERROR level.

    If *exc* is given, its traceback is included in the record. Passing the
    exception object itself (rather than ``exc_info=True``) logs the correct
    traceback even when this is called outside an ``except`` block, where
    ``sys.exc_info()`` would be empty or stale.
    """
    if exc is not None:
        logger.error(message, exc_info=exc, stacklevel=2)
    else:
        logger.error(message, stacklevel=2)


def log_critical(message: str) -> None:
    """Log *message* at CRITICAL level, attributing the record to the caller."""
    logger.critical(message, stacklevel=2)
 
 
routers/analysis.py CHANGED
@@ -14,7 +14,7 @@ from interfaces.productModels import ProductIngredientsRequest
14
  from services.auth_service import get_current_user
15
  from PIL import Image
16
  import cv2
17
- from logger_manager import log_info, log_error, logger
18
  from db.database import get_db,SessionLocal
19
  from db.repositories import IngredientRepository
20
  from dotenv import load_dotenv
@@ -49,6 +49,7 @@ def ingredient_db_to_pydantic(db_ingredient):
49
  name=db_ingredient.name,
50
  alternate_names=db_ingredient.alternate_names or [],
51
  is_found=True,
 
52
  safety_rating=db_ingredient.safety_rating or 5,
53
  description=db_ingredient.description or "No description available",
54
  health_effects=db_ingredient.health_effects or ["Unknown"],
@@ -149,14 +150,14 @@ async def get_image(image_name: str):
149
  @traceable
150
  async def process_ingredient_endpoint(request: IngredientRequest, db: Session = Depends(get_db)):
151
  try:
152
- logger.info(f"Received request to process ingredient: {request.name}")
153
 
154
  # Check if we already have this ingredient in the database
155
  repo = IngredientRepository(db)
156
  db_ingredient = repo.get_ingredient_by_name(request.name)
157
 
158
  if db_ingredient:
159
- logger.info(f"Found existing ingredient in database: {request.name}")
160
  # Convert DB model to Pydantic model
161
  # (This would need a function to correctly map the data)
162
  return ingredient_db_to_pydantic(db_ingredient)
@@ -173,11 +174,11 @@ async def process_ingredient_endpoint(request: IngredientRequest, db: Session =
173
 
174
  # Save to database
175
  repo.create_ingredient(result)
176
- logger.info(f"Saved new ingredient to database: {request.name}")
177
 
178
  return result
179
  except Exception as e:
180
- logger.error(f"Error processing ingredient: {e}")
181
  raise HTTPException(status_code=500, detail="Internal Server Error")
182
 
183
  async def process_single_ingredient(ingredient_name: str):
@@ -214,7 +215,7 @@ async def process_single_ingredient(ingredient_name: str):
214
 
215
  return ingredient_data
216
  except Exception as e:
217
- log_error(f"Error processing ingredient {ingredient_name}: {str(e)}")
218
  # Return a minimal result on error to avoid failing the entire batch
219
  return IngredientAnalysisResult(
220
  name=ingredient_name,
@@ -262,10 +263,13 @@ async def process_ingredients_endpoint(product_ingredient: ProductIngredientsReq
262
  } if current_user else {}
263
  )
264
 
 
 
265
  # Step 3: Prepare final response
266
  result = {
267
  "ingredients_count": len(ingredients),
268
  "processed_ingredients": ingredient_results,
 
269
  "overall_analysis": product_analysis,
270
  "user_id": current_user.id if current_user else None,
271
  "timestamp": datetime.now(tz=pytz.timezone('Asia/Kolkata')).isoformat()
@@ -275,5 +279,5 @@ async def process_ingredients_endpoint(product_ingredient: ProductIngredientsReq
275
  return result
276
 
277
  except Exception as e:
278
- log_error(f"Error in process_ingredients_endpoint: {str(e)}")
279
  raise HTTPException(status_code=500, detail="Internal Server Error")
 
14
  from services.auth_service import get_current_user
15
  from PIL import Image
16
  import cv2
17
+ from logger_manager import log_info, log_error
18
  from db.database import get_db,SessionLocal
19
  from db.repositories import IngredientRepository
20
  from dotenv import load_dotenv
 
49
  name=db_ingredient.name,
50
  alternate_names=db_ingredient.alternate_names or [],
51
  is_found=True,
52
+ id=db_ingredient.id,
53
  safety_rating=db_ingredient.safety_rating or 5,
54
  description=db_ingredient.description or "No description available",
55
  health_effects=db_ingredient.health_effects or ["Unknown"],
 
150
  @traceable
151
  async def process_ingredient_endpoint(request: IngredientRequest, db: Session = Depends(get_db)):
152
  try:
153
+ log_info(f"Received request to process ingredient: {request.name}")
154
 
155
  # Check if we already have this ingredient in the database
156
  repo = IngredientRepository(db)
157
  db_ingredient = repo.get_ingredient_by_name(request.name)
158
 
159
  if db_ingredient:
160
+ log_info(f"Found existing ingredient in database: {request.name}")
161
  # Convert DB model to Pydantic model
162
  # (This would need a function to correctly map the data)
163
  return ingredient_db_to_pydantic(db_ingredient)
 
174
 
175
  # Save to database
176
  repo.create_ingredient(result)
177
+ log_info(f"Saved new ingredient to database: {request.name}")
178
 
179
  return result
180
  except Exception as e:
181
+ log_error(f"Error processing ingredient: {e}",e)
182
  raise HTTPException(status_code=500, detail="Internal Server Error")
183
 
184
  async def process_single_ingredient(ingredient_name: str):
 
215
 
216
  return ingredient_data
217
  except Exception as e:
218
+ log_error(f"Error processing ingredient {ingredient_name}: {str(e)}",e)
219
  # Return a minimal result on error to avoid failing the entire batch
220
  return IngredientAnalysisResult(
221
  name=ingredient_name,
 
263
  } if current_user else {}
264
  )
265
 
266
+ # print("Product analysis result:", product_analysis)
267
+
268
  # Step 3: Prepare final response
269
  result = {
270
  "ingredients_count": len(ingredients),
271
  "processed_ingredients": ingredient_results,
272
+ "ingredient_ids": product_analysis["ingredient_ids"],
273
  "overall_analysis": product_analysis,
274
  "user_id": current_user.id if current_user else None,
275
  "timestamp": datetime.now(tz=pytz.timezone('Asia/Kolkata')).isoformat()
 
279
  return result
280
 
281
  except Exception as e:
282
+ log_error(f"Error in process_ingredients_endpoint: {str(e)}",e)
283
  raise HTTPException(status_code=500, detail="Internal Server Error")
routers/auth.py CHANGED
@@ -23,7 +23,7 @@ def register(user: UserCreate, db: Session = Depends(get_db)):
23
  log_info("User registered successfully")
24
  return {"access_token": access_token, "token_type": "bearer"}
25
  except Exception as e:
26
- log_error(f"Error in register endpoint: {str(e)}")
27
  raise HTTPException(status_code=500, detail="Internal Server Error")
28
 
29
  @router.post("/login", response_model=Token)
@@ -45,7 +45,7 @@ def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depend
45
  log_info("User logged in successfully")
46
  return {"access_token": access_token, "token_type": "bearer"}
47
  except Exception as e:
48
- log_error(f"Error in login endpoint: {str(e)}")
49
  raise HTTPException(status_code=500, detail="Internal Server Error")
50
 
51
  @router.get("/user", response_model=UserResponse)
@@ -54,7 +54,7 @@ def read_users_me(current_user: User = Depends(get_current_active_user)):
54
  try:
55
  return current_user
56
  except Exception as e:
57
- log_error(f"Error in read_users_me endpoint: {str(e)}")
58
  raise HTTPException(status_code=500, detail="Internal Server Error")
59
 
60
  @router.get("/user/email", response_model=UserResponse)
@@ -67,5 +67,5 @@ def read_user_by_email(email: str, db: Session = Depends(get_db)):
67
  raise HTTPException(status_code=404, detail="User not found")
68
  return user
69
  except Exception as e:
70
- log_error(f"Error in read_user_by_email endpoint: {str(e)}")
71
  raise HTTPException(status_code=500, detail="Internal Server Error")
 
23
  log_info("User registered successfully")
24
  return {"access_token": access_token, "token_type": "bearer"}
25
  except Exception as e:
26
+ log_error(f"Error in register endpoint: {str(e)}",e)
27
  raise HTTPException(status_code=500, detail="Internal Server Error")
28
 
29
  @router.post("/login", response_model=Token)
 
45
  log_info("User logged in successfully")
46
  return {"access_token": access_token, "token_type": "bearer"}
47
  except Exception as e:
48
+ log_error(f"Error in login endpoint: {str(e)}",e)
49
  raise HTTPException(status_code=500, detail="Internal Server Error")
50
 
51
  @router.get("/user", response_model=UserResponse)
 
54
  try:
55
  return current_user
56
  except Exception as e:
57
+ log_error(f"Error in read_users_me endpoint: {str(e)}",e)
58
  raise HTTPException(status_code=500, detail="Internal Server Error")
59
 
60
  @router.get("/user/email", response_model=UserResponse)
 
67
  raise HTTPException(status_code=404, detail="User not found")
68
  return user
69
  except Exception as e:
70
+ log_error(f"Error in read_user_by_email endpoint: {str(e)}",e)
71
  raise HTTPException(status_code=500, detail="Internal Server Error")
routers/history.py CHANGED
@@ -26,7 +26,7 @@ def create_scan(scan: ScanHistoryCreate, db: Session = Depends(get_db)):
26
  log_info("Scan recorded successfully")
27
  return scan_entry
28
  except Exception as e:
29
- log_error(f"Error in create_scan endpoint: {str(e)}")
30
  raise HTTPException(status_code=500, detail="Internal Server Error")
31
 
32
  @router.get("/scan/{user_id}", response_model=list[ScanHistoryResponse])
@@ -40,5 +40,5 @@ def read_scan_history(user_id: int, db: Session = Depends(get_db)):
40
  log_info("Scan history retrieved successfully")
41
  return scan_history
42
  except Exception as e:
43
- log_error(f"Error in read_scan_history endpoint: {str(e)}")
44
  raise HTTPException(status_code=500, detail="Internal Server Error")
 
26
  log_info("Scan recorded successfully")
27
  return scan_entry
28
  except Exception as e:
29
+ log_error(f"Error in create_scan endpoint: {str(e)}",e)
30
  raise HTTPException(status_code=500, detail="Internal Server Error")
31
 
32
  @router.get("/scan/{user_id}", response_model=list[ScanHistoryResponse])
 
40
  log_info("Scan history retrieved successfully")
41
  return scan_history
42
  except Exception as e:
43
+ log_error(f"Error in read_scan_history endpoint: {str(e)}",e)
44
  raise HTTPException(status_code=500, detail="Internal Server Error")
routers/product.py CHANGED
@@ -75,7 +75,7 @@ async def add_target_to_vuforia(image_name: str, image_path: str) -> str:
75
  log_error(f"Failed to add target {image_name}: {response.text}")
76
  raise Exception(f"Failed to add target {image_name}: {response.text}")
77
  except Exception as e:
78
- log_error(f"Error adding target {image_name}: {e}")
79
  raise
80
 
81
 
@@ -114,7 +114,7 @@ async def add_product_to_database(
114
  return True
115
  except Exception as e:
116
  db.rollback()
117
- log_error(f"Error adding/updating markers for product {product_id} in database: {e}")
118
  raise HTTPException(status_code=500, detail=f"Error adding/updating markers for product {product_id}: {e}")
119
 
120
 
 
75
  log_error(f"Failed to add target {image_name}: {response.text}")
76
  raise Exception(f"Failed to add target {image_name}: {response.text}")
77
  except Exception as e:
78
+ log_error(f"Error adding target {image_name}: {e}",e)
79
  raise
80
 
81
 
 
114
  return True
115
  except Exception as e:
116
  db.rollback()
117
+ log_error(f"Error adding/updating markers for product {product_id} in database: {e}",e)
118
  raise HTTPException(status_code=500, detail=f"Error adding/updating markers for product {product_id}: {e}")
119
 
120
 
services/auth_service.py CHANGED
@@ -26,7 +26,7 @@ def verify_password(plain_password, hashed_password):
26
  try:
27
  return pwd_context.verify(plain_password, hashed_password)
28
  except Exception as e:
29
- log_error(f"Error verifying password: {str(e)}")
30
  raise HTTPException(status_code=500, detail=str(e))
31
 
32
  def get_password_hash(password):
@@ -34,7 +34,7 @@ def get_password_hash(password):
34
  try:
35
  return pwd_context.hash(password)
36
  except Exception as e:
37
- log_error(f"Error hashing password: {str(e)}")
38
  raise HTTPException(status_code=500, detail=str(e))
39
 
40
  def get_user(db, email: str):
@@ -42,7 +42,7 @@ def get_user(db, email: str):
42
  try:
43
  return db.query(User).filter(func.lower(User.email) == email.lower()).first()
44
  except Exception as e:
45
- log_error(f"Error getting user: {str(e)}")
46
  raise HTTPException(status_code=500, detail=str(e))
47
 
48
  def authenticate_user(db: Session, username: str, password: str):
@@ -77,10 +77,10 @@ async def get_current_user(db: Session = Depends(get_db), token: str = Depends(o
77
  raise credentials_exception
78
  token_data = TokenData(email=email)
79
  except JWTError as e:
80
- log_error(f"JWT error: {str(e)}")
81
  raise credentials_exception
82
  except Exception as e:
83
- log_error(f"Error decoding token: {str(e)}")
84
  raise HTTPException(status_code=500, detail=str(e))
85
  user = get_user(db, email=token_data.email)
86
  if user is None:
@@ -94,7 +94,7 @@ async def get_current_active_user(current_user: User = Depends(get_current_user)
94
  raise HTTPException(status_code=400, detail="Inactive user")
95
  return UserResponse.from_orm(current_user)
96
  except Exception as e:
97
- log_error(f"Error getting current active user: {str(e)}")
98
  raise HTTPException(status_code=500, detail=str(e))
99
 
100
  def create_user(db: Session, name: str, email: str, password: str):
@@ -107,5 +107,5 @@ def create_user(db: Session, name: str, email: str, password: str):
107
  db.refresh(db_user)
108
  return db_user
109
  except Exception as e:
110
- log_error(f"Error creating user: {str(e)}")
111
  raise HTTPException(status_code=500, detail=str(e))
 
26
  try:
27
  return pwd_context.verify(plain_password, hashed_password)
28
  except Exception as e:
29
+ log_error(f"Error verifying password: {str(e)}",e)
30
  raise HTTPException(status_code=500, detail=str(e))
31
 
32
  def get_password_hash(password):
 
34
  try:
35
  return pwd_context.hash(password)
36
  except Exception as e:
37
+ log_error(f"Error hashing password: {str(e)}",e)
38
  raise HTTPException(status_code=500, detail=str(e))
39
 
40
  def get_user(db, email: str):
 
42
  try:
43
  return db.query(User).filter(func.lower(User.email) == email.lower()).first()
44
  except Exception as e:
45
+ log_error(f"Error getting user: {str(e)}",e)
46
  raise HTTPException(status_code=500, detail=str(e))
47
 
48
  def authenticate_user(db: Session, username: str, password: str):
 
77
  raise credentials_exception
78
  token_data = TokenData(email=email)
79
  except JWTError as e:
80
+ log_error(f"JWT error: {str(e)}",e)
81
  raise credentials_exception
82
  except Exception as e:
83
+ log_error(f"Error decoding token: {str(e)}",e)
84
  raise HTTPException(status_code=500, detail=str(e))
85
  user = get_user(db, email=token_data.email)
86
  if user is None:
 
94
  raise HTTPException(status_code=400, detail="Inactive user")
95
  return UserResponse.from_orm(current_user)
96
  except Exception as e:
97
+ log_error(f"Error getting current active user: {str(e)}",e)
98
  raise HTTPException(status_code=500, detail=str(e))
99
 
100
  def create_user(db: Session, name: str, email: str, password: str):
 
107
  db.refresh(db_user)
108
  return db_user
109
  except Exception as e:
110
+ log_error(f"Error creating user: {str(e)}",e)
111
  raise HTTPException(status_code=500, detail=str(e))
services/ingredientFinderAgent.py CHANGED
@@ -10,7 +10,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI
10
 
11
  # modular
12
  from interfaces.ingredientModels import IngredientAnalysisResult,IngredientState
13
- from logger_manager import logger
14
  from utils.agent_tools import search_local_db,search_web,search_wikipedia,search_open_food_facts,search_usda,search_pubchem
15
 
16
  # Load environment variables from .env file
@@ -100,7 +100,7 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
100
 
101
  # Basic validation
102
  if not api_key:
103
- logger.error("No Google API key found in environment variables")
104
  new_state = state.copy()
105
  new_state["result"] = {
106
  "name": state["ingredient"],
@@ -120,7 +120,7 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
120
  # convert_system_message_to_human=True
121
  )
122
  except Exception as e:
123
- logger.error(f"Error initializing LLM: {e}")
124
  new_state = state.copy()
125
  new_state["result"] = {
126
  "name": state["ingredient"],
@@ -133,11 +133,11 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
133
 
134
  # Get sources from state
135
  sources_data = state["sources_data"]
136
- logger.info(f"Analyzing ingredient with {len(sources_data)} total sources")
137
 
138
  # Filter for successful sources only
139
  found_sources = [source for source in sources_data if source.get('found', False)]
140
- logger.info(f"Found {len(found_sources)} sources with usable data")
141
 
142
  # Create default result structure
143
  result = {
@@ -180,12 +180,12 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
180
 
181
  source_texts.append(source_text)
182
  except Exception as e:
183
- logger.error(f"Error formatting source {source_name}: {e}")
184
  source_texts.append(f"--- {source_name} ---\nError formatting data: {str(e)}")
185
 
186
  # Combine all source texts
187
  combined_data = "\n\n".join(source_texts)
188
- logger.info(f"Combined data for analysis:\n{combined_data[:500]}...(truncated)")
189
 
190
  # Create the analysis prompt
191
  analysis_prompt = f"""
@@ -218,14 +218,14 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
218
 
219
  # Process with LLM
220
  try:
221
- logger.info("Sending analysis prompt to LLM")
222
  llm_response = llm.invoke(analysis_prompt)
223
- logger.info("Received LLM response")
224
 
225
  # Extract and parse JSON from LLM response
226
  try:
227
  analysis_text = llm_response.content
228
- logger.debug(f"LLM response: {analysis_text[:500]}...(truncated)")
229
 
230
  # Find JSON in the response
231
  start_idx = analysis_text.find('{')
@@ -244,17 +244,17 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
244
  "allergic_info": analysis.get("allergic_info", []),
245
  "diet_type": analysis.get("diet_type", "unknown"),
246
  })
247
- logger.info(f"Analysis complete - Safety Rating: {result['safety_rating']}")
248
  else:
249
- logger.warning("Could not find JSON in LLM response")
250
  result["description"] = "Error: Failed to parse LLM analysis output."
251
  except json.JSONDecodeError as e:
252
- logger.error(f"JSON parsing error: {e}")
253
  result["description"] = f"Error parsing analysis: {str(e)}"
254
 
255
  except Exception as e:
256
- logger.error(f"Error in LLM analysis: {e}")
257
- logger.error(traceback.format_exc())
258
  result.update({
259
  "description": f"Error in analysis: {str(e)}",
260
  "health_effects": ["Error in analysis"],
@@ -360,7 +360,7 @@ class IngredientInfoAgentLangGraph:
360
  tool_name = str(tool_func).split()[0]
361
 
362
  source_name = tool_name.replace("search_", "").replace("_", " ").title()
363
- logger.info(f"Searching {source_name} for {ingredient}")
364
 
365
  try:
366
  # Run the tool function in a thread pool to avoid blocking
@@ -368,15 +368,15 @@ class IngredientInfoAgentLangGraph:
368
  result = await loop.run_in_executor(None, partial(tool_func.invoke, ingredient))
369
 
370
  if result.get("found", False):
371
- logger.info(f"{source_name} found data for {ingredient}")
372
  return result
373
  except Exception as e:
374
- logger.error(f"Error in {source_name} search: {e}")
375
  return {"source": source_name, "found": False, "error": str(e)}
376
 
377
  async def process_ingredient_async(self, ingredient: str) -> IngredientAnalysisResult:
378
  """Process an ingredient using parallel data fetching."""
379
- logger.info(f"=== Parallel processing for: {ingredient} ===")
380
 
381
  # Define all the tools to run in parallel
382
  tools = [
@@ -417,10 +417,10 @@ class IngredientInfoAgentLangGraph:
417
 
418
  # Extract the result or create a default
419
  if final_state.get("result"):
420
- logger.info(f"Analysis complete for {ingredient}")
421
  return IngredientAnalysisResult(**final_state["result"])
422
  else:
423
- logger.info(f"No result in final state for {ingredient}, returning default")
424
  return IngredientAnalysisResult(
425
  name=ingredient,
426
  is_found=len(sources_data) > 0,
@@ -432,49 +432,49 @@ class IngredientInfoAgentLangGraph:
432
  Process an ingredient using direct sequential approach instead of async.
433
  This method provides compatibility with synchronous code.
434
  """
435
- logger.info(f"=== Sequential processing for: {ingredient} ===")
436
 
437
  # Initialize empty sources data
438
  sources_data = []
439
 
440
  # Run each tool directly in sequence and collect results
441
- logger.info(f"Searching local database for {ingredient}")
442
  result = search_local_db.invoke(ingredient)
443
 
444
  if result.get("found", False):
445
  sources_data.append(result)
446
- logger.info(f"Local DB found data for {ingredient}")
447
 
448
- logger.info(f"Searching web for {ingredient}")
449
  result = search_web.invoke(ingredient)
450
  if result.get("found", False):
451
  sources_data.append(result)
452
- logger.info(f"Web search found data for {ingredient}")
453
 
454
- logger.info(f"Searching Wikipedia for {ingredient}")
455
  result = search_wikipedia.invoke(ingredient)
456
  if result.get("found", False):
457
  sources_data.append(result)
458
- logger.info(f"Wikipedia found data for {ingredient}")
459
 
460
- logger.info(f"Searching Open Food Facts for {ingredient}")
461
  result = search_open_food_facts.invoke(ingredient)
462
  if result.get("found", False):
463
  sources_data.append(result)
464
- logger.info(f"Open Food Facts found data for {ingredient}")
465
 
466
 
467
- logger.info(f"Searching USDA for {ingredient}")
468
  result = search_usda.invoke(ingredient)
469
  if result.get("found", False):
470
  sources_data.append(result)
471
- logger.info(f"USDA found data for {ingredient}")
472
 
473
- logger.info(f"Searching PubChem for {ingredient}")
474
  result = search_pubchem.invoke(ingredient)
475
  if result.get("found", False):
476
  sources_data.append(result)
477
- logger.info(f"PubChem found data for {ingredient}")
478
 
479
  state = IngredientState(ingredient=ingredient,
480
  sources_data=sources_data,
@@ -486,11 +486,11 @@ class IngredientInfoAgentLangGraph:
486
 
487
  # Extract the result or create a default
488
  if final_state.get("result"):
489
- logger.info(f"Analysis complete for {ingredient}")
490
 
491
  return IngredientAnalysisResult(**final_state["result"])
492
  else:
493
- logger.info(f"No result in final state for {ingredient}, returning default")
494
  return IngredientAnalysisResult(
495
  name=ingredient,
496
  is_found=len(sources_data) > 0,
 
10
 
11
  # modular
12
  from interfaces.ingredientModels import IngredientAnalysisResult,IngredientState
13
+ from logger_manager import log_debug, log_error, log_info, log_warning
14
  from utils.agent_tools import search_local_db,search_web,search_wikipedia,search_open_food_facts,search_usda,search_pubchem
15
 
16
  # Load environment variables from .env file
 
100
 
101
  # Basic validation
102
  if not api_key:
103
+ log_error("No Google API key found in environment variables")
104
  new_state = state.copy()
105
  new_state["result"] = {
106
  "name": state["ingredient"],
 
120
  # convert_system_message_to_human=True
121
  )
122
  except Exception as e:
123
+ log_error(f"Error initializing LLM: {e}",e)
124
  new_state = state.copy()
125
  new_state["result"] = {
126
  "name": state["ingredient"],
 
133
 
134
  # Get sources from state
135
  sources_data = state["sources_data"]
136
+ log_info(f"Analyzing ingredient with {len(sources_data)} total sources")
137
 
138
  # Filter for successful sources only
139
  found_sources = [source for source in sources_data if source.get('found', False)]
140
+ log_info(f"Found {len(found_sources)} sources with usable data")
141
 
142
  # Create default result structure
143
  result = {
 
180
 
181
  source_texts.append(source_text)
182
  except Exception as e:
183
+ log_error(f"Error formatting source {source_name}: {e}",e)
184
  source_texts.append(f"--- {source_name} ---\nError formatting data: {str(e)}")
185
 
186
  # Combine all source texts
187
  combined_data = "\n\n".join(source_texts)
188
+ log_info(f"Combined data for analysis:\n{combined_data[:500]}...(truncated)")
189
 
190
  # Create the analysis prompt
191
  analysis_prompt = f"""
 
218
 
219
  # Process with LLM
220
  try:
221
+ log_info("Sending analysis prompt to LLM")
222
  llm_response = llm.invoke(analysis_prompt)
223
+ log_info("Received LLM response")
224
 
225
  # Extract and parse JSON from LLM response
226
  try:
227
  analysis_text = llm_response.content
228
+ log_debug(f"LLM response: {analysis_text[:500]}...(truncated)")
229
 
230
  # Find JSON in the response
231
  start_idx = analysis_text.find('{')
 
244
  "allergic_info": analysis.get("allergic_info", []),
245
  "diet_type": analysis.get("diet_type", "unknown"),
246
  })
247
+ log_info(f"Analysis complete - Safety Rating: {result['safety_rating']}")
248
  else:
249
+ log_warning("Could not find JSON in LLM response")
250
  result["description"] = "Error: Failed to parse LLM analysis output."
251
  except json.JSONDecodeError as e:
252
+ log_error(f"JSON parsing error: {e}",e)
253
  result["description"] = f"Error parsing analysis: {str(e)}"
254
 
255
  except Exception as e:
256
+ log_error(f"Error in LLM analysis: {e}",e)
257
+ log_error(traceback.format_exc())
258
  result.update({
259
  "description": f"Error in analysis: {str(e)}",
260
  "health_effects": ["Error in analysis"],
 
360
  tool_name = str(tool_func).split()[0]
361
 
362
  source_name = tool_name.replace("search_", "").replace("_", " ").title()
363
+ log_info(f"Searching {source_name} for {ingredient}")
364
 
365
  try:
366
  # Run the tool function in a thread pool to avoid blocking
 
368
  result = await loop.run_in_executor(None, partial(tool_func.invoke, ingredient))
369
 
370
  if result.get("found", False):
371
+ log_info(f"{source_name} found data for {ingredient}")
372
  return result
373
  except Exception as e:
374
+ log_error(f"Error in {source_name} search: {e}",e)
375
  return {"source": source_name, "found": False, "error": str(e)}
376
 
377
  async def process_ingredient_async(self, ingredient: str) -> IngredientAnalysisResult:
378
  """Process an ingredient using parallel data fetching."""
379
+ log_info(f"=== Parallel processing for: {ingredient} ===")
380
 
381
  # Define all the tools to run in parallel
382
  tools = [
 
417
 
418
  # Extract the result or create a default
419
  if final_state.get("result"):
420
+ log_info(f"Analysis complete for {ingredient}")
421
  return IngredientAnalysisResult(**final_state["result"])
422
  else:
423
+ log_info(f"No result in final state for {ingredient}, returning default")
424
  return IngredientAnalysisResult(
425
  name=ingredient,
426
  is_found=len(sources_data) > 0,
 
432
  Process an ingredient using direct sequential approach instead of async.
433
  This method provides compatibility with synchronous code.
434
  """
435
+ log_info(f"=== Sequential processing for: {ingredient} ===")
436
 
437
  # Initialize empty sources data
438
  sources_data = []
439
 
440
  # Run each tool directly in sequence and collect results
441
+ log_info(f"Searching local database for {ingredient}")
442
  result = search_local_db.invoke(ingredient)
443
 
444
  if result.get("found", False):
445
  sources_data.append(result)
446
+ log_info(f"Local DB found data for {ingredient}")
447
 
448
+ log_info(f"Searching web for {ingredient}")
449
  result = search_web.invoke(ingredient)
450
  if result.get("found", False):
451
  sources_data.append(result)
452
+ log_info(f"Web search found data for {ingredient}")
453
 
454
+ log_info(f"Searching Wikipedia for {ingredient}")
455
  result = search_wikipedia.invoke(ingredient)
456
  if result.get("found", False):
457
  sources_data.append(result)
458
+ log_info(f"Wikipedia found data for {ingredient}")
459
 
460
+ log_info(f"Searching Open Food Facts for {ingredient}")
461
  result = search_open_food_facts.invoke(ingredient)
462
  if result.get("found", False):
463
  sources_data.append(result)
464
+ log_info(f"Open Food Facts found data for {ingredient}")
465
 
466
 
467
+ log_info(f"Searching USDA for {ingredient}")
468
  result = search_usda.invoke(ingredient)
469
  if result.get("found", False):
470
  sources_data.append(result)
471
+ log_info(f"USDA found data for {ingredient}")
472
 
473
+ log_info(f"Searching PubChem for {ingredient}")
474
  result = search_pubchem.invoke(ingredient)
475
  if result.get("found", False):
476
  sources_data.append(result)
477
+ log_info(f"PubChem found data for {ingredient}")
478
 
479
  state = IngredientState(ingredient=ingredient,
480
  sources_data=sources_data,
 
486
 
487
  # Extract the result or create a default
488
  if final_state.get("result"):
489
+ log_info(f"Analysis complete for {ingredient}")
490
 
491
  return IngredientAnalysisResult(**final_state["result"])
492
  else:
493
+ log_info(f"No result in final state for {ingredient}, returning default")
494
  return IngredientAnalysisResult(
495
  name=ingredient,
496
  is_found=len(sources_data) > 0,
services/productAnalyzerAgent.py CHANGED
@@ -3,7 +3,7 @@ from typing import List, Dict, Any, Optional
3
  from dotenv import load_dotenv
4
  from langchain_core.messages import HumanMessage
5
  from langchain_google_genai import ChatGoogleGenerativeAI
6
- from logger_manager import logger
7
  from interfaces.ingredientModels import IngredientAnalysisResult
8
 
9
  # Load environment variables
@@ -17,7 +17,7 @@ async def analyze_product_ingredients(
17
  Analyze multiple ingredients to provide a comprehensive product analysis
18
  for AR display, considering user preferences and dietary restrictions.
19
  """
20
- logger.info(f"Analyzing product with {len(ingredients_data)} ingredients")
21
 
22
  # Initialize LLM
23
  api_key = os.getenv("LLM_API_KEY")
@@ -89,14 +89,18 @@ analysis that would be helpful for a consumer viewing this in an AR application.
89
  "concerns": (array of strings)
90
  }},
91
  "ingredient_interactions": (array of strings),
92
- "key_takeaway": (string),
93
- "ingredient_ids": (array of integers)
94
  }}
95
 
96
  Only include factual information based on the provided data. If information is unavailable for any field, use appropriate default values. If the data required is too obvious then give appropriate answer.
 
 
 
 
 
97
  """
98
 
99
- logger.info("Sending product analysis prompt to LLM")
100
 
101
  try:
102
  # Process with LLM
@@ -115,10 +119,10 @@ Only include factual information based on the provided data. If information is u
115
  try:
116
  analysis = json.loads(json_match.group(0))
117
  analysis["ingredient_ids"] = ingredient_ids
118
- logger.info("Successfully parsed product analysis")
119
  return analysis
120
  except json.JSONDecodeError as e:
121
- logger.error(f"JSON parsing error: {e}")
122
  # Return a simplified analysis on error
123
  return {
124
  "overall_safety_score": calculate_average_safety(ingredients_data),
@@ -128,7 +132,7 @@ Only include factual information based on the provided data. If information is u
128
  "ingredient_ids": ingredient_ids
129
  }
130
  else:
131
- logger.error("Could not find JSON in LLM response")
132
  return {
133
  "overall_safety_score": calculate_average_safety(ingredients_data),
134
  "error": "Failed to generate structured analysis",
@@ -137,7 +141,7 @@ Only include factual information based on the provided data. If information is u
137
  }
138
 
139
  except Exception as e:
140
- logger.error(f"Error in product analysis: {e}")
141
  # Fallback analysis based on simple calculations
142
  return generate_fallback_analysis(ingredients_data, ingredient_ids)
143
 
 
3
  from dotenv import load_dotenv
4
  from langchain_core.messages import HumanMessage
5
  from langchain_google_genai import ChatGoogleGenerativeAI
6
+ from logger_manager import log_error, log_info
7
  from interfaces.ingredientModels import IngredientAnalysisResult
8
 
9
  # Load environment variables
 
17
  Analyze multiple ingredients to provide a comprehensive product analysis
18
  for AR display, considering user preferences and dietary restrictions.
19
  """
20
+ log_info(f"Analyzing product with {len(ingredients_data)} ingredients")
21
 
22
  # Initialize LLM
23
  api_key = os.getenv("LLM_API_KEY")
 
89
  "concerns": (array of strings)
90
  }},
91
  "ingredient_interactions": (array of strings),
92
+ "key_takeaway": (string)
 
93
  }}
94
 
95
  Only include factual information based on the provided data. If information is unavailable for any field, use appropriate default values. If the data required is too obvious then give appropriate answer.
96
+ IMPORTANT: Ensure your response is valid JSON with double quotes (") around property names and string values.
97
+ Avoid single quotes (') for JSON properties and values.
98
+ Ensure all elements in arrays and objects are separated by commas, and don't include trailing commas.
99
+ Also strictly follow the JSON format in your response.
100
+
101
  """
102
 
103
+ log_info("Sending product analysis prompt to LLM")
104
 
105
  try:
106
  # Process with LLM
 
119
  try:
120
  analysis = json.loads(json_match.group(0))
121
  analysis["ingredient_ids"] = ingredient_ids
122
+ log_info("Successfully parsed product analysis")
123
  return analysis
124
  except json.JSONDecodeError as e:
125
+ log_error(f"JSON parsing error: {e}",e)
126
  # Return a simplified analysis on error
127
  return {
128
  "overall_safety_score": calculate_average_safety(ingredients_data),
 
132
  "ingredient_ids": ingredient_ids
133
  }
134
  else:
135
+ log_error("Could not find JSON in LLM response")
136
  return {
137
  "overall_safety_score": calculate_average_safety(ingredients_data),
138
  "error": "Failed to generate structured analysis",
 
141
  }
142
 
143
  except Exception as e:
144
+ log_error(f"Error in product analysis: {e}",e)
145
  # Fallback analysis based on simple calculations
146
  return generate_fallback_analysis(ingredients_data, ingredient_ids)
147
 
services/scan_history.py CHANGED
@@ -19,7 +19,7 @@ def record_scan(db: Session, user_id: int, product_id: int) -> ScanHistory:
19
  log_info("Scan recorded successfully")
20
  return scan_entry
21
  except Exception as e:
22
- log_error(f"Error recording scan: {str(e)}")
23
  raise HTTPException(status_code=500, detail="Internal Server Error")
24
 
25
  def get_scan_history(db: Session, user_id: int) -> list[ScanHistory]:
@@ -32,5 +32,5 @@ def get_scan_history(db: Session, user_id: int) -> list[ScanHistory]:
32
  log_info("Scan history retrieved successfully")
33
  return scan_history
34
  except Exception as e:
35
- log_error(f"Error getting scan history: {str(e)}")
36
  raise HTTPException(status_code=500, detail="Internal Server Error")
 
19
  log_info("Scan recorded successfully")
20
  return scan_entry
21
  except Exception as e:
22
+ log_error(f"Error recording scan: {str(e)}",e)
23
  raise HTTPException(status_code=500, detail="Internal Server Error")
24
 
25
  def get_scan_history(db: Session, user_id: int) -> list[ScanHistory]:
 
32
  log_info("Scan history retrieved successfully")
33
  return scan_history
34
  except Exception as e:
35
+ log_error(f"Error getting scan history: {str(e)}",e)
36
  raise HTTPException(status_code=500, detail="Internal Server Error")
utils/agent_tools.py CHANGED
@@ -6,7 +6,7 @@ from dotenv import load_dotenv
6
 
7
  from typing import Dict, Any
8
  # modular
9
- from logger_manager import logger
10
  from dotenv import load_dotenv
11
 
12
  import aiohttp
@@ -19,8 +19,6 @@ from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
19
  from langchain_core.tools import tool
20
 
21
 
22
- from logger_manager import logger
23
-
24
  # Load environment variables from .env file
25
  load_dotenv()
26
 
@@ -28,10 +26,10 @@ load_dotenv()
28
  SCRAPED_DB_PATH = "data/Food_Aditives_E_numbers.csv" # Ensure this file exists
29
  if os.path.exists(SCRAPED_DB_PATH):
30
  additives_df = pd.read_csv(SCRAPED_DB_PATH)
31
- logger.info(f"Loaded database with {len(additives_df)} entries")
32
  else:
33
  additives_df = None
34
- logger.warning("Scraped database not found!")
35
 
36
 
37
  # Define a rate limit (adjust as needed)
@@ -47,7 +45,7 @@ DUCKDUCKGO_MAX_RETRIES = int(os.getenv("DUCKDUCKGO_MAX_RETRIES", "3")) # Max re
47
  @tool("search_local_db")
48
  def search_local_db(ingredient: str) -> Dict[str, Any]:
49
  """Search local database for ingredient information. E number database scrapped"""
50
- logger.info(f"Searching local DB for: {ingredient}")
51
  if additives_df is not None:
52
  match = additives_df[additives_df['Name of Additive'].str.contains(ingredient, case=False, na=False, regex=False)]
53
  if not match.empty:
@@ -57,7 +55,7 @@ def search_local_db(ingredient: str) -> Dict[str, Any]:
57
  @tool("search_open_food_facts")
58
  def search_open_food_facts(ingredient: str) -> Dict[str, Any]:
59
  """Search Open Food Facts database for ingredient information."""
60
- logger.info(f"Searching Open Food Facts for: {ingredient}")
61
 
62
  try:
63
  open_food_facts_api = "https://world.openfoodfacts.org/api/v0"
@@ -90,13 +88,13 @@ def search_open_food_facts(ingredient: str) -> Dict[str, Any]:
90
  return {"source": "Open Food Facts", "found": False, "data": None}
91
 
92
  except Exception as e:
93
- logger.error(f"Error searching Open Food Facts: {e}")
94
  return {"source": "Open Food Facts", "found": False, "error": str(e)}
95
 
96
  @tool("search_usda")
97
  def search_usda(ingredient: str) -> Dict[str, Any]:
98
  """Search USDA FoodData Central for ingredient information."""
99
- logger.info(f"Searching USDA for: {ingredient}")
100
 
101
  try:
102
  usda_api = "https://api.nal.usda.gov/fdc/v1"
@@ -125,12 +123,12 @@ def search_usda(ingredient: str) -> Dict[str, Any]:
125
  return {"source": "USDA FoodData Central", "found": False, "data": None}
126
 
127
  except Exception as e:
128
- logger.error(f"Error searching USDA: {e}")
129
  return {"source": "USDA FoodData Central", "found": False, "error": str(e)}
130
 
131
  async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
132
  """Asynchronously search PubChem for chemical information about the ingredient."""
133
- logger.info(f"Searching PubChem for: {ingredient}")
134
 
135
  try:
136
  pubchem_api = "https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data"
@@ -146,19 +144,19 @@ async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
146
  if response.status == 200:
147
  return await response.json()
148
  else:
149
- logger.warning(f"PubChem returned status: {response.status} for URL: {url}")
150
  return None
151
  except asyncio.TimeoutError:
152
  if retry_count < PUBCHEM_MAX_RETRIES:
153
  delay = (2 ** retry_count) * 5 # Exponential backoff
154
- logger.warning(f"PubChem timeout for URL '{url}'. Retrying in {delay:.2f} seconds (attempt {retry_count + 1}/{PUBCHEM_MAX_RETRIES})")
155
  await asyncio.sleep(delay)
156
  return await fetch_data(url, timeout, retry_count + 1) # Recursive retry
157
  else:
158
- logger.error(f"Max retries reached for PubChem timeout on URL: {url}")
159
  return None
160
  except Exception as e:
161
- logger.error(f"PubChem error for URL '{url}': {e}")
162
  return None
163
 
164
  data = await fetch_data(search_url)
@@ -187,7 +185,7 @@ async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
187
  return {"source": "PubChem", "found": False, "data": None}
188
 
189
  except Exception as e:
190
- logger.error(f"Error searching PubChem: {e}")
191
  return {"source": "PubChem", "found": False, "error": str(e)}
192
 
193
  @tool("search_pubchem")
@@ -205,7 +203,7 @@ def search_pubchem(ingredient: str) -> Dict[str, Any]:
205
  @tool("search_wikipedia")
206
  def search_wikipedia(ingredient: str) -> Dict[str, Any]:
207
  """Search Wikipedia for ingredient information."""
208
- logger.info(f"Searching Wikipedia for: {ingredient}")
209
 
210
  try:
211
  wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
@@ -238,13 +236,13 @@ def search_wikipedia(ingredient: str) -> Dict[str, Any]:
238
  return {"source": "Wikipedia", "found": False, "data": None}
239
 
240
  except Exception as e:
241
- logger.error(f"Error searching Wikipedia: {e}")
242
  return {"source": "Wikipedia", "found": False, "error": str(e)}
243
 
244
  @tool("search_web")
245
  def search_web(ingredient: str) -> Dict[str, Any]:
246
  """Search web for ingredient information using DuckDuckGo."""
247
- logger.info(f"Searching web for: {ingredient}")
248
 
249
  try:
250
  duckduckgo = DuckDuckGoSearchRun()
@@ -257,5 +255,5 @@ def search_web(ingredient: str) -> Dict[str, Any]:
257
  all_results.append({"query": query, "result": result})
258
  return {"source": "DuckDuckGo", "found": bool(all_results), "data": all_results}
259
  except Exception as e:
260
- logger.error(f"Web search error: {e}")
261
  return {"source": "DuckDuckGo", "found": False, "error": str(e)}
 
6
 
7
  from typing import Dict, Any
8
  # modular
9
+ from logger_manager import log_error, log_info, log_warning
10
  from dotenv import load_dotenv
11
 
12
  import aiohttp
 
19
  from langchain_core.tools import tool
20
 
21
 
 
 
22
  # Load environment variables from .env file
23
  load_dotenv()
24
 
 
26
  SCRAPED_DB_PATH = "data/Food_Aditives_E_numbers.csv" # Ensure this file exists
27
  if os.path.exists(SCRAPED_DB_PATH):
28
  additives_df = pd.read_csv(SCRAPED_DB_PATH)
29
+ log_info(f"Loaded database with {len(additives_df)} entries")
30
  else:
31
  additives_df = None
32
+ log_warning("Scraped database not found!")
33
 
34
 
35
  # Define a rate limit (adjust as needed)
 
45
  @tool("search_local_db")
46
  def search_local_db(ingredient: str) -> Dict[str, Any]:
47
  """Search local database for ingredient information. E number database scrapped"""
48
+ log_info(f"Searching local DB for: {ingredient}")
49
  if additives_df is not None:
50
  match = additives_df[additives_df['Name of Additive'].str.contains(ingredient, case=False, na=False, regex=False)]
51
  if not match.empty:
 
55
  @tool("search_open_food_facts")
56
  def search_open_food_facts(ingredient: str) -> Dict[str, Any]:
57
  """Search Open Food Facts database for ingredient information."""
58
+ log_info(f"Searching Open Food Facts for: {ingredient}")
59
 
60
  try:
61
  open_food_facts_api = "https://world.openfoodfacts.org/api/v0"
 
88
  return {"source": "Open Food Facts", "found": False, "data": None}
89
 
90
  except Exception as e:
91
+ log_error(f"Error searching Open Food Facts: {e}",e)
92
  return {"source": "Open Food Facts", "found": False, "error": str(e)}
93
 
94
  @tool("search_usda")
95
  def search_usda(ingredient: str) -> Dict[str, Any]:
96
  """Search USDA FoodData Central for ingredient information."""
97
+ log_info(f"Searching USDA for: {ingredient}")
98
 
99
  try:
100
  usda_api = "https://api.nal.usda.gov/fdc/v1"
 
123
  return {"source": "USDA FoodData Central", "found": False, "data": None}
124
 
125
  except Exception as e:
126
+ log_error(f"Error searching USDA: {e}",e)
127
  return {"source": "USDA FoodData Central", "found": False, "error": str(e)}
128
 
129
  async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
130
  """Asynchronously search PubChem for chemical information about the ingredient."""
131
+ log_info(f"Searching PubChem for: {ingredient}")
132
 
133
  try:
134
  pubchem_api = "https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data"
 
144
  if response.status == 200:
145
  return await response.json()
146
  else:
147
+ log_warning(f"PubChem returned status: {response.status} for URL: {url}")
148
  return None
149
  except asyncio.TimeoutError:
150
  if retry_count < PUBCHEM_MAX_RETRIES:
151
  delay = (2 ** retry_count) * 5 # Exponential backoff
152
+ log_warning(f"PubChem timeout for URL '{url}'. Retrying in {delay:.2f} seconds (attempt {retry_count + 1}/{PUBCHEM_MAX_RETRIES})")
153
  await asyncio.sleep(delay)
154
  return await fetch_data(url, timeout, retry_count + 1) # Recursive retry
155
  else:
156
+ log_error(f"Max retries reached for PubChem timeout on URL: {url}",asyncio.TimeoutError)
157
  return None
158
  except Exception as e:
159
+ log_error(f"PubChem error for URL '{url}': {e}",e)
160
  return None
161
 
162
  data = await fetch_data(search_url)
 
185
  return {"source": "PubChem", "found": False, "data": None}
186
 
187
  except Exception as e:
188
+ log_error(f"Error searching PubChem: {e}",e)
189
  return {"source": "PubChem", "found": False, "error": str(e)}
190
 
191
  @tool("search_pubchem")
 
203
  @tool("search_wikipedia")
204
  def search_wikipedia(ingredient: str) -> Dict[str, Any]:
205
  """Search Wikipedia for ingredient information."""
206
+ log_info(f"Searching Wikipedia for: {ingredient}")
207
 
208
  try:
209
  wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
 
236
  return {"source": "Wikipedia", "found": False, "data": None}
237
 
238
  except Exception as e:
239
+ log_error(f"Error searching Wikipedia: {e}",e)
240
  return {"source": "Wikipedia", "found": False, "error": str(e)}
241
 
242
  @tool("search_web")
243
  def search_web(ingredient: str) -> Dict[str, Any]:
244
  """Search web for ingredient information using DuckDuckGo."""
245
+ log_info(f"Searching web for: {ingredient}")
246
 
247
  try:
248
  duckduckgo = DuckDuckGoSearchRun()
 
255
  all_results.append({"query": query, "result": result})
256
  return {"source": "DuckDuckGo", "found": bool(all_results), "data": all_results}
257
  except Exception as e:
258
+ log_error(f"Web search error: {e}",e)
259
  return {"source": "DuckDuckGo", "found": False, "error": str(e)}