prathameshks committed
Commit d616264 · 2 Parent(s): acbe5b8 15ab6a4

Merge branch 'main' of https://github.com/prathameshks/FoodAnalyzer-API
alembic.ini CHANGED
@@ -63,7 +63,6 @@ version_path_separator = os
 # are written from script.py.mako
 # output_encoding = utf-8
 
-# sqlalchemy.url = postgresql://admin:kMUY6qMLce2fwMiocvf3cjxJLyeAOR1y@dpg-cvs9ajeuk2gs739pi5b0-a.singapore-postgres.render.com/food_analyzer_db
 sqlalchemy.url =
 
 [post_write_hooks]
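Leaving sqlalchemy.url blank in alembic.ini keeps credentials out of version control, but the URL then has to be injected at migration time. A minimal sketch of one common pattern in migrations/env.py — DATABASE_URL is an assumed variable name, not something this repo is confirmed to use:

# migrations/env.py (sketch, assuming a DATABASE_URL environment variable)
import os
from alembic import context

config = context.config
config.set_main_option("sqlalchemy.url", os.environ["DATABASE_URL"])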
db/models.py CHANGED
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, Integer, String, Boolean, Text, JSON, ForeignKey, DateTime
+from sqlalchemy import Column, Integer, String, Boolean, Text, JSON, ForeignKey, DateTime, text, TIMESTAMP
 from sqlalchemy.orm import relationship, Mapped, mapped_column
 from sqlalchemy.sql import func
 from .database import Base
@@ -10,15 +10,16 @@ class Ingredient(Base):
     __tablename__ = "ingredients"
 
     id = Column(Integer, primary_key=True, index=True)
-    name = Column(String, unique=True, index=True)
-    alternate_names = Column(JSON, nullable=True)
+    name = Column(String(255), unique=True, index=True)
+    alternate_names = Column(Text, nullable=True)
     safety_rating = Column(Integer, nullable=True)
     description = Column(Text, nullable=True)
-    health_effects = Column(JSON, nullable=True)
-    allergic_info = Column(JSON, nullable=True)
-    diet_type = Column(String, nullable=True)
-    created_at = Column(DateTime(timezone=True), server_default=func.now())
-    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
+    health_effects = Column(Text, nullable=True)
+    allergic_info = Column(Text, nullable=True)
+    diet_type = Column(String(255), nullable=True)
+    # Fix the default timestamp for MySQL
+    created_at = Column(TIMESTAMP, server_default=text('CURRENT_TIMESTAMP'))
+    updated_at = Column(DateTime(timezone=True), nullable=True)
 
     # Relationships
     sources = relationship("IngredientSource", back_populates="ingredient")
@@ -28,44 +29,46 @@ class IngredientSource(Base):
 
     id = Column(Integer, primary_key=True, index=True)
     ingredient_id = Column(Integer, ForeignKey("ingredients.id"))
-    source_name = Column(String, nullable=False)
+    source_name = Column(String(255), nullable=False)
     found = Column(Boolean, default=False)
     summary = Column(Text, nullable=True)
-    data = Column(JSON, nullable=True)
+    data = Column(Text, nullable=True)
 
     # Relationships
     ingredient = relationship("Ingredient", back_populates="sources")
 
 class Marker(Base):
     __tablename__ = "markers"
-    id: Mapped[int] = mapped_column(primary_key=True)
-    image_name: Mapped[str]
-    vuforia_id: Mapped[str]
-    product_id: Mapped[int] = mapped_column(ForeignKey("products.id"))
+
+    id = Column(Integer, primary_key=True, index=True)
+    image_name = Column(String(255), nullable=False)
+    vuforia_id = Column(String(255), nullable=False)
+    product_id = Column(Integer, ForeignKey("products.id"))
 
-    product: Mapped["Product"] = relationship(back_populates="markers")
+    # Traditional relationship syntax
+    product = relationship("Product", back_populates="markers")
+
 class Product(Base):
 
     __tablename__ = "products"
 
     id = Column(Integer, primary_key=True, index=True)
-    product_name = Column(String, nullable=False)
-    ingredients = Column(JSON, nullable=True)
-    ingredients_analysis = Column(JSON, nullable=True)
+    product_name = Column(String(255), nullable=False)
+    ingredients = Column(Text, nullable=True)
+    ingredients_analysis = Column(Text, nullable=True)
     overall_safety_score = Column(Integer, nullable=True)
-    suitable_diet_types = Column(String, nullable=True)
-    allergy_warnings = Column(JSON, nullable=True)
-    usage_recommendations = Column(String, nullable=True)
-    health_insights = Column(JSON, nullable=True)
-    ingredient_interactions = Column(JSON, nullable=True)
-    key_takeaway = Column(String, nullable=True)
+    suitable_diet_types = Column(String(255), nullable=True)
+    allergy_warnings = Column(Text, nullable=True)
+    usage_recommendations = Column(Text, nullable=True)
+    health_insights = Column(Text, nullable=True)
+    ingredient_interactions = Column(Text, nullable=True)
+    key_takeaway = Column(Text, nullable=True)
     ingredients_count = Column(Integer, nullable=True)
     user_id = Column(Integer, nullable=True)
     timestamp = Column(DateTime, nullable=True)
-    ingredient_ids = Column(JSON, nullable=True)
+    ingredient_ids = Column(Text, nullable=True)
 
-    data_quality_warnings = Column(JSON, nullable=True)
+    data_quality_warnings = Column(Text, nullable=True)
     markers: Mapped[List["Marker"]] = relationship(back_populates="product")
 
 
@@ -73,9 +76,9 @@ class User(Base):
     __tablename__ = "users"
 
     id = Column(Integer, primary_key=True, index=True)
-    name = Column(String, unique=False, index=False, nullable=False)
-    email = Column(String, unique=True, index=True, nullable=False)
-    hashed_password = Column(String, nullable=False)
+    name = Column(String(255), unique=False, index=False, nullable=False)
+    email = Column(String(255), unique=True, index=True, nullable=False)
+    hashed_password = Column(String(255), nullable=False)
     is_active = Column(Boolean, default=True)
 
     # Relationships
@@ -95,10 +98,10 @@ class UserPreferences(Base):
 
     id = Column(Integer, primary_key=True, index=True)
     user_id = Column(Integer, ForeignKey("users.id", ondelete="CASCADE"))
-    dietary_restrictions = Column(String, nullable=True)
-    allergens = Column(String, nullable=True)
-    preferred_ingredients = Column(String, nullable=True)
-    disliked_ingredients = Column(String, nullable=True)
+    dietary_restrictions = Column(String(255), nullable=True)
+    allergens = Column(Text, nullable=True)
+    preferred_ingredients = Column(Text, nullable=True)
+    disliked_ingredients = Column(Text, nullable=True)
 
     # Relationships
     user = relationship("User", back_populates="preferences")
@@ -112,4 +115,4 @@ class ScanHistory(Base):
     scan_date = Column(DateTime, default=datetime.now)
 
     # Relationships
-    user = relationship("User", back_populates="scan_history")
+    user = relationship("User", back_populates="scan_history")
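Several columns move from JSON to Text here (the MySQL-oriented change the inline comment hints at), which shifts (de)serialization to the application layer. A hedged sketch of helpers that could sit alongside these models — the function names are illustrative, not part of the repo:

import json

def to_text(value):
    # Serialize lists/dicts for storage in the Text columns above.
    return json.dumps(value) if value is not None else None

def from_text(raw, default=None):
    # Parse a Text column back into Python data; tolerate NULL/empty rows.
    return json.loads(raw) if raw else default

# Example round-trip for Ingredient.alternate_names:
stored = to_text(["E300", "ascorbic acid"])
names = from_text(stored, default=[])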
db/repositories.py CHANGED
@@ -1,6 +1,8 @@
 from sqlalchemy.orm import Session
 from sqlalchemy import cast, or_, String
 from sqlalchemy.dialects.postgresql import JSONB
+
+from logger_manager import log_debug, log_error
 from . import models
 from interfaces.ingredientModels import IngredientAnalysisResult
 from interfaces.productModels import ProductCreate
@@ -14,6 +16,7 @@ class IngredientRepository:
         exact_match = self.db.query(models.Ingredient).filter(models.Ingredient.name.ilike(name)).first()
 
         if exact_match:
+            log_debug(f"Exact match found for ingredient: {name}")
             return exact_match
 
         # If no exact match, try searching in alternate names
@@ -23,10 +26,12 @@ class IngredientRepository:
                 models.Ingredient.alternate_names.cast(JSONB).op('?')(name)
             ).first()
 
+            if alternate_match:
+                log_debug(f"Alternate match found for ingredient: {name}")
+
             return alternate_match
         except Exception as e:
-            from logger_manager import logger
-            logger.error(f"Error searching alternate names: {e}")
+            log_error(f"Error searching alternate names: {e}", e)
             return None
 
     def get_all_ingredients(self, skip: int = 0, limit: int = 100):
@@ -90,11 +95,17 @@
             self.db.refresh(db_ingredient)
             return db_ingredient
         return None
+
 class ProductRepository:
     def __init__(self, db: Session):
         self.db = db
 
     def add_product(self, product_create: ProductCreate):
+        db_product = self._create_product(product_create)
+        self._store_analysis_data(db_product, product_create.ingredients_analysis)
+        return db_product
+
+    def _create_product(self, product_create: ProductCreate):
         db_product = models.Product(
             product_name=product_create.product_name,
             ingredients=product_create.ingredients,
@@ -113,4 +124,9 @@ class ProductRepository:
         self.db.add(db_product)
         self.db.commit()
         self.db.refresh(db_product)
-        return db_product
+        return db_product
+
+    def _store_analysis_data(self, db_product, ingredients_analysis):
+        db_product.ingredients_analysis = ingredients_analysis
+        self.db.commit()
+        self.db.refresh(db_product)
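Worth noting: the alternate-name lookup above still casts to PostgreSQL's JSONB and uses its ? operator, while db/models.py now declares alternate_names as Text for MySQL. A dialect-neutral fallback could match against the serialized JSON with LIKE — a sketch under that assumption, not code from the repo, trading precision for portability:

def find_by_alternate_name(self, name: str):
    # JSON-encoded strings are double-quoted, so search for the quoted token.
    pattern = f'%"{name}"%'
    return (
        self.db.query(models.Ingredient)
        .filter(models.Ingredient.alternate_names.like(pattern))
        .first()
    )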
interfaces/ingredientModels.py CHANGED
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
 # Define a structured output model
 class IngredientAnalysisResult(BaseModel):
     name: str
+    id: int
     alternate_names: List[str] = Field(default_factory=list)
     is_found: bool = False
     safety_rating: int = 5
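Since id is declared without a default, it becomes a required field, so existing callers that build this model must now pass it. A usage sketch with illustrative values:

result = IngredientAnalysisResult(
    id=42,                      # now required by the model
    name="citric acid",
    alternate_names=["E330"],
    is_found=True,
    safety_rating=9,
)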
logger_manager.py CHANGED
@@ -14,7 +14,7 @@ console_handler = logging.StreamHandler()
 console_handler.setLevel(logging.ERROR)
 
 # Create a formatter and set it for both handlers
-formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(pathname)s:%(lineno)d")
 file_handler.setFormatter(formatter)
 console_handler.setFormatter(formatter)
@@ -31,8 +31,11 @@ def log_info(message: str):
 def log_warning(message: str):
     logger.warning(message)
 
-def log_error(message: str):
-    logger.error(message)
+def log_error(message: str, exc: Exception = None):
+    if exc:
+        logger.error(message, exc_info=True)
+    else:
+        logger.error(message)
 
 def log_critical(message: str):
     logger.critical(message)
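Usage sketch for the widened log_error signature: called inside an except block with the exception passed, exc_info=True makes the standard logging module append the traceback.

try:
    1 / 0                                     # any failing call
except ZeroDivisionError as e:
    log_error(f"Operation failed: {e}", e)    # message plus traceback
    log_error("Operation failed")             # message only, old behaviour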
main.py CHANGED
@@ -1,17 +1,54 @@
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
+from fastapi.responses import HTMLResponse, RedirectResponse
+from fastapi.templating import Jinja2Templates
+from fastapi.staticfiles import StaticFiles
 from routers.auth import router as auth_router
 from routers.analysis import router as analysis_router
 from routers.history import router as history_router
 from routers.product import router as product_router
+from dotenv import load_dotenv
+import os
+import uvicorn
+from pathlib import Path
+
+# Load environment variables from the .env file
+load_dotenv()
+PORT = os.getenv("PORT", 8000)
+
+# Define the templates directory
+templates = Jinja2Templates(directory="templates")
 
 app = FastAPI()
 
+@app.get("/")
+def read_root():
+    return RedirectResponse("/api")
+
+# Log every request's data using middleware
+@app.middleware("http")
+async def log_requests(request: Request, call_next):
+    # Store the body content before sending it to the next handler
+    body_content = await request.body()
+    # Re-attach the consumed body so downstream handlers can read it
+    request._body = body_content
+    response = await call_next(request)
+    print(f"Request: {request.method} {request.url}")
+    print(f"Data: {body_content}")
+    print(f"Headers: {request.headers}")
+    return response
+
+@app.get("/api", response_class=HTMLResponse)
+async def read_api(request: Request):
+    return templates.TemplateResponse("api_docs.html", {"request": request})
+
 app.include_router(analysis_router, prefix="/api/analyze")
 app.include_router(auth_router, prefix="/api/auth")
 app.include_router(product_router, prefix="/api/product")
 app.include_router(history_router, prefix="/api/history")
 
+app.add_event_handler("startup", lambda: print("Starting up..."))
+
 # To run the FastAPI app, use the command: uvicorn main:app --reload
 if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    # Run with uvicorn directly for development purposes
+    uvicorn.run(app, host="0.0.0.0", port=PORT)
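One caveat worth flagging: os.getenv returns a string whenever PORT is actually set in the environment, while uvicorn.run expects an integer port. A small defensive tweak (not in the commit) would be:

PORT = int(os.getenv("PORT", "8000"))   # coerce, since env values are strings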
migrations/versions/00248bed0fb5_updated_product.py ADDED
@@ -0,0 +1,64 @@
+"""updated product
+
+Revision ID: 00248bed0fb5
+Revises: a193e9cfa8c5
+Create Date: 2025-04-27 13:26:01.243225
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = '00248bed0fb5'
+down_revision: Union[str, None] = 'a193e9cfa8c5'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('products', sa.Column('overall_safety_score', sa.Integer(), nullable=True))
+    op.add_column('products', sa.Column('suitable_diet_types', sa.String(), nullable=True))
+    op.add_column('products', sa.Column('allergy_warnings', sa.JSON(), nullable=True))
+    op.add_column('products', sa.Column('usage_recommendations', sa.String(), nullable=True))
+    op.add_column('products', sa.Column('health_insights', sa.JSON(), nullable=True))
+    op.add_column('products', sa.Column('ingredient_interactions', sa.JSON(), nullable=True))
+    op.add_column('products', sa.Column('key_takeaway', sa.String(), nullable=True))
+    op.add_column('products', sa.Column('ingredients_count', sa.Integer(), nullable=True))
+    op.add_column('products', sa.Column('user_id', sa.Integer(), nullable=True))
+    op.add_column('products', sa.Column('timestamp', sa.DateTime(), nullable=True))
+    op.add_column('products', sa.Column('ingredient_ids', sa.JSON(), nullable=True))
+    op.drop_column('products', 'brands')
+    op.drop_column('products', 'ingredients_text')
+    op.drop_column('products', 'nutrient_levels')
+    op.drop_column('products', 'nutriments')
+    op.drop_column('products', 'nutriscore')
+    op.drop_column('products', 'generic_name')
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('products', sa.Column('generic_name', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('products', sa.Column('nutriscore', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True))
+    op.add_column('products', sa.Column('nutriments', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True))
+    op.add_column('products', sa.Column('nutrient_levels', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True))
+    op.add_column('products', sa.Column('ingredients_text', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('products', sa.Column('brands', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.drop_column('products', 'ingredient_ids')
+    op.drop_column('products', 'timestamp')
+    op.drop_column('products', 'user_id')
+    op.drop_column('products', 'ingredients_count')
+    op.drop_column('products', 'key_takeaway')
+    op.drop_column('products', 'ingredient_interactions')
+    op.drop_column('products', 'health_insights')
+    op.drop_column('products', 'usage_recommendations')
+    op.drop_column('products', 'allergy_warnings')
+    op.drop_column('products', 'suitable_diet_types')
+    op.drop_column('products', 'overall_safety_score')
+    # ### end Alembic commands ###
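For reference, these revision files are applied through Alembic. A minimal sketch using its Python command API (equivalent to `alembic upgrade head` / `alembic downgrade -1` on the CLI), assuming alembic.ini points at a reachable database:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "head")     # run all pending upgrade() functions
# command.downgrade(cfg, "-1")   # revert the most recent revision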
migrations/versions/37b8cf50d624_updated_db.py ADDED
@@ -0,0 +1,65 @@
+"""updated DB
+
+Revision ID: 37b8cf50d624
+Revises: a02cd287eec0
+Create Date: 2025-05-09 11:09:49.317918
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '37b8cf50d624'
+down_revision: Union[str, None] = 'a02cd287eec0'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(length=255), nullable=False),
+    sa.Column('vuforia_id', sa.String(length=255), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_markers_id'), 'markers', ['id'], unique=False)
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+    sa.Column('allergens', sa.Text(), nullable=True),
+    sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+    sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_index(op.f('ix_markers_id'), table_name='markers')
+    op.drop_table('markers')
+    # ### end Alembic commands ###
migrations/versions/464bdc8c474f_updated_db.py ADDED
@@ -0,0 +1,129 @@
+"""updated DB
+
+Revision ID: 464bdc8c474f
+Revises: f4c75c169cf2
+Create Date: 2025-05-09 10:33:17.799071
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '464bdc8c474f'
+down_revision: Union[str, None] = 'f4c75c169cf2'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ingredients',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=True),
+    sa.Column('alternate_names', sa.JSON(), nullable=True),
+    sa.Column('safety_rating', sa.Integer(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.Column('health_effects', sa.JSON(), nullable=True),
+    sa.Column('allergic_info', sa.JSON(), nullable=True),
+    sa.Column('diet_type', sa.String(length=255), nullable=True),
+    sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
+    sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+    op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+    op.create_table('products',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('product_name', sa.String(length=255), nullable=False),
+    sa.Column('ingredients', sa.JSON(), nullable=True),
+    sa.Column('ingredients_analysis', sa.JSON(), nullable=True),
+    sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+    sa.Column('suitable_diet_types', sa.String(length=255), nullable=True),
+    sa.Column('allergy_warnings', sa.JSON(), nullable=True),
+    sa.Column('usage_recommendations', sa.Text(), nullable=True),
+    sa.Column('health_insights', sa.JSON(), nullable=True),
+    sa.Column('ingredient_interactions', sa.JSON(), nullable=True),
+    sa.Column('key_takeaway', sa.Text(), nullable=True),
+    sa.Column('ingredients_count', sa.Integer(), nullable=True),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('timestamp', sa.DateTime(), nullable=True),
+    sa.Column('ingredient_ids', sa.JSON(), nullable=True),
+    sa.Column('data_quality_warnings', sa.JSON(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+    op.create_table('users',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=False),
+    sa.Column('email', sa.String(length=255), nullable=False),
+    sa.Column('hashed_password', sa.String(length=255), nullable=False),
+    sa.Column('is_active', sa.Boolean(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_table('ingredient_sources',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('ingredient_id', sa.Integer(), nullable=True),
+    sa.Column('source_name', sa.String(length=255), nullable=False),
+    sa.Column('found', sa.Boolean(), nullable=True),
+    sa.Column('summary', sa.Text(), nullable=True),
+    sa.Column('data', sa.JSON(), nullable=True),
+    sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+    sa.Column('allergens', sa.Text(), nullable=True),
+    sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+    sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_table('markers')
+    op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+    op.drop_table('ingredient_sources')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.drop_table('users')
+    op.drop_index(op.f('ix_products_id'), table_name='products')
+    op.drop_table('products')
+    op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+    op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+    op.drop_table('ingredients')
+    # ### end Alembic commands ###
migrations/versions/50e02fec4154_updated_db.py ADDED
@@ -0,0 +1,32 @@
+"""updated DB
+
+Revision ID: 50e02fec4154
+Revises: 00248bed0fb5
+Create Date: 2025-05-09 09:59:35.961528
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '50e02fec4154'
+down_revision: Union[str, None] = '00248bed0fb5'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    pass
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    pass
+    # ### end Alembic commands ###
migrations/versions/579f1cd42a8f_updated_db.py ADDED
@@ -0,0 +1,129 @@
+"""updated DB
+
+Revision ID: 579f1cd42a8f
+Revises: f8aa9e5882a2
+Create Date: 2025-05-09 10:40:30.132328
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '579f1cd42a8f'
+down_revision: Union[str, None] = 'f8aa9e5882a2'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ingredients',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=True),
+    sa.Column('alternate_names', sa.Text(), nullable=True),
+    sa.Column('safety_rating', sa.Integer(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.Column('health_effects', sa.Text(), nullable=True),
+    sa.Column('allergic_info', sa.Text(), nullable=True),
+    sa.Column('diet_type', sa.String(length=255), nullable=True),
+    sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
+    sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+    op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+    op.create_table('products',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('product_name', sa.String(length=255), nullable=False),
+    sa.Column('ingredients', sa.Text(), nullable=True),
+    sa.Column('ingredients_analysis', sa.Text(), nullable=True),
+    sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+    sa.Column('suitable_diet_types', sa.String(length=255), nullable=True),
+    sa.Column('allergy_warnings', sa.Text(), nullable=True),
+    sa.Column('usage_recommendations', sa.Text(), nullable=True),
+    sa.Column('health_insights', sa.Text(), nullable=True),
+    sa.Column('ingredient_interactions', sa.Text(), nullable=True),
+    sa.Column('key_takeaway', sa.Text(), nullable=True),
+    sa.Column('ingredients_count', sa.Integer(), nullable=True),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('timestamp', sa.DateTime(), nullable=True),
+    sa.Column('ingredient_ids', sa.Text(), nullable=True),
+    sa.Column('data_quality_warnings', sa.Text(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+    op.create_table('users',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=False),
+    sa.Column('email', sa.String(length=255), nullable=False),
+    sa.Column('hashed_password', sa.String(length=255), nullable=False),
+    sa.Column('is_active', sa.Boolean(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_table('ingredient_sources',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('ingredient_id', sa.Integer(), nullable=True),
+    sa.Column('source_name', sa.String(length=255), nullable=False),
+    sa.Column('found', sa.Boolean(), nullable=True),
+    sa.Column('summary', sa.Text(), nullable=True),
+    sa.Column('data', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+    sa.Column('allergens', sa.Text(), nullable=True),
+    sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+    sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_table('markers')
+    op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+    op.drop_table('ingredient_sources')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.drop_table('users')
+    op.drop_index(op.f('ix_products_id'), table_name='products')
+    op.drop_table('products')
+    op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+    op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+    op.drop_table('ingredients')
+    # ### end Alembic commands ###
migrations/versions/6ea4e5e4de4d_updated_db.py ADDED
@@ -0,0 +1,129 @@
+"""updated DB
+
+Revision ID: 6ea4e5e4de4d
+Revises: 579f1cd42a8f
+Create Date: 2025-05-09 10:48:51.119673
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '6ea4e5e4de4d'
+down_revision: Union[str, None] = '579f1cd42a8f'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ingredients',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=True),
+    sa.Column('alternate_names', sa.Text(), nullable=True),
+    sa.Column('safety_rating', sa.Integer(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.Column('health_effects', sa.Text(), nullable=True),
+    sa.Column('allergic_info', sa.Text(), nullable=True),
+    sa.Column('diet_type', sa.String(length=255), nullable=True),
+    sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('NOW()'), nullable=True),
+    sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('NOW()'), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+    op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+    op.create_table('products',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('product_name', sa.String(length=255), nullable=False),
+    sa.Column('ingredients', sa.Text(), nullable=True),
+    sa.Column('ingredients_analysis', sa.Text(), nullable=True),
+    sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+    sa.Column('suitable_diet_types', sa.String(length=255), nullable=True),
+    sa.Column('allergy_warnings', sa.Text(), nullable=True),
+    sa.Column('usage_recommendations', sa.Text(), nullable=True),
+    sa.Column('health_insights', sa.Text(), nullable=True),
+    sa.Column('ingredient_interactions', sa.Text(), nullable=True),
+    sa.Column('key_takeaway', sa.Text(), nullable=True),
+    sa.Column('ingredients_count', sa.Integer(), nullable=True),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('timestamp', sa.DateTime(), nullable=True),
+    sa.Column('ingredient_ids', sa.Text(), nullable=True),
+    sa.Column('data_quality_warnings', sa.Text(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+    op.create_table('users',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=False),
+    sa.Column('email', sa.String(length=255), nullable=False),
+    sa.Column('hashed_password', sa.String(length=255), nullable=False),
+    sa.Column('is_active', sa.Boolean(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_table('ingredient_sources',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('ingredient_id', sa.Integer(), nullable=True),
+    sa.Column('source_name', sa.String(length=255), nullable=False),
+    sa.Column('found', sa.Boolean(), nullable=True),
+    sa.Column('summary', sa.Text(), nullable=True),
+    sa.Column('data', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+    sa.Column('allergens', sa.Text(), nullable=True),
+    sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+    sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_table('markers')
+    op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+    op.drop_table('ingredient_sources')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.drop_table('users')
+    op.drop_index(op.f('ix_products_id'), table_name='products')
+    op.drop_table('products')
+    op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+    op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+    op.drop_table('ingredients')
+    # ### end Alembic commands ###
migrations/versions/a02cd287eec0_updated_db.py ADDED
@@ -0,0 +1,129 @@
+"""updated DB
+
+Revision ID: a02cd287eec0
+Revises: 6ea4e5e4de4d
+Create Date: 2025-05-09 11:03:43.504573
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'a02cd287eec0'
+down_revision: Union[str, None] = '6ea4e5e4de4d'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ingredients',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=True),
+    sa.Column('alternate_names', sa.Text(), nullable=True),
+    sa.Column('safety_rating', sa.Integer(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.Column('health_effects', sa.Text(), nullable=True),
+    sa.Column('allergic_info', sa.Text(), nullable=True),
+    sa.Column('diet_type', sa.String(length=255), nullable=True),
+    sa.Column('created_at', sa.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),
+    sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+    op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+    op.create_table('products',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('product_name', sa.String(length=255), nullable=False),
+    sa.Column('ingredients', sa.Text(), nullable=True),
+    sa.Column('ingredients_analysis', sa.Text(), nullable=True),
+    sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+    sa.Column('suitable_diet_types', sa.String(length=255), nullable=True),
+    sa.Column('allergy_warnings', sa.Text(), nullable=True),
+    sa.Column('usage_recommendations', sa.Text(), nullable=True),
+    sa.Column('health_insights', sa.Text(), nullable=True),
+    sa.Column('ingredient_interactions', sa.Text(), nullable=True),
+    sa.Column('key_takeaway', sa.Text(), nullable=True),
+    sa.Column('ingredients_count', sa.Integer(), nullable=True),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('timestamp', sa.DateTime(), nullable=True),
+    sa.Column('ingredient_ids', sa.Text(), nullable=True),
+    sa.Column('data_quality_warnings', sa.Text(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+    op.create_table('users',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(length=255), nullable=False),
+    sa.Column('email', sa.String(length=255), nullable=False),
+    sa.Column('hashed_password', sa.String(length=255), nullable=False),
+    sa.Column('is_active', sa.Boolean(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_table('ingredient_sources',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('ingredient_id', sa.Integer(), nullable=True),
+    sa.Column('source_name', sa.String(length=255), nullable=False),
+    sa.Column('found', sa.Boolean(), nullable=True),
+    sa.Column('summary', sa.Text(), nullable=True),
+    sa.Column('data', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+    sa.Column('allergens', sa.Text(), nullable=True),
+    sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+    sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_table('markers')
+    op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+    op.drop_table('ingredient_sources')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.drop_table('users')
+    op.drop_index(op.f('ix_products_id'), table_name='products')
+    op.drop_table('products')
+    op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+    op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+    op.drop_table('ingredients')
+    # ### end Alembic commands ###
migrations/versions/a193e9cfa8c5_added_product_and_vuforia_realted.py ADDED
@@ -0,0 +1,39 @@
+"""added product and vuforia related
+
+Revision ID: a193e9cfa8c5
+Revises: b640d22a64f5
+Create Date: 2025-04-27 10:47:48.941202
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'a193e9cfa8c5'
+down_revision: Union[str, None] = 'b640d22a64f5'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_table('markers')
+    # ### end Alembic commands ###
migrations/versions/f4c75c169cf2_updated_db.py ADDED
@@ -0,0 +1,129 @@
+"""updated DB
+
+Revision ID: f4c75c169cf2
+Revises: 50e02fec4154
+Create Date: 2025-05-09 10:27:47.732746
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'f4c75c169cf2'
+down_revision: Union[str, None] = '50e02fec4154'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ingredients',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(), nullable=True),
+    sa.Column('alternate_names', sa.JSON(), nullable=True),
+    sa.Column('safety_rating', sa.Integer(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.Column('health_effects', sa.JSON(), nullable=True),
+    sa.Column('allergic_info', sa.JSON(), nullable=True),
+    sa.Column('diet_type', sa.String(), nullable=True),
+    sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
+    sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+    op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+    op.create_table('products',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('product_name', sa.String(), nullable=False),
+    sa.Column('ingredients', sa.JSON(), nullable=True),
+    sa.Column('ingredients_analysis', sa.JSON(), nullable=True),
+    sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+    sa.Column('suitable_diet_types', sa.String(), nullable=True),
+    sa.Column('allergy_warnings', sa.JSON(), nullable=True),
+    sa.Column('usage_recommendations', sa.String(), nullable=True),
+    sa.Column('health_insights', sa.JSON(), nullable=True),
+    sa.Column('ingredient_interactions', sa.JSON(), nullable=True),
+    sa.Column('key_takeaway', sa.String(), nullable=True),
+    sa.Column('ingredients_count', sa.Integer(), nullable=True),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('timestamp', sa.DateTime(), nullable=True),
+    sa.Column('ingredient_ids', sa.JSON(), nullable=True),
+    sa.Column('data_quality_warnings', sa.JSON(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+    op.create_table('users',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('name', sa.String(), nullable=False),
+    sa.Column('email', sa.String(), nullable=False),
+    sa.Column('hashed_password', sa.String(), nullable=False),
+    sa.Column('is_active', sa.Boolean(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+    op.create_table('ingredient_sources',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('ingredient_id', sa.Integer(), nullable=True),
+    sa.Column('source_name', sa.String(), nullable=False),
+    sa.Column('found', sa.Boolean(), nullable=True),
+    sa.Column('summary', sa.Text(), nullable=True),
+    sa.Column('data', sa.JSON(), nullable=True),
+    sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+    op.create_table('markers',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('image_name', sa.String(), nullable=False),
+    sa.Column('vuforia_id', sa.String(), nullable=False),
+    sa.Column('product_id', sa.Integer(), nullable=False),
+    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('scan_history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('product_id', sa.Integer(), nullable=True),
+    sa.Column('scan_date', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+    op.create_table('user_preferences',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('user_id', sa.Integer(), nullable=True),
+    sa.Column('dietary_restrictions', sa.String(), nullable=True),
+    sa.Column('allergens', sa.String(), nullable=True),
+    sa.Column('preferred_ingredients', sa.String(), nullable=True),
+    sa.Column('disliked_ingredients', sa.String(), nullable=True),
+    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+    op.drop_table('user_preferences')
+    op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+    op.drop_table('scan_history')
+    op.drop_table('markers')
+    op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+    op.drop_table('ingredient_sources')
+    op.drop_index(op.f('ix_users_id'), table_name='users')
+    op.drop_index(op.f('ix_users_email'), table_name='users')
+    op.drop_table('users')
+    op.drop_index(op.f('ix_products_id'), table_name='products')
+    op.drop_table('products')
+    op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+    op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+    op.drop_table('ingredients')
+    # ### end Alembic commands ###
migrations/versions/f8aa9e5882a2_updated_db.py ADDED
@@ -0,0 +1,129 @@
+ """updated DB
+
+ Revision ID: f8aa9e5882a2
+ Revises: 464bdc8c474f
+ Create Date: 2025-05-09 10:36:18.602936
+
+ """
+ from typing import Sequence, Union
+
+ from alembic import op
+ import sqlalchemy as sa
+
+
+ # revision identifiers, used by Alembic.
+ revision: str = 'f8aa9e5882a2'
+ down_revision: Union[str, None] = '464bdc8c474f'
+ branch_labels: Union[str, Sequence[str], None] = None
+ depends_on: Union[str, Sequence[str], None] = None
+
+
+ def upgrade() -> None:
+ """Upgrade schema."""
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('ingredients',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=True),
+ sa.Column('alternate_names', sa.Text(), nullable=True),
+ sa.Column('safety_rating', sa.Integer(), nullable=True),
+ sa.Column('description', sa.Text(), nullable=True),
+ sa.Column('health_effects', sa.Text(), nullable=True),
+ sa.Column('allergic_info', sa.Text(), nullable=True),
+ sa.Column('diet_type', sa.String(length=255), nullable=True),
+ sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
+ sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_ingredients_id'), 'ingredients', ['id'], unique=False)
+ op.create_index(op.f('ix_ingredients_name'), 'ingredients', ['name'], unique=True)
+ op.create_table('products',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('product_name', sa.String(length=255), nullable=False),
+ sa.Column('ingredients', sa.Text(), nullable=True),
+ sa.Column('ingredients_analysis', sa.Text(), nullable=True),
+ sa.Column('overall_safety_score', sa.Integer(), nullable=True),
+ sa.Column('suitable_diet_types', sa.String(length=255), nullable=True),
+ sa.Column('allergy_warnings', sa.Text(), nullable=True),
+ sa.Column('usage_recommendations', sa.Text(), nullable=True),
+ sa.Column('health_insights', sa.Text(), nullable=True),
+ sa.Column('ingredient_interactions', sa.Text(), nullable=True),
+ sa.Column('key_takeaway', sa.Text(), nullable=True),
+ sa.Column('ingredients_count', sa.Integer(), nullable=True),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('timestamp', sa.DateTime(), nullable=True),
+ sa.Column('ingredient_ids', sa.Text(), nullable=True),
+ sa.Column('data_quality_warnings', sa.Text(), nullable=True),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_products_id'), 'products', ['id'], unique=False)
+ op.create_table('users',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('hashed_password', sa.String(length=255), nullable=False),
+ sa.Column('is_active', sa.Boolean(), nullable=True),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
+ op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
+ op.create_table('ingredient_sources',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('ingredient_id', sa.Integer(), nullable=True),
+ sa.Column('source_name', sa.String(length=255), nullable=False),
+ sa.Column('found', sa.Boolean(), nullable=True),
+ sa.Column('summary', sa.Text(), nullable=True),
+ sa.Column('data', sa.Text(), nullable=True),
+ sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_ingredient_sources_id'), 'ingredient_sources', ['id'], unique=False)
+ op.create_table('markers',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('image_name', sa.String(), nullable=False),
+ sa.Column('vuforia_id', sa.String(), nullable=False),
+ sa.Column('product_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_table('scan_history',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('product_id', sa.Integer(), nullable=True),
+ sa.Column('scan_date', sa.DateTime(), nullable=True),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_scan_history_id'), 'scan_history', ['id'], unique=False)
+ op.create_table('user_preferences',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('dietary_restrictions', sa.String(length=255), nullable=True),
+ sa.Column('allergens', sa.Text(), nullable=True),
+ sa.Column('preferred_ingredients', sa.Text(), nullable=True),
+ sa.Column('disliked_ingredients', sa.Text(), nullable=True),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index(op.f('ix_user_preferences_id'), 'user_preferences', ['id'], unique=False)
+ # ### end Alembic commands ###
+
+
+ def downgrade() -> None:
+ """Downgrade schema."""
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index(op.f('ix_user_preferences_id'), table_name='user_preferences')
+ op.drop_table('user_preferences')
+ op.drop_index(op.f('ix_scan_history_id'), table_name='scan_history')
+ op.drop_table('scan_history')
+ op.drop_table('markers')
+ op.drop_index(op.f('ix_ingredient_sources_id'), table_name='ingredient_sources')
+ op.drop_table('ingredient_sources')
+ op.drop_index(op.f('ix_users_id'), table_name='users')
+ op.drop_index(op.f('ix_users_email'), table_name='users')
+ op.drop_table('users')
+ op.drop_index(op.f('ix_products_id'), table_name='products')
+ op.drop_table('products')
+ op.drop_index(op.f('ix_ingredients_name'), table_name='ingredients')
+ op.drop_index(op.f('ix_ingredients_id'), table_name='ingredients')
+ op.drop_table('ingredients')
+ # ### end Alembic commands ###
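
With the f8aa9e5882a2 revision in place, the schema changes are applied or rolled back with the standard Alembic CLI; the commands below are a reference, not part of this commit:

    alembic upgrade head      # apply pending migrations, ending at f8aa9e5882a2
    alembic downgrade -1      # revert one step, running this file's downgrade()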
requirements.txt CHANGED
@@ -2,11 +2,14 @@
  fastapi==0.115.12
  uvicorn==0.34.0
  python-multipart==0.0.20
+ jinja2

  # Database
  sqlalchemy==2.0.40
  alembic==1.15.2
  psycopg2-binary==2.9.10
+ mysqlclient
+ pymysql

  # Authentication
  python-jose==3.3.0
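
The mysqlclient and pymysql additions line up with the MySQL-oriented column changes in the f8aa9e5882a2 migration above (String columns gain explicit lengths, which MySQL requires for VARCHAR). With pymysql, the connection string in alembic.ini or the engine setup would take roughly this form; the host, credentials, and database name below are placeholders, not values from this repository:

    sqlalchemy.url = mysql+pymysql://user:password@localhost:3306/food_analyzer_db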
routers/analysis.py CHANGED
@@ -14,7 +14,7 @@ from interfaces.productModels import ProductIngredientsRequest
  from services.auth_service import get_current_user
  from PIL import Image
  import cv2
- from logger_manager import log_info, log_error, logger
+ from logger_manager import log_info, log_error
  from db.database import get_db,SessionLocal
  from db.repositories import IngredientRepository
  from dotenv import load_dotenv
@@ -49,6 +49,7 @@ def ingredient_db_to_pydantic(db_ingredient):
  name=db_ingredient.name,
  alternate_names=db_ingredient.alternate_names or [],
  is_found=True,
+ id=db_ingredient.id,
  safety_rating=db_ingredient.safety_rating or 5,
  description=db_ingredient.description or "No description available",
  health_effects=db_ingredient.health_effects or ["Unknown"],
@@ -123,7 +124,7 @@ async def process_image(image: UploadFile = File(...)):
  {
  "message": "Product extracted successfully",
  "product_image_name": extracted_product_name,
- },status_code=200
+ }, status_code=200
  )
  else:
  print("Failed to extract the product.")
@@ -149,14 +150,14 @@ async def get_image(image_name: str):
  @traceable
  async def process_ingredient_endpoint(request: IngredientRequest, db: Session = Depends(get_db)):
  try:
- logger.info(f"Received request to process ingredient: {request.name}")
+ log_info(f"Received request to process ingredient: {request.name}")

  # Check if we already have this ingredient in the database
  repo = IngredientRepository(db)
  db_ingredient = repo.get_ingredient_by_name(request.name)

  if db_ingredient:
- logger.info(f"Found existing ingredient in database: {request.name}")
+ log_info(f"Found existing ingredient in database: {request.name}")
  # Convert DB model to Pydantic model
  # (This would need a function to correctly map the data)
  return ingredient_db_to_pydantic(db_ingredient)
@@ -173,11 +174,11 @@ async def process_ingredient_endpoint(request: IngredientRequest, db: Session =

  # Save to database
  repo.create_ingredient(result)
- logger.info(f"Saved new ingredient to database: {request.name}")
+ log_info(f"Saved new ingredient to database: {request.name}")

  return result
  except Exception as e:
- logger.error(f"Error processing ingredient: {e}")
+ log_error(f"Error processing ingredient: {e}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")

  async def process_single_ingredient(ingredient_name: str):
@@ -214,7 +215,7 @@ async def process_single_ingredient(ingredient_name: str):

  return ingredient_data
  except Exception as e:
- log_error(f"Error processing ingredient {ingredient_name}: {str(e)}")
+ log_error(f"Error processing ingredient {ingredient_name}: {str(e)}",e)
  # Return a minimal result on error to avoid failing the entire batch
  return IngredientAnalysisResult(
  name=ingredient_name,
@@ -262,10 +263,13 @@ async def process_ingredients_endpoint(product_ingredient: ProductIngredientsReq
  } if current_user else {}
  )

+ # print("Product analysis result:", product_analysis)
+
  # Step 3: Prepare final response
  result = {
  "ingredients_count": len(ingredients),
  "processed_ingredients": ingredient_results,
+ "ingredient_ids": product_analysis["ingredient_ids"],
  "overall_analysis": product_analysis,
  "user_id": current_user.id if current_user else None,
  "timestamp": datetime.now(tz=pytz.timezone('Asia/Kolkata')).isoformat()
@@ -275,5 +279,5 @@ async def process_ingredients_endpoint(product_ingredient: ProductIngredientsReq
  return result

  except Exception as e:
- log_error(f"Error in process_ingredients_endpoint: {str(e)}")
+ log_error(f"Error in process_ingredients_endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")
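
These routers now route everything through log_info/log_error and pass the caught exception as a second argument to log_error. The logger_manager module itself is not shown in this commit, so the following is only a sketch of the wrapper signature the call sites imply:

    # Hypothetical sketch of logger_manager's wrappers; only the two-argument
    # log_error signature is actually implied by the call sites in this diff.
    import logging
    import traceback
    from typing import Optional

    logger = logging.getLogger("food_analyzer")

    def log_info(message: str) -> None:
        logger.info(message)

    def log_error(message: str, exc: Optional[BaseException] = None) -> None:
        # Append the traceback when the caller hands over the caught exception.
        if exc is not None:
            tb = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
            message = f"{message}\n{tb}"
        logger.error(message)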
routers/auth.py CHANGED
@@ -23,7 +23,7 @@ def register(user: UserCreate, db: Session = Depends(get_db)):
  log_info("User registered successfully")
  return {"access_token": access_token, "token_type": "bearer"}
  except Exception as e:
- log_error(f"Error in register endpoint: {str(e)}")
+ log_error(f"Error in register endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")

  @router.post("/login", response_model=Token)
@@ -45,7 +45,7 @@ def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depend
  log_info("User logged in successfully")
  return {"access_token": access_token, "token_type": "bearer"}
  except Exception as e:
- log_error(f"Error in login endpoint: {str(e)}")
+ log_error(f"Error in login endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")

  @router.get("/user", response_model=UserResponse)
@@ -54,7 +54,7 @@ def read_users_me(current_user: User = Depends(get_current_active_user)):
  try:
  return current_user
  except Exception as e:
- log_error(f"Error in read_users_me endpoint: {str(e)}")
+ log_error(f"Error in read_users_me endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")

  @router.get("/user/email", response_model=UserResponse)
@@ -67,5 +67,5 @@ def read_user_by_email(email: str, db: Session = Depends(get_db)):
  raise HTTPException(status_code=404, detail="User not found")
  return user
  except Exception as e:
- log_error(f"Error in read_user_by_email endpoint: {str(e)}")
+ log_error(f"Error in read_user_by_email endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")
routers/history.py CHANGED
@@ -26,7 +26,7 @@ def create_scan(scan: ScanHistoryCreate, db: Session = Depends(get_db)):
  log_info("Scan recorded successfully")
  return scan_entry
  except Exception as e:
- log_error(f"Error in create_scan endpoint: {str(e)}")
+ log_error(f"Error in create_scan endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")

  @router.get("/scan/{user_id}", response_model=list[ScanHistoryResponse])
@@ -40,5 +40,5 @@ def read_scan_history(user_id: int, db: Session = Depends(get_db)):
  log_info("Scan history retrieved successfully")
  return scan_history
  except Exception as e:
- log_error(f"Error in read_scan_history endpoint: {str(e)}")
+ log_error(f"Error in read_scan_history endpoint: {str(e)}",e)
  raise HTTPException(status_code=500, detail="Internal Server Error")
routers/product.py CHANGED
@@ -17,6 +17,7 @@ from dotenv import load_dotenv
  import requests
  import json
  from services.ingredients import IngredientService
+ from services.productAnalyzerAgent import analyze_product_ingredients
  from utils.fetch_data import fetch_product_data_from_api

  load_dotenv()
@@ -75,7 +76,7 @@ async def add_target_to_vuforia(image_name: str, image_path: str) -> str:
  log_error(f"Failed to add target {image_name}: {response.text}")
  raise Exception(f"Failed to add target {image_name}: {response.text}")
  except Exception as e:
- log_error(f"Error adding target {image_name}: {e}")
+ log_error(f"Error adding target {image_name}: {e}",e)
  raise


@@ -114,7 +115,7 @@ async def add_product_to_database(
  return True
  except Exception as e:
  db.rollback()
- log_error(f"Error adding/updating markers for product {product_id} in database: {e}")
+ log_error(f"Error adding/updating markers for product {product_id} in database: {e}",e)
  raise HTTPException(status_code=500, detail=f"Error adding/updating markers for product {product_id}: {e}")


@@ -156,6 +157,23 @@ async def create_product(
  if ingredient:
  product_create_data.ingredient_ids.append(ingredient.id)

+ # Analyze product ingredients and store analysis data
+ ingredient_results = []
+ for ingredient_name in product_create_data.ingredients:
+ ingredient = ingredient_repo.get_ingredient_by_name(ingredient_name)
+ if ingredient:
+ ingredient_results.append(ingredient)
+
+ product_analysis = await analyze_product_ingredients(
+ ingredients_data=ingredient_results,
+ user_preferences={
+ "user_id": product_create_data.user_id,
+ "allergies": None,
+ "dietary_restrictions": None
+ }
+ )
+ product_create_data.ingredients_analysis = product_analysis
+
  # use repository to add product
  product_repo = ProductRepository(db)
  product = product_repo.add_product(product_create_data)
services/auth_service.py CHANGED
@@ -26,7 +26,7 @@ def verify_password(plain_password, hashed_password):
  try:
  return pwd_context.verify(plain_password, hashed_password)
  except Exception as e:
- log_error(f"Error verifying password: {str(e)}")
+ log_error(f"Error verifying password: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))

  def get_password_hash(password):
@@ -34,7 +34,7 @@ def get_password_hash(password):
  try:
  return pwd_context.hash(password)
  except Exception as e:
- log_error(f"Error hashing password: {str(e)}")
+ log_error(f"Error hashing password: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))

  def get_user(db, email: str):
@@ -42,7 +42,7 @@ def get_user(db, email: str):
  try:
  return db.query(User).filter(func.lower(User.email) == email.lower()).first()
  except Exception as e:
- log_error(f"Error getting user: {str(e)}")
+ log_error(f"Error getting user: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))

  def authenticate_user(db: Session, username: str, password: str):
@@ -77,10 +77,10 @@ async def get_current_user(db: Session = Depends(get_db), token: str = Depends(o
  raise credentials_exception
  token_data = TokenData(email=email)
  except JWTError as e:
- log_error(f"JWT error: {str(e)}")
+ log_error(f"JWT error: {str(e)}",e)
  raise credentials_exception
  except Exception as e:
- log_error(f"Error decoding token: {str(e)}")
+ log_error(f"Error decoding token: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))
  user = get_user(db, email=token_data.email)
  if user is None:
@@ -94,7 +94,7 @@ async def get_current_active_user(current_user: User = Depends(get_current_user)
  raise HTTPException(status_code=400, detail="Inactive user")
  return UserResponse.from_orm(current_user)
  except Exception as e:
- log_error(f"Error getting current active user: {str(e)}")
+ log_error(f"Error getting current active user: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))

  def create_user(db: Session, name: str, email: str, password: str):
@@ -107,5 +107,5 @@ def create_user(db: Session, name: str, email: str, password: str):
  db.refresh(db_user)
  return db_user
  except Exception as e:
- log_error(f"Error creating user: {str(e)}")
+ log_error(f"Error creating user: {str(e)}",e)
  raise HTTPException(status_code=500, detail=str(e))
services/ingredientFinderAgent.py CHANGED
@@ -3,261 +3,20 @@ from functools import partial
  import os
  import json
  import traceback
- import requests
- import pandas as pd
  from dotenv import load_dotenv
- import aiohttp
- import time
-
  from typing import Dict, Any
+
  from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain_community.tools import DuckDuckGoSearchRun
- from langchain_community.tools import WikipediaQueryRun
- from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
- from langchain_core.tools import tool

  # modular
- from logger_manager import logger
  from interfaces.ingredientModels import IngredientAnalysisResult,IngredientState
+ from logger_manager import log_debug, log_error, log_info, log_warning
+ from utils.agent_tools import search_local_db,search_web,search_wikipedia,search_open_food_facts,search_usda,search_pubchem

  # Load environment variables from .env file
  load_dotenv()

- # Load Scraped Database
- SCRAPED_DB_PATH = "data/Food_Aditives_E_numbers.csv" # Ensure this file exists
- if os.path.exists(SCRAPED_DB_PATH):
- additives_df = pd.read_csv(SCRAPED_DB_PATH)
- logger.info(f"Loaded database with {len(additives_df)} entries")
- else:
- additives_df = None
- logger.warning("Scraped database not found!")
-
-
- # Define a rate limit (adjust as needed)
- PUBCHEM_TIMEOUT = float(os.getenv("PUBCHEM_TIMEOUT", "2.0")) # seconds
- PUBCHEM_MAX_RETRIES = int(os.getenv("PUBCHEM_MAX_RETRIES", "3")) # Max retries
-
- # Rate limiting configuration
- DUCKDUCKGO_RATE_LIMIT_DELAY = float(os.getenv("DUCKDUCKGO_RATE_LIMIT_DELAY", "2.0")) # Delay in seconds
- DUCKDUCKGO_MAX_RETRIES = int(os.getenv("DUCKDUCKGO_MAX_RETRIES", "3")) # Max retries
-
-
- # Define tool functions
- @tool("search_local_db")
- def search_local_db(ingredient: str) -> Dict[str, Any]:
- """Search local database for ingredient information. E number database scrapped"""
- logger.info(f"Searching local DB for: {ingredient}")
- if additives_df is not None:
- match = additives_df[additives_df['Name of Additive'].str.contains(ingredient, case=False, na=False, regex=False)]
- if not match.empty:
- return {"source": "Local DB", "found": True, "data": match.iloc[0].to_dict()}
- return {"source": "Local DB", "found": False, "data": None}
-
- @tool("search_open_food_facts")
- def search_open_food_facts(ingredient: str) -> Dict[str, Any]:
- """Search Open Food Facts database for ingredient information."""
- logger.info(f"Searching Open Food Facts for: {ingredient}")
-
- try:
- open_food_facts_api = "https://world.openfoodfacts.org/api/v0"
- # Search for the ingredient
- search_url = f"{open_food_facts_api}/ingredient/{ingredient.lower().replace(' ', '-')}.json"
- response = requests.get(search_url, timeout=10)
-
- if response.status_code == 200:
- data = response.json()
- if data.get("status") == 1: # Successfully found
- return {
- "source": "Open Food Facts",
- "found": True,
- "data": data
- }
-
- # Try searching products containing this ingredient
- product_search_url = f"{open_food_facts_api}/search.json?ingredients_tags={ingredient.lower().replace(' ', '_')}&page_size=5"
- response = requests.get(product_search_url, timeout=10)
-
- if response.status_code == 200:
- data = response.json()
- if data.get("count") > 0:
- return {
- "source": "Open Food Facts Products",
- "found": True,
- "data": data
- }
-
- return {"source": "Open Food Facts", "found": False, "data": None}
-
- except Exception as e:
- logger.error(f"Error searching Open Food Facts: {e}")
- return {"source": "Open Food Facts", "found": False, "error": str(e)}
-
- @tool("search_usda")
- def search_usda(ingredient: str) -> Dict[str, Any]:
- """Search USDA FoodData Central for ingredient information."""
- logger.info(f"Searching USDA for: {ingredient}")
-
- try:
- usda_api = "https://api.nal.usda.gov/fdc/v1"
- usda_api_key = os.getenv("USDA_API_KEY", "DEMO_KEY") # Use DEMO_KEY if not provided
-
- # Search for the ingredient
- search_url = f"{usda_api}/foods/search"
- params = {
- "api_key": usda_api_key,
- "query": ingredient,
- "dataType": ["Foundation", "SR Legacy", "Branded"],
- "pageSize": 5
- }
-
- response = requests.get(search_url, params=params, timeout=10)
-
- if response.status_code == 200:
- data = response.json()
- if data.get("totalHits", 0) > 0:
- return {
- "source": "USDA FoodData Central",
- "found": True,
- "data": data
- }
-
- return {"source": "USDA FoodData Central", "found": False, "data": None}
-
- except Exception as e:
- logger.error(f"Error searching USDA: {e}")
- return {"source": "USDA FoodData Central", "found": False, "error": str(e)}
-
- async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
- """Asynchronously search PubChem for chemical information about the ingredient."""
- logger.info(f"Searching PubChem for: {ingredient}")
-
- try:
- pubchem_api = "https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data"
- # https://pubchem.ncbi.nlm.nih.gov/docs/pug-rest#section=Input
-
- async with aiohttp.ClientSession() as session:
- # First try to get compound information by name
- search_url = f"{pubchem_api}/compound/name/{ingredient}/JSON"
-
- async def fetch_data(url: str, timeout: int = PUBCHEM_TIMEOUT, retry_count: int = 0):
- try:
- async with session.get(url, timeout=timeout) as response:
- if response.status == 200:
- return await response.json()
- else:
- logger.warning(f"PubChem returned status: {response.status} for URL: {url}")
- return None
- except asyncio.TimeoutError:
- if retry_count < PUBCHEM_MAX_RETRIES:
- delay = (2 ** retry_count) * 5 # Exponential backoff
- logger.warning(f"PubChem timeout for URL '{url}'. Retrying in {delay:.2f} seconds (attempt {retry_count + 1}/{PUBCHEM_MAX_RETRIES})")
- await asyncio.sleep(delay)
- return await fetch_data(url, timeout, retry_count + 1) # Recursive retry
- else:
- logger.error(f"Max retries reached for PubChem timeout on URL: {url}")
- return None
- except Exception as e:
- logger.error(f"PubChem error for URL '{url}': {e}")
- return None
-
- data = await fetch_data(search_url)
-
- if data and "PC_Compounds" in data:
- compound_id = data["PC_Compounds"][0]["id"]["id"]["cid"]
-
- # Get more detailed information using the CID
- property_url = f"{pubchem_api}/compound/cid/{compound_id}/property/MolecularFormula,MolecularWeight,IUPACName,InChI,InChIKey,CanonicalSMILES/JSON"
- properties_data = await fetch_data(property_url)
-
- # Get classifications and categories
- classification_url = f"{pubchem_api}/compound/cid/{compound_id}/classification/JSON"
- classification_data = await fetch_data(classification_url)
-
- return {
- "source": "PubChem",
- "found": True,
- "data": {
- "compound_info": data,
- "properties": properties_data,
- "classification": classification_data
- }
- }
-
- return {"source": "PubChem", "found": False, "data": None}
-
- except Exception as e:
- logger.error(f"Error searching PubChem: {e}")
- return {"source": "PubChem", "found": False, "error": str(e)}
-
- @tool("search_pubchem")
- def search_pubchem(ingredient: str) -> Dict[str, Any]:
- """Search PubChem for chemical information about the ingredient."""
- # Use asyncio.run to handle the async operation from synchronous code
- try:
- # For Python 3.7+
- return asyncio.run(async_search_pubchem(ingredient))
- except RuntimeError:
- # If already in an event loop (e.g., in FastAPI)
- loop = asyncio.get_event_loop()
- return loop.run_until_complete(async_search_pubchem(ingredient))
-
- @tool("search_wikipedia")
- def search_wikipedia(ingredient: str) -> Dict[str, Any]:
- """Search Wikipedia for ingredient information."""
- logger.info(f"Searching Wikipedia for: {ingredient}")
-
- try:
- wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
- wiki_result = wikipedia.run(ingredient)
-
- if wiki_result and len(wiki_result) > 100: # Only count substantial results
- return {
- "source": "Wikipedia",
- "found": True,
- "data": wiki_result
- }
- else:
- # Try with more specific searches
- food_wiki = wikipedia.run(f"{ingredient} food additive")
- if food_wiki and len(food_wiki) > 100:
- return {
- "source": "Wikipedia",
- "found": True,
- "data": food_wiki
- }
-
- chemical_wiki = wikipedia.run(f"{ingredient} chemical compound")
- if chemical_wiki and len(chemical_wiki) > 100:
- return {
- "source": "Wikipedia",
- "found": True,
- "data": chemical_wiki
- }
-
- return {"source": "Wikipedia", "found": False, "data": None}
-
- except Exception as e:
- logger.error(f"Error searching Wikipedia: {e}")
- return {"source": "Wikipedia", "found": False, "error": str(e)}
-
- @tool("search_web")
- def search_web(ingredient: str) -> Dict[str, Any]:
- """Search web for ingredient information using DuckDuckGo."""
- logger.info(f"Searching web for: {ingredient}")
-
- try:
- duckduckgo = DuckDuckGoSearchRun()
- search_queries = [f"{ingredient} food ingredient safety", f"{ingredient} E-number food additive",f"{ingredient}'s allergic information",f"is {ingredient} vegan,vegetarian or Non-vegetarian"]
- all_results = []
- for query in search_queries:
- time.sleep(DUCKDUCKGO_RATE_LIMIT_DELAY)
- result = duckduckgo.run(query)
- if result:
- all_results.append({"query": query, "result": result})
- return {"source": "DuckDuckGo", "found": bool(all_results), "data": all_results}
- except Exception as e:
- logger.error(f"Web search error: {e}")
- return {"source": "DuckDuckGo", "found": False, "error": str(e)}

+
  def create_summary_from_source(source: Dict[str, Any]) -> str:
  """Create a meaningful summary from source data."""
@@ -341,7 +100,7 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:

  # Basic validation
  if not api_key:
- logger.error("No Google API key found in environment variables")
+ log_error("No Google API key found in environment variables")
  new_state = state.copy()
  new_state["result"] = {
  "name": state["ingredient"],
@@ -361,7 +120,7 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
  # convert_system_message_to_human=True
  )
  except Exception as e:
- logger.error(f"Error initializing LLM: {e}")
+ log_error(f"Error initializing LLM: {e}",e)
  new_state = state.copy()
  new_state["result"] = {
  "name": state["ingredient"],
@@ -374,11 +133,11 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:

  # Get sources from state
  sources_data = state["sources_data"]
- logger.info(f"Analyzing ingredient with {len(sources_data)} total sources")
+ log_info(f"Analyzing ingredient with {len(sources_data)} total sources")

  # Filter for successful sources only
  found_sources = [source for source in sources_data if source.get('found', False)]
- logger.info(f"Found {len(found_sources)} sources with usable data")
+ log_info(f"Found {len(found_sources)} sources with usable data")

  # Create default result structure
  result = {
@@ -421,12 +180,12 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
  source_texts.append(source_text)
  except Exception as e:
- logger.error(f"Error formatting source {source_name}: {e}")
+ log_error(f"Error formatting source {source_name}: {e}",e)
  source_texts.append(f"--- {source_name} ---\nError formatting data: {str(e)}")

  # Combine all source texts
  combined_data = "\n\n".join(source_texts)
- logger.info(f"Combined data for analysis:\n{combined_data[:500]}...(truncated)")
+ log_info(f"Combined data for analysis:\n{combined_data[:500]}...(truncated)")

  # Create the analysis prompt
  analysis_prompt = f"""
@@ -459,14 +218,14 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
  # Process with LLM
  try:
- logger.info("Sending analysis prompt to LLM")
+ log_info("Sending analysis prompt to LLM")
  llm_response = llm.invoke(analysis_prompt)
- logger.info("Received LLM response")
+ log_info("Received LLM response")

  # Extract and parse JSON from LLM response
  try:
  analysis_text = llm_response.content
- logger.debug(f"LLM response: {analysis_text[:500]}...(truncated)")
+ log_debug(f"LLM response: {analysis_text[:500]}...(truncated)")

  # Find JSON in the response
  start_idx = analysis_text.find('{')
@@ -485,17 +244,17 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
  "allergic_info": analysis.get("allergic_info", []),
  "diet_type": analysis.get("diet_type", "unknown"),
  })
- logger.info(f"Analysis complete - Safety Rating: {result['safety_rating']}")
+ log_info(f"Analysis complete - Safety Rating: {result['safety_rating']}")
  else:
- logger.warning("Could not find JSON in LLM response")
+ log_warning("Could not find JSON in LLM response")
  result["description"] = "Error: Failed to parse LLM analysis output."
  except json.JSONDecodeError as e:
- logger.error(f"JSON parsing error: {e}")
+ log_error(f"JSON parsing error: {e}",e)
  result["description"] = f"Error parsing analysis: {str(e)}"

  except Exception as e:
- logger.error(f"Error in LLM analysis: {e}")
- logger.error(traceback.format_exc())
+ log_error(f"Error in LLM analysis: {e}",e)
+ log_error(traceback.format_exc())
  result.update({
  "description": f"Error in analysis: {str(e)}",
  "health_effects": ["Error in analysis"],
@@ -601,7 +360,7 @@ class IngredientInfoAgentLangGraph:
  tool_name = str(tool_func).split()[0]

  source_name = tool_name.replace("search_", "").replace("_", " ").title()
- logger.info(f"Searching {source_name} for {ingredient}")
+ log_info(f"Searching {source_name} for {ingredient}")

  try:
  # Run the tool function in a thread pool to avoid blocking
@@ -609,15 +368,15 @@ class IngredientInfoAgentLangGraph:
  result = await loop.run_in_executor(None, partial(tool_func.invoke, ingredient))

  if result.get("found", False):
- logger.info(f"{source_name} found data for {ingredient}")
+ log_info(f"{source_name} found data for {ingredient}")
  return result
  except Exception as e:
- logger.error(f"Error in {source_name} search: {e}")
+ log_error(f"Error in {source_name} search: {e}",e)
  return {"source": source_name, "found": False, "error": str(e)}

  async def process_ingredient_async(self, ingredient: str) -> IngredientAnalysisResult:
  """Process an ingredient using parallel data fetching."""
- logger.info(f"=== Parallel processing for: {ingredient} ===")
+ log_info(f"=== Parallel processing for: {ingredient} ===")

  # Define all the tools to run in parallel
  tools = [
@@ -658,10 +417,10 @@ class IngredientInfoAgentLangGraph:
  # Extract the result or create a default
  if final_state.get("result"):
- logger.info(f"Analysis complete for {ingredient}")
+ log_info(f"Analysis complete for {ingredient}")
  return IngredientAnalysisResult(**final_state["result"])
  else:
- logger.info(f"No result in final state for {ingredient}, returning default")
+ log_info(f"No result in final state for {ingredient}, returning default")
  return IngredientAnalysisResult(
  name=ingredient,
  is_found=len(sources_data) > 0,
@@ -673,49 +432,49 @@ class IngredientInfoAgentLangGraph:
  Process an ingredient using direct sequential approach instead of async.
  This method provides compatibility with synchronous code.
  """
- logger.info(f"=== Sequential processing for: {ingredient} ===")
+ log_info(f"=== Sequential processing for: {ingredient} ===")

  # Initialize empty sources data
  sources_data = []

  # Run each tool directly in sequence and collect results
- logger.info(f"Searching local database for {ingredient}")
+ log_info(f"Searching local database for {ingredient}")
  result = search_local_db.invoke(ingredient)

  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"Local DB found data for {ingredient}")
+ log_info(f"Local DB found data for {ingredient}")

- logger.info(f"Searching web for {ingredient}")
+ log_info(f"Searching web for {ingredient}")
  result = search_web.invoke(ingredient)
  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"Web search found data for {ingredient}")
+ log_info(f"Web search found data for {ingredient}")

- logger.info(f"Searching Wikipedia for {ingredient}")
+ log_info(f"Searching Wikipedia for {ingredient}")
  result = search_wikipedia.invoke(ingredient)
  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"Wikipedia found data for {ingredient}")
+ log_info(f"Wikipedia found data for {ingredient}")

- logger.info(f"Searching Open Food Facts for {ingredient}")
+ log_info(f"Searching Open Food Facts for {ingredient}")
  result = search_open_food_facts.invoke(ingredient)
  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"Open Food Facts found data for {ingredient}")
+ log_info(f"Open Food Facts found data for {ingredient}")


- logger.info(f"Searching USDA for {ingredient}")
+ log_info(f"Searching USDA for {ingredient}")
  result = search_usda.invoke(ingredient)
  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"USDA found data for {ingredient}")
+ log_info(f"USDA found data for {ingredient}")

- logger.info(f"Searching PubChem for {ingredient}")
+ log_info(f"Searching PubChem for {ingredient}")
  result = search_pubchem.invoke(ingredient)
  if result.get("found", False):
  sources_data.append(result)
- logger.info(f"PubChem found data for {ingredient}")
+ log_info(f"PubChem found data for {ingredient}")

  state = IngredientState(ingredient=ingredient,
  sources_data=sources_data,
@@ -727,11 +486,11 @@ class IngredientInfoAgentLangGraph:
  # Extract the result or create a default
  if final_state.get("result"):
- logger.info(f"Analysis complete for {ingredient}")
+ log_info(f"Analysis complete for {ingredient}")

  return IngredientAnalysisResult(**final_state["result"])
  else:
- logger.info(f"No result in final state for {ingredient}, returning default")
+ log_info(f"No result in final state for {ingredient}, returning default")
  return IngredientAnalysisResult(
  name=ingredient,
  is_found=len(sources_data) > 0,
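
The tool functions deleted above are now imported from utils/agent_tools, a module not included in this diff. Presumably the bodies move over unchanged, so each tool keeps the LangChain @tool shape sketched here (stub body only, for illustration):

    # Sketch of one entry in the assumed utils/agent_tools module; the real
    # body is the search_local_db implementation removed above.
    from typing import Any, Dict
    from langchain_core.tools import tool

    @tool("search_local_db")
    def search_local_db(ingredient: str) -> Dict[str, Any]:
        """Search the local additives database for ingredient information."""
        return {"source": "Local DB", "found": False, "data": None}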
services/productAnalyzerAgent.py CHANGED
@@ -3,7 +3,7 @@ from typing import List, Dict, Any, Optional
3
  from dotenv import load_dotenv
4
  from langchain_core.messages import HumanMessage
5
  from langchain_google_genai import ChatGoogleGenerativeAI
6
- from logger_manager import logger
7
  from interfaces.ingredientModels import IngredientAnalysisResult
8
 
9
  # Load environment variables
@@ -17,7 +17,7 @@ async def analyze_product_ingredients(
17
  Analyze multiple ingredients to provide a comprehensive product analysis
18
  for AR display, considering user preferences and dietary restrictions.
19
  """
20
- logger.info(f"Analyzing product with {len(ingredients_data)} ingredients")
21
 
22
  # Initialize LLM
23
  api_key = os.getenv("LLM_API_KEY")
@@ -31,6 +31,7 @@ async def analyze_product_ingredients(
31
 
32
  # Prepare ingredient data for the prompt
33
  ingredients_summary = []
 
34
  for i, ingredient in enumerate(ingredients_data):
35
  ingredient_info = f"""
36
  Ingredient {i+1}: {ingredient.name}
@@ -41,6 +42,7 @@ Health Effects: {', '.join(ingredient.health_effects) if ingredient.health_effec
41
  Description: {ingredient.description[:200] + '...' if len(ingredient.description) > 200 else ingredient.description}
42
  """
43
  ingredients_summary.append(ingredient_info)
 
44
 
45
  # Add user preferences context if available
46
  user_context = ""
@@ -91,9 +93,14 @@ analysis that would be helpful for a consumer viewing this in an AR application.
91
  }}
92
 
93
  Only include factual information based on the provided data. If information is unavailable for any field, use appropriate default values. If the data required is too obvious then give appropriate answer.
 
 
 
 
 
94
  """
95
 
96
- logger.info("Sending product analysis prompt to LLM")
97
 
98
  try:
99
  # Process with LLM
@@ -111,29 +118,32 @@ Only include factual information based on the provided data. If information is u
111
  if json_match:
112
  try:
113
  analysis = json.loads(json_match.group(0))
114
- logger.info("Successfully parsed product analysis")
 
115
  return analysis
116
  except json.JSONDecodeError as e:
117
- logger.error(f"JSON parsing error: {e}")
118
  # Return a simplified analysis on error
119
  return {
120
  "overall_safety_score": calculate_average_safety(ingredients_data),
121
  "error": "Failed to parse complete analysis",
122
  "ingredient_count": len(ingredients_data),
123
- "key_takeaway": "Analysis error occurred, please check individual ingredients"
 
124
  }
125
  else:
126
- logger.error("Could not find JSON in LLM response")
127
  return {
128
  "overall_safety_score": calculate_average_safety(ingredients_data),
129
  "error": "Failed to generate structured analysis",
130
- "ingredient_count": len(ingredients_data)
 
131
  }
132
 
133
  except Exception as e:
134
- logger.error(f"Error in product analysis: {e}")
135
  # Fallback analysis based on simple calculations
136
- return generate_fallback_analysis(ingredients_data)
137
 
138
 
139
  def calculate_average_safety(ingredients_data: List[IngredientAnalysisResult]) -> float:
@@ -144,7 +154,7 @@ def calculate_average_safety(ingredients_data: List[IngredientAnalysisResult]) -
144
  return round(sum(safety_scores) / len(safety_scores), 1)
145
 
146
 
147
- def generate_fallback_analysis(ingredients_data: List[IngredientAnalysisResult]) -> Dict[str, Any]:
148
  """Generate a basic analysis when LLM processing fails."""
149
  # Extract known allergens
150
  allergens = []
@@ -176,5 +186,6 @@ def generate_fallback_analysis(ingredients_data: List[IngredientAnalysisResult])
176
  "benefits": [],
177
  "concerns": ["Analysis system encountered an error, please check individual ingredients"]
178
  },
179
- "key_takeaway": f"Product has {len(ingredients_data)} ingredients with average safety score of {safety_score}/10"
180
- }
 
 
 from dotenv import load_dotenv
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
+from logger_manager import log_error, log_info
 from interfaces.ingredientModels import IngredientAnalysisResult

 # Load environment variables

     Analyze multiple ingredients to provide a comprehensive product analysis
     for AR display, considering user preferences and dietary restrictions.
     """
+    log_info(f"Analyzing product with {len(ingredients_data)} ingredients")

     # Initialize LLM
     api_key = os.getenv("LLM_API_KEY")

     # Prepare ingredient data for the prompt
     ingredients_summary = []
+    ingredient_ids = []
     for i, ingredient in enumerate(ingredients_data):
         ingredient_info = f"""
         Ingredient {i+1}: {ingredient.name}

         Description: {ingredient.description[:200] + '...' if len(ingredient.description) > 200 else ingredient.description}
         """
         ingredients_summary.append(ingredient_info)
+        ingredient_ids.append(ingredient.id)

     # Add user preferences context if available
     user_context = ""

     }}

     Only include factual information based on the provided data. If information is unavailable for any field, use appropriate default values. If the data required is too obvious then give appropriate answer.
+    IMPORTANT: Ensure your response is valid JSON with double quotes (") around property names and string values.
+    Avoid single quotes (') for JSON properties and values.
+    Ensure all elements in arrays and objects are separated by commas, and don't include trailing commas.
+    Also strictly follow the JSON format in your response.
+
     """

+    log_info("Sending product analysis prompt to LLM")

     try:
         # Process with LLM

         if json_match:
             try:
                 analysis = json.loads(json_match.group(0))
+                analysis["ingredient_ids"] = ingredient_ids
+                log_info("Successfully parsed product analysis")
                 return analysis
             except json.JSONDecodeError as e:
+                log_error(f"JSON parsing error: {e}", e)
                 # Return a simplified analysis on error
                 return {
                     "overall_safety_score": calculate_average_safety(ingredients_data),
                     "error": "Failed to parse complete analysis",
                     "ingredient_count": len(ingredients_data),
+                    "key_takeaway": "Analysis error occurred, please check individual ingredients",
+                    "ingredient_ids": ingredient_ids
                 }
         else:
+            log_error("Could not find JSON in LLM response")
             return {
                 "overall_safety_score": calculate_average_safety(ingredients_data),
                 "error": "Failed to generate structured analysis",
+                "ingredient_count": len(ingredients_data),
+                "ingredient_ids": ingredient_ids
             }

     except Exception as e:
+        log_error(f"Error in product analysis: {e}", e)
         # Fallback analysis based on simple calculations
+        return generate_fallback_analysis(ingredients_data, ingredient_ids)


 def calculate_average_safety(ingredients_data: List[IngredientAnalysisResult]) -> float:

     return round(sum(safety_scores) / len(safety_scores), 1)


+def generate_fallback_analysis(ingredients_data: List[IngredientAnalysisResult], ingredient_ids: List[int]) -> Dict[str, Any]:
     """Generate a basic analysis when LLM processing fails."""
     # Extract known allergens
     allergens = []

             "benefits": [],
             "concerns": ["Analysis system encountered an error, please check individual ingredients"]
         },
+        "key_takeaway": f"Product has {len(ingredients_data)} ingredients with average safety score of {safety_score}/10",
+        "ingredient_ids": ingredient_ids
+    }
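
A note on the fallback path above: everything hinges on pulling a single JSON object out of the raw LLM reply. The json_match regex itself sits in an earlier hunk, so the pattern in this minimal sketch is an assumption about how it is likely built, not the repository's code:

    import json
    import re
    from typing import Any, Dict, Optional

    def extract_llm_json(raw: str) -> Optional[Dict[str, Any]]:
        """Best-effort extraction of one top-level {...} block from an LLM reply."""
        # Greedy DOTALL match is an assumed stand-in for the upstream json_match logic.
        match = re.search(r"\{.*\}", raw, re.DOTALL)
        if match is None:
            return None  # caller falls back to its structured error dict
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            return None  # same outcome as the except branch in the hunk above

Either None outcome maps to one of the two fallback dictionaries in the diff, both of which now carry ingredient_ids so the AR client can still resolve individual ingredients.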
services/scan_history.py CHANGED
@@ -19,7 +19,7 @@ def record_scan(db: Session, user_id: int, product_id: int) -> ScanHistory:
         log_info("Scan recorded successfully")
         return scan_entry
     except Exception as e:
-        log_error(f"Error recording scan: {str(e)}")
+        log_error(f"Error recording scan: {str(e)}", e)
         raise HTTPException(status_code=500, detail="Internal Server Error")

 def get_scan_history(db: Session, user_id: int) -> list[ScanHistory]:
@@ -32,5 +32,5 @@ def get_scan_history(db: Session, user_id: int) -> list[ScanHistory]:
         log_info("Scan history retrieved successfully")
         return scan_history
     except Exception as e:
-        log_error(f"Error getting scan history: {str(e)}")
+        log_error(f"Error getting scan history: {str(e)}", e)
         raise HTTPException(status_code=500, detail="Internal Server Error")
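
The recurring change in this file (and in the analysis module above) is passing the caught exception as a second argument to log_error. logger_manager is not part of this diff, so the sketch below only illustrates one plausible signature that would make these call sites work; treat every name in it as an assumption:

    import logging
    from typing import Optional

    logger = logging.getLogger("food_analyzer")  # assumed logger name

    def log_error(message: str, exc: Optional[BaseException] = None) -> None:
        """Hypothetical shape of logger_manager.log_error."""
        # exc_info accepts an exception instance; when one is supplied, the
        # standard library attaches its traceback to the record, which is
        # presumably what the new log_error(f"...", e) call sites rely on.
        logger.error(message, exc_info=exc)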
templates/api_docs.html ADDED
@@ -0,0 +1,186 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+    <title>FoodAnalyzer API Documentation</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            margin: 20px;
+            line-height: 1.6;
+        }
+
+        h1,
+        h2,
+        h3 {
+            color: #333;
+        }
+
+        .endpoint {
+            margin-bottom: 15px;
+            border-left: 4px solid #ddd;
+            padding-left: 15px;
+        }
+
+        .method {
+            font-weight: bold;
+            color: #0066cc;
+        }
+
+        code {
+            background: #f4f4f4;
+            padding: 2px 5px;
+            border-radius: 3px;
+            font-family: monospace;
+        }
+
+        pre {
+            background: #f9f9f9;
+            padding: 10px;
+            border-radius: 5px;
+            overflow-x: auto;
+        }
+    </style>
+</head>
+
+<body>
+    <h1>FoodAnalyzer API Documentation</h1>
+
+    <h2>Authentication Endpoints</h2>
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a href="/api/auth/register">/api/auth/register</a></p>
+        <p>Register a new user in the system</p>
+        <p>Request body:</p>
+        <pre><code>{
+  "name": "John Doe",
+  "email": "john@example.com",
+  "password": "securepassword"
+}</code></pre>
+        <p>Response:</p>
+        <pre><code>{
+  "access_token": "eyJhbGciOiJIUzI1NiIs...",
+  "token_type": "bearer"
+}</code></pre>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a href="/api/auth/login">/api/auth/login</a></p>
+        <p>Login to get an access token (valid for 4 weeks)</p>
+        <p>Form data: username (email), password</p>
+        <p>Response:</p>
+        <pre><code>{
+  "access_token": "eyJhbGciOiJIUzI1NiIs...",
+  "token_type": "bearer"
+}</code></pre>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">GET</span> <a href="/api/auth/user">/api/auth/user</a></p>
+        <p>Get current authenticated user's information</p>
+        <p>Headers: Authorization: Bearer {token}</p>
+        <p>Response:</p>
+        <pre><code>{
+  "name": "John Doe",
+  "email": "john@example.com"
+}</code></pre>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">GET</span> <a href="/api/auth/user/email">/api/auth/user/email</a></p>
+        <p>Get user information by email</p>
+        <p>Query parameters: email</p>
+    </div>
+
+    <h2>Analysis Endpoints</h2>
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a href="/api/analyze/process_image">/api/analyze/process_image</a></p>
+        <p>Upload and process an image using YOLO object detection</p>
+        <p>Form data: image (file)</p>
+        <p>Response:</p>
+        <pre><code>{
+  "message": "Product extracted successfully",
+  "product_image_name": "f7e5d4c3-b2a1-4f9e-8d7c-6e5f4d3a2b1c.jpg"
+}</code></pre>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">GET</span> <a
+                href="/api/analyze/get_image/{image_name}">/api/analyze/get_image/{image_name}</a></p>
+        <p>Retrieve a processed image by its name</p>
+        <p>Path parameters: image_name</p>
+        <p>Response: Image file (JPEG)</p>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a
+                href="/api/analyze/process_ingredient">/api/analyze/process_ingredient</a></p>
+        <p>Process a single ingredient and get detailed analysis</p>
+        <p>Request body:</p>
+        <pre><code>{
+  "name": "Monosodium Glutamate"
+}</code></pre>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a
+                href="/api/analyze/process_product_ingredients">/api/analyze/process_product_ingredients</a></p>
+        <p>Process multiple ingredients of a product</p>
+        <p>Headers: Authorization: Bearer {token}</p>
+        <p>Request body:</p>
+        <pre><code>{
+  "ingredients": ["Sugar", "Salt", "Monosodium Glutamate"],
+  "user_id": 1
+}</code></pre>
+    </div>
+
+    <h2>Product Endpoints</h2>
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a href="/api/product/add">/api/product/add</a></p>
+        <p>Add a new product with ingredients and images</p>
+        <p>Request body:</p>
+        <pre><code>{
+  "name": "Maggi 2-Minute Noodles",
+  "image_names": ["maggi_front.jpg", "maggi_ingredients.jpg"],
+  "ingredients": [
+    "Wheat Flour",
+    "Palm Oil",
+    "Salt",
+    "Monosodium Glutamate"
+  ],
+  "ingredients_count": 4,
+  "overall_safety_score": 6.5,
+  "suitable_diet_types": ["Vegetarian"],
+  "allergy_warnings": ["Contains Wheat (Gluten)"],
+  "usage_recommendations": "Consume in moderation",
+  "health_insights": [
+    "High sodium content may contribute to high blood pressure"
+  ],
+  "ingredient_interactions": [
+    "No significant harmful interactions between ingredients"
+  ],
+  "key_takeaway": "Convenient food option but should be consumed occasionally",
+  "user_id": 1,
+  "timestamp": "2025-04-27T15:30:00Z"
+}</code></pre>
+    </div>
+
+    <h2>History Endpoints</h2>
+    <div class="endpoint">
+        <p><span class="method">POST</span> <a href="/api/history/scan">/api/history/scan</a></p>
+        <p>Record a new product scan in user history</p>
+    </div>
+
+    <div class="endpoint">
+        <p><span class="method">GET</span> <a href="/api/history/user/{user_id}">/api/history/user/{user_id}</a></p>
+        <p>Retrieve scan history for a specific user</p>
+        <p>Path parameters: user_id</p>
+    </div>
+
+    <h2>Authentication</h2>
+    <p>Protected endpoints require JWT token in Authorization header:</p>
+    <p><code>Authorization: Bearer eyJhbGciOiJIUzI1NiIs...</code></p>
+    <p>Tokens are valid for 4 weeks after login.</p>
+</body>
+
+</html>
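
As a quick smoke test of the flow this page documents, the sketch below logs in and calls one protected endpoint. The base URL is an assumption (the page never states a host); the paths, form fields, and JSON bodies are taken from the documentation above:

    import requests

    BASE_URL = "http://localhost:8000"  # assumed host/port, not stated in the docs

    # /api/auth/login expects form data (username = email, password)
    login = requests.post(
        f"{BASE_URL}/api/auth/login",
        data={"username": "john@example.com", "password": "securepassword"},
        timeout=10,
    )
    token = login.json()["access_token"]

    # Protected endpoint: analyze a product's ingredient list
    analysis = requests.post(
        f"{BASE_URL}/api/analyze/process_product_ingredients",
        headers={"Authorization": f"Bearer {token}"},
        json={"ingredients": ["Sugar", "Salt", "Monosodium Glutamate"], "user_id": 1},
        timeout=120,  # LLM-backed analysis can be slow
    )
    print(analysis.json())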
utils/agent_tools.py ADDED
@@ -0,0 +1,259 @@
+import asyncio
+import os
+
+import pandas as pd
+from dotenv import load_dotenv
+
+from typing import Dict, Any
+
+# modular logging helpers
+from logger_manager import log_error, log_info, log_warning
+
+import aiohttp
+import time
+import requests
+
+from langchain_community.tools import DuckDuckGoSearchRun
+from langchain_community.tools import WikipediaQueryRun
+from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
+from langchain_core.tools import tool
+
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Load scraped database
+SCRAPED_DB_PATH = "data/Food_Aditives_E_numbers.csv"  # Ensure this file exists
+if os.path.exists(SCRAPED_DB_PATH):
+    additives_df = pd.read_csv(SCRAPED_DB_PATH)
+    log_info(f"Loaded database with {len(additives_df)} entries")
+else:
+    additives_df = None
+    log_warning("Scraped database not found!")
+
+
+# PubChem request configuration (adjust as needed)
+PUBCHEM_TIMEOUT = float(os.getenv("PUBCHEM_TIMEOUT", "2.0"))  # seconds
+PUBCHEM_MAX_RETRIES = int(os.getenv("PUBCHEM_MAX_RETRIES", "3"))  # Max retries
+
+# Rate limiting configuration
+DUCKDUCKGO_RATE_LIMIT_DELAY = float(os.getenv("DUCKDUCKGO_RATE_LIMIT_DELAY", "2.0"))  # Delay in seconds
+DUCKDUCKGO_MAX_RETRIES = int(os.getenv("DUCKDUCKGO_MAX_RETRIES", "3"))  # Max retries
+
+
+# Define tool functions
+@tool("search_local_db")
+def search_local_db(ingredient: str) -> Dict[str, Any]:
+    """Search the local database (scraped E-number additive data) for ingredient information."""
+    log_info(f"Searching local DB for: {ingredient}")
+    if additives_df is not None:
+        match = additives_df[additives_df['Name of Additive'].str.contains(ingredient, case=False, na=False, regex=False)]
+        if not match.empty:
+            return {"source": "Local DB", "found": True, "data": match.iloc[0].to_dict()}
+    return {"source": "Local DB", "found": False, "data": None}
+
+
+@tool("search_open_food_facts")
+def search_open_food_facts(ingredient: str) -> Dict[str, Any]:
+    """Search Open Food Facts database for ingredient information."""
+    log_info(f"Searching Open Food Facts for: {ingredient}")
+
+    try:
+        open_food_facts_api = "https://world.openfoodfacts.org/api/v0"
+        # Search for the ingredient
+        search_url = f"{open_food_facts_api}/ingredient/{ingredient.lower().replace(' ', '-')}.json"
+        response = requests.get(search_url, timeout=10)
+
+        if response.status_code == 200:
+            data = response.json()
+            if data.get("status") == 1:  # Successfully found
+                return {
+                    "source": "Open Food Facts",
+                    "found": True,
+                    "data": data
+                }
+
+        # Try searching products containing this ingredient
+        product_search_url = f"{open_food_facts_api}/search.json?ingredients_tags={ingredient.lower().replace(' ', '_')}&page_size=5"
+        response = requests.get(product_search_url, timeout=10)
+
+        if response.status_code == 200:
+            data = response.json()
+            if data.get("count") > 0:
+                return {
+                    "source": "Open Food Facts Products",
+                    "found": True,
+                    "data": data
+                }
+
+        return {"source": "Open Food Facts", "found": False, "data": None}
+
+    except Exception as e:
+        log_error(f"Error searching Open Food Facts: {e}", e)
+        return {"source": "Open Food Facts", "found": False, "error": str(e)}
+
+
+@tool("search_usda")
+def search_usda(ingredient: str) -> Dict[str, Any]:
+    """Search USDA FoodData Central for ingredient information."""
+    log_info(f"Searching USDA for: {ingredient}")
+
+    try:
+        usda_api = "https://api.nal.usda.gov/fdc/v1"
+        usda_api_key = os.getenv("USDA_API_KEY", "DEMO_KEY")  # Use DEMO_KEY if not provided
+
+        # Search for the ingredient
+        search_url = f"{usda_api}/foods/search"
+        params = {
+            "api_key": usda_api_key,
+            "query": ingredient,
+            "dataType": ["Foundation", "SR Legacy", "Branded"],
+            "pageSize": 5
+        }
+
+        response = requests.get(search_url, params=params, timeout=10)
+
+        if response.status_code == 200:
+            data = response.json()
+            if data.get("totalHits", 0) > 0:
+                return {
+                    "source": "USDA FoodData Central",
+                    "found": True,
+                    "data": data
+                }
+
+        return {"source": "USDA FoodData Central", "found": False, "data": None}
+
+    except Exception as e:
+        log_error(f"Error searching USDA: {e}", e)
+        return {"source": "USDA FoodData Central", "found": False, "error": str(e)}
+
+
+async def async_search_pubchem(ingredient: str) -> Dict[str, Any]:
+    """Asynchronously search PubChem for chemical information about the ingredient."""
+    log_info(f"Searching PubChem for: {ingredient}")
+
+    try:
+        pubchem_api = "https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data"
+        # https://pubchem.ncbi.nlm.nih.gov/docs/pug-rest#section=Input
+
+        async with aiohttp.ClientSession() as session:
+            # First try to get compound information by name
+            search_url = f"{pubchem_api}/compound/name/{ingredient}/JSON"
+
+            async def fetch_data(url: str, timeout: float = PUBCHEM_TIMEOUT, retry_count: int = 0):
+                try:
+                    async with session.get(url, timeout=timeout) as response:
+                        if response.status == 200:
+                            return await response.json()
+                        else:
+                            log_warning(f"PubChem returned status: {response.status} for URL: {url}")
+                            return None
+                except asyncio.TimeoutError:
+                    if retry_count < PUBCHEM_MAX_RETRIES:
+                        delay = (2 ** retry_count) * 5  # Exponential backoff
+                        log_warning(f"PubChem timeout for URL '{url}'. Retrying in {delay:.2f} seconds (attempt {retry_count + 1}/{PUBCHEM_MAX_RETRIES})")
+                        await asyncio.sleep(delay)
+                        return await fetch_data(url, timeout, retry_count + 1)  # Recursive retry
+                    else:
+                        log_error(f"Max retries reached for PubChem timeout on URL: {url}")
+                        return None
+                except Exception as e:
+                    log_error(f"PubChem error for URL '{url}': {e}", e)
+                    return None
+
+            data = await fetch_data(search_url)
+
+            if data and "PC_Compounds" in data:
+                compound_id = data["PC_Compounds"][0]["id"]["id"]["cid"]
+
+                # Get more detailed information using the CID
+                property_url = f"{pubchem_api}/compound/cid/{compound_id}/property/MolecularFormula,MolecularWeight,IUPACName,InChI,InChIKey,CanonicalSMILES/JSON"
+                properties_data = await fetch_data(property_url)
+
+                # Get classifications and categories
+                classification_url = f"{pubchem_api}/compound/cid/{compound_id}/classification/JSON"
+                classification_data = await fetch_data(classification_url)
+
+                return {
+                    "source": "PubChem",
+                    "found": True,
+                    "data": {
+                        "compound_info": data,
+                        "properties": properties_data,
+                        "classification": classification_data
+                    }
+                }
+
+            return {"source": "PubChem", "found": False, "data": None}
+
+    except Exception as e:
+        log_error(f"Error searching PubChem: {e}", e)
+        return {"source": "PubChem", "found": False, "error": str(e)}
+
+
+@tool("search_pubchem")
+def search_pubchem(ingredient: str) -> Dict[str, Any]:
+    """Search PubChem for chemical information about the ingredient."""
+    # Use asyncio.run to handle the async operation from synchronous code
+    try:
+        # For Python 3.7+
+        return asyncio.run(async_search_pubchem(ingredient))
+    except RuntimeError:
+        # If already in an event loop (e.g., in FastAPI)
+        loop = asyncio.get_event_loop()
+        return loop.run_until_complete(async_search_pubchem(ingredient))
+
+
+@tool("search_wikipedia")
+def search_wikipedia(ingredient: str) -> Dict[str, Any]:
+    """Search Wikipedia for ingredient information."""
+    log_info(f"Searching Wikipedia for: {ingredient}")
+
+    try:
+        wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
+        wiki_result = wikipedia.run(ingredient)
+
+        if wiki_result and len(wiki_result) > 100:  # Only count substantial results
+            return {
+                "source": "Wikipedia",
+                "found": True,
+                "data": wiki_result
+            }
+        else:
+            # Try with more specific searches
+            food_wiki = wikipedia.run(f"{ingredient} food additive")
+            if food_wiki and len(food_wiki) > 100:
+                return {
+                    "source": "Wikipedia",
+                    "found": True,
+                    "data": food_wiki
+                }
+
+            chemical_wiki = wikipedia.run(f"{ingredient} chemical compound")
+            if chemical_wiki and len(chemical_wiki) > 100:
+                return {
+                    "source": "Wikipedia",
+                    "found": True,
+                    "data": chemical_wiki
+                }
+
+        return {"source": "Wikipedia", "found": False, "data": None}
+
+    except Exception as e:
+        log_error(f"Error searching Wikipedia: {e}", e)
+        return {"source": "Wikipedia", "found": False, "error": str(e)}
+
+
+@tool("search_web")
+def search_web(ingredient: str) -> Dict[str, Any]:
+    """Search the web for ingredient information using DuckDuckGo."""
+    log_info(f"Searching web for: {ingredient}")
+
+    try:
+        duckduckgo = DuckDuckGoSearchRun()
+        search_queries = [
+            f"{ingredient} food ingredient safety",
+            f"{ingredient} E-number food additive",
+            f"{ingredient}'s allergic information",
+            f"is {ingredient} vegan, vegetarian or non-vegetarian",
+        ]
+        all_results = []
+        for query in search_queries:
+            time.sleep(DUCKDUCKGO_RATE_LIMIT_DELAY)
+            result = duckduckgo.run(query)
+            if result:
+                all_results.append({"query": query, "result": result})
+        return {"source": "DuckDuckGo", "found": bool(all_results), "data": all_results}
+    except Exception as e:
+        log_error(f"Web search error: {e}", e)
+        return {"source": "DuckDuckGo", "found": False, "error": str(e)}